content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# Demo script: row/column summaries of city weather matrices using
# apply(), a for loop, and lapply().
# Assumes each CSV holds one weather metric per row and one month per
# column (the 12-column indexing below depends on this) -- TODO confirm.
getwd()
# NOTE(review): hard-coded absolute path; breaks on any other machine.
setwd("/home/uday/Desktop/Weather-Data/Weather")
chicago <- read.csv("Chicago-F.csv")
chicago
# Re-read using the first CSV column as row names (the metric labels).
chicago <- read.csv("Chicago-F.csv",row.names=1)
chicago
houston<- read.csv("Houston-F.csv",row.names=1)
newyork <- read.csv("NewYork-F.csv",row.names = 1)
sanfrancisco <- read.csv("SanFrancisco-F.csv",row.names = 1)
is.data.frame(chicago)
#convert data frame to matrix so apply() sees a homogeneous numeric array
chicago<- as.matrix(chicago)
houston<- as.matrix(houston)
newyork <- as.matrix(newyork)
sanfrancisco <- as.matrix(sanfrancisco)
# Bundle the four matrices in a named list so lapply() can visit each.
weather <- list(chicago=chicago,newyork=newyork,houston=houston,sanfrancisco=sanfrancisco)
weather
weather[3]        # single bracket: sub-list of length one
weather[[3]]      # double bracket: the houston matrix itself
weather$houston   # same as weather[[3]]
#Using apply(X, MARGIN, FUN): MARGIN 1 = rows, 2 = columns
apply(chicago,1,mean)
#analyse one city
apply(chicago,1,max)
apply(chicago,1,min)
#for practice: the same summaries columnwise (per month)
apply(chicago,2,max)
apply(chicago,2,min)
apply(chicago,2,mean)
apply(chicago,1,mean)
apply(newyork,1,mean)
apply(houston,1,mean)
apply(sanfrancisco,1,mean)
#find the mean of every row using a loop (manual apply(..., 1, mean))
output <- NULL
# NOTE(review): 1:5 hard-codes the row count -- TODO confirm the CSVs
# really have 5 metric rows; seq_len(nrow(chicago)) would be safer.
for(i in 1:5)
{
output[i]<- mean(chicago[i,])
}
output
apply(chicago,1,mean)
#using lapply: apply a function to every element of the weather list
?lapply
chicago
#transpose one matrix, then all of them
t(chicago)
lapply(weather,t)
#another way (written out by hand):
#list(t(weather$chicago),t(weather$newyork),t(weather$sanfrancisco),t(weather$houston))
mylist <- lapply(weather,t)
mylist
# Extra lapply() arguments are forwarded to FUN: append a dummy 13th row.
rbind(chicago,newrow=1:12)
lapply(weather,rbind,newrow=1:12)
#example 3
?rowMeans
rowMeans(chicago) #identical to apply(chicago,1,mean)
lapply(weather,rowMeans)
weather
# Single-bracket indexing on a matrix is linear (column-major): [1][1]
# is just the first stored element, not a (row, col) lookup.
weather$chicago[1][1]
weather[[1]][1][1]
# Pass the subscript operator "[" as FUN; extra args become the indices.
lapply(weather,"[",1,1)   # element (1,1) of every matrix
lapply(weather,"[",1,)    # first row of every matrix
lapply(weather,"[",,3)    # third column of every matrix
#adding our own (anonymous) function
lapply(weather,rowMeans)
lapply(weather,function(x) x[1,])
lapply(weather,function(x) x[5,])
lapply(weather,function(x) x[,12])
lapply(weather,function(z) z[1,]- z[2,])
|
/R/DONR/CA-3/BYOD PRAC 3/BYOD PRAC 3/SET_A_EVEN/demo11.R
|
no_license
|
ekanshbari/R-programs
|
R
| false
| false
| 1,756
|
r
|
getwd()
setwd("/home/uday/Desktop/Weather-Data/Weather")
chicago <- read.csv("Chicago-F.csv")
chicago
chicago <- read.csv("Chicago-F.csv",row.names=1)
chicago
houston<- read.csv("Houston-F.csv",row.names=1)
newyork <- read.csv("NewYork-F.csv",row.names = 1)
sanfrancisco <- read.csv("SanFrancisco-F.csv",row.names = 1)
is.data.frame(chicago)
#convert data frame to matrix
chicago<- as.matrix(chicago)
houston<- as.matrix(houston)
newyork <- as.matrix(newyork)
sanfrancisco <- as.matrix(sanfrancisco)
weather <- list(chicago=chicago,newyork=newyork,houston=houston,sanfrancisco=sanfrancisco)
weather
weather[3]
weather[[3]]
weather$houston
#Using Apply
apply(chicago,1,mean)
#analyse one city
apply(chicago,1,max)
apply(chicago,1,min)
#for practice columnwise
apply(chicago,2,max)
apply(chicago,2,min)
apply(chicago,2,mean)
apply(chicago,1,mean)
apply(newyork,1,mean)
apply(houston,1,mean)
apply(sanfrancisco,1,mean)
#find the mean of every row using loop
output <- NULL
for(i in 1:5)
{
output[i]<- mean(chicago[i,])
}
output
apply(chicago,1,mean)
#using lapply
?lapply
chicago
#transpose
t(chicago)
lapply(weather,t)
#another way
#list(t(weather$chicago),t(weather$newyork),t(weather$sanfrancisco),t(weather$houston))
mylist <- lapply(weather,t)
mylist
rbind(chicago,newrow=1:12)
lapply(weather,rbind,newrow=1:12)
#example 3
?rowMeans
rowMeans(chicago) #identical to apply(chicago,1,mean)
lapply(weather,rowMeans)
weather
weather$chicago[1][1]
weather[[1]][1][1]
lapply(weather,"[",1,1)
lapply(weather,"[",1,)
lapply(weather,"[",,3)
#adding our own function
lapply(weather,rowMeans)
lapply(weather,function(x) x[1,])
lapply(weather,function(x) x[5,])
lapply(weather,function(x) x[,12])
lapply(weather,function(z) z[1,]- z[2,])
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kaplan_meier.R
\name{my.draw.kaplan}
\alias{my.draw.kaplan}
\title{Title}
\usage{
my.draw.kaplan(coef.l, xdata, ydata, plot.title = "", no.plot = FALSE)
}
\arguments{
\item{coef.l}{}

\item{xdata}{}

\item{ydata}{}

\item{plot.title}{}

\item{no.plot}{}
}
\description{
Title
}
|
/man/my.draw.kaplan.Rd
|
no_license
|
averissimo/glmSparseNetPaper
|
R
| false
| true
| 287
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kaplan_meier.R
\name{my.draw.kaplan}
\alias{my.draw.kaplan}
\title{Title}
\usage{
my.draw.kaplan(coef.l, xdata, ydata, plot.title = "", no.plot = FALSE)
}
\arguments{
\item{xdata.ix}{}
}
\description{
Title
}
|
# Course exercise: descriptive statistics and plots of exam grades
# ("notas"), followed by a birthday-problem Monte Carlo (functions
# defined just below this section).
library(readxl)
library(ggplot2)
library(dplyr)
# Grades workbook; assumes columns `solemne` (midterm) and `examen`
# (final exam) -- TODO confirm against nombres60.xlsx.
notas <- read_excel("nombres60.xlsx")
notas
# Option 1: base graphics
plot(notas$solemne, notas$examen)
boxplot(notas$solemne)
boxplot(notas$examen)
# Option 2: ggplot2
ggplot(notas) +
geom_point(aes(x = solemne, y = examen))
ggplot(notas) +
geom_boxplot(aes(y = solemne))
ggplot(notas) +
geom_boxplot(aes(y = examen))
# Mean of the midterm ("solemne") grades
mean(notas$solemne)
# Standard deviation of the final exam grades
sd(notas$examen)
# 0.25 and 0.75 quantiles of the final exam grades
quantile(notas$examen, 0.25)
quantile(notas$examen, 0.75)
# Passing grades ("azul", >= 4) on the exam, option 1: dplyr::filter
notas_exazul <- filter(notas, examen >= 4)
# Option 2: base subsetting (overwrites the previous result identically)
notas_exazul <- notas[notas$examen >= 4, ]
# How many passing grades were there on the exam?
sum(notas$examen >= 4)
# What proportion of exam grades were passing?
mean(notas$examen >= 4)
# Probability: the birthday problem
# for a random group of n people
mismo_cumple <- function(n){
  # TRUE when a group of n people with uniformly random birthdays
  # (365 days, leap years ignored) contains at least one shared
  # birthday; FALSE otherwise.
  cumpleanos <- sample(1:365, n, replace = TRUE)
  length(unique(cumpleanos)) < n
}
compute_prob <- function(n, B=10000){
  # Monte Carlo estimate of the shared-birthday probability for a group
  # of n people, averaged over B simulated groups (default 10000).
  hits <- vapply(seq_len(B), function(sim) mismo_cumple(n), logical(1))
  mean(hits)
}
# Sweep group sizes 1..60 and report which sizes give an estimated
# shared-birthday probability of at least 1/3 (the first element of the
# printed result is the smallest such group).
personas <- 1:60
prob <- sapply(personas, compute_prob)
personas[prob >= 1/3]
|
/2019_2/control_3_ie.R
|
no_license
|
ricardomayerb/ico8306
|
R
| false
| false
| 1,201
|
r
|
library(readxl)
library(ggplot2)
library(dplyr)
notas <- read_excel("nombres60.xlsx")
notas
# opcion 1: paquete base
plot(notas$solemne, notas$examen)
boxplot(notas$solemne)
boxplot(notas$examen)
# opcion 2; paquete ggplot2
ggplot(notas) +
geom_point(aes(x = solemne, y = examen))
ggplot(notas) +
geom_boxplot(aes(y = solemne))
ggplot(notas) +
geom_boxplot(aes(y = examen))
# media de la prueba solemne
mean(notas$solemne)
# desviación estandar del examen
sd(notas$examen)
# quantil 0.25 del examen
quantile(notas$examen, 0.25)
quantile(notas$examen, 0.75)
# azul en el examen opcion 1
notas_exazul <- filter(notas, examen >= 4)
# azul en el examen opcion 2
notas_exazul <- notas[notas$examen >= 4, ]
# cuantos azules hubo en el examen?
sum(notas$examen >= 4)
# que proporción de azules hubo en el examen?
mean(notas$examen >= 4)
# Probabilidad
# para un grupo al azar de n personas
mismo_cumple <- function(n){
  # TRUE when a group of n people with uniformly random birthdays
  # (365 days, leap years ignored) contains at least one shared
  # birthday; FALSE otherwise.
  cumpleanos <- sample(1:365, n, replace = TRUE)
  length(unique(cumpleanos)) < n
}
compute_prob <- function(n, B=10000){
  # Monte Carlo estimate of the shared-birthday probability for a group
  # of n people, averaged over B simulated groups (default 10000).
  hits <- vapply(seq_len(B), function(sim) mismo_cumple(n), logical(1))
  mean(hits)
}
personas <- 1:60
prob <- sapply(personas, compute_prob)
personas[prob >= 1/3]
|
#Corona Virus Simulations
#This simulation analyzes the time a person would be in the office before they realize they are sick
#Time from infection to incubation period is modeled
# as log normal distribution
#Lauer SA, Grantz KH, Bi Q, et al. The Incubation Period of Coronavirus Disease 2019 (COVID-19) From Publicly Reported Confirmed Cases: Estimation and Application. Ann Intern Med. 2020; [Epub ahead of print 10 March 2020]. doi: https://doi.org/10.7326/M20-0504
#https://annals.org/aim/fullarticle/2762808/incubation-period-coronavirus-disease-2019-covid-19-from-publicly-reported
library(ggplot2)
library(dplyr)
#work schedule
day<-c(1,2,3,4,5,6,7)
ooo<-c(0,1,0,1,0,1,0)
schedule<-cbind(rep(day,10),rep(ooo,10))
#Work from home Simulation
# schedule - dataframe - A work schedule, 1 = in the office, 0 = work from home/weekend
# verbose - boolean - suppresses simulation output (default False)
wfh_simulation <- function(schedule, verbose = FALSE, n_sims = 15000) {
  # Work-from-home simulation.
  #
  # For each simulated infection, draws an incubation period from the
  # log-normal distribution reported by Lauer et al. (2020)
  # (meanlog = 1.621, sdlog = 0.418), picks a uniformly random day of
  # infection (1-7), and counts how many in-office days elapse before
  # symptoms would appear.
  #
  # schedule - matrix: column 1 = day of week, column 2 = office flag
  #            (1 = in the office, 0 = work from home / weekend). Rows
  #            must extend far enough past day 7 to cover the longest
  #            incubation period (callers build 70 rows via rep(..., 10)).
  # verbose  - if TRUE, print each simulated day (default FALSE)
  # n_sims   - number of simulated infections (default 15000, the value
  #            previously hard-coded in the loop)
  #
  # Returns a numeric vector of length n_sims: office days spent while
  # unknowingly infectious in each simulation.
  trackdays <- numeric(n_sims)  # preallocate instead of growing with c()
  for (t in seq_len(n_sims)) {
    # Incubation period in whole days.
    incu_pd <- round(rlnorm(1, meanlog = 1.621, sdlog = 0.418))
    # Random day of week the infection occurs.
    cur_dt <- sample(1:7, 1)
    daysinwork <- 0
    # seq_len() (not 1:incu_pd) so a zero-day incubation contributes
    # nothing; 1:0 would iterate twice and double-count a day.
    for (i in seq_len(incu_pd)) {
      daysinwork <- daysinwork + schedule[cur_dt, 2]
      if (verbose) {
        print(paste("Day of week", schedule[cur_dt, 1],
                    ", workday?:", schedule[cur_dt, 2]))
      }
      # Advance to the next calendar day (rows wrap via the repeated
      # schedule, so no modular arithmetic is needed here).
      cur_dt <- cur_dt + 1
    }
    trackdays[t] <- daysinwork
  }
  trackdays
}
day<-c(1,2,3,4,5,6,7)
#work schedule1
ooo<-c(0,1,1,1,1,1,0)
s0111110<-cbind(rep(day,10),rep(ooo,10))
#work schedule2
ooo<-c(0,0,1,1,1,1,0)
s0011110<-cbind(rep(day,10),rep(ooo,10))
#work schedule3
ooo<-c(0,1,0,1,1,1,0)
s0101110<-cbind(rep(day,10),rep(ooo,10))
#work schedule4
ooo<-c(0,1,1,0,1,1,0)
s0110110<-cbind(rep(day,10),rep(ooo,10))
#work schedule5
ooo<-c(0,1,1,1,0,1,0)
s0111010<-cbind(rep(day,10),rep(ooo,10))
#work schedule6
ooo<-c(0,1,1,1,1,0,0)
s0111100<-cbind(rep(day,10),rep(ooo,10))
#additional days
ooo<-c(0,1,1,1,0,0,0)
s0111000<-cbind(rep(day,10),rep(ooo,10))
ooo<-c(0,1,1,0,0,0,0)
s0110000<-cbind(rep(day,10),rep(ooo,10))
#testing order of the day.
dat_s0111110<-(wfh_simulation(s0111110))
dat_s0011110<-(wfh_simulation(s0011110))
dat_s0101110<-(wfh_simulation(s0101110))
dat_s0110110<-(wfh_simulation(s0110110))
dat_s0111010<-(wfh_simulation(s0111010))
dat_s0111100<-(wfh_simulation(s0111100))
#testing additional days out of the office
dat_s0111000<-(wfh_simulation(s0111000))
dat_s0110000<-(wfh_simulation(s0110000))
#Summary Statistics
summary((dat_s0111110))
summary((dat_s0011110))
summary((dat_s0101110))
summary((dat_s0110110))
summary((dat_s0111010))
summary((dat_s0111100))
sd(dat_s0111110)
sd(dat_s0011110)
sd(dat_s0101110)
sd(dat_s0110110)
sd(dat_s0111010)
#Summary statistics - Less time in the office
summary((dat_s0111110))
summary((dat_s0111100))
summary((dat_s0111000))
summary((dat_s0110000))
#Significance testing;
#Create dataset
simdat<-as.data.frame(
rbind(
cbind(1,dat_s0111110),
cbind(2,dat_s0011110),
cbind(3,dat_s0101110),
cbind(4,dat_s0110110),
cbind(5,dat_s0111010),
cbind(6,dat_s0111100)))
names(simdat)<-c("group","count")
simdat$group<-as.factor(simdat$group)
# 5 days vs 4 days in the office
t.test(simdat[simdat$group!=1,2],simdat[simdat$group==1,2])
#Mean time of of the office regardless of day
summary(simdat[simdat$group!=1,2])
#ANOVA Global
summary(aov(count ~ group,simdat))
#Pairwise Post-hocs
pairwise.t.test(simdat$count, simdat$group, data=simdat, p.adj = "holm")
#visualize the distrubtions
p <- simdat %>%
ggplot(aes(x=count, fill=group)) +
geom_histogram(position = 'identity')+facet_wrap(~group)
|
/Corona Virus Simulation - Working From Home.R
|
no_license
|
Adamishere/WFH_COV19
|
R
| false
| false
| 4,036
|
r
|
#Corona Virus Simulations
#This simulation analyzes the time a person would be in the office before they realize they are sick
#Time from infection to incubation period is modeled
# as log normal distribution
#Lauer SA, Grantz KH, Bi Q, et al. The Incubation Period of Coronavirus Disease 2019 (COVID-19) From Publicly Reported Confirmed Cases: Estimation and Application. Ann Intern Med. 2020; [Epub ahead of print 10 March 2020]. doi: https://doi.org/10.7326/M20-0504
#https://annals.org/aim/fullarticle/2762808/incubation-period-coronavirus-disease-2019-covid-19-from-publicly-reported
library(ggplot2)
library(dplyr)
#work schedule
day<-c(1,2,3,4,5,6,7)
ooo<-c(0,1,0,1,0,1,0)
schedule<-cbind(rep(day,10),rep(ooo,10))
#Work from home Simulation
# schedule - dataframe - A work schedule, 1 = in the office, 0 = work from home/weekend
# verbose - boolean - suppresses simulation output (default False)
wfh_simulation <- function(schedule, verbose = FALSE, n_sims = 15000) {
  # Work-from-home simulation.
  #
  # For each simulated infection, draws an incubation period from the
  # log-normal distribution reported by Lauer et al. (2020)
  # (meanlog = 1.621, sdlog = 0.418), picks a uniformly random day of
  # infection (1-7), and counts how many in-office days elapse before
  # symptoms would appear.
  #
  # schedule - matrix: column 1 = day of week, column 2 = office flag
  #            (1 = in the office, 0 = work from home / weekend). Rows
  #            must extend far enough past day 7 to cover the longest
  #            incubation period (callers build 70 rows via rep(..., 10)).
  # verbose  - if TRUE, print each simulated day (default FALSE)
  # n_sims   - number of simulated infections (default 15000, the value
  #            previously hard-coded in the loop)
  #
  # Returns a numeric vector of length n_sims: office days spent while
  # unknowingly infectious in each simulation.
  trackdays <- numeric(n_sims)  # preallocate instead of growing with c()
  for (t in seq_len(n_sims)) {
    # Incubation period in whole days.
    incu_pd <- round(rlnorm(1, meanlog = 1.621, sdlog = 0.418))
    # Random day of week the infection occurs.
    cur_dt <- sample(1:7, 1)
    daysinwork <- 0
    # seq_len() (not 1:incu_pd) so a zero-day incubation contributes
    # nothing; 1:0 would iterate twice and double-count a day.
    for (i in seq_len(incu_pd)) {
      daysinwork <- daysinwork + schedule[cur_dt, 2]
      if (verbose) {
        print(paste("Day of week", schedule[cur_dt, 1],
                    ", workday?:", schedule[cur_dt, 2]))
      }
      # Advance to the next calendar day (rows wrap via the repeated
      # schedule, so no modular arithmetic is needed here).
      cur_dt <- cur_dt + 1
    }
    trackdays[t] <- daysinwork
  }
  trackdays
}
day<-c(1,2,3,4,5,6,7)
#work schedule1
ooo<-c(0,1,1,1,1,1,0)
s0111110<-cbind(rep(day,10),rep(ooo,10))
#work schedule2
ooo<-c(0,0,1,1,1,1,0)
s0011110<-cbind(rep(day,10),rep(ooo,10))
#work schedule3
ooo<-c(0,1,0,1,1,1,0)
s0101110<-cbind(rep(day,10),rep(ooo,10))
#work schedule4
ooo<-c(0,1,1,0,1,1,0)
s0110110<-cbind(rep(day,10),rep(ooo,10))
#work schedule5
ooo<-c(0,1,1,1,0,1,0)
s0111010<-cbind(rep(day,10),rep(ooo,10))
#work schedule6
ooo<-c(0,1,1,1,1,0,0)
s0111100<-cbind(rep(day,10),rep(ooo,10))
#additional days
ooo<-c(0,1,1,1,0,0,0)
s0111000<-cbind(rep(day,10),rep(ooo,10))
ooo<-c(0,1,1,0,0,0,0)
s0110000<-cbind(rep(day,10),rep(ooo,10))
#testing order of the day.
dat_s0111110<-(wfh_simulation(s0111110))
dat_s0011110<-(wfh_simulation(s0011110))
dat_s0101110<-(wfh_simulation(s0101110))
dat_s0110110<-(wfh_simulation(s0110110))
dat_s0111010<-(wfh_simulation(s0111010))
dat_s0111100<-(wfh_simulation(s0111100))
#testing additional days out of the office
dat_s0111000<-(wfh_simulation(s0111000))
dat_s0110000<-(wfh_simulation(s0110000))
#Summary Statistics
summary((dat_s0111110))
summary((dat_s0011110))
summary((dat_s0101110))
summary((dat_s0110110))
summary((dat_s0111010))
summary((dat_s0111100))
sd(dat_s0111110)
sd(dat_s0011110)
sd(dat_s0101110)
sd(dat_s0110110)
sd(dat_s0111010)
#Summary statistics - Less time in the office
summary((dat_s0111110))
summary((dat_s0111100))
summary((dat_s0111000))
summary((dat_s0110000))
#Significance testing;
#Create dataset
simdat<-as.data.frame(
rbind(
cbind(1,dat_s0111110),
cbind(2,dat_s0011110),
cbind(3,dat_s0101110),
cbind(4,dat_s0110110),
cbind(5,dat_s0111010),
cbind(6,dat_s0111100)))
names(simdat)<-c("group","count")
simdat$group<-as.factor(simdat$group)
# 5 days vs 4 days in the office
t.test(simdat[simdat$group!=1,2],simdat[simdat$group==1,2])
#Mean time of of the office regardless of day
summary(simdat[simdat$group!=1,2])
#ANOVA Global
summary(aov(count ~ group,simdat))
#Pairwise Post-hocs
pairwise.t.test(simdat$count, simdat$group, data=simdat, p.adj = "holm")
#visualize the distrubtions
p <- simdat %>%
ggplot(aes(x=count, fill=group)) +
geom_histogram(position = 'identity')+facet_wrap(~group)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/smwrGraphs-package.R
\docType{package}
\name{smwrGraphs-package}
\alias{smwrGraphs-package}
\alias{smwrGraphs}
\title{Graphing Functions}
\description{
Functions to create high-quality graphs.\cr These graphs meet or nearly meet
the publication standards for illustrations of the U.S. Geological Survey
(USGS) (U.S. Geological Survey, written commun., 2012). They are intended to
be a suite of integrated functions that make producing graphs and figures
relatively easy by passing much information about the plots between
functions so the user does not need to manage graphical information.
}
\details{
\tabular{ll}{
Package: \tab smwrGraphs\cr
Type: \tab Package\cr
License: \tab CC0\cr
Depends: \tab smwrBase (>= 1.0.0), methods\cr
Imports: \tab KernSmooth, akima, lubridate\cr
Suggests: \tab smwrData (>= 1.0.0), dataRetrieval\cr }
The functions in the \code{smwrGraphs} package
are an integrated suite of functions that facilitate the production of
graphs that nearly meet USGS publication standards for illustrations (U.S.
Geological Survey, written commun., 2012). Those standards include line
weight, tick placement, labels, font size, and layout of the explanation.
The font used in production very closely matches the standard Univers
Condensed, and was selected because of its broad availability on many
computer platforms.\cr
Use of base \code{R} or other graphics functions can result in inconsistent
lineweights, font sizes and styles, and can require manual manipulation of
the explanation. The Programmer's Guide section in Lorenz (2015) shows
examples of calls to lower level graphics functions in base \code{R}
that produce consistent graphics products.\cr
Functions to set up and initialize the \code{smwrGraphs} environment:\cr
\code{\link{preSurface}}\cr
\code{\link{setGD}}\cr
\code{\link{setGraph}}\cr
\code{\link{setKnitr}}\cr
\code{\link{setLayout}}\cr
\code{\link{setPDF}}\cr
\code{\link{setPage}}\cr
\code{\link{setPNG}}\cr
\code{\link{setRStudio}}\cr
\code{\link{setRtMargin}}\cr
\code{\link{setSplom}}\cr
\code{\link{setSweave}}\cr
\code{\link{setTopMargin}}\cr\cr
Main plotting functions:\cr
\code{\link{areaPlot}}\cr
\code{\link{biPlot}}\cr
\code{\link{boxPlot}}\cr
\code{\link{colorPlot}}\cr
\code{\link{condition}}\cr
\code{\link{contourPlot}}\cr
\code{\link{corGram}}\cr
\code{\link{dendGram}}\cr
\code{\link{dotPlot}}\cr
\code{\link{ecdfPlot}}\cr
\code{\link{histGram}}\cr
\code{\link{piperPlot}}\cr
\code{\link{probPlot}}\cr
\code{\link{qqPlot}}\cr
\code{\link{reportGraph}}\cr
\code{\link{scalePlot}}\cr
\code{\link{seasonPlot}}\cr
\code{\link{splomPlot}}\cr
\code{\link{seriesPlot}}\cr
\code{\link{stiffPlot}}\cr
\code{\link{surfacePlot}} \cr
\code{\link{ternaryPlot}}\cr
\code{\link{timePlot}}\cr
\code{\link{transPlot}}\cr
\code{\link{xyPlot}}\cr
\code{\link{condition}}\cr\cr
Functions to add features to a plot:\cr
\code{\link{addAnnotation}}\cr
\code{\link{addArea}}\cr
\code{\link{addAxisLabels}}\cr
\code{\link{addCaption}}\cr
\code{\link{addCI}}\cr
\code{\link{addErrorBars}}\cr
\code{\link{addExplanation}}\cr
\code{\link{addGrid}}\cr
\code{\link{addLabel}}\cr
\code{\link{addMinorTicks}}\cr
\code{\link{addPiper}}\cr
\code{\link{addSLR}}\cr
\code{\link{addSmooth}}\cr
\code{\link{addStiff}}\cr
\code{\link{addTable}}\cr
\code{\link{addTernary}}\cr
\code{\link{addTitle}}\cr
\code{\link{addXY}}\cr
\code{\link{labelPoints}}\cr
\code{\link{refLine}}\cr\cr
Data Manipulation Functions for Graphs:\cr
\code{\link{cov2Ellipse}}\cr
\code{\link{dataEllipse}}\cr
\code{\link{hull}}\cr
\code{\link{interpLine}}\cr
\code{\link{paraSpline}}\cr\cr
Color palettes:\cr
\code{\link{blueRed.colors}}\cr
\code{\link{coolWarm.colors}}\cr
\code{\link{greenRed.colors}}\cr
\code{\link{pastel.colors}}\cr
\code{\link{redBlue.colors}}\cr
\code{\link{redGreen.colors}}\cr
\code{\link{warmCool.colors}}\cr\cr
Selected Miscellaneous Functions:\cr
\code{\link{copyDemo}}\cr
\code{\link{strip.blanks}}\cr
}
\examples{
# For these examples, print to console
.pager <- options("pager")
options(pager="console")
# See the demo for examples of how to use the functions in this library.
demo(package="smwrGraphs")
# A simple listing of the vignettes in this package:
vignette(package="smwrGraphs")
options(.pager)
}
\references{
Lorenz, D.L., Diekoff, A.L, smwrGraphs---an R package for
graphing hydrologic data, version 1.1.2.\cr
U.S. Geological Survey, 2012, Author\verb{'}s guide to standards for U.S.
Geological Survey page-size illustrations, 37 p.
\url{https://pubs.er.usgs.gov/publication/ofr20161188}
}
\author{
Dave Lorenz
}
\keyword{package}
|
/man/smwrGraphs-package.Rd
|
permissive
|
ldecicco-USGS/smwrGraphs
|
R
| false
| true
| 4,685
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/smwrGraphs-package.R
\docType{package}
\name{smwrGraphs-package}
\alias{smwrGraphs-package}
\alias{smwrGraphs}
\title{Graphing Functions}
\description{
Functions to create high-quality graphs.\cr These graphs meet or nearly meet
the publication standards for illustrations of the U.S. Geological Survey
(USGS) (U.S. Geological Survey, written commun., 2012). They are intended to
be a suite of integrated functions that make producing graphs and figures
relatively easy by passing much information about the plots between
functions so the user does not need to manage graphical information.
}
\details{
\tabular{ll}{
Package: \tab smwrGraphs\cr
Type: \tab Package\cr
License: \tab CC0\cr
Depends: \tab smwrBase (>= 1.0.0), methods\cr
Imports: \tab KernSmooth, akima, lubridate\cr
Suggests: \tab smwrData (>= 1.0.0), dataRetrieval\cr }
The functions in the \code{smwrGraphs} package
are an integrated suite of functions that facilitate the production of
graphs that nearly meet USGS publication standards for illustrations (U.S.
Geological Survey, written commun., 2012). Those standards include line
weight, tick placement, labels, font size, and layout of the explanation.
The font used in production very closely matches the standard Univers
Condensed, and was selected because of its broad availability on many
computer platforms.\cr
Use of base \code{R} or other graphics functions can result in inconsistent
lineweights, font sizes and styles, and can require manual manipulation of
the explanation. The Programmer's Guide section in Lorenz (2015) shows
examples of calls to lower level graphics functions in base \code{R}
that produce consistent graphics products.\cr
Functions to set up and initialize the \code{smwrGraphs} environment:\cr
\code{\link{preSurface}}\cr
\code{\link{setGD}}\cr
\code{\link{setGraph}}\cr
\code{\link{setKnitr}}\cr
\code{\link{setLayout}}\cr
\code{\link{setPDF}}\cr
\code{\link{setPage}}\cr
\code{\link{setPNG}}\cr
\code{\link{setRStudio}}\cr
\code{\link{setRtMargin}}\cr
\code{\link{setSplom}}\cr
\code{\link{setSweave}}\cr
\code{\link{setTopMargin}}\cr\cr
Main plotting functions:\cr
\code{\link{areaPlot}}\cr
\code{\link{biPlot}}\cr
\code{\link{boxPlot}}\cr
\code{\link{colorPlot}}\cr
\code{\link{condition}}\cr
\code{\link{contourPlot}}\cr
\code{\link{corGram}}\cr
\code{\link{dendGram}}\cr
\code{\link{dotPlot}}\cr
\code{\link{ecdfPlot}}\cr
\code{\link{histGram}}\cr
\code{\link{piperPlot}}\cr
\code{\link{probPlot}}\cr
\code{\link{qqPlot}}\cr
\code{\link{reportGraph}}\cr
\code{\link{scalePlot}}\cr
\code{\link{seasonPlot}}\cr
\code{\link{splomPlot}}\cr
\code{\link{seriesPlot}}\cr
\code{\link{stiffPlot}}\cr
\code{\link{surfacePlot}} \cr
\code{\link{ternaryPlot}}\cr
\code{\link{timePlot}}\cr
\code{\link{transPlot}}\cr
\code{\link{xyPlot}}\cr
\code{\link{condition}}\cr\cr
Functions to add features to a plot:\cr
\code{\link{addAnnotation}}\cr
\code{\link{addArea}}\cr
\code{\link{addAxisLabels}}\cr
\code{\link{addCaption}}\cr
\code{\link{addCI}}\cr
\code{\link{addErrorBars}}\cr
\code{\link{addExplanation}}\cr
\code{\link{addGrid}}\cr
\code{\link{addLabel}}\cr
\code{\link{addMinorTicks}}\cr
\code{\link{addPiper}}\cr
\code{\link{addSLR}}\cr
\code{\link{addSmooth}}\cr
\code{\link{addStiff}}\cr
\code{\link{addTable}}\cr
\code{\link{addTernary}}\cr
\code{\link{addTitle}}\cr
\code{\link{addXY}}\cr
\code{\link{labelPoints}}\cr
\code{\link{refLine}}\cr\cr
Data Manipulation Functions for Graphs:\cr
\code{\link{cov2Ellipse}}\cr
\code{\link{dataEllipse}}\cr
\code{\link{hull}}\cr
\code{\link{interpLine}}\cr
\code{\link{paraSpline}}\cr\cr
Color palettes:\cr
\code{\link{blueRed.colors}}\cr
\code{\link{coolWarm.colors}}\cr
\code{\link{greenRed.colors}}\cr
\code{\link{pastel.colors}}\cr
\code{\link{redBlue.colors}}\cr
\code{\link{redGreen.colors}}\cr
\code{\link{warmCool.colors}}\cr\cr
Selected Miscellaneous Functions:\cr
\code{\link{copyDemo}}\cr
\code{\link{strip.blanks}}\cr
}
\examples{
# For these examples, print to console
.pager <- options("pager")
options(pager="console")
# See the demo for examples of how to use the functions in this library.
demo(package="smwrGraphs")
# A simple listing of the vignettes in this package:
vignette(package="smwrGraphs")
options(.pager)
}
\references{
Lorenz, D.L., Diekoff, A.L, smwrGraphs---an R package for
graphing hydrologic data, version 1.1.2.\cr
U.S. Geological Survey, 2012, Author\verb{'}s guide to standards for U.S.
Geological Survey page-size illustrations, 37 p.
\url{https://pubs.er.usgs.gov/publication/ofr20161188}
}
\author{
Dave Lorenz
}
\keyword{package}
|
# Exploratory Data Analysis plot 2: global active power over time for
# 1-2 Feb 2007 from the household power consumption text file.
# NOTE(review): the file is read with skip=1 plus manual names instead
# of header=TRUE, and without na.strings="?" -- if the source encodes
# missing values as "?" the numeric columns arrive as text; verify the
# input before trusting the plot.
power <- read.table("household_power_consumption.txt",skip=1,sep=";")
names(power) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
# Keep only the two target days (Date values are d/m/Y strings here).
subpower <- subset(power,power$Date=="1/2/2007" | power$Date =="2/2/2007")
subpower$Date <- as.Date(subpower$Date, format="%d/%m/%Y")
# strptime() attaches the current date to each clock time; the two
# format() calls below overwrite it with the correct calendar day.
# This assumes exactly 1440 one-minute rows per day, in order --
# TODO confirm for this extract.
subpower$Time <- strptime(subpower$Time, format="%H:%M:%S")
subpower[1:1440,"Time"] <- format(subpower[1:1440,"Time"],"2007-02-01 %H:%M:%S")
subpower[1441:2880,"Time"] <- format(subpower[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
# Line plot of active power (kilowatts) against time; the
# as.numeric(as.character(...)) round-trip guards against factor columns.
plot(subpower$Time,as.numeric(as.character(subpower$Global_active_power)),type="l",xlab="",ylab="Global Active Power (kilowatts)")
title(main="Global Active Power Vs Time")
|
/exploratory-master/plot2.R
|
no_license
|
sakibb019/explority
|
R
| false
| false
| 768
|
r
|
# Exploratory Data Analysis plot 2: global active power over time for
# 1-2 Feb 2007 from the household power consumption text file.
# NOTE(review): the file is read with skip=1 plus manual names instead
# of header=TRUE, and without na.strings="?" -- if the source encodes
# missing values as "?" the numeric columns arrive as text; verify the
# input before trusting the plot.
power <- read.table("household_power_consumption.txt",skip=1,sep=";")
names(power) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
# Keep only the two target days (Date values are d/m/Y strings here).
subpower <- subset(power,power$Date=="1/2/2007" | power$Date =="2/2/2007")
subpower$Date <- as.Date(subpower$Date, format="%d/%m/%Y")
# strptime() attaches the current date to each clock time; the two
# format() calls below overwrite it with the correct calendar day.
# This assumes exactly 1440 one-minute rows per day, in order --
# TODO confirm for this extract.
subpower$Time <- strptime(subpower$Time, format="%H:%M:%S")
subpower[1:1440,"Time"] <- format(subpower[1:1440,"Time"],"2007-02-01 %H:%M:%S")
subpower[1441:2880,"Time"] <- format(subpower[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
# Line plot of active power (kilowatts) against time; the
# as.numeric(as.character(...)) round-trip guards against factor columns.
plot(subpower$Time,as.numeric(as.character(subpower$Global_active_power)),type="l",xlab="",ylab="Global Active Power (kilowatts)")
title(main="Global Active Power Vs Time")
|
### ----- REGRESION AVANZADA ----- ###
# --- Prof. Luis E. Nieto Barajas --- #
#--- Usar espejo CRAN del ITAM ---
options(repos="http://cran.itam.mx/")
#--- Funciones utiles ---
prob<-function(x){
  # Two-sided posterior tail probability: the smaller of the proportion
  # of strictly positive and strictly negative draws in x. Used to judge
  # whether an MCMC sample of a coefficient is clearly on one side of 0.
  frac_pos <- length(x[x > 0]) / length(x)
  frac_neg <- length(x[x < 0]) / length(x)
  min(frac_pos, frac_neg)
}
#--- Ilustracion del proceso de inferencia ---
#-Proceso de aprendizaje normal-normal-
xbar<-40.9533
sig2<-4
n<-3
th0<-39
sig20<-219.47
y<-seq(35,45,length.out=200)
f0y<-dnorm(y,th0,sqrt(sig20))
liky<-dnorm(y,xbar,sqrt(sig2/n))
sig21<-1/(n/sig2+1/sig20)
th1<-sig21*(n/sig2*xbar+th0/sig20)
f1y<-dnorm(y,th1,sqrt(sig21))
ymax<-max(f0y,liky,f1y)
plot(y,f0y,ylim=c(0,ymax),type="l")
lines(y,liky,lty=2,col=2)
lines(y,f1y,lty=3,col=3)
#-Proceso de aprendizaje bernoulli-beta-
#Simulacion de datos Bernoulli
theta0 <- 0.6
n <- 150
x<-rbinom(n,1,theta0)
hist(x,freq=FALSE)
#Distribucion inicial para theta
a <- 1
b <- 1
theta<-seq(0,1,,100)
plot(theta,dbeta(theta,a,b),type="l")
#Distribucion final
a1 <- a + sum(x)
b1 <- b + n - sum(x)
plot(theta,dbeta(theta,a1,b1),type="l")
#Ambas
theta<-seq(0,1,,100)
ymax <- max(dbeta(theta,a,b),dbeta(theta,a1,b1))
plot(theta,dbeta(theta,a,b),type="l",ylim=c(0,ymax))
lines(theta,dbeta(theta,a1,b1),col=2)
abline(v=theta0,col=4)
#Aproximacion normal asintotica
mu <- (a1-1)/(a1+b1-2)
sig2 <- (a1-1)*(b1-1)/(a1+b1-2)^3
lines(theta,dnorm(theta,mu,sqrt(sig2)),col=3)
# --- Aproximaci�n Monte Carlo ---
#-Ejemplo 1-
x<-seq(-2,4,,1000)
f<-function(x){
  # Unnormalised parabola 5 - (x - 1)^2 truncated to the interval
  # [-1, 3]; zero outside it. Vectorised in x (target of the Monte
  # Carlo importance-sampling examples below).
  ifelse(x >= -1 & x <= 3, 5 - (x - 1)^2, 0)
}
plot(x,f(x)*3/44,type="l",ylim=c(0,0.5))
lines(x,dnorm(x,0,1),lty=2,col=2)
lines(x,dnorm(x,1,2/3),lty=3,col=3)
lines(x,dnorm(x,1,1),lty=4,col=4)
lines(x,dnorm(x,1,2),lty=5,col=5)
N<-100000
#Caso 1: S=Normal estandar
mu<-0
sig<-1
#N<-10000
y<-rnorm(N,mu,sig)
I1<-mean(f(y)/dnorm(y,mu,sig))
eI1<-sd(f(y)/dnorm(y,0,1))/sqrt(N)
print(c(I1,eI1))
#Caso 2: S=Normal no estandar
mu<-1
sig<-2/3
#N<-10000
y<-rnorm(N,mu,sig)
I2<-mean(f(y)/dnorm(y,mu,sig))
eI2<-sd(f(y)/dnorm(y,mu,sig))/sqrt(N)
print(c(I2,eI2))
#Caso 3: S=Normal no estandar
mu<-1
sig<-1
#N<-10000
y<-rnorm(N,mu,sig)
I3<-mean(f(y)/dnorm(y,mu,sig))
eI3<-sd(f(y)/dnorm(y,mu,sig))/sqrt(N)
print(c(I3,eI3))
#Caso 4: S=Normal no estandar
mu<-1
sig<-2
#N<-10000
y<-rnorm(N,mu,sig)
I4<-mean(f(y)/dnorm(y,mu,sig))
eI4<-sd(f(y)/dnorm(y,mu,sig))/sqrt(N)
print(c(I4,eI4))
#-Ejemplo 2-
f<-function(x){
  # Identity on [0, 1] and zero elsewhere: the integrand for the second
  # Monte Carlo example (its integral over the support is 1/2).
  # Vectorised in x.
  ifelse(x >= 0 & x <= 1, x, 0)
}
x<-seq(-1,2,,100)
plot(x,f(x),type="l",ylim=c(0,1.2))
N<-1000000
#Caso 1: S=Uniforme
lines(x,dunif(x,0,1),col=2,lty=2)
y<-runif(N,0,1)
I1<-mean(f(y)/dunif(y,0,1))
eI1<-sd(f(y)/dunif(y,0,1))/sqrt(N)
print(c(I1,eI1))
#Caso 2: S=Exponencial
lines(x,dexp(x,1),col=3,lty=3)
y<-rexp(N,1)
I2<-mean(f(y)/dexp(y,1))
eI2<-sd(f(y)/dexp(y,1))/sqrt(N)
print(c(I2,eI2))
#Caso 3: S=Normal
lines(x,dnorm(x,0.5,1/3),col=4,lty=4)
y<-rnorm(N,0.5,1/3)
I3<-mean(f(y)/dnorm(y,0.5,1/3))
eI3<-sd(f(y)/dnorm(y,0.5,1/3))/sqrt(N)
print(c(I3,eI3))
#-Muestreador de Gibbs-
install.packages("bayesm")
library(bayesm)
out<-rbiNormGibbs(rho=0.95)
out<-rbiNormGibbs(rho=-0.5)
###########################################
install.packages("R2OpenBUGS")
install.packages("R2jags")
library(R2OpenBUGS)
library(R2jags)
#-Working directory-
wdir<-"c:/temp/RegAva/"
setwd(wdir)
#--- Ejemplo 1---
#-Reading data-
n<-10
credito<-c(rep(1,n/2),rep(0,n/2))
credito<-c(rep(1,n*0.9),rep(0,n*0.1))
credito<-c(rep(0,n*0.9),rep(1,n*0.1))
#-Defining data-
data<-list("n"=n,"x"=credito)
#-Defining inits-
inits<-function(){list(theta=0.9999,x1=rep(0,2))}
inits<-function(){list(lambda=0)}
inits<-function(){list(theta=0.5,eta=1)}
#-Selecting parameters to monitor-
parameters<-c("theta","x1")
parameters<-c("theta","eta")
#-Running code-
#OpenBUGS
ej1.sim<-bugs(data,inits,parameters,model.file="Ej1.txt",
n.iter=5000,n.chains=1,n.burnin=0)
#JAGS
ej1.sim<-jags(data,inits,parameters,model.file="Ej1.txt",
n.iter=5000,n.chains=1,n.burnin=500,n.thin=1)
#-Monitoring chain-
#Traza de la cadena
traceplot(ej1.sim)
#Cadena
#OpenBUGS
out<-ej1.sim$sims.list
#JAGS
out<-ej1.sim$BUGSoutput$sims.list
z<-out$theta
par(mfrow=c(2,2))
plot(z,type="l")
plot(cumsum(z)/(1:length(z)),type="l")
hist(z,freq=FALSE)
acf(z)
#Resumen (estimadores)
#OpenBUGS
out.sum<-ej1.sim$summary
#JAGS
out.sum<-ej1.sim$BUGSoutput$summary
print(out.sum)
#DIC
#OpenBUGS
out.dic<-ej1.sim$DIC
#JAGS
out.dic<-ej1.sim$BUGSoutput$DIC
print(out.dic)
#---------------------#
#Mezcla de betas
w<-seq(0.01,0.99,,100)
pp<-0.3
fw<-pp*dbeta(w,10,10)+(1-pp)*dbeta(w,5,0.05)
par(mfrow=c(1,1))
plot(w,fw,type="l")
#--- Ejemplo 2---
#-Reading data-
utilidad<-c(212, 207, 210,
196, 223, 193,
196, 210, 202, 221)
n<-length(utilidad)
#-Defining data-
data<-list("n"=n,"x"=utilidad)
#-Defining inits-
inits<-function(){list(mu=0,sig=1,x1=0)}
#-Selecting parameters to monitor-
parameters<-c("mu","sig","x1")
#-Running code-
#OpenBUGS
ej2.sim<-bugs(data,inits,parameters,model.file="Ej2.txt",
n.iter=5000,n.chains=1,n.burnin=500)
#JAGS
ej2.sim<-jags(data,inits,parameters,model.file="Ej2.txt",
n.iter=5000,n.chains=1,n.burnin=500)
#-Monitoring chain-
#Traza de la cadena
traceplot(ej2.sim)
#Cadena
#OpenBUGS
out<-ej2.sim$sims.list
#JAGS
out<-ej2.sim$BUGSoutput$sims.list
z<-out$x1
par(mfrow=c(2,2))
plot(z,type="l")
plot(cumsum(z)/(1:length(z)),type="l")
hist(z,freq=FALSE)
acf(z)
#Resumen (estimadores)
#OpenBUGS
out.sum<-ej2.sim$summary
#JAGS
out.sum<-ej2.sim$BUGSoutput$summary
print(out.sum)
#DIC
#OpenBUGS
out.dic<-ej2.sim$DIC
#JAGS
out.dic<-ej2.sim$BUGSoutput$DIC
print(out.dic)
#--- Ejemplo 3 ---
# Linear regression of grades (SP on MO); models in Ej3.txt / Ej3a.txt / Ej3b.txt.
#-Reading data-
calif<-read.table("http://allman.rhon.itam.mx/~lnieto/index_archivos/calificaciones.txt",header=TRUE)
n<-nrow(calif)
plot(calif$MO,calif$SP)
#-Defining data-
data<-list("n"=n,"y"=calif$SP,"x"=calif$MO)
#-Defining inits-
# Alternative inits: run ONE at a time (the last assignment wins).
# beta has length 2 for the simple model, 6 for the polynomial variant.
inits<-function(){list(beta=rep(0,2),tau=1,yf=rep(0,n))}
inits<-function(){list(beta=rep(0,6),tau=1,yf=rep(0,n))}
#-Selecting parameters to monitor-
parameters<-c("beta","tau","yf")
#-Running code-
#OpenBUGS
ej3.sim<-bugs(data,inits,parameters,model.file="Ej3.txt",
n.iter=10000,n.chains=1,n.burnin=1000)
ej3a.sim<-bugs(data,inits,parameters,model.file="Ej3a.txt",
n.iter=100000,n.chains=1,n.burnin=10000,n.thin=5)
ej3b.sim<-bugs(data,inits,parameters,model.file="Ej3b.txt",
n.iter=10000,n.chains=1,n.burnin=1000)
#JAGS
ej3.sim<-jags(data,inits,parameters,model.file="Ej3.txt",
n.iter=10000,n.chains=1,n.burnin=1000,n.thin=1)
ej3b.sim<-jags(data,inits,parameters,model.file="Ej3b.txt",
n.iter=10000,n.chains=1,n.burnin=1000,n.thin=1)
#-Monitoring chain-
# Trace plot of the chain
traceplot(ej3.sim)
# Raw chain draws (pick the fitted model / engine actually run)
#OpenBUGS
out<-ej3.sim$sims.list
out<-ej3b.sim$sims.list
#JAGS
out<-ej3.sim$BUGSoutput$sims.list
out<-ej3b.sim$BUGSoutput$sims.list
# Diagnostics for the slope beta[2]
z<-out$beta[,2]
par(mfrow=c(2,2))
plot(z,type="l")
plot(cumsum(z)/(1:length(z)),type="l")
hist(z,freq=FALSE)
acf(z)
z<-out$beta
par(mfrow=c(1,1))
plot(z)
z<-out$beta
pairs(z)
# Summary (point/interval estimators)
#OpenBUGS
out.sum<-ej3.sim$summary
out.sum<-ej3b.sim$summary
#JAGS
out.sum<-ej3.sim$BUGSoutput$summary
# Summary table: posterior mean and 95% interval plus the tail
# probability min(P(beta>0), P(beta<0)) from the helper prob().
out.sum.t<-out.sum[grep("beta",rownames(out.sum)),c(1,3,7)]
out.sum.t<-cbind(out.sum.t,apply(out$beta,2,prob))
dimnames(out.sum.t)[[2]][4]<-"prob"
print(out.sum.t)
#DIC
out.dic<-ej3b.sim$DIC
out.dic<-ej3.sim$BUGSoutput$DIC
print(out.dic)
#Predictions
# Posterior predictive band (mean, 2.5%, 97.5%) over the ordered covariate.
out.yf<-out.sum[grep("yf",rownames(out.sum)),]
or<-order(calif$MO)
ymin<-min(calif$SP,out.yf[,c(1,3,7)])
ymax<-max(calif$SP,out.yf[,c(1,3,7)])
par(mfrow=c(1,1))
plot(calif$MO,calif$SP,ylim=c(ymin,ymax))
lines(calif$MO[or],out.yf[or,1],lwd=2,col=2)
lines(calif$MO[or],out.yf[or,3],lty=2,col=2)
lines(calif$MO[or],out.yf[or,7],lty=2,col=2)
plot(calif$SP,out.yf[,1])
# Pseudo-R^2: squared correlation between observed and fitted values.
R2<-(cor(calif$SP,out.yf[,1]))^2
print(R2)
#--- Ejemplo 4 ---
#TAREA (homework -- intentionally left blank)
#--- Ejemplo 5 ---
# Binomial/Poisson regression for mortality counts; models Ej5a..Ej5d.txt,
# with one out-of-sample prediction point (nef exposures at dose xf).
#-Reading data-
mortality<-read.table("http://allman.rhon.itam.mx/~lnieto/index_archivos/mortality.txt",header=TRUE)
n<-nrow(mortality)
plot(mortality)
plot(mortality$x,mortality$y/mortality$n)
m<-1
nef<-c(100)
xf<-c(200)
#-Defining data-
data<-list("n"=n,"ne"=mortality$n,"y"=mortality$y,"x"=mortality$x,"m"=m,"nef"=nef,"xf"=xf)
#-Defining inits-
inits<-function(){list(beta=rep(0,2),yf1=rep(1,n),yf2=1)}
#-Selecting parameters to monitor-
# Alternatives: "lambda" for the Poisson-rate models, "p" for the binomial ones.
parameters<-c("beta","lambda","yf1","yf2")
parameters<-c("beta","p","yf1","yf2")
#-Running code-
#OpenBUGS
ej5a.sim<-bugs(data,inits,parameters,model.file="Ej5a.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej5aa.sim<-bugs(data,inits,parameters,model.file="Ej5aa.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej5b.sim<-bugs(data,inits,parameters,model.file="Ej5b.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej5c.sim<-bugs(data,inits,parameters,model.file="Ej5c.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej5d.sim<-bugs(data,inits,parameters,model.file="Ej5d.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
#JAGS
ej5a.sim<-jags(data,inits,parameters,model.file="Ej5a.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej5b.sim<-jags(data,inits,parameters,model.file="Ej5b.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej5c.sim<-jags(data,inits,parameters,model.file="Ej5c.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
#-Monitoring chain-
# Trace plot of the chain
# NOTE(review): no object `ej5.sim` is ever created above (only ej5a/ej5aa/
# ej5b/ej5c/ej5d.sim) -- rename to the fitted object before running. TODO confirm.
traceplot(ej5.sim)
# Raw chain draws
#OpenBUGS
out<-ej5c.sim$sims.list
#JAGS
# NOTE(review): `ej5.sim` is undefined here as well; use e.g. ej5c.sim.
out<-ej5.sim$BUGSoutput$sims.list
z<-out$beta[,2]
par(mfrow=c(2,2))
plot(z,type="l")
plot(cumsum(z)/(1:length(z)),type="l")
hist(z,freq=FALSE)
acf(z)
z<-out$beta
par(mfrow=c(1,1))
plot(z)
# Summary (point/interval estimators)
#OpenBUGS
out.sum<-ej5c.sim$summary
#JAGS
out.sum<-ej5.sim$BUGSoutput$summary
# Summary table (posterior mean, 95% interval, sign probability)
out.sum.t<-out.sum[grep("beta",rownames(out.sum)),c(1,3,7)]
out.sum.t<-cbind(out.sum.t,apply(out$beta,2,prob))
dimnames(out.sum.t)[[2]][4]<-"prob"
print(out.sum.t)
#DIC
out.dic<-ej5c.sim$DIC
out.dic<-ej5.sim$BUGSoutput$DIC
print(out.dic)
#Predictions
# Predictive bands; re-run the lines for each fitted model, changing only col=.
out.yf<-out.sum[grep("yf1",rownames(out.sum)),]
or<-order(mortality$x)
ymin<-min(mortality$y,out.yf[,c(1,3,7)])
ymax<-max(mortality$y,out.yf[,c(1,3,7)])
par(mfrow=c(1,1))
plot(mortality$x,mortality$y,ylim=c(ymin,ymax))
# Model 1
lines(mortality$x[or],out.yf[or,1],lwd=2,col=2)
lines(mortality$x[or],out.yf[or,3],lty=2,col=2)
lines(mortality$x[or],out.yf[or,7],lty=2,col=2)
# Model 2
lines(mortality$x[or],out.yf[or,1],lwd=2,col=3)
lines(mortality$x[or],out.yf[or,3],lty=2,col=3)
lines(mortality$x[or],out.yf[or,7],lty=2,col=3)
# Model 3
lines(mortality$x[or],out.yf[or,1],lwd=2,col=4)
lines(mortality$x[or],out.yf[or,3],lty=2,col=4)
lines(mortality$x[or],out.yf[or,7],lty=2,col=4)
# Model 4
lines(mortality$x[or],out.yf[or,1],lwd=2,col=5)
lines(mortality$x[or],out.yf[or,3],lty=2,col=5)
lines(mortality$x[or],out.yf[or,7],lty=2,col=5)
# Model 5
lines(mortality$x[or],out.yf[or,1],lwd=2,col=6)
lines(mortality$x[or],out.yf[or,3],lty=2,col=6)
lines(mortality$x[or],out.yf[or,7],lty=2,col=6)
plot(mortality$y,out.yf[,1])
abline(a=0,b=1)
cor(mortality$y,out.yf[,1])
# Rate estimates ("lambda" for Poisson models, "p" for binomial models)
out.tasa<-out.sum[grep("lambda",rownames(out.sum)),]
out.tasa<-out.sum[grep("p",rownames(out.sum)),]
or<-order(mortality$x)
ymin<-min(mortality$y/mortality$n,out.tasa[,c(1,3,7)])
ymax<-max(mortality$y/mortality$n,out.tasa[,c(1,3,7)])
par(mfrow=c(1,1))
plot(mortality$x,mortality$y/mortality$n,ylim=c(ymin,ymax))
# Model 1
lines(mortality$x[or],out.tasa[or,1],lwd=2,col=2)
lines(mortality$x[or],out.tasa[or,3],lty=2,col=2)
lines(mortality$x[or],out.tasa[or,7],lty=2,col=2)
# Model 2
lines(mortality$x[or],out.tasa[or,1],lwd=2,col=3)
lines(mortality$x[or],out.tasa[or,3],lty=2,col=3)
lines(mortality$x[or],out.tasa[or,7],lty=2,col=3)
# Model 3
lines(mortality$x[or],out.tasa[or,1],lwd=2,col=4)
lines(mortality$x[or],out.tasa[or,3],lty=2,col=4)
lines(mortality$x[or],out.tasa[or,7],lty=2,col=4)
# Model 4
lines(mortality$x[or],out.tasa[or,1],lwd=2,col=5)
lines(mortality$x[or],out.tasa[or,3],lty=2,col=5)
lines(mortality$x[or],out.tasa[or,7],lty=2,col=5)
# Model 5
lines(mortality$x[or],out.tasa[or,1],lwd=2,col=6)
lines(mortality$x[or],out.tasa[or,3],lty=2,col=6)
lines(mortality$x[or],out.tasa[or,7],lty=2,col=6)
#--- Ejemplo 6 ---
# Poisson models for coal-mining disaster counts, including dynamic/
# change-point variants (Ej6a..Ej6d.txt). The second `data` definition
# masks the last 6 observations (NA) for out-of-sample prediction.
#-Reading data-
desastres<-read.table("http://allman.rhon.itam.mx/~lnieto/index_archivos/desastres.txt",header=TRUE)
n<-nrow(desastres)
plot(desastres,type="l")
plot(desastres[2:n,2]-desastres[1:(n-1),2],type="l")
plot(log(desastres[2:n,2])-log(desastres[1:(n-1),2]),type="l")
#-Defining data-
data<-list("n"=n,"y"=desastres$No.Desastres,"x"=desastres$Anho)
data<-list("n"=n,"y"=c(desastres$No.Desastres[1:(n-6)],rep(NA,6)),"x"=desastres$Anho)
#-Defining inits-
# Alternative inits, one per model variant -- run only the matching one.
inits<-function(){list(beta=rep(0,2),yf1=rep(1,n))}
inits<-function(){list(beta=rep(0,2),aux=1,aux2=1,yf1=rep(1,n))}
inits<-function(){list(beta=rep(0,2),aux2=1,yf1=rep(1,n),tau.y=1)}
inits<-function(){list(beta=rep(0,n),tau.b=1,yf1=rep(1,n))}
inits<-function(){list(mu=rep(1,n),tau.b=1,yf1=rep(1,n))}
#-Selecting parameters to monitor-
# Alternative monitor sets, matching the model variants above.
parameters<-c("beta","yf1","mu")
parameters<-c("beta","yf1","mu","tau")
parameters<-c("beta","yf1","mu","tau","tau.y")
parameters<-c("beta","yf1","mu","r")
parameters<-c("beta","yf1","mu","tau","r")
parameters<-c("tau.b","yf1","mu")
#-Running code-
#OpenBUGS
ej6a.sim<-bugs(data,inits,parameters,model.file="Ej6a.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej6bb.sim<-bugs(data,inits,parameters,model.file="Ej6bb.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej6c.sim<-bugs(data,inits,parameters,model.file="Ej6c.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej6d.sim<-bugs(data,inits,parameters,model.file="Ej6d.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
#JAGS
ej6a.sim<-jags(data,inits,parameters,model.file="Ej6a.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej6b.sim<-jags(data,inits,parameters,model.file="Ej6b.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej6c.sim<-jags(data,inits,parameters,model.file="Ej6c.txt",
n.iter=5000,n.chains=1,n.burnin=500)
#-Monitoring chain-
# Trace plot of the chain
# NOTE(review): `ej6.sim` is never defined (only ej6a/ej6b/ej6bb/ej6c/ej6d.sim);
# rename to the fitted object before running. TODO confirm intended model.
traceplot(ej6.sim)
# Raw chain draws
#OpenBUGS
out<-ej6bb.sim$sims.list
#JAGS
out<-ej6a.sim$BUGSoutput$sims.list
z<-out$mu[,1]
par(mfrow=c(2,2))
plot(z,type="l")
plot(cumsum(z)/(1:length(z)),type="l")
hist(z,freq=FALSE)
acf(z)
z<-out$beta
par(mfrow=c(1,1))
plot(z)
# Summary (point/interval estimators)
#OpenBUGS
out.sum<-ej6bb.sim$summary
#JAGS
out.sum<-ej6a.sim$BUGSoutput$summary
# Summary table (posterior mean, 95% interval, sign probability)
out.sum.t<-out.sum[grep("beta",rownames(out.sum)),c(1,3,7)]
out.sum.t<-cbind(out.sum.t,apply(out$beta,2,prob))
dimnames(out.sum.t)[[2]][4]<-"prob"
print(out.sum.t)
#DIC
out.dic<-ej6bb.sim$DIC
out.dic<-ej6a.sim$BUGSoutput$DIC
print(out.dic)
#Predictions
out.yf<-out.sum[grep("yf1",rownames(out.sum)),]
ymin<-min(desastres[,2],out.yf[,c(1,3,7)])
ymax<-max(desastres[,2],out.yf[,c(1,3,7)])
par(mfrow=c(1,1))
plot(desastres,type="l",col="grey80",ylim=c(ymin,ymax))
lines(desastres[,1],out.yf[,1],lwd=2,col=2)
lines(desastres[,1],out.yf[,3],lty=2,col=2)
lines(desastres[,1],out.yf[,7],lty=2,col=2)
lines(desastres[,1],out.yf[,5],lwd=2,col=4)
# Means: posterior band for the Poisson mean mu over time
out.mu<-out.sum[grep("mu",rownames(out.sum)),]
par(mfrow=c(1,1))
plot(desastres,type="l",col="grey80")
lines(desastres[,1],out.mu[,1],lwd=2,col=2)
lines(desastres[,1],out.mu[,3],lty=2,col=2)
lines(desastres[,1],out.mu[,7],lty=2,col=2)
#--- Ejemplo 7 ---
# Milk-production series: static and dynamic regression variants
# (Ej7o/Ej7a..Ej7d.txt), with m=2 step-ahead forecasting; the last `data`
# definition masks the final two responses (NA) for prediction.
#-Reading data-
milk<-read.table("http://allman.rhon.itam.mx/~lnieto/index_archivos/milk.txt",header=TRUE)
milk$t<-1970:1982
n<-nrow(milk)
or<-order(milk$x)
plot(milk$x[or],milk$y[or],type="l")
text(milk$x[or],milk$y[or],labels=milk$t[or],cex=0.5,col=2)
plot(milk$t,milk$y,type="l")
plot(milk$t,milk$x,type="l")
#-Defining data-
# Alternative data definitions: raw, max-scaled, standardized, and
# standardized with the last 2 responses held out. Run one at a time.
m<-2
data<-list("n"=n,"m"=m,"y"=milk$y,"x"=milk$x,"t"=milk$t)
data<-list("n"=n,"m"=m,"y"=milk$y,"x"=milk$x/max(milk$x),"t"=milk$t/max(milk$t))
data<-list("n"=n,"m"=m,"y"=scale(milk$y)[1:n],"x"=scale(milk$x)[1:n],"t"=scale(milk$t)[1:n])
data<-list("n"=n,"m"=m,"y"=c(scale(milk$y)[1:(n-2)],NA,NA),"x"=scale(milk$x)[1:n],"t"=scale(milk$t)[1:n])
#-Defining inits-
# One inits function per model variant -- run only the matching one.
inits<-function(){list(alpha=0,beta=0,tau=1,yf1=rep(1,n))}
inits<-function(){list(beta=rep(0,5),tau=1,yf1=rep(1,n))}
inits<-function(){list(beta=rep(0,n+m),tau.y=1,tau.b=1,yf1=rep(0,n+m))}
inits<-function(){list(beta=rep(0,n+m),tau.y=1,yf1=rep(0,n+m))}
inits<-function(){list(alpha=rep(0,n+m),beta=rep(0,n+m),tau.y=1,tau.b=1,tau.a=1,yf1=rep(0,n+m))}
inits<-function(){list(beta=rep(0,n+m),tau.y=1,tau.b=1,yf1=rep(0,n+m),g=0)}
inits<-function(){list(beta=rep(0,n),tau.y=1,tau.b=1,yf1=rep(0,n),g=1)}
inits<-function(){list(beta=rep(0,n),tau.y=1,tau.b=1,yf1=rep(0,n))}
inits<-function(){list(beta=rep(0,n),yf1=rep(0,n))}
#-Selecting parameters to monitor-
parameters<-c("beta","tau","yf1")
parameters<-c("beta","tau.y","tau.b","yf1","g")
parameters<-c("beta","tau.y","tau.b","yf1")
parameters<-c("alpha","beta","tau.y","tau.b","tau.a","yf1")
#parameters<-c("beta","tau.y","tau.b","yf1")
parameters<-c("beta","yf1")
#-Running code-
#OpenBUGS
ej7o.sim<-bugs(data,inits,parameters,model.file="Ej7o.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej7a.sim<-bugs(data,inits,parameters,model.file="Ej7a.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej7b.sim<-bugs(data,inits,parameters,model.file="Ej7b.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej7c.sim<-bugs(data,inits,parameters,model.file="Ej7c.txt",
n.iter=50000,n.chains=1,n.burnin=5000,debug=TRUE)
ej7d.sim<-bugs(data,inits,parameters,model.file="Ej7d.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
#JAGS
ej7a.sim<-jags(data,inits,parameters,model.file="Ej7a.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej7b.sim<-jags(data,inits,parameters,model.file="Ej7b.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej7c.sim<-jags(data,inits,parameters,model.file="Ej7c.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
#-Monitoring chain-
# Trace plot of the chain
# NOTE(review): `ej7.sim` is never defined (only ej7o/ej7a..ej7d.sim);
# rename to the fitted object before running. TODO confirm intended model.
traceplot(ej7.sim)
# Raw chain draws
#OpenBUGS
out<-ej7d.sim$sims.list
#JAGS
out<-ej7a.sim$BUGSoutput$sims.list
z<-out$beta
par(mfrow=c(2,2))
plot(z,type="l")
plot(cumsum(z)/(1:length(z)),type="l")
hist(z,freq=FALSE)
acf(z)
# Summary (point/interval estimators)
#OpenBUGS
out.sum<-ej7d.sim$summary
#JAGS
out.sum<-ej7a.sim$BUGSoutput$summary
# Summary table (posterior mean, 95% interval, sign probability)
out.sum.t<-out.sum[grep("beta",rownames(out.sum)),c(1,3,7)]
out.sum.t<-cbind(out.sum.t,apply(out$beta,2,prob))
dimnames(out.sum.t)[[2]][4]<-"prob"
print(out.sum.t)
#DIC
out.dic<-ej7d.sim$DIC
out.dic<-ej7a.sim$BUGSoutput$DIC
print(out.dic)
#Predictions
out.yf<-out.sum[grep("yf1",rownames(out.sum)),]
ymin<-min(data$y,out.yf[,c(1,3,7)])
ymax<-max(data$y,out.yf[,c(1,3,7)])
xmin<-min(data$t)
xmax<-max(data$t+m)
#x vs. y
par(mfrow=c(1,1))
plot(data$x,data$y,type="p",col="grey50",ylim=c(ymin,ymax))
points(data$x,out.yf[,1],col=2,pch=16,cex=0.5)
segments(data$x,out.yf[,3],data$x,out.yf[,7],col=2)
#t vs y
# In-sample fit in red; the m forecast steps in blue.
par(mfrow=c(1,1))
plot(data$t,data$y,type="b",col="grey80",ylim=c(ymin,ymax),xlim=c(xmin,xmax))
lines(data$t,out.yf[1:n,1],col=2)
lines(data$t,out.yf[1:n,3],col=2,lty=2)
lines(data$t,out.yf[1:n,7],col=2,lty=2)
lines(data$t[n]:(data$t[n]+m),out.yf[n:(n+m),1],col=4)
lines(data$t[n]:(data$t[n]+m),out.yf[n:(n+m),3],col=4,lty=2)
lines(data$t[n]:(data$t[n]+m),out.yf[n:(n+m),7],col=4,lty=2)
#betas
# Evolution of the (dynamic) regression coefficients with 95% bands.
out.beta<-out.sum[grep("beta",rownames(out.sum)),]
ymin<-min(out.beta[,c(1,3,7)])
ymax<-max(out.beta[,c(1,3,7)])
plot(out.beta[,1],type="l",ylim=c(ymin,ymax))
lines(out.beta[,3],lty=2)
lines(out.beta[,7],lty=2)
#--- Ejemplo 8 ---
# Market-share weekly series with 3 covariates; static vs. time-varying
# coefficient models (Ej8a..Ej8c.txt). Third `data` definition holds out
# the last 4 observations (NA) for forecasting.
#-Reading data-
mercado<-read.table("http://allman.rhon.itam.mx/~lnieto/index_archivos/mercado.txt",header=TRUE)
mercado.ts<-ts(mercado,start=c(1990,1),end=c(1991,52),frequency=52)
n<-nrow(mercado)
mercado$Tiempo<-1:n
plot(mercado.ts)
pairs(mercado)
cor(mercado)
#-Defining data-
data<-list("n"=n,"y"=mercado$SHARE,"x1"=mercado$PRICE,"x2"=mercado$OPROM,"x3"=mercado$CPROM)
data<-list("n"=n,"y"=scale(mercado$SHARE)[1:n],"x1"=scale(mercado$PRICE)[1:n],"x2"=scale(mercado$OPROM)[1:n],"x3"=scale(mercado$CPROM)[1:n])
data<-list("n"=n,"y"=c(mercado$SHARE[1:(n-4)],NA,NA,NA,NA),"x1"=mercado$PRICE,"x2"=mercado$OPROM,"x3"=mercado$CPROM)
#-Defining inits-
# One inits function per model variant -- run only the matching one.
inits<-function(){list(alpha=0,beta=rep(0,3),tau=1,yf1=rep(1,n))}
inits<-function(){list(alpha=rep(0,n),beta=matrix(0,nrow=3,ncol=n),tau=1,tau.a=1,tau.b=rep(1,3),yf1=rep(1,n))}
inits<-function(){list(alpha=0,beta=matrix(0,nrow=3,ncol=n),tau=1,yf1=rep(1,n))}
#-Selecting parameters to monitor-
parameters<-c("alpha","beta","tau","yf1")
#-Running code-
#OpenBUGS
ej8a.sim<-bugs(data,inits,parameters,model.file="Ej8a.txt",
n.iter=10000,n.chains=1,n.burnin=1000)
ej8b.sim<-bugs(data,inits,parameters,model.file="Ej8b.txt",
n.iter=10000,n.chains=1,n.burnin=1000)
ej8c.sim<-bugs(data,inits,parameters,model.file="Ej8c.txt",
n.iter=10000,n.chains=1,n.burnin=1000)
#JAGS
ej8a.sim<-jags(data,inits,parameters,model.file="Ej8a.txt",
n.iter=5000,n.chains=1,n.burnin=500)
ej8b.sim<-jags(data,inits,parameters,model.file="Ej8b.txt",
n.iter=10000,n.chains=1,n.burnin=1000,n.thin=10)
#-Monitoring chain-
# Raw chain draws
#OpenBUGS
out<-ej8c.sim$sims.list
#JAGS
out<-ej8a.sim$BUGSoutput$sims.list
z<-out$alpha
par(mfrow=c(2,2))
plot(z,type="l")
plot(cumsum(z)/(1:length(z)),type="l")
hist(z,freq=FALSE)
acf(z)
# Summary (point/interval estimators)
#OpenBUGS
out.sum<-ej8c.sim$summary
#JAGS
out.sum<-ej8b.sim$BUGSoutput$summary
# Summary table (posterior mean, 95% interval, sign probability)
out.sum.t<-out.sum[grep("beta",rownames(out.sum)),c(1,3,7)]
out.sum.t<-cbind(out.sum.t,apply(out$beta,2,prob))
dimnames(out.sum.t)[[2]][4]<-"prob"
print(out.sum.t)
#DIC
out.dic<-ej8c.sim$DIC
#out.dic<-ej8b.sim$BUGSoutput$DIC
print(out.dic)
#Predictions
out.yf<-out.sum[grep("yf1",rownames(out.sum)),]
y<-data$y
ymin<-min(y,out.yf[,c(1,3,7)])
ymax<-max(y,out.yf[,c(1,3,7)])
#x1 vs. y
x<-data$x1
par(mfrow=c(1,1))
plot(x,y,type="p",col="grey50",ylim=c(ymin,ymax))
points(x,out.yf[,1],col=2,pch=16,cex=0.5)
segments(x,out.yf[,3],x,out.yf[,7],col=2)
#x2 vs. y
x<-data$x2
par(mfrow=c(1,1))
plot(x,y,type="p",col="grey50",ylim=c(ymin,ymax))
points(x,out.yf[,1],col=2,pch=16,cex=0.5)
segments(x,out.yf[,3],x,out.yf[,7],col=2)
#x3 vs. y
x<-data$x3
par(mfrow=c(1,1))
plot(x,y,type="p",col="grey50",ylim=c(ymin,ymax))
points(x,out.yf[,1],col=2,pch=16,cex=0.5)
segments(x,out.yf[,3],x,out.yf[,7],col=2)
#t vs. y
x<-mercado$Tiempo
par(mfrow=c(1,1))
plot(x,y,type="p",col="grey50",ylim=c(ymin,ymax))
points(x,out.yf[,1],col=2,pch=16,cex=0.5)
segments(x,out.yf[,3],x,out.yf[,7],col=2)
par(mfrow=c(1,1))
plot(x,y,type="l",col="grey50",ylim=c(ymin,ymax))
lines(x,out.yf[,1],col=2,cex=0.5)
lines(x,out.yf[,3],col=2,lty=2)
lines(x,out.yf[,7],col=2,lty=2)
#betas
# Time-varying coefficients: rows 1:104, 105:208, 209:312 correspond to
# beta1, beta2, beta3 over the n=104 weeks (grep ordering of the summary).
out.beta<-out.sum[grep("beta",rownames(out.sum)),]
plot(out.beta[1:104,1],type="l")
plot(out.beta[105:208,1],type="l")
plot(out.beta[209:312,1],type="l")
#alpha
out.alpha<-out.sum[grep("alpha",rownames(out.sum)),]
plot(out.alpha[,1],type="l")
#--- Ejemplo 9 ---
# Leukemia counts with population offsets: independent, exchangeable
# (hierarchical) and covariate models (Ej9a..Ej9d.txt).
#-Reading data-
leucemia<-read.table("http://allman.rhon.itam.mx/~lnieto/index_archivos/leucemia.txt",header=TRUE)
n<-nrow(leucemia)
par(mfrow=c(2,2))
plot(leucemia$Obs)
plot(leucemia$Obs/leucemia$Pops*10000)
plot(leucemia$Obs/leucemia$Esp)
abline(h=1,col=2)
#-Defining data-
# ne = exposure in units of 10,000 inhabitants.
data<-list("n"=n,"y"=leucemia$Obs,"ne"=leucemia$Pops/10000)
data<-list("n"=n,"y"=leucemia$Obs,"ne"=leucemia$Pops/10000,"C"=leucemia$Cancer,"P"=leucemia$Place,"A"=leucemia$Age)
#-Defining inits-
# One inits function per model variant -- run only the matching one.
inits<-function(){list(theta=1,yf1=rep(1,n))}
inits<-function(){list(theta=rep(1,n),yf1=rep(1,n))}
inits<-function(){list(theta=rep(1,n),a=1,b=1,yf1=rep(1,n))}
inits<-function(){list(alpha=0,beta=rep(0,2),gama=rep(0,2),delta=rep(0,2),yf1=rep(1,n))}
#-Selecting parameters to monitor-
parameters<-c("theta","yf1")
parameters<-c("theta","eta","yf1")
parameters<-c("alpha.adj","beta.adj","gama.adj","delta.adj","yf1")
#-Running code-
#OpenBUGS
ej9a.sim<-bugs(data,inits,parameters,model.file="Ej9a.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej9b.sim<-bugs(data,inits,parameters,model.file="Ej9b.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej9c.sim<-bugs(data,inits,parameters,model.file="Ej9c.txt",
n.iter=100000,n.chains=1,n.burnin=10000)
ej9d.sim<-bugs(data,inits,parameters,model.file="Ej9d.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
#JAGS
ej9a.sim<-jags(data,inits,parameters,model.file="Ej9a.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej9b.sim<-jags(data,inits,parameters,model.file="Ej9b.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej9c.sim<-jags(data,inits,parameters,model.file="Ej9c.txt",
n.iter=100000,n.chains=1,n.burnin=10000)
ej9d.sim<-jags(data,inits,parameters,model.file="Ej9d.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
#-Monitoring chain-
# Trace plot of the chain
# NOTE(review): `ej9.sim` is never defined (only ej9a..ej9d.sim);
# rename to the fitted object before running. TODO confirm.
traceplot(ej9.sim)
# Raw chain draws
#OpenBUGS
outa<-ej9a.sim$sims.list
outb<-ej9b.sim$sims.list
outc<-ej9c.sim$sims.list
outd<-ej9d.sim$sims.list
#JAGS
outa<-ej9a.sim$BUGSoutput$sims.list
outb<-ej9b.sim$BUGSoutput$sims.list
outc<-ej9c.sim$BUGSoutput$sims.list
# NOTE(review): the next line overwrites outc with model d's draws --
# it presumably should assign to `outd`; verify before use.
outc<-ej9d.sim$BUGSoutput$sims.list
z<-outa$theta
par(mfrow=c(2,2))
plot(z,type="l")
plot(cumsum(z)/(1:length(z)),type="l")
hist(z,freq=FALSE)
acf(z)
# Summary (point/interval estimators)
#OpenBUGS
outa.sum<-ej9a.sim$summary
outb.sum<-ej9b.sim$summary
outc.sum<-ej9c.sim$summary
outd.sum<-ej9d.sim$summary
#JAGS
outa.sum<-ej9a.sim$BUGSoutput$summary
outb.sum<-ej9b.sim$BUGSoutput$summary
outc.sum<-ej9c.sim$BUGSoutput$summary
outd.sum<-ej9d.sim$BUGSoutput$summary
# Summary table (choose which model feeds the table via out / out.sum)
out<-outb
out.sum<-outb.sum
out.sum.t<-out.sum[grep("theta",rownames(out.sum)),c(1,3,7)]
out.sum.t<-cbind(out.sum.t,apply(out$theta,2,prob))
dimnames(out.sum.t)[[2]][4]<-"prob"
print(out.sum.t)
#DIC
#OpenBUGS
outa.dic<-ej9a.sim$DIC
outb.dic<-ej9b.sim$DIC
outc.dic<-ej9c.sim$DIC
outd.dic<-ej9d.sim$DIC
#JAGS
outa.dic<-ej9a.sim$BUGSoutput$DIC
outb.dic<-ej9b.sim$BUGSoutput$DIC
outc.dic<-ej9c.sim$BUGSoutput$DIC
outd.dic<-ej9d.sim$BUGSoutput$DIC
print(outa.dic)
print(outb.dic)
print(outc.dic)
print(outd.dic)
# Estimates: per-unit rates under each model, plus the hierarchical mean eta
outa.p<-outa.sum[grep("theta",rownames(outa.sum)),]
outb.p<-outb.sum[grep("theta",rownames(outb.sum)),]
outc.p<-outc.sum[grep("theta",rownames(outc.sum)),]
outc.eta<-outc.sum[grep("eta",rownames(outc.sum)),]
#x vs. y
# Compare observed rates per 10,000 with model estimates; the global rate
# and pooled/hierarchical estimators are drawn at the right margin.
xmin<-0
xmax<-10
ymin<-0
ymax<-5
par(mfrow=c(1,1))
plot(leucemia$Obs/leucemia$Pops*10000,type="p",col="grey50",xlim=c(xmin,xmax),ylim=c(ymin,ymax))
#
out.p<-outb.p
points(out.p[,1],col=2,pch=16,cex=0.5)
segments(1:8,out.p[,3],1:8,out.p[,7],col=2)
#
out.p<-outc.p
points((1:8)+0.2,out.p[,1],col=4,pch=16,cex=0.5)
segments((1:8)+0.2,out.p[,3],(1:8)+0.2,out.p[,7],col=4)
#
points(xmax-0.2,sum(leucemia$Obs)/sum(leucemia$Pops)*10000)
#
out.p<-outa.p
points(xmax-0.2,out.p[1],col=3,pch=16,cex=0.5)
segments(xmax-0.2,out.p[3],xmax-0.2,out.p[7],col=3)
#
out.p<-outc.eta
points(xmax,out.p[1],col=4,pch=16,cex=0.5)
segments(xmax,out.p[3],xmax,out.p[7],col=4)
#--- Ejemplo 10 ---
# Claim proportions (binomial): pooled, independent and hierarchical
# models (Ej10a..Ej10c.txt).
#-Reading data-
reclama<-read.table("http://allman.rhon.itam.mx/~lnieto/index_archivos/reclama.txt",header=TRUE)
n<-nrow(reclama)
par(mfrow=c(2,2))
plot(reclama$r)
plot(reclama$n,ylim=c(0,max(reclama$n)))
plot(reclama$r/reclama$n)
#-Defining data-
data<-list("n"=n,"y"=reclama$r,"ne"=reclama$n)
#-Defining inits-
# One inits function per model variant -- run only the matching one.
inits<-function(){list(p=0.5,yf1=rep(1,n))}
inits<-function(){list(p=rep(0.5,n),yf1=rep(1,n))}
inits<-function(){list(p=rep(0.5,n),a=1,b=1,yf1=rep(1,n))}
#-Selecting parameters to monitor-
parameters<-c("p","yf1")
parameters<-c("p","eta","yf1")
#-Running code-
#OpenBUGS
ej10a.sim<-bugs(data,inits,parameters,model.file="Ej10a.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej10b.sim<-bugs(data,inits,parameters,model.file="Ej10b.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej10c.sim<-bugs(data,inits,parameters,model.file="Ej10c.txt",
n.iter=100000,n.chains=1,n.burnin=10000)
#JAGS
ej10a.sim<-jags(data,inits,parameters,model.file="Ej10a.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej10b.sim<-jags(data,inits,parameters,model.file="Ej10b.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej10c.sim<-jags(data,inits,parameters,model.file="Ej10c.txt",
n.iter=100000,n.chains=1,n.burnin=10000)
#-Monitoring chain-
# Trace plot of the chain
# NOTE(review): `ej10.sim` is never defined (only ej10a/ej10b/ej10c.sim);
# rename to the fitted object before running. TODO confirm.
traceplot(ej10.sim)
# Raw chain draws
#OpenBUGS
outa<-ej10a.sim$sims.list
outb<-ej10b.sim$sims.list
outc<-ej10c.sim$sims.list
#JAGS
outa<-ej10a.sim$BUGSoutput$sims.list
outb<-ej10b.sim$BUGSoutput$sims.list
outc<-ej10c.sim$BUGSoutput$sims.list
z<-outb$p[,2]
par(mfrow=c(2,2))
plot(z,type="l")
plot(cumsum(z)/(1:length(z)),type="l")
hist(z,freq=FALSE)
acf(z)
# Summary (point/interval estimators)
#OpenBUGS
outa.sum<-ej10a.sim$summary
outb.sum<-ej10b.sim$summary
outc.sum<-ej10c.sim$summary
#JAGS
outa.sum<-ej10a.sim$BUGSoutput$summary
outb.sum<-ej10b.sim$BUGSoutput$summary
outc.sum<-ej10c.sim$BUGSoutput$summary
# Summary table
# NOTE(review): unlike Ejemplo 9, `out` and `out.sum` are never assigned in
# this section -- set e.g. out<-outb; out.sum<-outb.sum before these lines.
out.sum.t<-out.sum[grep("p",rownames(out.sum)),c(1,3,7)]
out.sum.t<-cbind(out.sum.t,apply(out$p,2,prob))
dimnames(out.sum.t)[[2]][4]<-"prob"
print(out.sum.t)
#DIC
#OpenBUGS
outa.dic<-ej10a.sim$DIC
outb.dic<-ej10b.sim$DIC
outc.dic<-ej10c.sim$DIC
#JAGS
outa.dic<-ej10a.sim$BUGSoutput$DIC
outb.dic<-ej10b.sim$BUGSoutput$DIC
outc.dic<-ej10c.sim$BUGSoutput$DIC
print(outa.dic)
print(outb.dic)
print(outc.dic)
# Estimates: claim probabilities per group under each model
outa.p<-outa.sum[grep("p",rownames(outa.sum)),]
outb.p<-outb.sum[grep("p",rownames(outb.sum)),]
outc.p<-outc.sum[grep("p",rownames(outc.sum)),]
outc.eta<-outc.sum[grep("eta",rownames(outc.sum)),]
#x vs. y
# Observed proportions vs. model estimates; pooled/hierarchical estimators
# are drawn at the right margin.
xmin<-0
xmax<-12
ymin<-0
ymax<-1
par(mfrow=c(1,1))
plot(reclama$r/reclama$n,type="p",col="grey50",xlim=c(xmin,xmax),ylim=c(ymin,ymax))
#
out.p<-outb.p
points(out.p[,1],col=2,pch=16,cex=0.5)
segments(1:10,out.p[,3],1:10,out.p[,7],col=2)
#
out.p<-outc.p
points((1:10)+0.2,out.p[,1],col=4,pch=16,cex=0.5)
segments((1:10)+0.2,out.p[,3],(1:10)+0.2,out.p[,7],col=4)
#
points(xmax-0.2,sum(reclama$r)/sum(reclama$n))
#
out.p<-outa.p
points(xmax-0.2,out.p[1],col=3,pch=16,cex=0.5)
segments(xmax-0.2,out.p[3],xmax-0.2,out.p[7],col=3)
#
out.p<-outc.eta
points(xmax,out.p[1],col=4,pch=16,cex=0.5)
segments(xmax,out.p[3],xmax,out.p[7],col=4)
# ---- Dataset-join residue (source-file metadata), converted to comments ----
# path: /cRA1.R
# license: no_license
# repo: abraham314/GLM2018
# language: R
# is_vendor: false; is_generated: false
# length_bytes: 30362
### ----- ADVANCED REGRESSION (Regresion Avanzada) ----- ###
# --- Prof. Luis E. Nieto Barajas --- #
#--- Use the ITAM CRAN mirror ---
options(repos="http://cran.itam.mx/")
#--- Utility functions ---
prob<-function(x){
  # Two-sided sign probability for a vector of posterior draws:
  # the smaller of the empirical fractions of positive and negative values
  # (elements equal to zero count toward neither side).
  frac_pos<-length(x[x>0])/length(x)
  frac_neg<-length(x[x<0])/length(x)
  min(frac_pos,frac_neg)
}
#--- Illustration of the inference process ---
#-Normal-normal learning process-
# Conjugate update: prior N(th0, sig20), likelihood N(xbar, sig2/n),
# posterior N(th1, sig21); all three densities are overlaid.
xbar<-40.9533
sig2<-4
n<-3
th0<-39
sig20<-219.47
y<-seq(35,45,length.out=200)
f0y<-dnorm(y,th0,sqrt(sig20))
liky<-dnorm(y,xbar,sqrt(sig2/n))
sig21<-1/(n/sig2+1/sig20)
th1<-sig21*(n/sig2*xbar+th0/sig20)
f1y<-dnorm(y,th1,sqrt(sig21))
ymax<-max(f0y,liky,f1y)
plot(y,f0y,ylim=c(0,ymax),type="l")
lines(y,liky,lty=2,col=2)
lines(y,f1y,lty=3,col=3)
#-Bernoulli-beta learning process-
# Simulate Bernoulli data (results vary per run; no seed is set).
theta0 <- 0.6
n <- 150
x<-rbinom(n,1,theta0)
hist(x,freq=FALSE)
# Prior distribution for theta: Beta(a, b)
a <- 1
b <- 1
theta<-seq(0,1,,100)
plot(theta,dbeta(theta,a,b),type="l")
# Posterior distribution: Beta(a + sum(x), b + n - sum(x))
a1 <- a + sum(x)
b1 <- b + n - sum(x)
plot(theta,dbeta(theta,a1,b1),type="l")
# Both densities, with the true theta0 as a vertical line
theta<-seq(0,1,,100)
ymax <- max(dbeta(theta,a,b),dbeta(theta,a1,b1))
plot(theta,dbeta(theta,a,b),type="l",ylim=c(0,ymax))
lines(theta,dbeta(theta,a1,b1),col=2)
abline(v=theta0,col=4)
# Asymptotic normal approximation centered at the posterior mode
mu <- (a1-1)/(a1+b1-2)
sig2 <- (a1-1)*(b1-1)/(a1+b1-2)^3
lines(theta,dnorm(theta,mu,sqrt(sig2)),col=3)
# --- Monte Carlo approximation (importance sampling) ---
#-Ejemplo 1-
x<-seq(-2,4,,1000)
f<-function(x){
  # Integrand for the Monte Carlo example: the inverted parabola
  # 5 - (x - 1)^2 on [-1, 3], and 0 outside that interval. Vectorized.
  parab <- 5-(x-1)^2
  outside <- (x < -1) | (x > 3)
  ifelse(outside, 0, parab)
}
# Normalized target (integral of f over [-1,3] is 44/3) and four candidate
# importance-sampling densities S.
plot(x,f(x)*3/44,type="l",ylim=c(0,0.5))
lines(x,dnorm(x,0,1),lty=2,col=2)
lines(x,dnorm(x,1,2/3),lty=3,col=3)
lines(x,dnorm(x,1,1),lty=4,col=4)
lines(x,dnorm(x,1,2),lty=5,col=5)
N<-100000
# Case 1: S = standard normal. Each case prints the importance-sampling
# estimate of the integral and its Monte Carlo standard error.
mu<-0
sig<-1
#N<-10000
y<-rnorm(N,mu,sig)
I1<-mean(f(y)/dnorm(y,mu,sig))
eI1<-sd(f(y)/dnorm(y,0,1))/sqrt(N)
print(c(I1,eI1))
# Case 2: S = non-standard normal (mean 1, sd 2/3)
mu<-1
sig<-2/3
#N<-10000
y<-rnorm(N,mu,sig)
I2<-mean(f(y)/dnorm(y,mu,sig))
eI2<-sd(f(y)/dnorm(y,mu,sig))/sqrt(N)
print(c(I2,eI2))
# Case 3: S = non-standard normal (mean 1, sd 1)
mu<-1
sig<-1
#N<-10000
y<-rnorm(N,mu,sig)
I3<-mean(f(y)/dnorm(y,mu,sig))
eI3<-sd(f(y)/dnorm(y,mu,sig))/sqrt(N)
print(c(I3,eI3))
# Case 4: S = non-standard normal (mean 1, sd 2)
mu<-1
sig<-2
#N<-10000
y<-rnorm(N,mu,sig)
I4<-mean(f(y)/dnorm(y,mu,sig))
eI4<-sd(f(y)/dnorm(y,mu,sig))/sqrt(N)
print(c(I4,eI4))
#-Ejemplo 2-
f<-function(x){
  # Integrand for Monte Carlo example 2: f(x) = x on [0, 1], 0 elsewhere.
  # Vectorized over x.
  clipped<-ifelse(x<0,0,x)
  ifelse(x>1,0,clipped)
}
# Importance-sampling estimates of the integral of f (true value 1/2)
# under three candidate densities; each case prints estimate and MC error.
x<-seq(-1,2,,100)
plot(x,f(x),type="l",ylim=c(0,1.2))
N<-1000000
# Case 1: S = Uniform(0, 1)
lines(x,dunif(x,0,1),col=2,lty=2)
y<-runif(N,0,1)
I1<-mean(f(y)/dunif(y,0,1))
eI1<-sd(f(y)/dunif(y,0,1))/sqrt(N)
print(c(I1,eI1))
# Case 2: S = Exponential(1)
lines(x,dexp(x,1),col=3,lty=3)
y<-rexp(N,1)
I2<-mean(f(y)/dexp(y,1))
eI2<-sd(f(y)/dexp(y,1))/sqrt(N)
print(c(I2,eI2))
# Case 3: S = Normal(0.5, 1/3)
lines(x,dnorm(x,0.5,1/3),col=4,lty=4)
y<-rnorm(N,0.5,1/3)
I3<-mean(f(y)/dnorm(y,0.5,1/3))
eI3<-sd(f(y)/dnorm(y,0.5,1/3))/sqrt(N)
print(c(I3,eI3))
#-Gibbs sampler demo (bivariate normal, from package bayesm)-
install.packages("bayesm")
library(bayesm)
out<-rbiNormGibbs(rho=0.95)
out<-rbiNormGibbs(rho=-0.5)
###########################################
# BUGS/JAGS interface setup. NOTE: install.packages() and setwd() have
# machine-level side effects; adjust wdir to the local model-file folder.
install.packages("R2OpenBUGS")
install.packages("R2jags")
library(R2OpenBUGS)
library(R2jags)
#-Working directory-
wdir<-"c:/temp/RegAva/"
setwd(wdir)
#--- Ejemplo 1---
# Bernoulli credit data; three alternative data/init scenarios
# (run one at a time -- the last assignment wins). Model file: Ej1.txt.
#-Reading data-
n<-10
credito<-c(rep(1,n/2),rep(0,n/2))
credito<-c(rep(1,n*0.9),rep(0,n*0.1))
credito<-c(rep(0,n*0.9),rep(1,n*0.1))
#-Defining data-
data<-list("n"=n,"x"=credito)
#-Defining inits-
inits<-function(){list(theta=0.9999,x1=rep(0,2))}
inits<-function(){list(lambda=0)}
inits<-function(){list(theta=0.5,eta=1)}
#-Selecting parameters to monitor-
parameters<-c("theta","x1")
parameters<-c("theta","eta")
#-Running code-
#OpenBUGS
ej1.sim<-bugs(data,inits,parameters,model.file="Ej1.txt",
n.iter=5000,n.chains=1,n.burnin=0)
#JAGS
ej1.sim<-jags(data,inits,parameters,model.file="Ej1.txt",
n.iter=5000,n.chains=1,n.burnin=500,n.thin=1)
#-Monitoring chain-
# Trace plot of the chain
traceplot(ej1.sim)
# Raw chain draws
#OpenBUGS
out<-ej1.sim$sims.list
#JAGS
out<-ej1.sim$BUGSoutput$sims.list
z<-out$theta
par(mfrow=c(2,2))
plot(z,type="l")
plot(cumsum(z)/(1:length(z)),type="l")
hist(z,freq=FALSE)
acf(z)
# Summary (point/interval estimators)
#OpenBUGS
out.sum<-ej1.sim$summary
#JAGS
out.sum<-ej1.sim$BUGSoutput$summary
print(out.sum)
#DIC
#OpenBUGS
out.dic<-ej1.sim$DIC
#JAGS
out.dic<-ej1.sim$BUGSoutput$DIC
print(out.dic)
#---------------------#
# Mixture of beta densities: 0.3*Beta(10,10) + 0.7*Beta(5,0.05)
w<-seq(0.01,0.99,,100)
pp<-0.3
fw<-pp*dbeta(w,10,10)+(1-pp)*dbeta(w,5,0.05)
par(mfrow=c(1,1))
plot(w,fw,type="l")
#--- Ejemplo 2---
# Normal model for profit data; model file Ej2.txt (same section as the
# duplicate earlier in this file).
#-Reading data-
utilidad<-c(212, 207, 210,
196, 223, 193,
196, 210, 202, 221)
n<-length(utilidad)
#-Defining data-
data<-list("n"=n,"x"=utilidad)
#-Defining inits-
inits<-function(){list(mu=0,sig=1,x1=0)}
#-Selecting parameters to monitor-
parameters<-c("mu","sig","x1")
#-Running code-
#OpenBUGS
ej2.sim<-bugs(data,inits,parameters,model.file="Ej2.txt",
n.iter=5000,n.chains=1,n.burnin=500)
#JAGS
ej2.sim<-jags(data,inits,parameters,model.file="Ej2.txt",
n.iter=5000,n.chains=1,n.burnin=500)
#-Monitoring chain-
# Trace plot of the chain
traceplot(ej2.sim)
# Raw chain draws
#OpenBUGS
out<-ej2.sim$sims.list
#JAGS
out<-ej2.sim$BUGSoutput$sims.list
z<-out$x1
par(mfrow=c(2,2))
plot(z,type="l")
plot(cumsum(z)/(1:length(z)),type="l")
hist(z,freq=FALSE)
acf(z)
# Summary (point/interval estimators)
#OpenBUGS
out.sum<-ej2.sim$summary
#JAGS
out.sum<-ej2.sim$BUGSoutput$summary
print(out.sum)
#DIC
#OpenBUGS
out.dic<-ej2.sim$DIC
#JAGS
out.dic<-ej2.sim$BUGSoutput$DIC
print(out.dic)
#--- Ejemplo 3 ---
# Linear regression of grades (SP on MO); models Ej3/Ej3a/Ej3b.txt.
#-Reading data-
calif<-read.table("http://allman.rhon.itam.mx/~lnieto/index_archivos/calificaciones.txt",header=TRUE)
n<-nrow(calif)
plot(calif$MO,calif$SP)
#-Defining data-
data<-list("n"=n,"y"=calif$SP,"x"=calif$MO)
#-Defining inits-
# Alternative inits: run ONE at a time (beta length 2 vs. 6).
inits<-function(){list(beta=rep(0,2),tau=1,yf=rep(0,n))}
inits<-function(){list(beta=rep(0,6),tau=1,yf=rep(0,n))}
#-Selecting parameters to monitor-
parameters<-c("beta","tau","yf")
#-Running code-
#OpenBUGS
ej3.sim<-bugs(data,inits,parameters,model.file="Ej3.txt",
n.iter=10000,n.chains=1,n.burnin=1000)
ej3a.sim<-bugs(data,inits,parameters,model.file="Ej3a.txt",
n.iter=100000,n.chains=1,n.burnin=10000,n.thin=5)
ej3b.sim<-bugs(data,inits,parameters,model.file="Ej3b.txt",
n.iter=10000,n.chains=1,n.burnin=1000)
#JAGS
ej3.sim<-jags(data,inits,parameters,model.file="Ej3.txt",
n.iter=10000,n.chains=1,n.burnin=1000,n.thin=1)
ej3b.sim<-jags(data,inits,parameters,model.file="Ej3b.txt",
n.iter=10000,n.chains=1,n.burnin=1000,n.thin=1)
#-Monitoring chain-
# Trace plot of the chain
traceplot(ej3.sim)
# Raw chain draws (pick the fitted model / engine actually run)
#OpenBUGS
out<-ej3.sim$sims.list
out<-ej3b.sim$sims.list
#JAGS
out<-ej3.sim$BUGSoutput$sims.list
out<-ej3b.sim$BUGSoutput$sims.list
z<-out$beta[,2]
par(mfrow=c(2,2))
plot(z,type="l")
plot(cumsum(z)/(1:length(z)),type="l")
hist(z,freq=FALSE)
acf(z)
z<-out$beta
par(mfrow=c(1,1))
plot(z)
z<-out$beta
pairs(z)
# Summary (point/interval estimators)
#OpenBUGS
out.sum<-ej3.sim$summary
out.sum<-ej3b.sim$summary
#JAGS
out.sum<-ej3.sim$BUGSoutput$summary
# Summary table (posterior mean, 95% interval, sign probability via prob())
out.sum.t<-out.sum[grep("beta",rownames(out.sum)),c(1,3,7)]
out.sum.t<-cbind(out.sum.t,apply(out$beta,2,prob))
dimnames(out.sum.t)[[2]][4]<-"prob"
print(out.sum.t)
#DIC
out.dic<-ej3b.sim$DIC
out.dic<-ej3.sim$BUGSoutput$DIC
print(out.dic)
#Predictions
# Posterior predictive band over the ordered covariate.
out.yf<-out.sum[grep("yf",rownames(out.sum)),]
or<-order(calif$MO)
ymin<-min(calif$SP,out.yf[,c(1,3,7)])
ymax<-max(calif$SP,out.yf[,c(1,3,7)])
par(mfrow=c(1,1))
plot(calif$MO,calif$SP,ylim=c(ymin,ymax))
lines(calif$MO[or],out.yf[or,1],lwd=2,col=2)
lines(calif$MO[or],out.yf[or,3],lty=2,col=2)
lines(calif$MO[or],out.yf[or,7],lty=2,col=2)
plot(calif$SP,out.yf[,1])
# Pseudo-R^2: squared correlation between observed and fitted values.
R2<-(cor(calif$SP,out.yf[,1]))^2
print(R2)
#--- Ejemplo 4 ---
#TAREA (homework -- intentionally left blank)
#--- Ejemplo 5 ---
# Binomial/Poisson regression for mortality counts; models Ej5a..Ej5d.txt,
# with one out-of-sample prediction point (nef exposures at dose xf).
#-Reading data-
mortality<-read.table("http://allman.rhon.itam.mx/~lnieto/index_archivos/mortality.txt",header=TRUE)
n<-nrow(mortality)
plot(mortality)
plot(mortality$x,mortality$y/mortality$n)
m<-1
nef<-c(100)
xf<-c(200)
#-Defining data-
data<-list("n"=n,"ne"=mortality$n,"y"=mortality$y,"x"=mortality$x,"m"=m,"nef"=nef,"xf"=xf)
#-Defining inits-
inits<-function(){list(beta=rep(0,2),yf1=rep(1,n),yf2=1)}
#-Selecting parameters to monitor-
# "lambda" for the Poisson-rate models, "p" for the binomial ones.
parameters<-c("beta","lambda","yf1","yf2")
parameters<-c("beta","p","yf1","yf2")
#-Running code-
#OpenBUGS
ej5a.sim<-bugs(data,inits,parameters,model.file="Ej5a.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej5aa.sim<-bugs(data,inits,parameters,model.file="Ej5aa.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej5b.sim<-bugs(data,inits,parameters,model.file="Ej5b.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej5c.sim<-bugs(data,inits,parameters,model.file="Ej5c.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej5d.sim<-bugs(data,inits,parameters,model.file="Ej5d.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
#JAGS
ej5a.sim<-jags(data,inits,parameters,model.file="Ej5a.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej5b.sim<-jags(data,inits,parameters,model.file="Ej5b.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej5c.sim<-jags(data,inits,parameters,model.file="Ej5c.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
#-Monitoring chain-
# Trace plot of the chain
# NOTE(review): `ej5.sim` is never defined (only ej5a/ej5aa/ej5b/ej5c/ej5d.sim);
# rename to the fitted object before running. TODO confirm intended model.
traceplot(ej5.sim)
# Raw chain draws
#OpenBUGS
out<-ej5c.sim$sims.list
#JAGS
# NOTE(review): `ej5.sim` is undefined here as well; use e.g. ej5c.sim.
out<-ej5.sim$BUGSoutput$sims.list
z<-out$beta[,2]
par(mfrow=c(2,2))
plot(z,type="l")
plot(cumsum(z)/(1:length(z)),type="l")
hist(z,freq=FALSE)
acf(z)
z<-out$beta
par(mfrow=c(1,1))
plot(z)
# Summary (point/interval estimators)
#OpenBUGS
out.sum<-ej5c.sim$summary
#JAGS
out.sum<-ej5.sim$BUGSoutput$summary
# Summary table (posterior mean, 95% interval, sign probability)
out.sum.t<-out.sum[grep("beta",rownames(out.sum)),c(1,3,7)]
out.sum.t<-cbind(out.sum.t,apply(out$beta,2,prob))
dimnames(out.sum.t)[[2]][4]<-"prob"
print(out.sum.t)
#DIC
out.dic<-ej5c.sim$DIC
out.dic<-ej5.sim$BUGSoutput$DIC
print(out.dic)
#Predictions
# Predictive bands; re-run the lines for each fitted model, changing only col=.
out.yf<-out.sum[grep("yf1",rownames(out.sum)),]
or<-order(mortality$x)
ymin<-min(mortality$y,out.yf[,c(1,3,7)])
ymax<-max(mortality$y,out.yf[,c(1,3,7)])
par(mfrow=c(1,1))
plot(mortality$x,mortality$y,ylim=c(ymin,ymax))
# Model 1
lines(mortality$x[or],out.yf[or,1],lwd=2,col=2)
lines(mortality$x[or],out.yf[or,3],lty=2,col=2)
lines(mortality$x[or],out.yf[or,7],lty=2,col=2)
# Model 2
lines(mortality$x[or],out.yf[or,1],lwd=2,col=3)
lines(mortality$x[or],out.yf[or,3],lty=2,col=3)
lines(mortality$x[or],out.yf[or,7],lty=2,col=3)
# Model 3
lines(mortality$x[or],out.yf[or,1],lwd=2,col=4)
lines(mortality$x[or],out.yf[or,3],lty=2,col=4)
lines(mortality$x[or],out.yf[or,7],lty=2,col=4)
# Model 4
lines(mortality$x[or],out.yf[or,1],lwd=2,col=5)
lines(mortality$x[or],out.yf[or,3],lty=2,col=5)
lines(mortality$x[or],out.yf[or,7],lty=2,col=5)
# Model 5
lines(mortality$x[or],out.yf[or,1],lwd=2,col=6)
lines(mortality$x[or],out.yf[or,3],lty=2,col=6)
lines(mortality$x[or],out.yf[or,7],lty=2,col=6)
plot(mortality$y,out.yf[,1])
abline(a=0,b=1)
cor(mortality$y,out.yf[,1])
# Rate estimates ("lambda" for Poisson models, "p" for binomial models)
out.tasa<-out.sum[grep("lambda",rownames(out.sum)),]
out.tasa<-out.sum[grep("p",rownames(out.sum)),]
or<-order(mortality$x)
ymin<-min(mortality$y/mortality$n,out.tasa[,c(1,3,7)])
ymax<-max(mortality$y/mortality$n,out.tasa[,c(1,3,7)])
par(mfrow=c(1,1))
plot(mortality$x,mortality$y/mortality$n,ylim=c(ymin,ymax))
#Modelo 1
lines(mortality$x[or],out.tasa[or,1],lwd=2,col=2)
lines(mortality$x[or],out.tasa[or,3],lty=2,col=2)
lines(mortality$x[or],out.tasa[or,7],lty=2,col=2)
#Modelo 2
lines(mortality$x[or],out.tasa[or,1],lwd=2,col=3)
lines(mortality$x[or],out.tasa[or,3],lty=2,col=3)
lines(mortality$x[or],out.tasa[or,7],lty=2,col=3)
#Modelo 3
lines(mortality$x[or],out.tasa[or,1],lwd=2,col=4)
lines(mortality$x[or],out.tasa[or,3],lty=2,col=4)
lines(mortality$x[or],out.tasa[or,7],lty=2,col=4)
#Modelo 4
lines(mortality$x[or],out.tasa[or,1],lwd=2,col=5)
lines(mortality$x[or],out.tasa[or,3],lty=2,col=5)
lines(mortality$x[or],out.tasa[or,7],lty=2,col=5)
#Modelo 5
lines(mortality$x[or],out.tasa[or,1],lwd=2,col=6)
lines(mortality$x[or],out.tasa[or,3],lty=2,col=6)
lines(mortality$x[or],out.tasa[or,7],lty=2,col=6)
#--- Ejemplo 6 ---
#-Reading data-
desastres<-read.table("http://allman.rhon.itam.mx/~lnieto/index_archivos/desastres.txt",header=TRUE)
n<-nrow(desastres)
plot(desastres,type="l")
plot(desastres[2:n,2]-desastres[1:(n-1),2],type="l")
plot(log(desastres[2:n,2])-log(desastres[1:(n-1),2]),type="l")
#-Defining data-
data<-list("n"=n,"y"=desastres$No.Desastres,"x"=desastres$Anho)
data<-list("n"=n,"y"=c(desastres$No.Desastres[1:(n-6)],rep(NA,6)),"x"=desastres$Anho)
#-Defining inits-
inits<-function(){list(beta=rep(0,2),yf1=rep(1,n))}
inits<-function(){list(beta=rep(0,2),aux=1,aux2=1,yf1=rep(1,n))}
inits<-function(){list(beta=rep(0,2),aux2=1,yf1=rep(1,n),tau.y=1)}
inits<-function(){list(beta=rep(0,n),tau.b=1,yf1=rep(1,n))}
inits<-function(){list(mu=rep(1,n),tau.b=1,yf1=rep(1,n))}
#-Selecting parameters to monitor-
parameters<-c("beta","yf1","mu")
parameters<-c("beta","yf1","mu","tau")
parameters<-c("beta","yf1","mu","tau","tau.y")
parameters<-c("beta","yf1","mu","r")
parameters<-c("beta","yf1","mu","tau","r")
parameters<-c("tau.b","yf1","mu")
#-Running code-
#OpenBUGS
ej6a.sim<-bugs(data,inits,parameters,model.file="Ej6a.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej6bb.sim<-bugs(data,inits,parameters,model.file="Ej6bb.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej6c.sim<-bugs(data,inits,parameters,model.file="Ej6c.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej6d.sim<-bugs(data,inits,parameters,model.file="Ej6d.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
#JAGS
ej6a.sim<-jags(data,inits,parameters,model.file="Ej6a.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej6b.sim<-jags(data,inits,parameters,model.file="Ej6b.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej6c.sim<-jags(data,inits,parameters,model.file="Ej6c.txt",
n.iter=5000,n.chains=1,n.burnin=500)
#-Monitoring chain-
#Traza de la cadena
traceplot(ej6.sim)
#Cadena
#OpenBUGS
out<-ej6bb.sim$sims.list
#JAGS
out<-ej6a.sim$BUGSoutput$sims.list
z<-out$mu[,1]
par(mfrow=c(2,2))
plot(z,type="l")
plot(cumsum(z)/(1:length(z)),type="l")
hist(z,freq=FALSE)
acf(z)
z<-out$beta
par(mfrow=c(1,1))
plot(z)
#Resumen (estimadores)
#OpenBUGS
out.sum<-ej6bb.sim$summary
#JAGS
out.sum<-ej6a.sim$BUGSoutput$summary
#Tabla resumen
out.sum.t<-out.sum[grep("beta",rownames(out.sum)),c(1,3,7)]
out.sum.t<-cbind(out.sum.t,apply(out$beta,2,prob))
dimnames(out.sum.t)[[2]][4]<-"prob"
print(out.sum.t)
#DIC
out.dic<-ej6bb.sim$DIC
out.dic<-ej6a.sim$BUGSoutput$DIC
print(out.dic)
#Predictions
out.yf<-out.sum[grep("yf1",rownames(out.sum)),]
ymin<-min(desastres[,2],out.yf[,c(1,3,7)])
ymax<-max(desastres[,2],out.yf[,c(1,3,7)])
par(mfrow=c(1,1))
plot(desastres,type="l",col="grey80",ylim=c(ymin,ymax))
lines(desastres[,1],out.yf[,1],lwd=2,col=2)
lines(desastres[,1],out.yf[,3],lty=2,col=2)
lines(desastres[,1],out.yf[,7],lty=2,col=2)
lines(desastres[,1],out.yf[,5],lwd=2,col=4)
#Medias
out.mu<-out.sum[grep("mu",rownames(out.sum)),]
par(mfrow=c(1,1))
plot(desastres,type="l",col="grey80")
lines(desastres[,1],out.mu[,1],lwd=2,col=2)
lines(desastres[,1],out.mu[,3],lty=2,col=2)
lines(desastres[,1],out.mu[,7],lty=2,col=2)
#--- Ejemplo 7 ---
#-Reading data-
milk<-read.table("http://allman.rhon.itam.mx/~lnieto/index_archivos/milk.txt",header=TRUE)
milk$t<-1970:1982
n<-nrow(milk)
or<-order(milk$x)
plot(milk$x[or],milk$y[or],type="l")
text(milk$x[or],milk$y[or],labels=milk$t[or],cex=0.5,col=2)
plot(milk$t,milk$y,type="l")
plot(milk$t,milk$x,type="l")
#-Defining data-
m<-2
data<-list("n"=n,"m"=m,"y"=milk$y,"x"=milk$x,"t"=milk$t)
data<-list("n"=n,"m"=m,"y"=milk$y,"x"=milk$x/max(milk$x),"t"=milk$t/max(milk$t))
data<-list("n"=n,"m"=m,"y"=scale(milk$y)[1:n],"x"=scale(milk$x)[1:n],"t"=scale(milk$t)[1:n])
data<-list("n"=n,"m"=m,"y"=c(scale(milk$y)[1:(n-2)],NA,NA),"x"=scale(milk$x)[1:n],"t"=scale(milk$t)[1:n])
#-Defining inits-
inits<-function(){list(alpha=0,beta=0,tau=1,yf1=rep(1,n))}
inits<-function(){list(beta=rep(0,5),tau=1,yf1=rep(1,n))}
inits<-function(){list(beta=rep(0,n+m),tau.y=1,tau.b=1,yf1=rep(0,n+m))}
inits<-function(){list(beta=rep(0,n+m),tau.y=1,yf1=rep(0,n+m))}
inits<-function(){list(alpha=rep(0,n+m),beta=rep(0,n+m),tau.y=1,tau.b=1,tau.a=1,yf1=rep(0,n+m))}
inits<-function(){list(beta=rep(0,n+m),tau.y=1,tau.b=1,yf1=rep(0,n+m),g=0)}
inits<-function(){list(beta=rep(0,n),tau.y=1,tau.b=1,yf1=rep(0,n),g=1)}
inits<-function(){list(beta=rep(0,n),tau.y=1,tau.b=1,yf1=rep(0,n))}
inits<-function(){list(beta=rep(0,n),yf1=rep(0,n))}
#-Selecting parameters to monitor-
parameters<-c("beta","tau","yf1")
parameters<-c("beta","tau.y","tau.b","yf1","g")
parameters<-c("beta","tau.y","tau.b","yf1")
parameters<-c("alpha","beta","tau.y","tau.b","tau.a","yf1")
#parameters<-c("beta","tau.y","tau.b","yf1")
parameters<-c("beta","yf1")
#-Running code-
#OpenBUGS
ej7o.sim<-bugs(data,inits,parameters,model.file="Ej7o.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej7a.sim<-bugs(data,inits,parameters,model.file="Ej7a.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej7b.sim<-bugs(data,inits,parameters,model.file="Ej7b.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej7c.sim<-bugs(data,inits,parameters,model.file="Ej7c.txt",
n.iter=50000,n.chains=1,n.burnin=5000,debug=TRUE)
ej7d.sim<-bugs(data,inits,parameters,model.file="Ej7d.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
#JAGS
ej7a.sim<-jags(data,inits,parameters,model.file="Ej7a.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej7b.sim<-jags(data,inits,parameters,model.file="Ej7b.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej7c.sim<-jags(data,inits,parameters,model.file="Ej7c.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
#-Monitoring chain-
#Traza de la cadena
traceplot(ej7.sim)
#Cadena
#OpenBUGS
out<-ej7d.sim$sims.list
#JAGS
out<-ej7a.sim$BUGSoutput$sims.list
z<-out$beta
par(mfrow=c(2,2))
plot(z,type="l")
plot(cumsum(z)/(1:length(z)),type="l")
hist(z,freq=FALSE)
acf(z)
#Resumen (estimadores)
#OpenBUGS
out.sum<-ej7d.sim$summary
#JAGS
out.sum<-ej7a.sim$BUGSoutput$summary
#Tabla resumen
out.sum.t<-out.sum[grep("beta",rownames(out.sum)),c(1,3,7)]
out.sum.t<-cbind(out.sum.t,apply(out$beta,2,prob))
dimnames(out.sum.t)[[2]][4]<-"prob"
print(out.sum.t)
#DIC
out.dic<-ej7d.sim$DIC
out.dic<-ej7a.sim$BUGSoutput$DIC
print(out.dic)
#Predictions
out.yf<-out.sum[grep("yf1",rownames(out.sum)),]
ymin<-min(data$y,out.yf[,c(1,3,7)])
ymax<-max(data$y,out.yf[,c(1,3,7)])
xmin<-min(data$t)
xmax<-max(data$t+m)
#x vs. y
par(mfrow=c(1,1))
plot(data$x,data$y,type="p",col="grey50",ylim=c(ymin,ymax))
points(data$x,out.yf[,1],col=2,pch=16,cex=0.5)
segments(data$x,out.yf[,3],data$x,out.yf[,7],col=2)
#t vs y
par(mfrow=c(1,1))
plot(data$t,data$y,type="b",col="grey80",ylim=c(ymin,ymax),xlim=c(xmin,xmax))
lines(data$t,out.yf[1:n,1],col=2)
lines(data$t,out.yf[1:n,3],col=2,lty=2)
lines(data$t,out.yf[1:n,7],col=2,lty=2)
lines(data$t[n]:(data$t[n]+m),out.yf[n:(n+m),1],col=4)
lines(data$t[n]:(data$t[n]+m),out.yf[n:(n+m),3],col=4,lty=2)
lines(data$t[n]:(data$t[n]+m),out.yf[n:(n+m),7],col=4,lty=2)
#betas
out.beta<-out.sum[grep("beta",rownames(out.sum)),]
ymin<-min(out.beta[,c(1,3,7)])
ymax<-max(out.beta[,c(1,3,7)])
plot(out.beta[,1],type="l",ylim=c(ymin,ymax))
lines(out.beta[,3],lty=2)
lines(out.beta[,7],lty=2)
#--- Ejemplo 8 ---
#-Reading data-
mercado<-read.table("http://allman.rhon.itam.mx/~lnieto/index_archivos/mercado.txt",header=TRUE)
mercado.ts<-ts(mercado,start=c(1990,1),end=c(1991,52),frequency=52)
n<-nrow(mercado)
mercado$Tiempo<-1:n
plot(mercado.ts)
pairs(mercado)
cor(mercado)
#-Defining data-
data<-list("n"=n,"y"=mercado$SHARE,"x1"=mercado$PRICE,"x2"=mercado$OPROM,"x3"=mercado$CPROM)
data<-list("n"=n,"y"=scale(mercado$SHARE)[1:n],"x1"=scale(mercado$PRICE)[1:n],"x2"=scale(mercado$OPROM)[1:n],"x3"=scale(mercado$CPROM)[1:n])
data<-list("n"=n,"y"=c(mercado$SHARE[1:(n-4)],NA,NA,NA,NA),"x1"=mercado$PRICE,"x2"=mercado$OPROM,"x3"=mercado$CPROM)
#-Defining inits-
inits<-function(){list(alpha=0,beta=rep(0,3),tau=1,yf1=rep(1,n))}
inits<-function(){list(alpha=rep(0,n),beta=matrix(0,nrow=3,ncol=n),tau=1,tau.a=1,tau.b=rep(1,3),yf1=rep(1,n))}
inits<-function(){list(alpha=0,beta=matrix(0,nrow=3,ncol=n),tau=1,yf1=rep(1,n))}
#-Selecting parameters to monitor-
parameters<-c("alpha","beta","tau","yf1")
#-Running code-
#OpenBUGS
ej8a.sim<-bugs(data,inits,parameters,model.file="Ej8a.txt",
n.iter=10000,n.chains=1,n.burnin=1000)
ej8b.sim<-bugs(data,inits,parameters,model.file="Ej8b.txt",
n.iter=10000,n.chains=1,n.burnin=1000)
ej8c.sim<-bugs(data,inits,parameters,model.file="Ej8c.txt",
n.iter=10000,n.chains=1,n.burnin=1000)
#JAGS
ej8a.sim<-jags(data,inits,parameters,model.file="Ej8a.txt",
n.iter=5000,n.chains=1,n.burnin=500)
ej8b.sim<-jags(data,inits,parameters,model.file="Ej8b.txt",
n.iter=10000,n.chains=1,n.burnin=1000,n.thin=10)
#-Monitoring chain-
#Cadena
#OpenBUGS
out<-ej8c.sim$sims.list
#JAGS
out<-ej8a.sim$BUGSoutput$sims.list
z<-out$alpha
par(mfrow=c(2,2))
plot(z,type="l")
plot(cumsum(z)/(1:length(z)),type="l")
hist(z,freq=FALSE)
acf(z)
#Resumen (estimadores)
#OpenBUGS
out.sum<-ej8c.sim$summary
#JAGS
out.sum<-ej8b.sim$BUGSoutput$summary
#Tabla resumen
out.sum.t<-out.sum[grep("beta",rownames(out.sum)),c(1,3,7)]
out.sum.t<-cbind(out.sum.t,apply(out$beta,2,prob))
dimnames(out.sum.t)[[2]][4]<-"prob"
print(out.sum.t)
#DIC
out.dic<-ej8c.sim$DIC
#out.dic<-ej8b.sim$BUGSoutput$DIC
print(out.dic)
#Predictions
out.yf<-out.sum[grep("yf1",rownames(out.sum)),]
y<-data$y
ymin<-min(y,out.yf[,c(1,3,7)])
ymax<-max(y,out.yf[,c(1,3,7)])
#x1 vs. y
x<-data$x1
par(mfrow=c(1,1))
plot(x,y,type="p",col="grey50",ylim=c(ymin,ymax))
points(x,out.yf[,1],col=2,pch=16,cex=0.5)
segments(x,out.yf[,3],x,out.yf[,7],col=2)
#x2 vs. y
x<-data$x2
par(mfrow=c(1,1))
plot(x,y,type="p",col="grey50",ylim=c(ymin,ymax))
points(x,out.yf[,1],col=2,pch=16,cex=0.5)
segments(x,out.yf[,3],x,out.yf[,7],col=2)
#x3 vs. y
x<-data$x3
par(mfrow=c(1,1))
plot(x,y,type="p",col="grey50",ylim=c(ymin,ymax))
points(x,out.yf[,1],col=2,pch=16,cex=0.5)
segments(x,out.yf[,3],x,out.yf[,7],col=2)
#t vs. y
x<-mercado$Tiempo
par(mfrow=c(1,1))
plot(x,y,type="p",col="grey50",ylim=c(ymin,ymax))
points(x,out.yf[,1],col=2,pch=16,cex=0.5)
segments(x,out.yf[,3],x,out.yf[,7],col=2)
par(mfrow=c(1,1))
plot(x,y,type="l",col="grey50",ylim=c(ymin,ymax))
lines(x,out.yf[,1],col=2,cex=0.5)
lines(x,out.yf[,3],col=2,lty=2)
lines(x,out.yf[,7],col=2,lty=2)
#betas
out.beta<-out.sum[grep("beta",rownames(out.sum)),]
plot(out.beta[1:104,1],type="l")
plot(out.beta[105:208,1],type="l")
plot(out.beta[209:312,1],type="l")
#alpha
out.alpha<-out.sum[grep("alpha",rownames(out.sum)),]
plot(out.alpha[,1],type="l")
#--- Ejemplo 9 ---
#-Reading data-
leucemia<-read.table("http://allman.rhon.itam.mx/~lnieto/index_archivos/leucemia.txt",header=TRUE)
n<-nrow(leucemia)
par(mfrow=c(2,2))
plot(leucemia$Obs)
plot(leucemia$Obs/leucemia$Pops*10000)
plot(leucemia$Obs/leucemia$Esp)
abline(h=1,col=2)
#-Defining data-
data<-list("n"=n,"y"=leucemia$Obs,"ne"=leucemia$Pops/10000)
data<-list("n"=n,"y"=leucemia$Obs,"ne"=leucemia$Pops/10000,"C"=leucemia$Cancer,"P"=leucemia$Place,"A"=leucemia$Age)
#-Defining inits-
inits<-function(){list(theta=1,yf1=rep(1,n))}
inits<-function(){list(theta=rep(1,n),yf1=rep(1,n))}
inits<-function(){list(theta=rep(1,n),a=1,b=1,yf1=rep(1,n))}
inits<-function(){list(alpha=0,beta=rep(0,2),gama=rep(0,2),delta=rep(0,2),yf1=rep(1,n))}
#-Selecting parameters to monitor-
parameters<-c("theta","yf1")
parameters<-c("theta","eta","yf1")
parameters<-c("alpha.adj","beta.adj","gama.adj","delta.adj","yf1")
#-Running code-
#OpenBUGS
ej9a.sim<-bugs(data,inits,parameters,model.file="Ej9a.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej9b.sim<-bugs(data,inits,parameters,model.file="Ej9b.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej9c.sim<-bugs(data,inits,parameters,model.file="Ej9c.txt",
n.iter=100000,n.chains=1,n.burnin=10000)
ej9d.sim<-bugs(data,inits,parameters,model.file="Ej9d.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
#JAGS
ej9a.sim<-jags(data,inits,parameters,model.file="Ej9a.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej9b.sim<-jags(data,inits,parameters,model.file="Ej9b.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej9c.sim<-jags(data,inits,parameters,model.file="Ej9c.txt",
n.iter=100000,n.chains=1,n.burnin=10000)
ej9d.sim<-jags(data,inits,parameters,model.file="Ej9d.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
#-Monitoring chain-
#Traza de la cadena
traceplot(ej9.sim)
#Cadena
#OpenBUGS
outa<-ej9a.sim$sims.list
outb<-ej9b.sim$sims.list
outc<-ej9c.sim$sims.list
outd<-ej9d.sim$sims.list
#JAGS
outa<-ej9a.sim$BUGSoutput$sims.list
outb<-ej9b.sim$BUGSoutput$sims.list
outc<-ej9c.sim$BUGSoutput$sims.list
outc<-ej9d.sim$BUGSoutput$sims.list
z<-outa$theta
par(mfrow=c(2,2))
plot(z,type="l")
plot(cumsum(z)/(1:length(z)),type="l")
hist(z,freq=FALSE)
acf(z)
#Resumen (estimadores)
#OpenBUGS
outa.sum<-ej9a.sim$summary
outb.sum<-ej9b.sim$summary
outc.sum<-ej9c.sim$summary
outd.sum<-ej9d.sim$summary
#JAGS
outa.sum<-ej9a.sim$BUGSoutput$summary
outb.sum<-ej9b.sim$BUGSoutput$summary
outc.sum<-ej9c.sim$BUGSoutput$summary
outd.sum<-ej9d.sim$BUGSoutput$summary
#Tabla resumen
out<-outb
out.sum<-outb.sum
out.sum.t<-out.sum[grep("theta",rownames(out.sum)),c(1,3,7)]
out.sum.t<-cbind(out.sum.t,apply(out$theta,2,prob))
dimnames(out.sum.t)[[2]][4]<-"prob"
print(out.sum.t)
#DIC
#OpenBUGS
outa.dic<-ej9a.sim$DIC
outb.dic<-ej9b.sim$DIC
outc.dic<-ej9c.sim$DIC
outd.dic<-ej9d.sim$DIC
#JAGS
outa.dic<-ej9a.sim$BUGSoutput$DIC
outb.dic<-ej9b.sim$BUGSoutput$DIC
outc.dic<-ej9c.sim$BUGSoutput$DIC
outd.dic<-ej9d.sim$BUGSoutput$DIC
print(outa.dic)
print(outb.dic)
print(outc.dic)
print(outd.dic)
#Estimaciones
outa.p<-outa.sum[grep("theta",rownames(outa.sum)),]
outb.p<-outb.sum[grep("theta",rownames(outb.sum)),]
outc.p<-outc.sum[grep("theta",rownames(outc.sum)),]
outc.eta<-outc.sum[grep("eta",rownames(outc.sum)),]
#x vs. y
xmin<-0
xmax<-10
ymin<-0
ymax<-5
par(mfrow=c(1,1))
plot(leucemia$Obs/leucemia$Pops*10000,type="p",col="grey50",xlim=c(xmin,xmax),ylim=c(ymin,ymax))
#
out.p<-outb.p
points(out.p[,1],col=2,pch=16,cex=0.5)
segments(1:8,out.p[,3],1:8,out.p[,7],col=2)
#
out.p<-outc.p
points((1:8)+0.2,out.p[,1],col=4,pch=16,cex=0.5)
segments((1:8)+0.2,out.p[,3],(1:8)+0.2,out.p[,7],col=4)
#
points(xmax-0.2,sum(leucemia$Obs)/sum(leucemia$Pops)*10000)
#
out.p<-outa.p
points(xmax-0.2,out.p[1],col=3,pch=16,cex=0.5)
segments(xmax-0.2,out.p[3],xmax-0.2,out.p[7],col=3)
#
out.p<-outc.eta
points(xmax,out.p[1],col=4,pch=16,cex=0.5)
segments(xmax,out.p[3],xmax,out.p[7],col=4)
#--- Ejemplo 10 ---
#-Reading data-
reclama<-read.table("http://allman.rhon.itam.mx/~lnieto/index_archivos/reclama.txt",header=TRUE)
n<-nrow(reclama)
par(mfrow=c(2,2))
plot(reclama$r)
plot(reclama$n,ylim=c(0,max(reclama$n)))
plot(reclama$r/reclama$n)
#-Defining data-
data<-list("n"=n,"y"=reclama$r,"ne"=reclama$n)
#-Defining inits-
inits<-function(){list(p=0.5,yf1=rep(1,n))}
inits<-function(){list(p=rep(0.5,n),yf1=rep(1,n))}
inits<-function(){list(p=rep(0.5,n),a=1,b=1,yf1=rep(1,n))}
#-Selecting parameters to monitor-
parameters<-c("p","yf1")
parameters<-c("p","eta","yf1")
#-Running code-
#OpenBUGS
ej10a.sim<-bugs(data,inits,parameters,model.file="Ej10a.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej10b.sim<-bugs(data,inits,parameters,model.file="Ej10b.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej10c.sim<-bugs(data,inits,parameters,model.file="Ej10c.txt",
n.iter=100000,n.chains=1,n.burnin=10000)
#JAGS
ej10a.sim<-jags(data,inits,parameters,model.file="Ej10a.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej10b.sim<-jags(data,inits,parameters,model.file="Ej10b.txt",
n.iter=50000,n.chains=1,n.burnin=5000)
ej10c.sim<-jags(data,inits,parameters,model.file="Ej10c.txt",
n.iter=100000,n.chains=1,n.burnin=10000)
#-Monitoring chain-
#Traza de la cadena
traceplot(ej10.sim)
#Cadena
#OpenBUGS
outa<-ej10a.sim$sims.list
outb<-ej10b.sim$sims.list
outc<-ej10c.sim$sims.list
#JAGS
outa<-ej10a.sim$BUGSoutput$sims.list
outb<-ej10b.sim$BUGSoutput$sims.list
outc<-ej10c.sim$BUGSoutput$sims.list
z<-outb$p[,2]
par(mfrow=c(2,2))
plot(z,type="l")
plot(cumsum(z)/(1:length(z)),type="l")
hist(z,freq=FALSE)
acf(z)
#Resumen (estimadores)
#OpenBUGS
outa.sum<-ej10a.sim$summary
outb.sum<-ej10b.sim$summary
outc.sum<-ej10c.sim$summary
#JAGS
outa.sum<-ej10a.sim$BUGSoutput$summary
outb.sum<-ej10b.sim$BUGSoutput$summary
outc.sum<-ej10c.sim$BUGSoutput$summary
#Tabla resumen
out.sum.t<-out.sum[grep("p",rownames(out.sum)),c(1,3,7)]
out.sum.t<-cbind(out.sum.t,apply(out$p,2,prob))
dimnames(out.sum.t)[[2]][4]<-"prob"
print(out.sum.t)
#DIC
#OpenBUGS
outa.dic<-ej10a.sim$DIC
outb.dic<-ej10b.sim$DIC
outc.dic<-ej10c.sim$DIC
#JAGS
outa.dic<-ej10a.sim$BUGSoutput$DIC
outb.dic<-ej10b.sim$BUGSoutput$DIC
outc.dic<-ej10c.sim$BUGSoutput$DIC
print(outa.dic)
print(outb.dic)
print(outc.dic)
#Estimaciones
outa.p<-outa.sum[grep("p",rownames(outa.sum)),]
outb.p<-outb.sum[grep("p",rownames(outb.sum)),]
outc.p<-outc.sum[grep("p",rownames(outc.sum)),]
outc.eta<-outc.sum[grep("eta",rownames(outc.sum)),]
#x vs. y
xmin<-0
xmax<-12
ymin<-0
ymax<-1
par(mfrow=c(1,1))
plot(reclama$r/reclama$n,type="p",col="grey50",xlim=c(xmin,xmax),ylim=c(ymin,ymax))
#
out.p<-outb.p
points(out.p[,1],col=2,pch=16,cex=0.5)
segments(1:10,out.p[,3],1:10,out.p[,7],col=2)
#
out.p<-outc.p
points((1:10)+0.2,out.p[,1],col=4,pch=16,cex=0.5)
segments((1:10)+0.2,out.p[,3],(1:10)+0.2,out.p[,7],col=4)
#
points(xmax-0.2,sum(reclama$r)/sum(reclama$n))
#
out.p<-outa.p
points(xmax-0.2,out.p[1],col=3,pch=16,cex=0.5)
segments(xmax-0.2,out.p[3],xmax-0.2,out.p[7],col=3)
#
out.p<-outc.eta
points(xmax,out.p[1],col=4,pch=16,cex=0.5)
segments(xmax,out.p[3],xmax,out.p[7],col=4)
|
Length<-c(20,21,22,23,21,20)
Speed<-c(12,14,12,16,20,21)
Algae<-c(40,45,45,80,75,65)
NO3<-c(2.25,2.15,1.75,1.95,1.95,2.75)
BOD<-c(200,180,135,120,110,120)
mf<-data.frame(Length,Speed,Algae,NO3,BOD)
mf
plot(Length~BOD,data=mf, main='plot1')
plot.new()
plot.new()
plot(Length ~ NO3,data=mf,main='plot2')
|
/multivariate.R
|
no_license
|
GauthamSampath/dsr
|
R
| false
| false
| 316
|
r
|
Length<-c(20,21,22,23,21,20)
Speed<-c(12,14,12,16,20,21)
Algae<-c(40,45,45,80,75,65)
NO3<-c(2.25,2.15,1.75,1.95,1.95,2.75)
BOD<-c(200,180,135,120,110,120)
mf<-data.frame(Length,Speed,Algae,NO3,BOD)
mf
plot(Length~BOD,data=mf, main='plot1')
plot.new()
plot.new()
plot(Length ~ NO3,data=mf,main='plot2')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/affine.R
\name{readAffine}
\alias{readAffine}
\title{Read an affine matrix from a file}
\usage{
readAffine(fileName, source = NULL, target = NULL, type = NULL)
}
\arguments{
\item{fileName}{A string giving the file name to read the affine matrix
from.}
\item{source}{The source image for the transformation. If \code{NULL}, the
file will be searched for a comment specifying the path to a NIfTI file.}
\item{target}{The target image for the transformation. If \code{NULL}, the
file will be searched for a comment specifying the path to a NIfTI file.}
\item{type}{The type of the affine matrix, which describes what convention
is it is stored with. Currently valid values are \code{"niftyreg"} and
\code{"fsl"} (for FSL FLIRT). If \code{NULL}, the function will look in
the file for a comment specifying the type.}
}
\value{
An matrix with class \code{"affine"}, converted to the NiftyReg
convention and with \code{source} and \code{target} attributes set
appropriately.
}
\description{
This function is used to read a 4x4 numeric matrix representing an affine
transformation from a file. It is a wrapper around \code{read.table} which
additionally ensures that required attributes are set. The type of the
matrix must be specified, as there are differing conventions across
software packages.
}
\examples{
print(readAffine(system.file("extdata","affine.txt",package="RNiftyReg")))
}
\seealso{
\code{\link{read.table}}, \code{\link{writeAffine}}
}
\author{
Jon Clayden <code@clayden.org>
}
|
/man/readAffine.Rd
|
no_license
|
jonclayden/RNiftyReg
|
R
| false
| true
| 1,575
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/affine.R
\name{readAffine}
\alias{readAffine}
\title{Read an affine matrix from a file}
\usage{
readAffine(fileName, source = NULL, target = NULL, type = NULL)
}
\arguments{
\item{fileName}{A string giving the file name to read the affine matrix
from.}
\item{source}{The source image for the transformation. If \code{NULL}, the
file will be searched for a comment specifying the path to a NIfTI file.}
\item{target}{The target image for the transformation. If \code{NULL}, the
file will be searched for a comment specifying the path to a NIfTI file.}
\item{type}{The type of the affine matrix, which describes what convention
is it is stored with. Currently valid values are \code{"niftyreg"} and
\code{"fsl"} (for FSL FLIRT). If \code{NULL}, the function will look in
the file for a comment specifying the type.}
}
\value{
An matrix with class \code{"affine"}, converted to the NiftyReg
convention and with \code{source} and \code{target} attributes set
appropriately.
}
\description{
This function is used to read a 4x4 numeric matrix representing an affine
transformation from a file. It is a wrapper around \code{read.table} which
additionally ensures that required attributes are set. The type of the
matrix must be specified, as there are differing conventions across
software packages.
}
\examples{
print(readAffine(system.file("extdata","affine.txt",package="RNiftyReg")))
}
\seealso{
\code{\link{read.table}}, \code{\link{writeAffine}}
}
\author{
Jon Clayden <code@clayden.org>
}
|
# R code for the render function
rmarkdown::render("steakArticleParams.Rmd",
params = list(region = "Mountain"))
# R code to create the custom function
render_report <-function(regionvar){
template <-"steakArticleParams.Rmd"
outfile <-sprintf("steakArticle_%s.html",regionvar)
parameters <-list(region = regionvar)
rmarkdown::render(template,
output_file=outfile,
params=parameters)
invisible(TRUE)
}
render_report("Pacific")
# R code to use the custom function with purrr
library(purrr)
params_list <- list(list("East North Central",
"East South Central", "Middle Atlantic",
"Mountain", "New England", "Pacific",
"South Atlantic", "West North Central",
"West South Central"))
purrr::pmap(params_list,render_report)
|
/Automated report.R
|
no_license
|
Raylc/ReproducibleTemplates
|
R
| false
| false
| 893
|
r
|
# R code for the render function
rmarkdown::render("steakArticleParams.Rmd",
params = list(region = "Mountain"))
# R code to create the custom function
render_report <-function(regionvar){
template <-"steakArticleParams.Rmd"
outfile <-sprintf("steakArticle_%s.html",regionvar)
parameters <-list(region = regionvar)
rmarkdown::render(template,
output_file=outfile,
params=parameters)
invisible(TRUE)
}
render_report("Pacific")
# R code to use the custom function with purrr
library(purrr)
params_list <- list(list("East North Central",
"East South Central", "Middle Atlantic",
"Mountain", "New England", "Pacific",
"South Atlantic", "West North Central",
"West South Central"))
purrr::pmap(params_list,render_report)
|
library(shiny)
ui <- basicPage(
plotOutput("plot1", click = "plot_click", width = 400),
verbatimTextOutput("info")
)
server <- function(input, output) {
output$plot1 <- renderPlot({
plot(mtcars$wt, mtcars$mpg)
})
output$info <- renderText({
paste0("x=", input$plot_click$x, "\n",
"y=", input$plot_click$y)
})
}
shinyApp(ui, server)
|
/shiny/small_examples/02-click.R
|
no_license
|
SuperUsersDK/SU0237
|
R
| false
| false
| 367
|
r
|
library(shiny)
ui <- basicPage(
plotOutput("plot1", click = "plot_click", width = 400),
verbatimTextOutput("info")
)
server <- function(input, output) {
output$plot1 <- renderPlot({
plot(mtcars$wt, mtcars$mpg)
})
output$info <- renderText({
paste0("x=", input$plot_click$x, "\n",
"y=", input$plot_click$y)
})
}
shinyApp(ui, server)
|
#' Title: GME Case
#' Author: Maria Fernanda Pernillo
#' email: mpernillodominguez2019@student.hult.edu
#' Date: March 10 2021
# WD
setwd("~/Personal/hult_NLP_student/cases/session II/WallStreetBets")
# Libs
library(tidytext)
library(dplyr)
library(stringr)
library(tibble)
library(ggplot2)
library(tm)
library(lexicon)
library(echarts4r)
library(tidyr)
library(corpus)
library(ggthemes)
#Get sentiments from 3 separate libraries
get_sentiments("afinn")
get_sentiments("bing")
get_sentiments("nrc")
#Call Supporting Functions
options(stringsAsFactors = FALSE)
Sys.setlocale('LC_ALL','C')
tryTolower <- function(x){
y = NA
try_error = tryCatch(tolower(x), error = function(e) e)
if (!inherits(try_error, 'error'))
y = tolower(x)
return(y)
}
cleanCorpus<-function(corpus, customStopwords){
corpus <- tm_map(corpus, content_transformer(qdapRegex::rm_url))
corpus <- tm_map(corpus, content_transformer(tryTolower))
corpus <- tm_map(corpus, removeWords, customStopwords)
corpus <- tm_map(corpus, removePunctuation)
corpus <- tm_map(corpus, removeNumbers)
corpus <- tm_map(corpus, stripWhitespace)
return(corpus)
}
# Build the stop words function
stops <- c(stopwords('SMART'), 'just','like', 'shit', 'fucking', 'fuck', 'make', 'big','put','gamestop', 'gme',
'thing','made','thing', 'wsb')
# Data
#txt <- read.csv(file.choose('CASE_gme.csv'))
#txt$allText <- paste(txt$comment, txt$comment)
# Read in Data, clean & organize
text <- read.csv('CASE_gme.csv')
txtCorpus <- VCorpus(VectorSource(text$comment))
txtCorpus <- cleanCorpus(txtCorpus, stops)
gmeTDM <- TermDocumentMatrix(txtCorpus)
gmeTDMm <- as.matrix(gmeTDM)
# Frequency Data Frame
gmeSums <- rowSums(gmeTDMm)
gmeFreq <- data.frame(word=names(gmeSums),frequency=gmeSums)
# Review a section
gmeFreq[50:55,]
# Remove the row attributes meta family
rownames(gmeFreq) <- NULL
gmeFreq[50:55,]
# Simple barplot; values greater than 15
topWords <- subset(gmeFreq, gmeFreq$frequency >= 15)
topWords <- topWords[order(topWords$frequency, decreasing=F),]
# Chg to factor for ggplot
topWords$word <- factor(topWords$word,
levels=unique(as.character(topWords$word)))
ggplot(topWords, aes(x=word, y=frequency)) +
geom_bar(stat="identity", fill='darkred') +
coord_flip()+ theme_gdocs() +
geom_text(aes(label=frequency), colour="white",hjust=1.25, size=3.0)
##Inspect word associations with term CITRON
associations <- findAssocs(gmeTDM, 'citron', 0.30) #from line 50
associations
# Organize the word associations
assocDF <- data.frame(terms=names(associations[[1]]),
value=unlist(associations))
assocDF$terms <- factor(assocDF$terms, levels=assocDF$terms)
rownames(assocDF) <- NULL
assocDF
# Make a dot plot
ggplot(assocDF, aes(y=terms)) +
geom_point(aes(x=value), data=assocDF, col='#c00c00') +
theme_gdocs() +
geom_text(aes(x=value,label=value), colour="red",hjust="inward", vjust ="inward" , size=3)
## Inspect word associations with term NOK
associations <- findAssocs(gmeTDM, 'nok', 0.30) #from line 50
associations
# Organize the word associations
assocDF <- data.frame(terms=names(associations[[1]]),
value=unlist(associations))
assocDF$terms <- factor(assocDF$terms, levels=assocDF$terms)
rownames(assocDF) <- NULL
assocDF
# Make a dot plot
ggplot(assocDF, aes(y=terms)) +
geom_point(aes(x=value), data=assocDF, col='#c00c00') +
theme_gdocs() +
geom_text(aes(x=value,label=value), colour="red",hjust="inward", vjust ="inward" , size=3)
## Inspect word associations with term RETAIL
associations <- findAssocs(gmeTDM, 'retail', 0.40) #from line 50
associations
# Organize the word associations
assocDF <- data.frame(terms=names(associations[[1]]),
value=unlist(associations))
assocDF$terms <- factor(assocDF$terms, levels=assocDF$terms)
rownames(assocDF) <- NULL
assocDF
# Make a dot plot
ggplot(assocDF, aes(y=terms)) +
geom_point(aes(x=value), data=assocDF, col='#c00c00') +
theme_gdocs() +
geom_text(aes(x=value,label=value), colour="red",hjust="inward", vjust ="inward" , size=3)
## Inspect word associations with term SHORT
associations <- findAssocs(gmeTDM, 'short', 0.40) #from line 50
associations
# Organize the word associations
assocDF <- data.frame(terms=names(associations[[1]]),
value=unlist(associations))
assocDF$terms <- factor(assocDF$terms, levels=assocDF$terms)
rownames(assocDF) <- NULL
assocDF
# Make a dot plot
ggplot(assocDF, aes(y=terms)) +
geom_point(aes(x=value), data=assocDF, col='#c00c00') +
theme_gdocs() +
geom_text(aes(x=value,label=value), colour="red",hjust="inward", vjust ="inward" , size=3)
## Inspect word associations with term SHORT
associations <- findAssocs(gmeTDM, 'tesla', 0.40) #from line 50
associations
# Organize the word associations
assocDF <- data.frame(terms=names(associations[[1]]),
value=unlist(associations))
assocDF$terms <- factor(assocDF$terms, levels=assocDF$terms)
rownames(assocDF) <- NULL
assocDF
# Make a dot plot
ggplot(assocDF, aes(y=terms)) +
geom_point(aes(x=value), data=assocDF, col='#c00c00') +
theme_gdocs() +
geom_text(aes(x=value,label=value), colour="red",hjust="inward", vjust ="inward" , size=3)
#Analyze the mentions of gme during January
library(readr)
library(lubridate)
reddit <-read_csv(url('https://raw.githubusercontent.com/kwartler/hult_NLP_student/main/cases/session%20II/WallStreetBets/CASE_gme.csv'))
# Mentions by month
reddit$gmeMention <- grepl('gme', reddit$comment, ignore.case = T)
x <- aggregate(gmeMention~comm_date_yr+comm_date_month, reddit, sum)
x <- x[order(x$comm_date_yr, x$comm_date_month),]
plot(x$gmeMention, type = 'l')
# One Month
oneMonth <- subset(reddit, reddit$comm_date_yr==2021 &
reddit$comm_date_month==1)
oneMonth <- oneMonth[order(oneMonth$comm_date),]
y <- aggregate(gmeMention~+comm_date, oneMonth, sum)
plot(y$gmeMention, type = 'l')
plot(cumsum(y$gmeMention), type = 'l')
# Clean and Organize the old way instead of cleanMatrix for the sentiment analysis
txt <- read.csv('CASE_gme.csv')
table(txt$comment) # NOTE(review): tabulates every unique comment string -- huge console dump
#Examine the emotional words used in these articles
txtDTM <- VCorpus(VectorSource(txt$comment))
txtDTM <- cleanCorpus(txtDTM, stops)
txtDTM <- DocumentTermMatrix(txtDTM) # documents in rows, terms in columns (needed for tidy())
# Examine
as.matrix(txtDTM[1:10,100:110])
dim(txtDTM)
#txtDTM <- as.DocumentTermMatrix(txtDTM, weighting = weightTf )
# Long triplet form: one row per (document, term, count)
tidyCorp <- tidy(txtDTM)
tidyCorp[100:110,]
dim(tidyCorp)
# Get bing lexicon
# "afinn", "bing", "nrc", "loughran"
bing <- get_sentiments(lexicon = c("bing"))
head(bing)
# Perform Inner Join: keep only corpus terms that appear in the lexicon
bingSent <- inner_join(tidyCorp, bing, by=c('term' = 'word'))
bingSent
# Quick Analysis
table(bingSent$sentiment) #tally ignoring count
table(bingSent$sentiment, bingSent$count) #only a few with more than 1 term
aggregate(count~sentiment,bingSent, sum) #correct way to sum them
# Get afinn lexicon (per-word numeric valence scores)
afinn<-get_sentiments(lexicon = c("afinn"))
head(afinn)
# Perform Inner Join
afinnSent <- inner_join(tidyCorp,afinn, by=c('term' = 'word'))
afinnSent
# Examine the quantity: weight each word's score by its in-document count
afinnSent$afinnAmt <- afinnSent$count * afinnSent$value
# Compare w/polarity and bing
mean(afinnSent$afinnAmt)
# FAKE EXAMPLE: if the documents were related and temporal, make sure they are sorted by time first!
# Example use case : i.e. over time how was the emotional content for a topic i.e. Pakistan articles
afinnTemporal <- aggregate(afinnAmt~document, afinnSent, sum)
afinnTemporal$document <- as.numeric(afinnTemporal$document) # tidy() documents are character ids
afinnTemporal <- afinnTemporal[order(afinnTemporal$document),]
# Quick plot
plot(afinnTemporal$afinnAmt, type="l", main="Quick Timeline of Identified Words")
# Quick Check with the pptx for a reminder.
# Get nrc lexicon; deprecated in tidytext, use library(lexicon)
#nrc <- read.csv('nrcSentimentLexicon.csv')
nrc <- nrc_emotions # wide format: term plus one column per emotion
head(nrc)
# Tidy this up: wide -> long, then keep only term/emotion pairs that apply
nrc <- nrc %>% pivot_longer(-term, names_to = "emotion", values_to = "freq")
nrc <-subset(nrc, nrc$freq>0 )
head(nrc)
nrc$freq <- NULL #no longer needed
# Perform Inner Join
nrcSent <- inner_join(tidyCorp,nrc, by=c('term' = 'term'))
nrcSent
# Radar chart of emotion tallies (echarts4r)
table(nrcSent$emotion)
emos <- data.frame(table(nrcSent$emotion))
names(emos) <- c('emotion', 'termsCt')
emos %>%
e_charts(emotion) %>%
e_radar(termsCt, max = max(emos$termsCt), name = "Emotions") %>%
e_tooltip(trigger = "item") %>% e_theme("dark-mushroom")
# Other Emotion Lexicons Exist
emotionLex <- affect_wordnet # from the lexicon package
emotionLex
table(emotionLex$emotion)
table(emotionLex$category)
# Keep only clearly signed entries
emotionLex <- subset(emotionLex,
emotionLex$emotion=='Positive'|emotionLex$emotion=='Negative')
# More emotional categories, fewer terms
lexSent <- inner_join(tidyCorp,emotionLex, by=c('term' = 'term'))
lexSent
emotionID <- aggregate(count ~ category, lexSent, sum)
emotionID %>%
e_charts(category) %>% e_theme("dark-mushroom") %>%
e_radar(count, max =max(emotionID$count), name = "Emotional Categories") %>%
e_tooltip() %>%
e_theme("dark-mushroom")
# End
|
/Pernillo_GME_case.R
|
no_license
|
Mafer104/WallStreet-bets-
|
R
| false
| false
| 9,435
|
r
|
#' Title: GME Case
#' Author: Maria Fernanda Pernillo
#' email: mpernillodominguez2019@student.hult.edu
#' Date: March 10 2021
# WD
setwd("~/Personal/hult_NLP_student/cases/session II/WallStreetBets")
# Libs
library(tidytext)
library(dplyr)
library(stringr)
library(tibble)
library(ggplot2)
library(tm)
library(lexicon)
library(echarts4r)
library(tidyr)
library(corpus)
library(ggthemes)
#Get sentiments from 3 separate libraries
# (printing each lexicon up front confirms it is installed and accessible)
get_sentiments("afinn")
get_sentiments("bing")
get_sentiments("nrc")
#Call Supporting Functions
options(stringsAsFactors = FALSE) # pre-R-4.0 habit; harmless no-op on R >= 4.0
Sys.setlocale('LC_ALL','C') # presumably to avoid multibyte-string errors in tolower/tm -- verify
# Lower-case text, returning NA instead of erroring on strings tolower()
# cannot handle (e.g. invalid multibyte sequences in scraped comments).
# x : character vector (anything tolower() accepts)
# Returns the lower-cased value, or NA if the conversion raised an error.
tryTolower <- function(x){
  # Use tryCatch's error branch directly instead of the original
  # capture-then-inherits() two-step; behavior is identical.
  tryCatch(tolower(x), error = function(e) NA)
}
# Apply a standard tm cleaning pipeline to a corpus.
# corpus          : a tm VCorpus
# customStopwords : character vector of words to strip
# Returns the cleaned VCorpus.
# NOTE: step order matters -- URLs are removed before lowercasing, and
# stopwords are removed while punctuation is still present so apostrophe
# forms in customStopwords can still match.
cleanCorpus<-function(corpus, customStopwords){
corpus <- tm_map(corpus, content_transformer(qdapRegex::rm_url))
corpus <- tm_map(corpus, content_transformer(tryTolower))
corpus <- tm_map(corpus, removeWords, customStopwords)
corpus <- tm_map(corpus, removePunctuation)
corpus <- tm_map(corpus, removeNumbers)
corpus <- tm_map(corpus, stripWhitespace)
return(corpus)
}
# Build the stop words vector: SMART list plus case-specific noise terms
stops <- c(stopwords('SMART'), 'just','like', 'shit', 'fucking', 'fuck', 'make', 'big','put','gamestop', 'gme',
           'thing','made', 'wsb') # duplicate 'thing' removed; removeWords needs each word once
# Data
#txt <- read.csv(file.choose('CASE_gme.csv'))
#txt$allText <- paste(txt$comment, txt$comment)
# Read in Data, clean & organize
text <- read.csv('CASE_gme.csv')
txtCorpus <- VCorpus(VectorSource(text$comment))
txtCorpus <- cleanCorpus(txtCorpus, stops)
gmeTDM <- TermDocumentMatrix(txtCorpus)
gmeTDMm <- as.matrix(gmeTDM)
# Frequency Data Frame: total occurrences of each term across all comments
gmeSums <- rowSums(gmeTDMm)
gmeFreq <- data.frame(word=names(gmeSums),frequency=gmeSums)
# Review a section
gmeFreq[50:55,]
# Remove the row attributes meta family
rownames(gmeFreq) <- NULL
gmeFreq[50:55,]
# Simple barplot; values greater than 15
topWords <- subset(gmeFreq, gmeFreq$frequency >= 15)
topWords <- topWords[order(topWords$frequency, decreasing=FALSE),] # FALSE, never F
# Chg to factor for ggplot so bars keep the frequency-sorted order
topWords$word <- factor(topWords$word,
                        levels=unique(as.character(topWords$word)))
ggplot(topWords, aes(x=word, y=frequency)) +
  geom_bar(stat="identity", fill='darkred') +
  coord_flip()+ theme_gdocs() +
  geom_text(aes(label=frequency), colour="white",hjust=1.25, size=3.0)
##Inspect word associations with term CITRON
# (The same organize-and-dot-plot recipe repeats for each term below; a small
# helper function would remove the duplication -- left as-is here.)
associations <- findAssocs(gmeTDM, 'citron', 0.30) #from line 50
associations
# Organize the word associations
assocDF <- data.frame(terms=names(associations[[1]]),
value=unlist(associations))
assocDF$terms <- factor(assocDF$terms, levels=assocDF$terms)
rownames(assocDF) <- NULL
assocDF
# Make a dot plot
ggplot(assocDF, aes(y=terms)) +
geom_point(aes(x=value), data=assocDF, col='#c00c00') +
theme_gdocs() +
geom_text(aes(x=value,label=value), colour="red",hjust="inward", vjust ="inward" , size=3)
## Inspect word associations with term NOK
associations <- findAssocs(gmeTDM, 'nok', 0.30) #from line 50
associations
# Organize the word associations
assocDF <- data.frame(terms=names(associations[[1]]),
value=unlist(associations))
assocDF$terms <- factor(assocDF$terms, levels=assocDF$terms)
rownames(assocDF) <- NULL
assocDF
# Make a dot plot
ggplot(assocDF, aes(y=terms)) +
geom_point(aes(x=value), data=assocDF, col='#c00c00') +
theme_gdocs() +
geom_text(aes(x=value,label=value), colour="red",hjust="inward", vjust ="inward" , size=3)
## Inspect word associations with term RETAIL
associations <- findAssocs(gmeTDM, 'retail', 0.40) #from line 50
associations
# Organize the word associations
assocDF <- data.frame(terms=names(associations[[1]]),
value=unlist(associations))
assocDF$terms <- factor(assocDF$terms, levels=assocDF$terms)
rownames(assocDF) <- NULL
assocDF
# Make a dot plot
ggplot(assocDF, aes(y=terms)) +
geom_point(aes(x=value), data=assocDF, col='#c00c00') +
theme_gdocs() +
geom_text(aes(x=value,label=value), colour="red",hjust="inward", vjust ="inward" , size=3)
## Inspect word associations with term SHORT
associations <- findAssocs(gmeTDM, 'short', 0.40) #from line 50
associations
# Organize the word associations
assocDF <- data.frame(terms=names(associations[[1]]),
value=unlist(associations))
assocDF$terms <- factor(assocDF$terms, levels=assocDF$terms)
rownames(assocDF) <- NULL
assocDF
# Make a dot plot
ggplot(assocDF, aes(y=terms)) +
geom_point(aes(x=value), data=assocDF, col='#c00c00') +
theme_gdocs() +
geom_text(aes(x=value,label=value), colour="red",hjust="inward", vjust ="inward" , size=3)
## Inspect word associations with term TESLA (comment corrected: original said SHORT)
associations <- findAssocs(gmeTDM, 'tesla', 0.40) #from line 50
associations
# Organize the word associations
assocDF <- data.frame(terms=names(associations[[1]]),
value=unlist(associations))
assocDF$terms <- factor(assocDF$terms, levels=assocDF$terms)
rownames(assocDF) <- NULL
assocDF
# Make a dot plot
ggplot(assocDF, aes(y=terms)) +
geom_point(aes(x=value), data=assocDF, col='#c00c00') +
theme_gdocs() +
geom_text(aes(x=value,label=value), colour="red",hjust="inward", vjust ="inward" , size=3)
#Analyze the mentions of gme during January
library(readr)
library(lubridate)
# Pull the GME comment data straight from the course repo
reddit <- read_csv(url('https://raw.githubusercontent.com/kwartler/hult_NLP_student/main/cases/session%20II/WallStreetBets/CASE_gme.csv'))
# Mentions by month: flag every comment containing "gme" (case-insensitive)
reddit$gmeMention <- grepl('gme', reddit$comment, ignore.case = TRUE) # TRUE, never T
x <- aggregate(gmeMention ~ comm_date_yr + comm_date_month, reddit, sum)
x <- x[order(x$comm_date_yr, x$comm_date_month), ]
plot(x$gmeMention, type = 'l')
# One Month: isolate January 2021 and track mentions per day
oneMonth <- subset(reddit, reddit$comm_date_yr == 2021 &
                     reddit$comm_date_month == 1)
oneMonth <- oneMonth[order(oneMonth$comm_date), ]
y <- aggregate(gmeMention ~ comm_date, oneMonth, sum) # stray "+" removed from the formula
plot(y$gmeMention, type = 'l')
plot(cumsum(y$gmeMention), type = 'l') # cumulative mentions across the month
# Clean and Organize the old way instead of cleanMatrix for the sentiment analysis
txt <- read.csv('CASE_gme.csv')
table(txt$comment) # NOTE(review): tabulates every unique comment string -- huge console dump
#Examine the emotional words used in these articles
txtDTM <- VCorpus(VectorSource(txt$comment))
txtDTM <- cleanCorpus(txtDTM, stops)
txtDTM <- DocumentTermMatrix(txtDTM) # documents in rows, terms in columns (needed for tidy())
# Examine
as.matrix(txtDTM[1:10,100:110])
dim(txtDTM)
#txtDTM <- as.DocumentTermMatrix(txtDTM, weighting = weightTf )
# Long triplet form: one row per (document, term, count)
tidyCorp <- tidy(txtDTM)
tidyCorp[100:110,]
dim(tidyCorp)
# Get bing lexicon
# "afinn", "bing", "nrc", "loughran"
bing <- get_sentiments(lexicon = c("bing"))
head(bing)
# Perform Inner Join: keep only corpus terms that appear in the lexicon
bingSent <- inner_join(tidyCorp, bing, by=c('term' = 'word'))
bingSent
# Quick Analysis
table(bingSent$sentiment) #tally ignoring count
table(bingSent$sentiment, bingSent$count) #only a few with more than 1 term
aggregate(count~sentiment,bingSent, sum) #correct way to sum them
# Get afinn lexicon (per-word numeric valence scores)
afinn<-get_sentiments(lexicon = c("afinn"))
head(afinn)
# Perform Inner Join
afinnSent <- inner_join(tidyCorp,afinn, by=c('term' = 'word'))
afinnSent
# Examine the quantity: weight each word's score by its in-document count
afinnSent$afinnAmt <- afinnSent$count * afinnSent$value
# Compare w/polarity and bing
mean(afinnSent$afinnAmt)
# FAKE EXAMPLE: if the documents were related and temporal, make sure they are sorted by time first!
# Example use case : i.e. over time how was the emotional content for a topic i.e. Pakistan articles
afinnTemporal <- aggregate(afinnAmt~document, afinnSent, sum)
afinnTemporal$document <- as.numeric(afinnTemporal$document) # tidy() documents are character ids
afinnTemporal <- afinnTemporal[order(afinnTemporal$document),]
# Quick plot
plot(afinnTemporal$afinnAmt, type="l", main="Quick Timeline of Identified Words")
# Quick Check with the pptx for a reminder.
# Get nrc lexicon; deprecated in tidytext, use library(lexicon)
#nrc <- read.csv('nrcSentimentLexicon.csv')
nrc <- nrc_emotions # wide format: term plus one column per emotion
head(nrc)
# Tidy this up: wide -> long, then keep only term/emotion pairs that apply
nrc <- nrc %>% pivot_longer(-term, names_to = "emotion", values_to = "freq")
nrc <-subset(nrc, nrc$freq>0 )
head(nrc)
nrc$freq <- NULL #no longer needed
# Perform Inner Join
nrcSent <- inner_join(tidyCorp,nrc, by=c('term' = 'term'))
nrcSent
# Radar chart of emotion tallies (echarts4r)
table(nrcSent$emotion)
emos <- data.frame(table(nrcSent$emotion))
names(emos) <- c('emotion', 'termsCt')
emos %>%
e_charts(emotion) %>%
e_radar(termsCt, max = max(emos$termsCt), name = "Emotions") %>%
e_tooltip(trigger = "item") %>% e_theme("dark-mushroom")
# Other Emotion Lexicons Exist
emotionLex <- affect_wordnet # from the lexicon package
emotionLex
table(emotionLex$emotion)
table(emotionLex$category)
# Keep only clearly signed entries
emotionLex <- subset(emotionLex,
emotionLex$emotion=='Positive'|emotionLex$emotion=='Negative')
# More emotional categories, fewer terms
lexSent <- inner_join(tidyCorp,emotionLex, by=c('term' = 'term'))
lexSent
emotionID <- aggregate(count ~ category, lexSent, sum)
emotionID %>%
e_charts(category) %>% e_theme("dark-mushroom") %>%
e_radar(count, max =max(emotionID$count), name = "Emotional Categories") %>%
e_tooltip() %>%
e_theme("dark-mushroom")
# End
# End
|
## WGCNA with bootstrap
## 3.27.16.a
## Ciera Martinez
#This is one more attempt to get a Network with some usable information. This time I am going to
#use Dan Koenig's data. want to make network with JUST cluster 35
## library
library(WGCNA)
options(stringsAsFactors = FALSE)
#enableWGCNAThreads()
ALLOW_WGCNAT_THREADS = 4
library (igraph)
library(ggplot2)
library(reshape)
library(rgl)
library(tcltk2)
## Dan Koenig's Tomato Data. Try different samples.
counts <- read.delim("../data/GSE45774_rpkm_all.txt", header = TRUE)
head(counts)
dim(counts)
#at this point do I need to get rid of all the samples I don't need
#First I need to subset based on the genes I am interested in form my analysis.
#Read in lists of genes from both SOM and superSOM
#SOM_analysis9.5.csv is the large WT only analysis.
#The problem is here, I was unsing the analysis9.5, which is truncated to fit with Yasu's curated data.
SOM <- read.csv("../../08SOM/lcmSOM/data/SOM_analysis9.5forNetwork.csv", header = TRUE)
colnames(SOM)
head(SOM)
#Subset only the columns that specify clusters and genes
SOMsub <- SOM[,c(2,21,22)] # NOTE(review): positional columns -- fragile if the CSV layout changes
names(SOMsub)
#OMG I never subseted for the interesting genes?
# Maybe I icluded everything to make sure I had enough awesome genes? I want to run with just
#cluster 35. Whoop Whoop.
head(SOMsub)
cluster35 <- subset(SOMsub, som.unit.classif == "35")
dim(cluster35)
#Can I make a network with 53 genes?
#isolate only gene names
SOMclusters <- as.data.frame(cluster35[,1])
colnames(SOMclusters)[1] <- "Gene_ID"
#remove duplicates
dim(SOMclusters)
SOMclusters <- unique(SOMclusters)
dim(SOMclusters)
#Now merge with Dan's table to get only genes I am interested in.
#First rename 1st column in counts to gene for merging
colnames(counts)[1] <- "Gene_ID"
#Merge
dim(counts)
head(counts)
dim(SOMclusters)
merged <- merge(SOMclusters, counts, by = "Gene_ID")
#set row and column names R
#remove rows that the have duplicate gene names.
dim(merged)
countsUniq <- unique(merged)
dim(countsUniq)
#no duplicates
# Round-trip through CSV so Gene_ID becomes the row names on re-read
write.csv(countsUniq, file = "countsUniq.csv", row.names = FALSE)
countsUniq <- read.csv("countsUniq.csv", row.name = 1) # NOTE(review): partial arg match; should be row.names
counts <- countsUniq
counts[is.na(counts)] <- 0
#for later
genes <- rownames(counts) #setting the genes names,
#transform data frame
counts.t <- t(counts) # WGCNA expects samples in rows, genes in columns
str(counts.t)
head(counts.t)
#in Yasu's script, why is this used?
#counts[, c(2:64)] <- sapply(counts[, c(2:64)], as.numeric)
#counts.lt=t(log(counts+1))
############################################################
## Bootstrapping for hub gene prediction
B=100 ## select number of bootstrap resamples
powers = c(c(3:50)) #if you get error in the bootsrapping, you might need to the maximum value here.
result=matrix(nrow=ncol(counts.t), ncol=B) # one row per gene, one column per resample
for (i in 1:B){
set.seed(i*100+1) # reproducible, distinct seed per replicate
print(i)
##bootstrap resample
sft.power=30
# Re-draw the bootstrap sample until pickSoftThreshold yields a usable
# power estimate (non-NA and < 30, as required by the TOM step below)
while(sft.power>29 || is.na(sft.power)){#because TOM need power < 30 #softconnecity power < 14
index.b=sample(x=1:nrow(counts.t), size=nrow(counts.t), replace=TRUE)
Y.b=counts.t[index.b,]
##soft thresholding
sft.b = pickSoftThreshold(Y.b, powerVector=powers, RsquaredCut=0.9, verbose = 5)
sft.power = sft.b$powerEstimate
}
print(sft.power)
##TOM
TOM.b = TOMsimilarityFromExpr(Y.b,power=sft.b$powerEstimate) #omega TOM-based connectivity
hub.b = rowSums(TOM.b) # gene connectivity under this resample
#adj.b = adjacency(Y.b,power=sft.b$powerEstimate)
#hub.b = rowSums(adj.b) #k connectivity
result[,i]<-rank(-hub.b) # rank 1 = most connected (hub-like) gene
}
#Annotation Files: map ITAG gene IDs to human-readable symbols
annotation1<- read.delim("../../06diffGeneExp/analysis/data/ITAG2.3_all_Arabidopsis_ITAG_annotations.tsv", header=FALSE) #Changed to the SGN human readable annotation
colnames(annotation1) <- c("ITAG", "SGN_annotation")
annotation2<- read.delim("../../06diffGeneExp/analysis/data/ITAG2.3_all_Arabidopsis_annotated.tsv")
annotation <- merge(annotation1,annotation2, by = "ITAG")
sub.annotation <- annotation[,c(1,4)] #Choose what you want to name them by
# Merge with genes then call which column of genes you want.
table.genes <- as.data.frame(genes)
colnames(table.genes) <- "ITAG"
annotation.merge <- merge(table.genes,sub.annotation, by = "ITAG", all.x = TRUE)
# BUG FIX: merge() returns rows sorted by ITAG, but `genes` must stay aligned
# with the rows of `result`; restore the original gene order explicitly.
annotation.merge <- annotation.merge[match(table.genes$ITAG, annotation.merge$ITAG), ]
#If NA, replace with ITAG column so every gene keeps a label
annotation.merge$gene.name <- ifelse(is.na(annotation.merge$symbol),
                                     annotation.merge$ITAG,
                                     annotation.merge$symbol)
genes <- annotation.merge$gene.name
#Make Ranking: summarise each gene's hub-rank across bootstrap replicates
row.names(result) <- genes
average <- rowMeans(result) # mean rank across the B resamples
sd <- apply(result,1,function(d)sd(d)) # rank stability across resamples
result.n <- cbind(result,average,sd)
result.n <- as.data.frame(result.n)
# BUG FIX: the original selected result.n[,11:12], which are the summary
# columns only when B = 10; with B = 100 that silently picks two bootstrap
# replicates. Select the summary columns by name instead.
result.g <- result.n[, c("average", "sd")]
qplot(average, sd, data=result.g) #low average rank + low sd = consistently central hub
colnames(result.g) <- c("ave.rank","sd.rank")
result.o <- result.g[order(result.g$ave.rank),]
top.hub <- rownames(result.o[1:200,]) # top hub genes
# save the workspace so the expensive bootstrap need not be rerun
save.image(file=paste("boot",B,".WGCNA.Rdata",sep=""))
## visualization
# Choose a set of soft-thresholding powers
powers = c(1:30)
# Call the network topology analysis function
sft=pickSoftThreshold(counts.t,powerVector=powers,RsquaredCut=0.9,verbose=5) #power must be between 1 and 30.
# create TOM (topological overlap matrix)
TOM =TOMsimilarityFromExpr(counts.t,power=14) # power=14 shows R^2=0.9
colnames(TOM)=genes
rownames(TOM)=genes
dim(TOM)
head(TOM,1)
# extract top hub genes, this is where the NAs are occuring.
index.sub=is.element(genes, top.hub) # logical mask of genes ranked as top hubs
subTOM=TOM[index.sub,index.sub]
# only strong interaction is shown
h.subTOM = (subTOM>0.1)*subTOM # only > 0.1 TOM will be shown in network (weaker edges zeroed)
subnet = graph.adjacency(h.subTOM,mode="undirected",weighted=TRUE,diag=FALSE)
between <- betweenness(subnet, normalized=TRUE) # centrality of each hub gene in the subnetwork
#between.a<-merge(between,annotation, by.x="row.names", by.y="Sequence_name", all.x=T,sort=F)
#write.csv(between.a,"betweenness.csv")
head(between[order(-between)])
# visualization
V(subnet)$color <- "mediumturquoise"
#V(net)[community.fastgreedy$membership==1]$color <- "mediumturquoise"
v.label=rep("",length(V(subnet)))
v.label=V(subnet)$name # NOTE(review): overwrites the blank labels set on the previous line
v.size=rep(3,length(V(subnet)))
V(subnet)$shape <- "circle"
pdf("top.hub.test.032715.a.pdf", useDingbats=FALSE)
plot(subnet,
layout=layout.graphopt,
vertex.size=v.size,
vertex.frame.color=NA,
vertex.label=v.label,
vertex.label.cex=0.05,
edge.color="gray57",
edge.width=E(subnet)$weight*0.1)
dev.off()
######################################
# Plot soft-thresholding powers
tiff("SoftThresholding.tif", width=16, height=8, unit="in",compression="lzw",res=100)
par(mfrow = c(1,2)) # two diagnostic panels side by side
cex1 = 0.9
# Scale-free topology fit index as a function of the soft-thresholding power
plot(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
xlab="Soft Threshold (power)",ylab="Scale Free Topology Model Fit,signed R^2",type="n",
main = paste("Scale independence"));
text(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
labels=powers,cex=cex1,col="red");
# this line corresponds to using an R^2 cut-off of h
abline(h=0.90,col="red")
# Mean connectivity as a function of the soft-thresholding power
plot(sft$fitIndices[,1], sft$fitIndices[,5],
xlab="Soft Threshold (power)",ylab="Mean Connectivity", type="n",
main = paste("Mean connectivity"))
text(sft$fitIndices[,1], sft$fitIndices[,5], labels=powers, cex=cex1,col="red")
dev.off()
|
/R/WGCNA.bootstrap.Ciera.03.27.15.a.R
|
no_license
|
Pwahle/10wgcna
|
R
| false
| false
| 7,457
|
r
|
## WGCNA with bootstrap
## 3.27.16.a
## Ciera Martinez
#This is one more attempt to get a Network with some usable information. This time I am going to
#use Dan Koenig's data. want to make network with JUST cluster 35
## library
library(WGCNA)
options(stringsAsFactors = FALSE)
#enableWGCNAThreads()
ALLOW_WGCNAT_THREADS = 4
library (igraph)
library(ggplot2)
library(reshape)
library(rgl)
library(tcltk2)
## Dan Koenig's Tomato Data. Try different samples.
counts <- read.delim("../data/GSE45774_rpkm_all.txt", header = TRUE)
head(counts)
dim(counts)
#at this point do I need to get rid of all the samples I don't need
#First I need to subset based on the genes I am interested in form my analysis.
#Read in lists of genes from both SOM and superSOM
#SOM_analysis9.5.csv is the large WT only analysis.
#The problem is here, I was unsing the analysis9.5, which is truncated to fit with Yasu's curated data.
SOM <- read.csv("../../08SOM/lcmSOM/data/SOM_analysis9.5forNetwork.csv", header = TRUE)
colnames(SOM)
head(SOM)
#Subset only the columns that specify clusters and genes
SOMsub <- SOM[,c(2,21,22)] # NOTE(review): positional columns -- fragile if the CSV layout changes
names(SOMsub)
#OMG I never subseted for the interesting genes?
# Maybe I icluded everything to make sure I had enough awesome genes? I want to run with just
#cluster 35. Whoop Whoop.
head(SOMsub)
cluster35 <- subset(SOMsub, som.unit.classif == "35")
dim(cluster35)
#Can I make a network with 53 genes?
#isolate only gene names
SOMclusters <- as.data.frame(cluster35[,1])
colnames(SOMclusters)[1] <- "Gene_ID"
#remove duplicates
dim(SOMclusters)
SOMclusters <- unique(SOMclusters)
dim(SOMclusters)
#Now merge with Dan's table to get only genes I am interested in.
#First rename 1st column in counts to gene for merging
colnames(counts)[1] <- "Gene_ID"
#Merge
dim(counts)
head(counts)
dim(SOMclusters)
merged <- merge(SOMclusters, counts, by = "Gene_ID")
#set row and column names R
#remove rows that the have duplicate gene names.
dim(merged)
countsUniq <- unique(merged)
dim(countsUniq)
#no duplicates
# Round-trip through CSV so Gene_ID becomes the row names on re-read
write.csv(countsUniq, file = "countsUniq.csv", row.names = FALSE)
countsUniq <- read.csv("countsUniq.csv", row.name = 1) # NOTE(review): partial arg match; should be row.names
counts <- countsUniq
counts[is.na(counts)] <- 0
#for later
genes <- rownames(counts) #setting the genes names,
#transform data frame
counts.t <- t(counts) # WGCNA expects samples in rows, genes in columns
str(counts.t)
head(counts.t)
#in Yasu's script, why is this used?
#counts[, c(2:64)] <- sapply(counts[, c(2:64)], as.numeric)
#counts.lt=t(log(counts+1))
############################################################
## Bootstrapping for hub gene prediction
B=100 ## select number of bootstrap resamples
powers = c(c(3:50)) #if you get error in the bootsrapping, you might need to the maximum value here.
result=matrix(nrow=ncol(counts.t), ncol=B) # one row per gene, one column per resample
for (i in 1:B){
set.seed(i*100+1) # reproducible, distinct seed per replicate
print(i)
##bootstrap resample
sft.power=30
# Re-draw the bootstrap sample until pickSoftThreshold yields a usable
# power estimate (non-NA and < 30, as required by the TOM step below)
while(sft.power>29 || is.na(sft.power)){#because TOM need power < 30 #softconnecity power < 14
index.b=sample(x=1:nrow(counts.t), size=nrow(counts.t), replace=TRUE)
Y.b=counts.t[index.b,]
##soft thresholding
sft.b = pickSoftThreshold(Y.b, powerVector=powers, RsquaredCut=0.9, verbose = 5)
sft.power = sft.b$powerEstimate
}
print(sft.power)
##TOM
TOM.b = TOMsimilarityFromExpr(Y.b,power=sft.b$powerEstimate) #omega TOM-based connectivity
hub.b = rowSums(TOM.b) # gene connectivity under this resample
#adj.b = adjacency(Y.b,power=sft.b$powerEstimate)
#hub.b = rowSums(adj.b) #k connectivity
result[,i]<-rank(-hub.b) # rank 1 = most connected (hub-like) gene
}
#Annotation Files: map ITAG gene IDs to human-readable symbols
annotation1<- read.delim("../../06diffGeneExp/analysis/data/ITAG2.3_all_Arabidopsis_ITAG_annotations.tsv", header=FALSE) #Changed to the SGN human readable annotation
colnames(annotation1) <- c("ITAG", "SGN_annotation")
annotation2<- read.delim("../../06diffGeneExp/analysis/data/ITAG2.3_all_Arabidopsis_annotated.tsv")
annotation <- merge(annotation1,annotation2, by = "ITAG")
sub.annotation <- annotation[,c(1,4)] #Choose what you want to name them by
# Merge with genes then call which column of genes you want.
table.genes <- as.data.frame(genes)
colnames(table.genes) <- "ITAG"
annotation.merge <- merge(table.genes,sub.annotation, by = "ITAG", all.x = TRUE)
# BUG FIX: merge() returns rows sorted by ITAG, but `genes` must stay aligned
# with the rows of `result`; restore the original gene order explicitly.
annotation.merge <- annotation.merge[match(table.genes$ITAG, annotation.merge$ITAG), ]
#If NA, replace with ITAG column so every gene keeps a label
annotation.merge$gene.name <- ifelse(is.na(annotation.merge$symbol),
                                     annotation.merge$ITAG,
                                     annotation.merge$symbol)
genes <- annotation.merge$gene.name
#Make Ranking: summarise each gene's hub-rank across bootstrap replicates
row.names(result) <- genes
average <- rowMeans(result) # mean rank across the B resamples
sd <- apply(result,1,function(d)sd(d)) # rank stability across resamples
result.n <- cbind(result,average,sd)
result.n <- as.data.frame(result.n)
# BUG FIX: the original selected result.n[,11:12], which are the summary
# columns only when B = 10; with B = 100 that silently picks two bootstrap
# replicates. Select the summary columns by name instead.
result.g <- result.n[, c("average", "sd")]
qplot(average, sd, data=result.g) #low average rank + low sd = consistently central hub
colnames(result.g) <- c("ave.rank","sd.rank")
result.o <- result.g[order(result.g$ave.rank),]
top.hub <- rownames(result.o[1:200,]) # top hub genes
# save the workspace so the expensive bootstrap need not be rerun
save.image(file=paste("boot",B,".WGCNA.Rdata",sep=""))
## visualization
# Choose a set of soft-thresholding powers
powers = c(1:30)
# Call the network topology analysis function
sft=pickSoftThreshold(counts.t,powerVector=powers,RsquaredCut=0.9,verbose=5) #power must be between 1 and 30.
# create TOM (topological overlap matrix)
TOM =TOMsimilarityFromExpr(counts.t,power=14) # power=14 shows R^2=0.9
colnames(TOM)=genes
rownames(TOM)=genes
dim(TOM)
head(TOM,1)
# extract top hub genes, this is where the NAs are occuring.
index.sub=is.element(genes, top.hub) # logical mask of genes ranked as top hubs
subTOM=TOM[index.sub,index.sub]
# only strong interaction is shown
h.subTOM = (subTOM>0.1)*subTOM # only > 0.1 TOM will be shown in network (weaker edges zeroed)
subnet = graph.adjacency(h.subTOM,mode="undirected",weighted=TRUE,diag=FALSE)
between <- betweenness(subnet, normalized=TRUE) # centrality of each hub gene in the subnetwork
#between.a<-merge(between,annotation, by.x="row.names", by.y="Sequence_name", all.x=T,sort=F)
#write.csv(between.a,"betweenness.csv")
head(between[order(-between)])
# visualization
V(subnet)$color <- "mediumturquoise"
#V(net)[community.fastgreedy$membership==1]$color <- "mediumturquoise"
v.label=rep("",length(V(subnet)))
v.label=V(subnet)$name # NOTE(review): overwrites the blank labels set on the previous line
v.size=rep(3,length(V(subnet)))
V(subnet)$shape <- "circle"
pdf("top.hub.test.032715.a.pdf", useDingbats=FALSE)
plot(subnet,
layout=layout.graphopt,
vertex.size=v.size,
vertex.frame.color=NA,
vertex.label=v.label,
vertex.label.cex=0.05,
edge.color="gray57",
edge.width=E(subnet)$weight*0.1)
dev.off()
######################################
# Plot soft-thresholding powers
tiff("SoftThresholding.tif", width=16, height=8, unit="in",compression="lzw",res=100)
par(mfrow = c(1,2)) # two diagnostic panels side by side
cex1 = 0.9
# Scale-free topology fit index as a function of the soft-thresholding power
plot(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
xlab="Soft Threshold (power)",ylab="Scale Free Topology Model Fit,signed R^2",type="n",
main = paste("Scale independence"));
text(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
labels=powers,cex=cex1,col="red");
# this line corresponds to using an R^2 cut-off of h
abline(h=0.90,col="red")
# Mean connectivity as a function of the soft-thresholding power
plot(sft$fitIndices[,1], sft$fitIndices[,5],
xlab="Soft Threshold (power)",ylab="Mean Connectivity", type="n",
main = paste("Mean connectivity"))
text(sft$fitIndices[,1], sft$fitIndices[,5], labels=powers, cex=cex1,col="red")
dev.off()
|
# Input-validation tests for matrixTests::row_cor_pearson().
# Pattern: capture() (defined in utils/capture.r) evaluates the expression and
# records any error message; each stopifnot() then pins the exact error text.
library(matrixTests)
source("utils/capture.r")
#--- x argument errors ---------------------------------------------------------
# cannot be missing
err <- 'argument "x" is missing, with no default'
res <- capture(row_cor_pearson())
stopifnot(all.equal(res$error, err))
# cannot be NULL
err <- '"x" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(NULL, 1:2))
stopifnot(all.equal(res$error, err))
# cannot be character
err <- '"x" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(c("1", "2"), 1:2))
stopifnot(all.equal(res$error, err))
# cannot be logical
err <- '"x" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(c(TRUE, FALSE), 1:2))
stopifnot(all.equal(res$error, err))
# cannot be complex
err <- '"x" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(complex(c(1,2), c(3,4)), 1:2))
stopifnot(all.equal(res$error, err))
# cannot be data.frame containing some non numeric data
err <- '"x" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(iris, 1:2))
stopifnot(all.equal(res$error, err))
# cannot be a list
err <- '"x" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(as.list(c(1:5)), 1:2))
stopifnot(all.equal(res$error, err))
# cannot be in a list
err <- '"x" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(list(1:5), 1:2))
stopifnot(all.equal(res$error, err))
#--- y argument errors ---------------------------------------------------------
# (mirror of the x-argument cases above, with arguments swapped)
# cannot be missing
err <- 'argument "y" is missing, with no default'
res <- capture(row_cor_pearson(1))
stopifnot(all.equal(res$error, err))
# cannot be NULL
err <- '"y" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(1, NULL))
stopifnot(all.equal(res$error, err))
# cannot be character
err <- '"y" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(1:2, c("1","2")))
stopifnot(all.equal(res$error, err))
# cannot be logical
err <- '"y" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(1:2, c(TRUE, FALSE)))
stopifnot(all.equal(res$error, err))
# cannot be complex
err <- '"y" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(1:2, complex(c(1,2), c(3,4))))
stopifnot(all.equal(res$error, err))
# cannot be data.frame containing some non numeric data
err <- '"y" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(1:2, iris))
stopifnot(all.equal(res$error, err))
# cannot be a list
err <- '"y" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(1:2, as.list(c(1:5))))
stopifnot(all.equal(res$error, err))
# cannot be in a list
err <- '"y" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(1:2, list(1:5)))
stopifnot(all.equal(res$error, err))
#--- alternative argument errors -----------------------------------------------
err <- '"alternative" must be a character vector with length 1 or nrow(x)'
# cannot be NA
res <- capture(row_cor_pearson(x=1:3, y=2:4, alternative=NA))
stopifnot(all.equal(res$error, err))
# cannot be numeric
res <- capture(row_cor_pearson(x=1:3, y=2:4, alternative=1))
stopifnot(all.equal(res$error, err))
# cannot be complex
res <- capture(row_cor_pearson(x=1:3, y=2:4, alternative=complex(1)))
stopifnot(all.equal(res$error, err))
# cannot be in a list
res <- capture(row_cor_pearson(x=1:3, y=2:4, alternative=list("less")))
stopifnot(all.equal(res$error, err))
# cannot be a data frame
res <- capture(row_cor_pearson(x=1:3, y=2:4, alternative=data.frame("less")))
stopifnot(all.equal(res$error, err))
err <- 'all "alternative" values must be in: two.sided, less, greater'
# must be in correct set
res <- capture(row_cor_pearson(x=1:3, y=2:4, alternative="ga"))
stopifnot(all.equal(res$error, err))
# error produced even when some are correct
res <- capture(row_cor_pearson(x=matrix(1:10, nrow=2), y=matrix(1:10, nrow=2), alternative=c("g","c")))
stopifnot(all.equal(res$error, err))
#--- conf.level argument errors ------------------------------------------------
err <- '"conf.level" must be a numeric vector with length 1 or nrow(x)'
# cannot be character
res <- capture(row_cor_pearson(x=1:3, y=2:4, conf.level="0.95"))
stopifnot(all.equal(res$error, err))
# cannot be complex
res <- capture(row_cor_pearson(x=1:3, y=2:4, conf.level=complex(0.95)))
stopifnot(all.equal(res$error, err))
# cannot be in a list
res <- capture(row_cor_pearson(x=1:3, y=2:4, conf.level=list(0.95)))
stopifnot(all.equal(res$error, err))
# cannot be a data frame
res <- capture(row_cor_pearson(x=1:3, y=2:4, conf.level=data.frame(0.95)))
stopifnot(all.equal(res$error, err))
err <- 'all "conf.level" values must be between: 0 and 1'
# cannot be below 0
res <- capture(row_cor_pearson(x=1:3, y=2:4, conf.level=-0.0001))
stopifnot(all.equal(res$error, err))
# cannot be above 1
res <- capture(row_cor_pearson(x=1:3, y=2:4, conf.level=1.0001))
stopifnot(all.equal(res$error, err))
#--- dimension mismatch errors -------------------------------------------------
# y number of rows must match x number of rows
err <- '"x" and "y" must have the same number of rows'
x <- matrix(1:10, nrow=2)
y <- matrix(1:10, nrow=5)
res <- capture(row_cor_pearson(x, y))
stopifnot(all.equal(res$error, err))
# y number of columns must match x number of columns
err <- '"x" and "y" must have the same number of columns'
x <- matrix(1:10, nrow=2)
y <- matrix(1:20, nrow=2)
res <- capture(row_cor_pearson(x, y))
stopifnot(all.equal(res$error, err))
# alternative must match x number of rows
err <- '"alternative" must be a character vector with length 1 or nrow(x)'
x <- matrix(1:12, nrow=4)
y <- matrix(1:12, nrow=4)
res <- capture(row_cor_pearson(x, y, alternative=c("g","l")))
stopifnot(all.equal(res$error, err))
# conf.level must match x number of rows
err <- '"conf.level" must be a numeric vector with length 1 or nrow(x)'
x <- matrix(1:12, nrow=4)
y <- matrix(1:12, nrow=4)
res <- capture(row_cor_pearson(x, y, conf.level=c(0.95, 0.99)))
stopifnot(all.equal(res$error, err))
|
/tests/cor_pearson_errors.r
|
no_license
|
cran/matrixTests
|
R
| false
| false
| 6,010
|
r
|
# Input-validation tests for matrixTests::row_cor_pearson().
# Each case runs a deliberately bad call through capture() (sourced from
# utils/capture.r; assumed to return a list whose $error element holds the
# error message -- confirm against that helper) and compares the message
# verbatim with stopifnot(all.equal(...)).
library(matrixTests)
source("utils/capture.r")
#--- x argument errors ---------------------------------------------------------
# cannot be missing
err <- 'argument "x" is missing, with no default'
res <- capture(row_cor_pearson())
stopifnot(all.equal(res$error, err))
# cannot be NULL
err <- '"x" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(NULL, 1:2))
stopifnot(all.equal(res$error, err))
# cannot be character
err <- '"x" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(c("1", "2"), 1:2))
stopifnot(all.equal(res$error, err))
# cannot be logical
err <- '"x" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(c(TRUE, FALSE), 1:2))
stopifnot(all.equal(res$error, err))
# cannot be complex
# NOTE(review): in complex() the first positional argument is length.out, so
# complex(c(1,2), c(3,4)) does not build 1+3i, 2+4i; the result is still a
# complex-typed vector, which is what this case needs to trigger the error.
err <- '"x" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(complex(c(1,2), c(3,4)), 1:2))
stopifnot(all.equal(res$error, err))
# cannot be data.frame containing some non numeric data
err <- '"x" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(iris, 1:2))
stopifnot(all.equal(res$error, err))
# cannot be a list
err <- '"x" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(as.list(c(1:5)), 1:2))
stopifnot(all.equal(res$error, err))
# cannot be in a list
err <- '"x" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(list(1:5), 1:2))
stopifnot(all.equal(res$error, err))
#--- y argument errors ---------------------------------------------------------
# cannot be missing
err <- 'argument "y" is missing, with no default'
res <- capture(row_cor_pearson(1))
stopifnot(all.equal(res$error, err))
# cannot be NULL
err <- '"y" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(1, NULL))
stopifnot(all.equal(res$error, err))
# cannot be character
err <- '"y" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(1:2, c("1","2")))
stopifnot(all.equal(res$error, err))
# cannot be logical
err <- '"y" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(1:2, c(TRUE, FALSE)))
stopifnot(all.equal(res$error, err))
# cannot be complex
err <- '"y" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(1:2, complex(c(1,2), c(3,4))))
stopifnot(all.equal(res$error, err))
# cannot be data.frame containing some non numeric data
err <- '"y" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(1:2, iris))
stopifnot(all.equal(res$error, err))
# cannot be a list
err <- '"y" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(1:2, as.list(c(1:5))))
stopifnot(all.equal(res$error, err))
# cannot be in a list
err <- '"y" must be a numeric matrix or vector'
res <- capture(row_cor_pearson(1:2, list(1:5)))
stopifnot(all.equal(res$error, err))
#--- alternative argument errors -----------------------------------------------
err <- '"alternative" must be a character vector with length 1 or nrow(x)'
# cannot be NA
res <- capture(row_cor_pearson(x=1:3, y=2:4, alternative=NA))
stopifnot(all.equal(res$error, err))
# cannot be numeric
res <- capture(row_cor_pearson(x=1:3, y=2:4, alternative=1))
stopifnot(all.equal(res$error, err))
# cannot be complex
res <- capture(row_cor_pearson(x=1:3, y=2:4, alternative=complex(1)))
stopifnot(all.equal(res$error, err))
# cannot be in a list
res <- capture(row_cor_pearson(x=1:3, y=2:4, alternative=list("less")))
stopifnot(all.equal(res$error, err))
# cannot be a data frame
res <- capture(row_cor_pearson(x=1:3, y=2:4, alternative=data.frame("less")))
stopifnot(all.equal(res$error, err))
err <- 'all "alternative" values must be in: two.sided, less, greater'
# must be in correct set
res <- capture(row_cor_pearson(x=1:3, y=2:4, alternative="ga"))
stopifnot(all.equal(res$error, err))
# error produced even when some are correct
res <- capture(row_cor_pearson(x=matrix(1:10, nrow=2), y=matrix(1:10, nrow=2), alternative=c("g","c")))
stopifnot(all.equal(res$error, err))
#--- conf.level argument errors ------------------------------------------------
err <- '"conf.level" must be a numeric vector with length 1 or nrow(x)'
# cannot be character
res <- capture(row_cor_pearson(x=1:3, y=2:4, conf.level="0.95"))
stopifnot(all.equal(res$error, err))
# cannot be complex
# NOTE(review): complex(0.95) truncates length.out to 0, yielding a
# zero-length complex vector; the complex-type check should still trigger.
res <- capture(row_cor_pearson(x=1:3, y=2:4, conf.level=complex(0.95)))
stopifnot(all.equal(res$error, err))
# cannot be in a list
res <- capture(row_cor_pearson(x=1:3, y=2:4, conf.level=list(0.95)))
stopifnot(all.equal(res$error, err))
# cannot be a data frame
res <- capture(row_cor_pearson(x=1:3, y=2:4, conf.level=data.frame(0.95)))
stopifnot(all.equal(res$error, err))
err <- 'all "conf.level" values must be between: 0 and 1'
# cannot be below 0
res <- capture(row_cor_pearson(x=1:3, y=2:4, conf.level=-0.0001))
stopifnot(all.equal(res$error, err))
# cannot be above 1
res <- capture(row_cor_pearson(x=1:3, y=2:4, conf.level=1.0001))
stopifnot(all.equal(res$error, err))
#--- dimension mismatch errors -------------------------------------------------
# y number of rows must match x number of rows
err <- '"x" and "y" must have the same number of rows'
x <- matrix(1:10, nrow=2)
y <- matrix(1:10, nrow=5)
res <- capture(row_cor_pearson(x, y))
stopifnot(all.equal(res$error, err))
# y number of columns must match x number of columns
err <- '"x" and "y" must have the same number of columns'
x <- matrix(1:10, nrow=2)
y <- matrix(1:20, nrow=2)
res <- capture(row_cor_pearson(x, y))
stopifnot(all.equal(res$error, err))
# alternative must match x number of rows
err <- '"alternative" must be a character vector with length 1 or nrow(x)'
x <- matrix(1:12, nrow=4)
y <- matrix(1:12, nrow=4)
res <- capture(row_cor_pearson(x, y, alternative=c("g","l")))
stopifnot(all.equal(res$error, err))
# conf.level must match x number of rows
err <- '"conf.level" must be a numeric vector with length 1 or nrow(x)'
x <- matrix(1:12, nrow=4)
y <- matrix(1:12, nrow=4)
res <- capture(row_cor_pearson(x, y, conf.level=c(0.95, 0.99)))
stopifnot(all.equal(res$error, err))
|
# 02_Y1Analysis.R -- Year-1 pre/post workshop survey analysis.
# Depends on objects built by 00_MergedY1Cohort.R (Y1, Y1.b) and on
# describe()/describeBy() -- presumably psych::describe / psych::describeBy,
# loaded by the sourced script; TODO confirm.
setwd("~/Dropbox/NFW-EFW Analyses/NFW-EFW/Year 1")
source("00_MergedY1Cohort.R")
# Frequency tables for each pre/post survey item.
table(Y1$Know.Pre); table(Y1$Know.Post)
table(Y1$Skill.Pre); table(Y1$Skill.Post)
table(Y1$Eff.Pre); table(Y1$Eff.Post)
table(Y1$Mot.Pre); table(Y1$Mot.Post)
table(Y1$SuppOth.Pre); table(Y1$SuppOth.Post)
table(Y1$SuppCol.Pre); table(Y1$SuppCol.Post)
table(Y1$Eval.Pre); table(Y1$Eval.Post)
# Generate InExper and UgradinNA Variable
# InExper: 1 if fewer than 3 years teaching experience, else 0.
# UgradinNA: 1 if Ugrad coded 6 (assumed "undergrad degree in North America"
# from the variable name -- verify against the survey codebook).
Y1$InExper <- ifelse(Y1$TchExp<3, 1, 0)
Y1$UgradinNA <- ifelse(Y1$Ugrad==6, 1, 0)
# Pre- Post- Descriptives with Gains
# One subset per item (I1..I7), keeping only rows with a non-missing gain
# score. Columns are selected by position (id, pre, post, then grouping and
# gain columns) -- fragile if 00_MergedY1Cohort.R changes its column order;
# verify the indices there before re-running.
I1 <- Y1[is.na(Y1$Know.Gain)==FALSE,c(1,3,10,17,22,25,32:33)]
I2 <- Y1[is.na(Y1$Skill.Gain)==FALSE,c(1,4,11,17,22,26,32:33)]
I3 <- Y1[is.na(Y1$Eff.Gain)==FALSE,c(1,5,12,17,22,27,32:33)]
I4 <- Y1[is.na(Y1$Mot.Gain)==FALSE,c(1,6,13,17,22,28,32:33)]
I5 <- Y1[is.na(Y1$SuppOth.Gain)==FALSE,c(1,7,14,17,22,29,32:33)]
I6 <- Y1[is.na(Y1$SuppCol.Gain)==FALSE,c(1,8,15,17,22,30,32:33)]
I7 <- Y1[is.na(Y1$Eval.Gain)==FALSE,c(1,9,16,17,22,31,32:33)]
# Descriptives for pre (col 2), post (col 3) and gain (col 4); sd() pools the
# pre+post values as an effect-size denominator. The bare arithmetic line
# after each pair is a hand-entered effect size (difference of means divided
# by the pooled SD) with the means hard-coded from the describe() output
# above -- these must be recomputed if the data change.
describe(I1[,2:4]); sd(as.numeric(as.matrix(I1[,2:3])))
(3.28-2.80)/0.7264963
describe(I2[,2:4]); sd(as.numeric(as.matrix(I2[,2:3])))
(2.64-2.40)/0.713366
describe(I3[,2:4]); sd(as.numeric(as.matrix(I3[,2:3])))
(3.57-3.56)/0.6288884
describe(I4[,2:4]); sd(as.numeric(as.matrix(I4[,2:3])))
(3.52-3.51)/0.6278783
describe(I5[,2:4]); sd(as.numeric(as.matrix(I5[,2:3])))
(3.07-3.35)/0.815443
describe(I6[,2:4]); sd(as.numeric(as.matrix(I6[,2:3])))
(3.04-3.05)/0.866811
describe(I7[,2:4]); sd(as.numeric(as.matrix(I7[,2:3])))
(2.93-3.18)/0.7842666
# Pre- Post- Gain Distributions
# Bar charts of pre (col 2), post (col 3) and gain (col 4) per item. The
# c(0, ...) wrappers prepend a zero-height bar -- presumably to keep bars
# aligned when a response level is unobserved; confirm against the plots.
barplot(table(I1[,2]), ylim=c(0,100))
barplot(table(I1[,3]), ylim=c(0,100))
barplot(table(I1[,4]), ylim=c(0,100))
barplot(table(I2[,2]), ylim=c(0,100))
barplot(table(I2[,3]), ylim=c(0,100))
barplot(table(I2[,4]), ylim=c(0,100))
barplot(table(I3[,2]), ylim=c(0,100))
barplot(c(0, table(I3[,3])), ylim=c(0,100))
barplot(table(I3[,4]), ylim=c(0,100))
barplot(c(0,table(I4[,2])), ylim=c(0,100))
barplot(c(0,table(I4[,3])), ylim=c(0,100))
barplot(table(I4[,4]), ylim=c(0,100))
barplot(table(I5[,2]), ylim=c(0,100))
barplot(table(I5[,3]), ylim=c(0,100))
barplot(table(I5[,4]), ylim=c(0,100))
barplot(table(I6[,2]), ylim=c(0,100))
barplot(table(I6[,3]), ylim=c(0,100))
barplot(table(I6[,4]), ylim=c(0,100))
barplot(table(I7[,2]), ylim=c(0,100))
barplot(table(I7[,3]), ylim=c(0,100))
barplot(table(I7[,4]), ylim=c(0,100))
# Gender Analysis
# Pre/post descriptives split by gender (column 5 of each subset).
describeBy(I1[,c(2:3,5)], group=I1$Gender)
describeBy(I2[,c(2:3,5)], group=I2$Gender)
describeBy(I3[,c(2:3,5)], group=I3$Gender)
describeBy(I4[,c(2:3,5)], group=I4$Gender)
describeBy(I5[,c(2:3,5)], group=I5$Gender)
describeBy(I6[,c(2:3,5)], group=I6$Gender)
describeBy(I7[,c(2:3,5)], group=I7$Gender)
# Percent of Active Learning
describeBy(I1[,c(2:3,6)], group=I1$ClassTime)
describeBy(I2[,c(2:3,6)], group=I2$ClassTime)
describeBy(I3[,c(2:3,6)], group=I3$ClassTime)
describeBy(I4[,c(2:3,6)], group=I4$ClassTime)
describeBy(I5[,c(2:3,6)], group=I5$ClassTime)
describeBy(I6[,c(2:3,6)], group=I6$ClassTime)
describeBy(I7[,c(2:3,6)], group=I7$ClassTime)
# Teaching Experience
# NOTE(review): this block and the two below reuse display column 6 (the same
# column as the ClassTime block) while grouping on InExper/UgradinNA --
# confirm column 6 is the intended column here.
describeBy(I1[,c(2:3,6)], group=I1$InExper)
describeBy(I2[,c(2:3,6)], group=I2$InExper)
describeBy(I3[,c(2:3,6)], group=I3$InExper)
describeBy(I4[,c(2:3,6)], group=I4$InExper)
describeBy(I5[,c(2:3,6)], group=I5$InExper)
describeBy(I6[,c(2:3,6)], group=I6$InExper)
describeBy(I7[,c(2:3,6)], group=I7$InExper)
# Ugrad in American
describeBy(I1[,c(2:3,6)], group=I1$UgradinNA)
describeBy(I2[,c(2:3,6)], group=I2$UgradinNA)
describeBy(I3[,c(2:3,6)], group=I3$UgradinNA)
describeBy(I4[,c(2:3,6)], group=I4$UgradinNA)
describeBy(I5[,c(2:3,6)], group=I5$UgradinNA)
describeBy(I6[,c(2:3,6)], group=I6$UgradinNA)
describeBy(I7[,c(2:3,6)], group=I7$UgradinNA)
# Distributions of Ratings
# Drop the June2015 cohort, then reverse-code rating columns 17:18 (5 - x).
Y1.b <- Y1.b[Y1.b$Cohort!="June2015",]
Y1.b[,17:18] <- 5-Y1.b[,17:18]
barplot(c(0,table(Y1.b$TheOverallQualityoftheWorkshopExceeded...)), ylim=c(0,80))
barplot(c(0,table(Y1.b$`I gained a broad perspective...`)), ylim=c(0,80))
describe(Y1.b[,17:18])
# Correlations
# Convert any NAs to 0s in the item columns (3:16) before totalling.
Y1[,c(3:16)][is.na(Y1[,c(3:16)])] <- 0
# Create a new variable for the total score for pre- and post-survey,
# rescaled to 0-100. The divisor 28 is presumably 7 items x a maximum item
# score of 4 -- confirm the item scale maximum.
Y1$PreTotal <- Y1$Know.Pre + Y1$Skill.Pre + Y1$Eff.Pre + Y1$Mot.Pre + Y1$SuppOth.Pre +
Y1$SuppCol.Pre + Y1$Eval.Pre
Y1$PreTotal <- (Y1$PreTotal/28)*100
Y1$PostTotal <- Y1$Know.Post + Y1$Skill.Post + Y1$Eff.Post + Y1$Mot.Post + Y1$SuppOth.Post +
Y1$SuppCol.Post + Y1$Eval.Post
Y1$PostTotal <- (Y1$PostTotal/28)*100
# Pre/post total correlation and scatter plot. The text() annotations carry
# hand-entered r values from the cor() calls -- update them if data change.
cor(Y1$PreTotal,Y1$PostTotal)
plot(Y1$PreTotal,Y1$PostTotal,
ylim=c(0,100), ylab="Post Total Score",
xlim=c(0,100), xlab="Pre Total Score")
text (90,20, "r=0.66")
# Side-by-side panels: PIPS subscale totals vs survey totals.
par(mfrow=c(1,2))
cor(Y1$PIPS.C1,Y1$PreTotal, use="complete.obs")
plot(Y1$PIPS.C1,Y1$PreTotal,
ylim=c(0,100), ylab="PIPS Student-Centered Total Score",
xlim=c(0,100), xlab="Pre Total Score")
text (90,10, "r = 0.35")
cor(Y1$PIPS.C2,Y1$PreTotal, use="complete.obs")
plot(Y1$PIPS.C2,Y1$PreTotal,
ylim=c(0,100), ylab="PIPS Teacher-Centered Total Score",
xlim=c(0,100), xlab="Pre Total Score")
text (90,10, "r = -0.08")
cor(Y1$PIPS.C1,Y1$PostTotal, use="complete.obs")
plot(Y1$PIPS.C1,Y1$PostTotal,
ylim=c(0,100), ylab="PIPS Student-Centered Total Score",
xlim=c(0,100), xlab="Post Total Score")
text (90,10, "r = 0.22")
cor(Y1$PIPS.C2,Y1$PostTotal, use="complete.obs")
plot(Y1$PIPS.C2,Y1$PostTotal,
ylim=c(0,100), ylab="PIPS Teacher-Centered Total Score",
xlim=c(0,100), xlab="Post Total Score")
text (90,10, "r = -0.03")
|
/Y1 June15-June16/02_Y1Analysis.R
|
no_license
|
rajendrc/NFW-EFW
|
R
| false
| false
| 5,560
|
r
|
# 02_Y1Analysis.R -- Year-1 pre/post workshop survey analysis.
# Depends on objects built by 00_MergedY1Cohort.R (Y1, Y1.b) and on
# describe()/describeBy() -- presumably psych::describe / psych::describeBy,
# loaded by the sourced script; TODO confirm.
setwd("~/Dropbox/NFW-EFW Analyses/NFW-EFW/Year 1")
source("00_MergedY1Cohort.R")
# Frequency tables for each pre/post survey item.
table(Y1$Know.Pre); table(Y1$Know.Post)
table(Y1$Skill.Pre); table(Y1$Skill.Post)
table(Y1$Eff.Pre); table(Y1$Eff.Post)
table(Y1$Mot.Pre); table(Y1$Mot.Post)
table(Y1$SuppOth.Pre); table(Y1$SuppOth.Post)
table(Y1$SuppCol.Pre); table(Y1$SuppCol.Post)
table(Y1$Eval.Pre); table(Y1$Eval.Post)
# Generate InExper and UgradinNA Variable
# InExper: 1 if fewer than 3 years teaching experience, else 0.
# UgradinNA: 1 if Ugrad coded 6 (assumed "undergrad degree in North America"
# from the variable name -- verify against the survey codebook).
Y1$InExper <- ifelse(Y1$TchExp<3, 1, 0)
Y1$UgradinNA <- ifelse(Y1$Ugrad==6, 1, 0)
# Pre- Post- Descriptives with Gains
# One subset per item (I1..I7), keeping only rows with a non-missing gain
# score. Columns are selected by position (id, pre, post, then grouping and
# gain columns) -- fragile if 00_MergedY1Cohort.R changes its column order;
# verify the indices there before re-running.
I1 <- Y1[is.na(Y1$Know.Gain)==FALSE,c(1,3,10,17,22,25,32:33)]
I2 <- Y1[is.na(Y1$Skill.Gain)==FALSE,c(1,4,11,17,22,26,32:33)]
I3 <- Y1[is.na(Y1$Eff.Gain)==FALSE,c(1,5,12,17,22,27,32:33)]
I4 <- Y1[is.na(Y1$Mot.Gain)==FALSE,c(1,6,13,17,22,28,32:33)]
I5 <- Y1[is.na(Y1$SuppOth.Gain)==FALSE,c(1,7,14,17,22,29,32:33)]
I6 <- Y1[is.na(Y1$SuppCol.Gain)==FALSE,c(1,8,15,17,22,30,32:33)]
I7 <- Y1[is.na(Y1$Eval.Gain)==FALSE,c(1,9,16,17,22,31,32:33)]
# Descriptives for pre (col 2), post (col 3) and gain (col 4); sd() pools the
# pre+post values as an effect-size denominator. The bare arithmetic line
# after each pair is a hand-entered effect size (difference of means divided
# by the pooled SD) with the means hard-coded from the describe() output
# above -- these must be recomputed if the data change.
describe(I1[,2:4]); sd(as.numeric(as.matrix(I1[,2:3])))
(3.28-2.80)/0.7264963
describe(I2[,2:4]); sd(as.numeric(as.matrix(I2[,2:3])))
(2.64-2.40)/0.713366
describe(I3[,2:4]); sd(as.numeric(as.matrix(I3[,2:3])))
(3.57-3.56)/0.6288884
describe(I4[,2:4]); sd(as.numeric(as.matrix(I4[,2:3])))
(3.52-3.51)/0.6278783
describe(I5[,2:4]); sd(as.numeric(as.matrix(I5[,2:3])))
(3.07-3.35)/0.815443
describe(I6[,2:4]); sd(as.numeric(as.matrix(I6[,2:3])))
(3.04-3.05)/0.866811
describe(I7[,2:4]); sd(as.numeric(as.matrix(I7[,2:3])))
(2.93-3.18)/0.7842666
# Pre- Post- Gain Distributions
# Bar charts of pre (col 2), post (col 3) and gain (col 4) per item. The
# c(0, ...) wrappers prepend a zero-height bar -- presumably to keep bars
# aligned when a response level is unobserved; confirm against the plots.
barplot(table(I1[,2]), ylim=c(0,100))
barplot(table(I1[,3]), ylim=c(0,100))
barplot(table(I1[,4]), ylim=c(0,100))
barplot(table(I2[,2]), ylim=c(0,100))
barplot(table(I2[,3]), ylim=c(0,100))
barplot(table(I2[,4]), ylim=c(0,100))
barplot(table(I3[,2]), ylim=c(0,100))
barplot(c(0, table(I3[,3])), ylim=c(0,100))
barplot(table(I3[,4]), ylim=c(0,100))
barplot(c(0,table(I4[,2])), ylim=c(0,100))
barplot(c(0,table(I4[,3])), ylim=c(0,100))
barplot(table(I4[,4]), ylim=c(0,100))
barplot(table(I5[,2]), ylim=c(0,100))
barplot(table(I5[,3]), ylim=c(0,100))
barplot(table(I5[,4]), ylim=c(0,100))
barplot(table(I6[,2]), ylim=c(0,100))
barplot(table(I6[,3]), ylim=c(0,100))
barplot(table(I6[,4]), ylim=c(0,100))
barplot(table(I7[,2]), ylim=c(0,100))
barplot(table(I7[,3]), ylim=c(0,100))
barplot(table(I7[,4]), ylim=c(0,100))
# Gender Analysis
# Pre/post descriptives split by gender (column 5 of each subset).
describeBy(I1[,c(2:3,5)], group=I1$Gender)
describeBy(I2[,c(2:3,5)], group=I2$Gender)
describeBy(I3[,c(2:3,5)], group=I3$Gender)
describeBy(I4[,c(2:3,5)], group=I4$Gender)
describeBy(I5[,c(2:3,5)], group=I5$Gender)
describeBy(I6[,c(2:3,5)], group=I6$Gender)
describeBy(I7[,c(2:3,5)], group=I7$Gender)
# Percent of Active Learning
describeBy(I1[,c(2:3,6)], group=I1$ClassTime)
describeBy(I2[,c(2:3,6)], group=I2$ClassTime)
describeBy(I3[,c(2:3,6)], group=I3$ClassTime)
describeBy(I4[,c(2:3,6)], group=I4$ClassTime)
describeBy(I5[,c(2:3,6)], group=I5$ClassTime)
describeBy(I6[,c(2:3,6)], group=I6$ClassTime)
describeBy(I7[,c(2:3,6)], group=I7$ClassTime)
# Teaching Experience
# NOTE(review): this block and the two below reuse display column 6 (the same
# column as the ClassTime block) while grouping on InExper/UgradinNA --
# confirm column 6 is the intended column here.
describeBy(I1[,c(2:3,6)], group=I1$InExper)
describeBy(I2[,c(2:3,6)], group=I2$InExper)
describeBy(I3[,c(2:3,6)], group=I3$InExper)
describeBy(I4[,c(2:3,6)], group=I4$InExper)
describeBy(I5[,c(2:3,6)], group=I5$InExper)
describeBy(I6[,c(2:3,6)], group=I6$InExper)
describeBy(I7[,c(2:3,6)], group=I7$InExper)
# Ugrad in American
describeBy(I1[,c(2:3,6)], group=I1$UgradinNA)
describeBy(I2[,c(2:3,6)], group=I2$UgradinNA)
describeBy(I3[,c(2:3,6)], group=I3$UgradinNA)
describeBy(I4[,c(2:3,6)], group=I4$UgradinNA)
describeBy(I5[,c(2:3,6)], group=I5$UgradinNA)
describeBy(I6[,c(2:3,6)], group=I6$UgradinNA)
describeBy(I7[,c(2:3,6)], group=I7$UgradinNA)
# Distributions of Ratings
# Drop the June2015 cohort, then reverse-code rating columns 17:18 (5 - x).
Y1.b <- Y1.b[Y1.b$Cohort!="June2015",]
Y1.b[,17:18] <- 5-Y1.b[,17:18]
barplot(c(0,table(Y1.b$TheOverallQualityoftheWorkshopExceeded...)), ylim=c(0,80))
barplot(c(0,table(Y1.b$`I gained a broad perspective...`)), ylim=c(0,80))
describe(Y1.b[,17:18])
# Correlations
# Convert any NAs to 0s in the item columns (3:16) before totalling.
Y1[,c(3:16)][is.na(Y1[,c(3:16)])] <- 0
# Create a new variable for the total score for pre- and post-survey,
# rescaled to 0-100. The divisor 28 is presumably 7 items x a maximum item
# score of 4 -- confirm the item scale maximum.
Y1$PreTotal <- Y1$Know.Pre + Y1$Skill.Pre + Y1$Eff.Pre + Y1$Mot.Pre + Y1$SuppOth.Pre +
Y1$SuppCol.Pre + Y1$Eval.Pre
Y1$PreTotal <- (Y1$PreTotal/28)*100
Y1$PostTotal <- Y1$Know.Post + Y1$Skill.Post + Y1$Eff.Post + Y1$Mot.Post + Y1$SuppOth.Post +
Y1$SuppCol.Post + Y1$Eval.Post
Y1$PostTotal <- (Y1$PostTotal/28)*100
# Pre/post total correlation and scatter plot. The text() annotations carry
# hand-entered r values from the cor() calls -- update them if data change.
cor(Y1$PreTotal,Y1$PostTotal)
plot(Y1$PreTotal,Y1$PostTotal,
ylim=c(0,100), ylab="Post Total Score",
xlim=c(0,100), xlab="Pre Total Score")
text (90,20, "r=0.66")
# Side-by-side panels: PIPS subscale totals vs survey totals.
par(mfrow=c(1,2))
cor(Y1$PIPS.C1,Y1$PreTotal, use="complete.obs")
plot(Y1$PIPS.C1,Y1$PreTotal,
ylim=c(0,100), ylab="PIPS Student-Centered Total Score",
xlim=c(0,100), xlab="Pre Total Score")
text (90,10, "r = 0.35")
cor(Y1$PIPS.C2,Y1$PreTotal, use="complete.obs")
plot(Y1$PIPS.C2,Y1$PreTotal,
ylim=c(0,100), ylab="PIPS Teacher-Centered Total Score",
xlim=c(0,100), xlab="Pre Total Score")
text (90,10, "r = -0.08")
cor(Y1$PIPS.C1,Y1$PostTotal, use="complete.obs")
plot(Y1$PIPS.C1,Y1$PostTotal,
ylim=c(0,100), ylab="PIPS Student-Centered Total Score",
xlim=c(0,100), xlab="Post Total Score")
text (90,10, "r = 0.22")
cor(Y1$PIPS.C2,Y1$PostTotal, use="complete.obs")
plot(Y1$PIPS.C2,Y1$PostTotal,
ylim=c(0,100), ylab="PIPS Teacher-Centered Total Score",
xlim=c(0,100), xlab="Post Total Score")
text (90,10, "r = -0.03")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/survival.r
\name{survival.data.frame}
\alias{survival.data.frame}
\title{Compute survival (data.frame input)}
\usage{
survival.data.frame(
df,
times = NULL,
digits = 2,
followup = FALSE,
label = FALSE
)
}
\arguments{
\item{df}{df}
\item{times}{times}
\item{digits}{digits}
\item{followup}{followup}
\item{label}{label}
}
\description{
Compute survival (data.frame input)
}
\author{
David Hajage
}
|
/man/survival.data.frame.Rd
|
no_license
|
DanChaltiel/biostat2
|
R
| false
| true
| 489
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/survival.r
\name{survival.data.frame}
\alias{survival.data.frame}
\title{Compute survival (data.frame input)}
\usage{
survival.data.frame(
df,
times = NULL,
digits = 2,
followup = FALSE,
label = FALSE
)
}
\arguments{
\item{df}{df}
\item{times}{times}
\item{digits}{digits}
\item{followup}{followup}
\item{label}{label}
}
\description{
Compute survival (data.frame input)
}
\author{
David Hajage
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/des_gs_pois.R
\name{des_gs_pois}
\alias{des_gs_pois}
\title{Design a multi-stage group-sequential multi-arm clinical trial for a
Poisson distributed primary outcome}
\usage{
des_gs_pois(
K = 2,
J = 2,
alpha = 0.025,
beta = 0.1,
lambda0 = 5,
delta1 = 1,
delta0 = 0,
ratio = 1,
power = "marginal",
stopping = "simultaneous",
type = "variable",
fshape = "pocock",
eshape = "pocock",
ffix = -3,
efix = 3,
spacing = (1:J)/J,
integer = FALSE,
summary = FALSE
)
}
\arguments{
\item{K}{A \code{\link{numeric}} indicating the chosen value for
\ifelse{html}{\out{<i>K</i>}}{\eqn{K}}, the (initial) number of experimental
treatment arms. Defaults to \code{2}.}
\item{J}{A \code{\link{numeric}} indicating the chosen value for
\ifelse{html}{\out{<i>J</i>}}{\eqn{J}}, the (maximum) number of allowed
stages. Defaults to \code{2}.}
\item{alpha}{A \code{\link{numeric}} indicating the chosen value for
\ifelse{html}{\out{<i>α</i>}}{\eqn{\alpha}}, the significance level
(family-wise error-rate). Defaults to \code{0.025}.}
\item{beta}{A \code{\link{numeric}} indicating the chosen value for
\ifelse{html}{\out{<i>β</i>}}{\eqn{\beta}}, used in the definition of
the desired power. Defaults to \code{0.1}.}
\item{lambda0}{A \code{\link{numeric}} indicating the chosen value for
\ifelse{html}{\out{<i>λ</i><sub>0</sub>}}{\eqn{\lambda_0}}, the
event rate in the control arm. Defaults to \code{5}.}
\item{delta1}{A \code{\link{numeric}} indicating the chosen value for
\ifelse{html}{\out{<i>δ</i><sub>1</sub>}}{\eqn{\delta_1}}, the
'interesting' treatment effect. Defaults to \code{1}.}
\item{delta0}{A \code{\link{numeric}} indicating the chosen value for
\ifelse{html}{\out{<i>δ</i><sub>0</sub>}}{\eqn{\delta_0}}, the
'uninteresting' treatment effect. Defaults to \code{0}.}
\item{ratio}{A \code{\link{numeric}} indicating the chosen value for
\ifelse{html}{\out{<i>r</i>}}{\eqn{r}}, the stage-wise allocation ratio to
present experimental arms. Defaults to \code{1}.}
\item{power}{A \code{\link{character}} string indicating the chosen type of
power to design the trial for. Can be \code{"disjunctive"} or
\code{"marginal"}. Defaults to \code{"marginal"}.}
\item{stopping}{A \code{\link{character}} string indicating the chosen type
of stopping rule. Can be \code{"separate"} or \code{"simultaneous"}. Defaults
to \code{"simultaneous"}.}
\item{type}{A \code{\link{character}} string indicating the choice for the
stage-wise sample size. Can be \code{"variable"} or \code{"fixed"}. Defaults
to \code{"variable"}.}
\item{fshape}{A \code{\link{character}} string indicating the choice for the
futility (lower) stopping boundaries. Can be any of \code{"fixed"},
\code{"obf"}, \code{"pocock"}, and \code{"triangular"}. Defaults to
\code{"pocock"}.}
\item{eshape}{A \code{\link{character}} string indicating the choice for the
efficacy (upper) stopping boundaries. Can be any of \code{"fixed"},
\code{"obf"}, \code{"pocock"}, and \code{"triangular"}. Defaults to
\code{"pocock"}.}
\item{ffix}{A \code{\link{numeric}} indicating the chosen value for the fixed
interim futility bounds. Only used when \code{fshape = "fixed"}. Defaults to
\code{-3}.}
\item{efix}{A \code{\link{numeric}} indicating the chosen value for the fixed
interim efficacy bounds. Only used when \code{eshape = "fixed"}. Defaults to
\code{3}.}
\item{spacing}{A \code{\link{numeric}} \code{\link{vector}} indicating the
chosen spacing of the interim analyses in terms of the proportion of the
maximal possible sample size. It must contain strictly increasing values,
with final element equal to \code{1}. Defaults to
\code{(1:J)/J} (i.e., to equally spaced analyses).}
\item{integer}{A \code{\link{logical}} variable indicating whether the
computed possible sample sizes required in each arm in each stage should be
forced to be whole numbers. Defaults to \code{FALSE}. WARNING: If you set
\code{integer = TRUE} and \code{ratio != 1}, obscure results can occur due to
difficulties in identifying a suitable whole number sample size that meets
the allocation ratio requirement.}
\item{summary}{A \code{\link{logical}} variable indicating whether a summary
of the function's progress should be printed to the console. Defaults to
\code{FALSE}.}
}
\value{
A \code{\link{list}}, with additional class
\code{"multiarm_des_gs_pois"}, containing the following elements:
\itemize{
\item A \code{\link{tibble}} in the slot \code{$opchar} summarising the
operating characteristics of the identified design.
\item A \code{\link{tibble}} in the slot \code{$pmf_N} summarising the
probability mass function of the random required sample size under key
scenarios.
\item A \code{\link{numeric}} \code{\link{vector}} in the slot \code{$e}
specifying \ifelse{html}{\out{<b><i>e</i></b>}}{\eqn{\bold{e}}}, the trial's
efficacy (upper) stopping boundaries.
\item A \code{\link{numeric}} \code{\link{vector}} in the slot \code{$f}
specifying \ifelse{html}{\out{<b><i>f</i></b>}}{\eqn{\bold{f}}}, the trial's
futility (lower) stopping boundaries.
\item A \code{\link{numeric}} in the slot \code{$maxN} specifying
\ifelse{html}{\out{max <i>N</i>}}{max \eqn{N}}, the trial's maximum required
sample size.
\item A \code{\link{numeric}} in the slot \code{$n_factor}, for internal use
in other functions.
\item A \code{\link{numeric}} in the slot \code{$n1} specifying
\ifelse{html}{\out{<i>n</i><sub>1</sub>}}{\eqn{n_1}}, the total sample size
required in stage one of the trial.
\item A \code{\link{numeric}} in the slot \code{$n10} specifying
\ifelse{html}{\out{<i>n</i><sub>10</sub>}}{\eqn{n_{10}}}, the sample size
required in the control arm in stage one of the trial.
\item Each of the input variables.
}
}
\description{
\code{des_gs_pois()} determines multi-stage group-sequential multi-arm
clinical trial designs assuming the primary outcome variable is Poisson
distributed. It computes required design components and returns information
on key operating characteristics.
}
\examples{
# The design for the default parameters
des <- des_gs_pois()
}
\seealso{
\code{\link{build_gs_pois}}, \code{\link{gui}},
\code{\link{opchar_gs_pois}}, \code{\link{plot.multiarm_des_gs_pois}},
\code{\link{sim_gs_pois}}.
}
|
/man/des_gs_pois.Rd
|
permissive
|
mjg211/multiarm
|
R
| false
| true
| 6,300
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/des_gs_pois.R
\name{des_gs_pois}
\alias{des_gs_pois}
\title{Design a multi-stage group-sequential multi-arm clinical trial for a
Poisson distributed primary outcome}
\usage{
des_gs_pois(
K = 2,
J = 2,
alpha = 0.025,
beta = 0.1,
lambda0 = 5,
delta1 = 1,
delta0 = 0,
ratio = 1,
power = "marginal",
stopping = "simultaneous",
type = "variable",
fshape = "pocock",
eshape = "pocock",
ffix = -3,
efix = 3,
spacing = (1:J)/J,
integer = FALSE,
summary = FALSE
)
}
\arguments{
\item{K}{A \code{\link{numeric}} indicating the chosen value for
\ifelse{html}{\out{<i>K</i>}}{\eqn{K}}, the (initial) number of experimental
treatment arms. Defaults to \code{2}.}
\item{J}{A \code{\link{numeric}} indicating the chosen value for
\ifelse{html}{\out{<i>J</i>}}{\eqn{J}}, the (maximum) number of allowed
stages. Defaults to \code{2}.}
\item{alpha}{A \code{\link{numeric}} indicating the chosen value for
\ifelse{html}{\out{<i>α</i>}}{\eqn{\alpha}}, the significance level
(family-wise error-rate). Defaults to \code{0.025}.}
\item{beta}{A \code{\link{numeric}} indicating the chosen value for
\ifelse{html}{\out{<i>β</i>}}{\eqn{\beta}}, used in the definition of
the desired power. Defaults to \code{0.1}.}
\item{lambda0}{A \code{\link{numeric}} indicating the chosen value for
\ifelse{html}{\out{<i>λ</i><sub>0</sub>}}{\eqn{\lambda_0}}, the
event rate in the control arm. Defaults to \code{5}.}
\item{delta1}{A \code{\link{numeric}} indicating the chosen value for
\ifelse{html}{\out{<i>δ</i><sub>1</sub>}}{\eqn{\delta_1}}, the
'interesting' treatment effect. Defaults to \code{1}.}
\item{delta0}{A \code{\link{numeric}} indicating the chosen value for
\ifelse{html}{\out{<i>δ</i><sub>0</sub>}}{\eqn{\delta_0}}, the
'uninteresting' treatment effect. Defaults to \code{0}.}
\item{ratio}{A \code{\link{numeric}} indicating the chosen value for
\ifelse{html}{\out{<i>r</i>}}{\eqn{r}}, the stage-wise allocation ratio to
present experimental arms. Defaults to \code{1}.}
\item{power}{A \code{\link{character}} string indicating the chosen type of
power to design the trial for. Can be \code{"disjunctive"} or
\code{"marginal"}. Defaults to \code{"marginal"}.}
\item{stopping}{A \code{\link{character}} string indicating the chosen type
of stopping rule. Can be \code{"separate"} or \code{"simultaneous"}. Defaults
to \code{"simultaneous"}.}
\item{type}{A \code{\link{character}} string indicating the choice for the
stage-wise sample size. Can be \code{"variable"} or \code{"fixed"}. Defaults
to \code{"variable"}.}
\item{fshape}{A \code{\link{character}} string indicating the choice for the
futility (lower) stopping boundaries. Can be any of \code{"fixed"},
\code{"obf"}, \code{"pocock"}, and \code{"triangular"}. Defaults to
\code{"pocock"}.}
\item{eshape}{A \code{\link{character}} string indicating the choice for the
efficacy (upper) stopping boundaries. Can be any of \code{"fixed"},
\code{"obf"}, \code{"pocock"}, and \code{"triangular"}. Defaults to
\code{"pocock"}.}
\item{ffix}{A \code{\link{numeric}} indicating the chosen value for the fixed
interim futility bounds. Only used when \code{fshape = "fixed"}. Defaults to
\code{-3}.}
\item{efix}{A \code{\link{numeric}} indicating the chosen value for the fixed
interim efficacy bounds. Only used when \code{eshape = "fixed"}. Defaults to
\code{3}.}
\item{spacing}{A \code{\link{numeric}} \code{\link{vector}} indicating the
chosen spacing of the interim analyses in terms of the proportion of the
maximal possible sample size. It must contain strictly increasing values,
with final element equal to \code{1}. Defaults to
\code{(1:J)/J} (i.e., to equally spaced analyses).}
\item{integer}{A \code{\link{logical}} variable indicating whether the
computed possible sample sizes required in each arm in each stage should be
forced to be whole numbers. Defaults to \code{FALSE}. WARNING: If you set
\code{integer = TRUE} and \code{ratio != 1}, obscure results can occur due to
difficulties in identifying a suitable whole number sample size that meets
the allocation ratio requirement.}
\item{summary}{A \code{\link{logical}} variable indicating whether a summary
of the function's progress should be printed to the console. Defaults to
\code{FALSE}.}
}
\value{
A \code{\link{list}}, with additional class
\code{"multiarm_des_gs_pois"}, containing the following elements:
\itemize{
\item A \code{\link{tibble}} in the slot \code{$opchar} summarising the
operating characteristics of the identified design.
\item A \code{\link{tibble}} in the slot \code{$pmf_N} summarising the
probability mass function of the random required sample size under key
scenarios.
\item A \code{\link{numeric}} \code{\link{vector}} in the slot \code{$e}
specifying \ifelse{html}{\out{<b><i>e</i></b>}}{\eqn{\bold{e}}}, the trial's
efficacy (upper) stopping boundaries.
\item A \code{\link{numeric}} \code{\link{vector}} in the slot \code{$f}
specifying \ifelse{html}{\out{<b><i>f</i></b>}}{\eqn{\bold{f}}}, the trial's
futility (lower) stopping boundaries.
\item A \code{\link{numeric}} in the slot \code{$maxN} specifying
\ifelse{html}{\out{max <i>N</i>}}{max \eqn{N}}, the trial's maximum required
sample size.
\item A \code{\link{numeric}} in the slot \code{$n_factor}, for internal use
in other functions.
\item A \code{\link{numeric}} in the slot \code{$n1} specifying
\ifelse{html}{\out{<i>n</i><sub>1</sub>}}{\eqn{n_1}}, the total sample size
required in stage one of the trial.
\item A \code{\link{numeric}} in the slot \code{$n10} specifying
\ifelse{html}{\out{<i>n</i><sub>10</sub>}}{\eqn{n_{10}}}, the sample size
required in the control arm in stage one of the trial.
\item Each of the input variables.
}
}
\description{
\code{des_gs_pois()} determines multi-stage group-sequential multi-arm
clinical trial designs assuming the primary outcome variable is Poisson
distributed. It computes required design components and returns information
on key operating characteristics.
}
\examples{
# The design for the default parameters
des <- des_gs_pois()
}
\seealso{
\code{\link{build_gs_pois}}, \code{\link{gui}},
\code{\link{opchar_gs_pois}}, \code{\link{plot.multiarm_des_gs_pois}},
\code{\link{sim_gs_pois}}.
}
|
bd2ad56fe8cfaf44ad63efcc21168545 ctrl.e#1.a#3.E#134.A#48.c#.w#9.s#42.asp.qdimacs 9829 28942
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#134.A#48.c#.w#9.s#42.asp/ctrl.e#1.a#3.E#134.A#48.c#.w#9.s#42.asp.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 91
|
r
|
bd2ad56fe8cfaf44ad63efcc21168545 ctrl.e#1.a#3.E#134.A#48.c#.w#9.s#42.asp.qdimacs 9829 28942
|
# R code for DESeq2 analysis
#
# Author: Ming Wang
# Date: 2018-09-30
#
# DESeq2: v1.18.1
#' run DESeq2 for matrix data
#'
#' Runs a pairwise DESeq2 differential-expression analysis between every pair
#' of sample groups found in a featureCounts matrix. For each pair a
#' "<groupA>_vs_<groupB>" subdirectory is created under \code{outdir}; results
#' are written there via \code{deseq2_run()}, after which gene ids are mapped
#' to gene names and saved as "transcripts_deseq2.fix.xls".
#'
#' @param x Path to the matrix file (featureCounts output), or a data.frame
#'   of counts with sample names as column names and gene ids as row names
#'
#' @param organism Name of the organism/genome build, dm3, dm6, mm9, mm10,
#'   hg19, default: dm3
#'
#' @param outdir Path to directory to save results, default: cwd
#'
#' @param pvalue_cutoff Cutoff passed on to deseq2_run(), default: 0.05
#'
#' @export
#'
#' @details Supports multiple BAM files in featureCounts output;
#'   requires at least four replicate samples, whose names end with
#'   "_rep[1-9]" (case-insensitive)
#'
DESeq2_for_featureCounts <- function(x, organism = "dm3", outdir = "./",
pvalue_cutoff = 0.05) {
# read counts
if(inherits(x, "data.frame")){
# sample name in colname
# gene name in rowname
df_count <- x
} else {
# x is assumed to be a file path; parse with the package's reader
df_count <- featureCountsReader(x, organism)
}
# fetch replicate samples (columns whose name contains "_rep<digit>")
col_reps <- grep("_rep[1-9]", names(df_count), ignore.case = TRUE)
df_reps <- df_count[, col_reps]
# length() of a data.frame counts columns, i.e. replicate samples here
if (length(df_reps) < 4) {
stop("less than 4 BAM files of replicate samples")
}
# make pairs: Control vs experiment
# strip trailing ".<digit>"/"_<digit>" then the "_rep<digit>" suffix to
# derive group names; combn() enumerates every pairwise comparison
tags <- gsub("\\.\\d$|\\_\\d$", "", names(df_reps))
g <- unique(gsub("_rep[1-9]$", "", tags, ignore.case = TRUE))
g_pairs <- combn(g, 2)
# DE analysis
for (i in seq_len(ncol(g_pairs))) {
smp <- g_pairs[, i]
rep_names <- names(df_reps)
# replicate columns belonging to each group of the current pair
g1 <- rep_names[startsWith(rep_names, smp[1])]
g2 <- rep_names[startsWith(rep_names, smp[2])]
smp_line <- paste(smp, collapse = "_vs_")
smp_dir <- file.path(outdir, smp_line)
if (! dir.exists(smp_dir)) {
dir.create(smp_dir, recursive = TRUE, mode = "0750")
}
# make countdata
ma <- as.matrix(df_reps[, c(g1, g2)])
# make coldata (design_example() is a helper defined elsewhere -- not visible here)
coldata <- design_example(colnames(ma), smp)
# run DESeq2 (presumably writes transcripts_deseq2.csv into smp_dir,
# since it is read back just below -- confirm in deseq2_run())
deseq2_run(ma, coldata, smp_dir, pvalue_cutoff)
##------------------------------------------------------------------------##
# rename gene id
fs <- file.path(smp_dir, "transcripts_deseq2.csv")
fsFix <- file.path(smp_dir, "transcripts_deseq2.fix.xls")
## save table
fName = paste("genelist", organism, "rda", sep = ".")
f = system.file("extdata", fName, package = "goldclipReport")
load(f) # genelist: gene_id/gene_name lookup shipped with goldclipReport
## read data
## map ids to gene names, drop rows without padj, and sort by padj
df <- DESeq2_csv2df(fs)
df2 <- dplyr::mutate(df, id = as.character(id)) %>%
dplyr::mutate(id = plyr::mapvalues(id, genelist$gene_id, genelist$gene_name, FALSE)) %>%
dplyr::filter(! is.na(padj)) %>%
dplyr::arrange(padj)
readr::write_delim(df2, fsFix, delim = "\t", col_names = TRUE)
##----------------------------------------------------------------------------##
}
}
#' read featureCount matrix file
#'
#' Parses a featureCounts output table into a count data.frame with gene ids
#' as row names and cleaned sample names (longest common file-name suffix
#' stripped) as column names.
#'
#' @param x Path the matrix file
#'
#' @param organism Name of the organism used to look up the bundled gene
#'   list, default: dm3
#'
#' @param normalizeTo1M Logical value, whether or not normalize the counts of
#' each BAM file to 1 million reads. default: FALSE
#'
#' @param fixZero Numeric; when > 0, zero counts are replaced by this value
#'   before normalization. default: 0 (leave zeros untouched)
#'
#' @import dplyr
#' @import readr
#' @import tidyr
#' @import tibble
#'
#'
#' @export
#'
featureCountsReader <- function(x, organism = "dm3", normalizeTo1M = FALSE,
fixZero = 0) {
# parse file
df_exp <- readr::read_delim(x, "\t",
col_types = readr::cols(),
comment = "#") %>%
dplyr::rename(id = Geneid) %>% # "id" column
dplyr::select(-(2:6)) %>% # remove extra columns
as.data.frame()
# convert wide to long table
df_exp2 <- tidyr::gather(df_exp, key = sample, value = count, -id)
# fix Zero values
if (fixZero > 0) {
df_exp2$count[df_exp2$count == 0] <- fixZero
}
# normalize to 1 Million reads (RPM)
if (isTRUE(normalizeTo1M)) {
dfNorm <- dplyr::group_by(df_exp2, sample) %>%
mutate(norm = count / sum(count) * 1e6) %>%
dplyr::rename(value = norm) %>%
dplyr::select(-count)
} else {
dfNorm <- dplyr::rename(df_exp2, value = count)
}
# convert count to int (--fraction option in featureCounts)
dfNorm$value <- round(dfNorm$value, 0)
# output: back to wide format with genes as row names
dfOut <- dplyr::ungroup(dfNorm) %>%
dplyr::select(id, sample, value) %>%
tidyr::spread(key = sample, value = value) %>%
as.data.frame() %>%
tibble::column_to_rownames("id")
# reorder columns to the original file order
origin_header <- names(df_exp)[-1]
dfOut <- dfOut[, origin_header]
# rename colnames: strip the longest common suffix (e.g. a shared extension)
smps <- basename(colnames(dfOut))
ext <- str_common(smps, suffix = TRUE)
colnames(dfOut) <- gsub(ext, "", smps)
# rename rownames
# NOTE(review): the gene list is loaded and validated but the id -> name
# mapping below is commented out, so row names remain gene ids — confirm
genelist <- get_genelist(organism)
stopifnot(all(c("gene_id", "gene_name") %in% names(genelist)))
# rownames(dfOut) <- plyr::mapvalues(rownames(dfOut),
# from = genelist$gene_id,
# to = genelist$gene_name,
# warn_missing = FALSE)
# # remove temp objects
rm(df_exp)
rm(df_exp2)
rm(dfNorm)
# report
return(dfOut)
}
#' run DESeq2 analysis
#'
#' Runs the full DESeq2 pipeline on a count matrix and writes the result
#' table (transcripts_deseq2.csv) plus diagnostic figures into `outdir`.
#' Called purely for its side effects (files written to disk).
#'
#' @param ma count data in matrix
#'
#' @param coldata experiment design, in data.frame format.
#'   NOTE(review): this argument is recomputed from colnames(ma) below, so
#'   the value passed by the caller is ignored — confirm intended.
#'
#' @param outdir Directory to save the results
#'
#' @param pvalue_cutoff Cutoff to filt the records, padj for DESeq2 output,
#' default: 0.05
#'
#'
#' @import DESeq2
#' @import ggplot2
#' @import pheatmap
#' @import RColorBrewer
#' @import SummarizedExperiment
#'
#' @export
#'
deseq2_run <- function(ma, coldata, outdir, pvalue_cutoff = 0.05) {
stopifnot(is.matrix(ma))
# prepare files
# NOTE(review): de_plots[8] (volcano) is declared but never written below
de_count <- file.path(outdir, "transcripts_deseq2.csv")
de_plots <- file.path(outdir, c("figure1_MA_plot.png",
"figure2_MA_plot_LFC.png",
"figure3_sample_counts.png",
"figure4_PCA_plot.png",
"figure5_dispersion.png",
"figure6_sample_distance.png",
"figure7_top_genes.png",
"figure8_volcano.png"))
# prepare experiment design (overwrites the `coldata` argument)
countdata <- ma
tags <- gsub("\\.\\d$|\\_\\d$", "", colnames(countdata))
smp <- unique(gsub("_rep[1-9]$", "", tags, ignore.case = TRUE))
coldata <- design_example(ids = colnames(countdata), conditions = smp)
# load matrix
dds <- DESeq2::DESeqDataSetFromMatrix(countData = countdata, colData = coldata,
design = ~condition)
# Run the DESeq pipeline
dds <- DESeq2::DESeq(dds)
# Get differential expression results
res <- DESeq2::results(dds)
resLFC <- DESeq2::lfcShrink(dds, coef = 2, res = res)
rld <- DESeq2::rlogTransformation(dds, blind = FALSE)
ntd <- DESeq2::normTransform(dds)
# Order by adjusted p-value
res <- res[order(res$padj), ]
# NOTE(review): resSig is computed but never used afterwards — confirm
resSig <- subset(as.data.frame(res), padj < pvalue_cutoff)
# Normalied counts
ncount <- DESeq2::counts(dds, normalized = TRUE)
## Merge with normalized count data
resdata <- merge(as.data.frame(ncount), as.data.frame(res), by = "row.names",
sort = FALSE)
resdata <- resdata[order(resdata$padj), ]
names(resdata)[1] <- "Gene"
# save data to file
write.csv(resdata, de_count, quote = FALSE, row.names = TRUE)
# MA
png(de_plots[1], width = 1200, height = 1200, res = 300)
DESeq2::plotMA(res, ylim = c(-2, 2))
dev.off()
# MA for LFC (shrunken log2 fold changes)
png(de_plots[2], width = 1200, height = 1200, res = 300)
DESeq2::plotMA(resLFC, ylim = c(-2, 2))
dev.off()
# Sample counts for the gene with the smallest padj
png(de_plots[3], width = 1200, height = 1200, res = 300)
DESeq2::plotCounts(dds, gene = which.min(res$padj), intgroup = "condition")
dev.off()
# PCA
png(de_plots[4], width = 2000, height = 2000, res = 300)
print(DESeq2::plotPCA(rld, intgroup = c("condition")))
dev.off()
# Dispersion
png(de_plots[5], width = 1500, height = 1500, res = 300)
DESeq2::plotDispEsts(dds)
dev.off()
# Sample distance heatmap (Euclidean distance on rlog values)
png(de_plots[6], width = 1000, height = 1000, res = 300)
sampleDists <- dist(t(SummarizedExperiment::assay(rld)))
sampleDistMatrix <- as.matrix(sampleDists)
rownames(sampleDistMatrix) <- rld$condition
colnames(sampleDistMatrix) <- NULL
colors <- colorRampPalette( rev(RColorBrewer::brewer.pal(9, "Blues")) )(255)
pheatmap::pheatmap(sampleDistMatrix,
clustering_distance_rows = sampleDists,
clustering_distance_cols = sampleDists,
col = colors)
dev.off()
# Top genes: 30 genes with the highest mean normalized counts
png(de_plots[7], width = 1200, height = 1200, res = 300)
select <- order(rowMeans(DESeq2::counts(dds, normalized = TRUE)),
decreasing = TRUE)[1:30]
ma <- SummarizedExperiment::assay(ntd)[select, ]
df <- as.data.frame(SummarizedExperiment::colData(dds)[, c("condition")])
colnames(df) <- "condition"
rownames(df) <- colnames(ma)
pheatmap::pheatmap(SummarizedExperiment::assay(ntd)[select,],
cluster_rows = FALSE,
cluster_cols = FALSE,
show_rownames = FALSE,
annotation_col = df)
dev.off()
}
#' return the longest common string
#'
#' Finds the longest run of characters shared by all strings, anchored at the
#' start (prefix, default) or at the end (suffix) of every string.
#'
#' @param list A character vector (or list) of strings.
#'
#' @param suffix A logical value, if TRUE, return the longest common suffix
#'   of the strings instead of the longest common prefix. default: FALSE
#'
#' @return A single string; "" when nothing is shared or the input is empty.
#'
#' @export
#'
str_common <- function(list, suffix = FALSE) {
  # empty input: nothing in common (previously min() of an empty list errored)
  if (length(list) == 0) {
    return("")
  }
  # split each string into single characters; reverse for suffix mode so the
  # scan below is always a prefix comparison (no stringi dependency needed)
  chars <- lapply(list, function(s) unlist(strsplit(s, "")))
  if (isTRUE(suffix)) {
    chars <- lapply(chars, rev)
  }
  # the common run can be at most as long as the shortest string
  len_min <- min(vapply(chars, length, integer(1)))
  common <- character(0)
  for (i in seq_len(len_min)) {
    # i-th character of every string; more than one unique value ends the run
    ch <- unique(vapply(chars, function(cv) cv[i], character(1)))
    if (length(ch) > 1) {
      break
    }
    common <- c(common, ch)
  }
  # undo the reversal so the suffix reads left-to-right again
  if (isTRUE(suffix)) {
    common <- rev(common)
  }
  paste0(common, collapse = "")
}
#' create experiment design for DESeq2 analysis
#'
#' Builds a one-column data.frame (condition) suitable as DESeq2 colData,
#' assigning each condition to an equal-sized consecutive run of ids.
#'
#' @param ids A set of variables, the name of samples, default: letters[1:4]
#'
#' @param conditions A set of variables, the levels for design, default:
#' letters[1:2]
#'
#' @return A data.frame with a factor column `condition` (levels in the order
#'   of `conditions`) and `ids` as row names.
#'
#' @export
design_example <- function(ids = letters[1:4], conditions = letters[1:2]) {
  # replicates per condition; warn when ids cannot be split evenly
  # (n_rep replaces a local named `rep`, which shadowed base::rep below)
  n_rep <- length(ids) / length(conditions)
  if (floor(n_rep) < n_rep) {
    warning("Number of ids not divided by conditions")
  }
  df <- data.frame(condition = factor(rep(conditions, each = n_rep),
                                      levels = conditions))
  rownames(df) <- ids
  df
}
#' import matrix to DESeq2
#'
#' Builds a DESeq2 dataset from a count data.frame and a design table
#' (~condition) and runs the DESeq() pipeline on it.
#'
#' @param df A data.frame of count matrix; its columns must include every
#'   row name of `coldata`.
#'
#' @param coldata A data.frame of design; must contain a "condition" column,
#'   with sample names as row names.
#'
#' @return A DESeqDataSet after running DESeq().
#'
#' @export
#'
DESeq2_data_from_matrix <- function(df, coldata) {
stopifnot(is.data.frame(df))
if(! "condition" %in% colnames(coldata)) {
print(colnames(coldata))
stop("condition - not found in coldata")
}
# subset of dataframe: keep only the samples described in coldata
dfx <- dplyr::select(df, rownames(coldata))
if (length(dfx) != nrow(coldata)) {
stop("data.frame and coldata not match")
}
# load DESeq2
# NOTE(review): the full `df` is passed here, not the subset `dfx` computed
# above — confirm whether DESeqDataSetFromMatrix should receive dfx instead
dds <- DESeq2::DESeqDataSetFromMatrix(df, coldata, ~condition)
dds <- DESeq2::DESeq(dds)
return(dds)
}
#' load the gene id/name lookup table for an organism
#'
#' Loads the bundled `genelist` table shipped with the goldclipReport
#' package as extdata/genelist.<organism>.rda and returns it. Stops when
#' the file does not exist.
#'
#' @param organism A character of organism, default: dm3
#'
#' @import biomaRt
#'
#' @export
#'
#' @description to-do, support all avilable organism
#' gene_id, gene_name/external_gene_id
#'
#' load from package data,
#' optional: download from ensembl, update
get_genelist <- function(organism = "dm3") {
  print(organism)
  rda_name <- paste("genelist", organism, "rda", sep = ".")
  rda_file <- system.file("extdata", rda_name, package = "goldclipReport")
  # guard clause: bail out early when the bundled data file is missing
  if (!file.exists(rda_file)) {
    stop('genelist file not exists')
  }
  print(rda_file)
  load(rda_file)  # brings `genelist` into scope
  genelist
}
#' add per-condition mean columns to a DESeq2 result table
#' assumes 2 replicates for each condition
#'
#' @param data A data frame of DESeq2 output, first 5 columns:
#' <id> <a-rep1> <a-rep2> <b-rep1> <b-rep2> ...
#'
#' @return The input data.frame merged (by "id") with two extra columns
#'   holding the row means of columns 2:3 and 4:5.
#'
#' @export
DESeq2_add_mean <- function(data){
  # only for DESeq2 output csv: columns 2:3 and 4:5 are the replicate pairs
  stopifnot(is.data.frame(data))
  # derive condition labels from the shared prefix of each replicate pair,
  # then strip the replicate marker ("_R"/"_rep", case-insensitive)
  ctl_label <- str_common(colnames(data)[2:3])
  tre_label <- str_common(colnames(data)[4:5])
  ctl_label <- gsub("_R|_rep", "", ctl_label, ignore.case = TRUE)
  tre_label <- gsub("_R|_rep", "", tre_label, ignore.case = TRUE)
  # row-wise mean of each replicate pair
  means <- data.frame(id = data$id,
                      ctl = rowMeans(data[, 2:3]),
                      tre = rowMeans(data[, 4:5]))
  colnames(means) <- c("id", ctl_label, tre_label)
  # attach the mean columns to the original table
  merge(means, data, by = "id")
}
#' read output of DESeq2_run function
#' csv format
#'
#' Reads the transcripts_deseq2.csv table, normalises the id column, and
#' appends per-condition mean columns via DESeq2_add_mean.
#'
#' @param x Path to csv file
#'
#' @return A data.frame with an "id" column plus the mean columns added by
#'   DESeq2_add_mean.
#'
#' @import readr
#' @import dplyr
#'
#' @export
DESeq2_csv2df <- function(x) {
# read DESeq2 csv file
df <- readr::read_csv(x, col_types = readr::cols())
# colnames(df)[1] <- 'id'
# the csv is written with row.names = TRUE (see deseq2_run), so the first
# column is the row-name index — drop it and treat the next as the gene id
df[, 1] <- NULL # remove the first column
colnames(df)[1] <- "id"
df$id <- as.character(df$id)
# add mean columns
df2 <- DESeq2_add_mean(df)
return(df2)
}
#' read output of DESeq2_run function
#' xls format
#'
#' @param x Path to xls file (tab-delimited text, not binary Excel)
#'
#' @return The parsed table as returned by readr::read_delim.
#'
#' @import readr
#' @import dplyr
#'
#' @export
DESeq2_xls2df <- function(x) {
# read DESeq2 tab-delimited file
#df <- readr::read_csv(x, col_types = readr::cols())
df <- readr::read_delim(x, "\t", col_types = readr::cols())
return(df)
}
# de_deseq2_plot <- function(df, withTE = FALSE, add_sigNames = TRUE,
# extra_ids = c("CG9754", "nxf2", "piwi")) {
# s1 <- colnames(df)[2:3]
# df[, s1[1]] <- log10(df[, s1[1]])
# df[, s1[2]] <- log10(df[, s1[2]])
# # gene list
# dfGene <- filtGeneId(df, gene = TRUE, te = FALSE)
# # filter > 0
# # dfGene <- dfGene[is.finite(dfGene$shattp2) & is.finite(dfGene$shNxf2), ]
# dfGene <- dplyr::filter_(dfGene, s1[1] > 0 & s1[2] > 0)
# p1 <- de_scatter(dfGene, s1[1], s1[2],
# add_sigNames = add_sigNames,
# extra_ids = extra_ids)
# # p2 <- de_ma(dfGene, add_sigNames = add_sigNames, extra_ids = extra_ids)
# # print(p1)
# # print(p2)
# # pout <- list(p1, p2)
# pout <- list(p1)
#
# # te list
# if (isTRUE(withTE)) {
# dfTE <- filtGeneId(df, gene = FALSE, te = TRUE)
# p3 <- de_scatter(dfTE, s1[1], s1[2], extra_ids = extra_ids)
# pout <- c(pout, p3)
# # print(p3)
# }
# return(pout)
# }
#
#
#
# de_scatter <- function(df, xName, yName, add_sigNames = TRUE,
# extra_ids = c("piwi", "CG9754"),
# tt = "Differential expression", cutoff = 0.05) {
# library(ggplot2)
# library(ggrepel)
# # at most, 10 labels
# dfSig <- dplyr::filter(df, padj <= cutoff) %>%
# dplyr::arrange(padj)
# dfEx <- dplyr::filter(df, id %in% extra_ids)
# if (isTRUE(add_sigNames)) {
# dfLabel <- rbind(dfEx, dfSig)
# } else {
# dfLabel <- dfEx
# }
# if (nrow(dfLabel) > 3) dfLabel <- dfLabel[1:3, ]
# # tt = paste(xName, yName, sep = "_")
# tt = glue::glue("DE-analysis: {xName} vs {yName}")
#
# p <- ggplot(df, aes_string(xName, yName)) +
# geom_abline(slope = 1, intercept = 0, color = "grey10") +
# geom_abline(slope = 1, intercept = log10(2),
# color = "grey30", linetype = 2) +
# geom_abline(slope = 1, intercept = -log10(2),
# color = "grey30", linetype = 2) +
# geom_point(size = 1.3,
# color = ifelse(is.na(df$padj), "grey20",
# ifelse(df$padj <= cutoff, "red", "grey20"))) +
# # ggtitle(tt) +
# xlab(xName) + ylab(yName) +
# scale_x_continuous(limits = c(0, 6),
# breaks = seq(0, 6, 1),
# labels = seq(0, 6, 1),
# expand = c(0, 0, 0, 0)) +
# scale_y_continuous(limits = c(0, 6),
# breaks = seq(0, 6, 1),
# labels = seq(0, 6, 1),
# expand = c(0, 0, 0, 0)) +
# theme_classic() +
# theme(panel.border = element_rect(color = "black", fill = NA, size = .7),
# plot.title = element_text(color = "black", hjust = .5, size = 14),
# axis.title = element_text(color = "black", size = 12),
# axis.text = element_text(color = "black", size = 10))
# if (nrow(dfLabel) > 0) {
# p <- p + geom_text_repel(
# data = dfLabel,
# label = dfLabel$id,
# box.padding = 1,
# segment.size = 0.4,
# segment.color = "grey50",
# direction = "both"
# )
# }
# return(p)
# }
#
# matrix_DESeq2_batch <- function(fn, genelist, outDir) {
# # only for featureCount output
# stopifnot(file.exists(fn))
# stopifnot(is.data.frame(genelist))
# stopifnot(all(c("gene_id", "gene_name") %in% names(genelist)))
# df <- fc2df(fn)
# df <- dplyr::mutate(df,
# id = plyr::mapvalues(id,
# genelist$gene_id,
# genelist$gene_name,
# warn_missing = FALSE))
# # recursive pair
# smp_ids <- names(df)[-1]
# smp_ids <- gsub("_rep\\d+", "", smp_ids)
# smp_ids <- unique(smp_ids)
# # combinations
# cc <- combn(smp_ids, 2)
# for (i in seq(ncol(cc))) {
# smp <- cc[, i]
# smp_line <- paste(smp, collapse = "_vs_")
# g1 <- stringr::str_which(names(df), smp[1])
# g2 <- stringr::str_which(names(df), smp[2])
# ## subset of data.frame
# dfx <- dplyr::select_(df, .dots = c(g1, g2)) %>% as.data.frame()
# rownames(dfx) <- df$id
# ## make coldata
# coldata <- data.frame(row.names = colnames(dfx),
# condition = rep(c("wt", "mut"), each = 2))
# dds <- load_matrix_to_DESeq2(dfx, coldata)
# ## out_dir
# dfDir <- file.path(outDir, smp_line)
# if (! dir.exists(dfDir)) {
# dir.create(dfDir, recursive = TRUE)
# }
# oldDir <- getwd()
# setwd(dfDir)
# run_DESeq2(dds)
# setwd(oldDir)
# print(smp_line)
# }
# }
#
#
# kallistoToDESeq2Batch <- function(fn, outDir, tx2gene = NULL,
# gene_level = FALSE) {
# # only for kallisto output
# # directory structure
# # suppose 2 replicates for each sample
# # path-to-kallisto/prjname/abundance.h5
# fn <- sort(fn)
# # combination
# smp_ids <- gsub("RNAseq_|_rep\\d+", "", basename(dirname(fn)))
# smp_ids <- unique(smp_ids)
# # combinations
# cc <- combn(smp_ids, 2)
# for (i in seq(ncol(cc))) {
# smp <- cc[, i]
# smp_line <- paste(smp, collapse = "_vs_")
# g1 <- stringr::str_which(fn, smp[1])
# g2 <- stringr::str_which(fn, smp[2])
# # sub files
# fx <- fn[c(g1, g2)]
# dds <- kallistoToDESeq2(fx, tx2gene, gene_level = TRUE)
# ## out_dir
# dfDir <- file.path(outDir, smp_line)
# if (! dir.exists(dfDir)) {
# dir.create(dfDir, recursive = TRUE)
# }
# oldDir <- getwd()
# setwd(dfDir)
# run_DESeq2(dds)
# setwd(oldDir)
# print(smp_line)
# }
# }
#
# tetoolkit2DESeq2 <- function(fn) {
# # f <- "../tetranscript/dsCG9754_vs_dsPiwi/dsCG9754_vs_dsPiwi.cntTable"
# df <- readr::read_delim(fn, "\t", quote = "\"", col_types = cols())
# colnames(df)[1] <- 'id'
# dfx <- df[, -1] %>% as.data.frame()
# n <- gsub(".Aligned.sortedByCoord.out.bam|.C|.T|RNAseq_", "", basename(names(dfx)))
# rownames(dfx) <- df$id
# colnames(dfx) <- n
# coldata <- data.frame(row.names = colnames(dfx),
# condition = rep(c("mut", "wt"), each = 2))
# dds <- load_matrix_to_DESeq2(dfx, coldata)
# ## out_dir
# dfDir <- dirname(fn)
# oldDir <- getwd()
# setwd(dfDir)
# run_DESeq2(dds)
# setwd(oldDir)
# print(basename(fn))
# }
#
# fc2df <- function(fn, normalize = FALSE,
# convertLog10 = FALSE,
# convertInt = TRUE) {
# # parse featureCounts
# # convert count to INT
# # convert count to log10
# # normalize, RPM
# library(readr)
# library(dplyr)
# library(tidyr)
# df <- read_delim(fn, "\t", col_types = cols(), comment = "#") %>%
# dplyr::rename(id = Geneid)
# df1 <- df[, -c(2:6)]
#
# # normalization
# # RPM: reads per million
# if (isTRUE(normalize)) {
# df2 <- gather(df1, sample, count, -1) %>%
# dplyr::group_by(sample) %>%
# mutate(norm = count / sum(count) * 1e6) %>%
# dplyr::rename(value = norm)
# } else {
# df2 <- gather(df1, sample, count, -1) %>%
# dplyr::rename(value = count)
# }
# #
# if (isTRUE(convertLog10)) {
# df2 <- dplyr::mutate(df2, value = log10(value))
# }
#
# if(isTRUE(convertInt)) {
# df2 <- dplyr::mutate(df2, value = round(value, 0))
# }
#
# df3 <- ungroup(df2) %>%
# dplyr::select(id, sample, value) %>%
# mutate(sample = gsub("RNAseq_|.piRNA_clusters|.unique|.merged|.Aligned|.sortedByCoord|.out|.bam", "", basename(sample))) %>%
# spread(sample, value)
# return(df3)
# }
#
#
# #' import kallisto to DESeq2
# #'
# #' @param kal_files A set of files
# #'
# #' @param coldata A data.frame of experiment design
# #'
# #'
# #' @import tximport
# #'
# #' @export
# #'
# DESeq2_data_from_kallisto <- function(kal_files, coldata, t2g) {
#
# library(tximport)
# kal_files <- kal_files[file.exists(kal_files)]
# # transcript-level
# txi <- tximport(files = kal_files, type = "kallisto", txOut = TRUE,
# dropInfReps = TRUE)
# # # gene-level
# # txi <- tximport(files = kal_files, type = "kallisto", txOut = FALSE,
# # tx2gene = t2g, dropInfReps = TRUE)
# rownames(txi$counts) <- as.character(rownames(txi$counts))
# # checkout dataset
# if(length(kal_files) != nrow(coldata)) {
# stop('kallisto file not match coldata')
# }
# if(! "condition" %in% colnames(coldata)) {
# print(colnames(coldata))
# stop("condition - not found in coldata")
# }
# dds <- DESeqDataSetFromTximport(txi = txi, colData = coldata,
# design = ~condition)
# dds <- DESeq(dds)
# return(dds)
# }
#
# filtGeneId <- function(df, gene = TRUE, te = TRUE) {
# # filt output of TEtranscripts
# if (isTRUE(gene) & isTRUE(te)) {
# return(df) # do not filt
# } else if (isTRUE(gene)) {
# # id, do not contain ":"
# gids <- grepl(":", df$id)
# return(df[! gids, ])
# } else if(isTRUE(te)) {
# # id, only contain ":"
# teids <- grep(":", df$id)
# return(df[teids, ])
# } else {
# warning("do not return records")
# }
# }
#' run regular DESeq2 analysis
#'
#' @param dds A variable of dds
#'
#'
#' @import ggplot2
#' @import DESeq2
#' @import pheatmap
#' @import RColorBrewer
#' @import SummarizedExperiment
#'
#' @export
#'
# DESeq2_run <- function(dds, path_out, pval_cutoff = 0.05) {
#
# # prepare files
# file_count <- file.path(path_out, "transcripts_deseq2.csv")
# file_plot <- file.path(path_out, c("figure1_MA_plot.png",
# "figure2_MA_plot_LFC.png",
# "figure3_sample_counts.png",
# "figure4_PCA_plot.png",
# "figure5_dispersion.png",
# "figure6_sample_distance.png",
# "figure7_top_genes.png",
# "figure8_volcano.png"))
# #
# # # prepare variables
# # library(DESeq2, quietly = TRUE)
# # library("ggplot2")
# # library(pheatmap)
# # library("RColorBrewer")
#
# # start
# res <- DESeq2::results(dds)
# resOrdered <- res[order(res$padj), ]
# # print(head(resOrdered))
# # resSig <- dplyr::filter(resOrdered, padj < pval_cutoff)
# resSig <- subset(as.data.frame(resOrdered), padj < pval_cutoff)
# resLFC <- DESeq2::lfcShrink(dds, coef = 2, res = res)
# rld <- DESeq2::rlog(dds, blind = FALSE)
# #vsd <- DESeq2::varianceStabilizingTransformation(dds, blind = FALSE)
# #vsd.fast <- DESeq2::vst(dds, blind = FALSE)
# ntd <- DESeq2::normTransform(dds)
# #add normalized counts
# ncount <- DESeq2::counts(dds, normalized = TRUE)
# exp <- cbind(as.data.frame(ncount), as.data.frame(res))
# expOrdered <- exp[order(exp$padj), ]
#
# # save data to file
# write.csv(expOrdered, file_count, quote = FALSE, row.names = TRUE)
#
# # make plots
# # MA plot
# png(file_plot[1], width = 1200, height = 1200, res = 300)
# DESeq2::plotMA(res, ylim = c(-2, 2))
# dev.off()
#
# png(file_plot[2], width = 1200, height = 1200, res = 300)
# DESeq2::plotMA(resLFC, ylim = c(-2, 2))
# dev.off()
#
# png(file_plot[3], width = 1200, height = 1200, res = 300)
# DESeq2::plotCounts(dds, gene = which.min(res$padj), intgroup = "condition")
# dev.off()
#
# # PCA
# png(file_plot[4], width = 2000, height = 2000, res = 300)
# print(DESeq2::plotPCA(rld, intgroup = c("condition")))
# dev.off()
#
# png(file_plot[5], width = 1500, height = 1500, res = 300)
# DESeq2::plotDispEsts(dds)
# dev.off()
#
# # Sample distance
# png(file_plot[6], width = 1000, height = 1000, res = 300)
# sampleDists <- dist(t(SummarizedExperiment::assay(rld)))
# sampleDistMatrix <- as.matrix(sampleDists)
# rownames(sampleDistMatrix) <- rld$condition
# colnames(sampleDistMatrix) <- NULL
# colors <- colorRampPalette( rev(RColorBrewer::brewer.pal(9, "Blues")) )(255)
# pheatmap::pheatmap(sampleDistMatrix,
# clustering_distance_rows = sampleDists,
# clustering_distance_cols = sampleDists,
# col = colors)
# dev.off()
#
# # Count matrix of top genes
# png(file_plot[7], width = 1200, height = 1200, res = 300)
# select <- order(rowMeans(DESeq2::counts(dds, normalized = TRUE)),
# decreasing = TRUE)[1:30]
# ma <- SummarizedExperiment::assay(ntd)[select, ]
# df <- as.data.frame(SummarizedExperiment::colData(dds)[, c("condition")])
# colnames(df) <- "condition"
# rownames(df) <- colnames(ma)
# pheatmap::pheatmap(SummarizedExperiment::assay(ntd)[select,],
# cluster_rows = FALSE,
# cluster_cols = FALSE,
# show_rownames = FALSE,
# annotation_col = df)
# dev.off()
# }
|
/R/rnaseq_deseq2_functions.R
|
no_license
|
bakerwm/goldclipReport
|
R
| false
| false
| 26,242
|
r
|
# R code for DESeq2 analysis
#
# Author: Ming Wang
# Date: 2018-09-30
#
# DESeq2: v1.18.1
#' run DESeq2 for matrix data
#'
#' Runs a pairwise DESeq2 analysis for every combination of conditions found
#' in a featureCounts count table, writing the result table and a
#' gene-name-fixed copy into one sub-directory of `outdir` per comparison.
#'
#' @param x Path to the matrix file (featureCounts output), or a data.frame
#'   of counts with sample names as column names and gene ids as row names.
#'
#' @param organism Name of the organism, e.g. dm3, dm6, mm9, mm10, default: dm3
#'
#' @param outdir Path to directory to save results, default: cwd
#'
#' @param pvalue_cutoff padj cutoff forwarded to deseq2_run, default: 0.05
#'
#' @export
#'
#' @describeIn Suppose multiple BAM files in featureCounts output
#' require at least four samples, name of file endswith "_rep[1-3]"
#'
DESeq2_for_featureCounts <- function(x, organism = "dm3", outdir = "./",
pvalue_cutoff = 0.05) {
# read counts: accept a pre-built data.frame, otherwise parse the file
if(inherits(x, "data.frame")){
# sample name in colname
# gene name in rowname
df_count <- x
} else {
df_count <- featureCountsReader(x, organism)
}
# fetch replicate samples (columns named like "<condition>_rep<N>")
col_reps <- grep("_rep[1-9]", names(df_count), ignore.case = TRUE)
df_reps <- df_count[, col_reps]
if (length(df_reps) < 4) {
stop("less than 4 BAM files of replicate samples")
}
# make pairs: Control vs experiment
# strip trailing ".N"/"_N" then the "_repN" suffix to get condition names
tags <- gsub("\\.\\d$|\\_\\d$", "", names(df_reps))
g <- unique(gsub("_rep[1-9]$", "", tags, ignore.case = TRUE))
g_pairs <- combn(g, 2)
# DE analysis: one DESeq2 run per condition pair
for (i in seq_len(ncol(g_pairs))) {
smp <- g_pairs[, i]
rep_names <- names(df_reps)
g1 <- rep_names[startsWith(rep_names, smp[1])]
g2 <- rep_names[startsWith(rep_names, smp[2])]
smp_line <- paste(smp, collapse = "_vs_")
smp_dir <- file.path(outdir, smp_line)
if (! dir.exists(smp_dir)) {
dir.create(smp_dir, recursive = TRUE, mode = "0750")
}
# make countdata
ma <- as.matrix(df_reps[, c(g1, g2)])
# make coldata
coldata <- design_example(colnames(ma), smp)
# run DESeq2: writes tables and figures into smp_dir (side effects only)
deseq2_run(ma, coldata, smp_dir, pvalue_cutoff)
##------------------------------------------------------------------------##
# rename gene id: map gene_id -> gene_name using the bundled genelist
fs <- file.path(smp_dir, "transcripts_deseq2.csv")
fsFix <- file.path(smp_dir, "transcripts_deseq2.fix.xls")
## save table
fName = paste("genelist", organism, "rda", sep = ".")
f = system.file("extdata", fName, package = "goldclipReport")
load(f) # loads `genelist` into this function's environment
## read data
df <- DESeq2_csv2df(fs)
df2 <- dplyr::mutate(df, id = as.character(id)) %>%
dplyr::mutate(id = plyr::mapvalues(id, genelist$gene_id, genelist$gene_name, FALSE)) %>%
dplyr::filter(! is.na(padj)) %>%
dplyr::arrange(padj)
readr::write_delim(df2, fsFix, delim = "\t", col_names = TRUE)
##----------------------------------------------------------------------------##
}
}
#' read featureCount matrix file
#'
#' Parses a featureCounts output table into a count data.frame with gene ids
#' as row names and cleaned sample names (longest common file-name suffix
#' stripped) as column names.
#'
#' @param x Path the matrix file
#'
#' @param organism Name of the organism used to look up the bundled gene
#'   list, default: dm3
#'
#' @param normalizeTo1M Logical value, whether or not normalize the counts of
#' each BAM file to 1 million reads. default: FALSE
#'
#' @param fixZero Numeric; when > 0, zero counts are replaced by this value
#'   before normalization. default: 0 (leave zeros untouched)
#'
#' @import dplyr
#' @import readr
#' @import tidyr
#' @import tibble
#'
#'
#' @export
#'
featureCountsReader <- function(x, organism = "dm3", normalizeTo1M = FALSE,
fixZero = 0) {
# parse file
df_exp <- readr::read_delim(x, "\t",
col_types = readr::cols(),
comment = "#") %>%
dplyr::rename(id = Geneid) %>% # "id" column
dplyr::select(-(2:6)) %>% # remove extra columns
as.data.frame()
# convert wide to long table
df_exp2 <- tidyr::gather(df_exp, key = sample, value = count, -id)
# fix Zero values
if (fixZero > 0) {
df_exp2$count[df_exp2$count == 0] <- fixZero
}
# normalize to 1 Million reads (RPM)
if (isTRUE(normalizeTo1M)) {
dfNorm <- dplyr::group_by(df_exp2, sample) %>%
mutate(norm = count / sum(count) * 1e6) %>%
dplyr::rename(value = norm) %>%
dplyr::select(-count)
} else {
dfNorm <- dplyr::rename(df_exp2, value = count)
}
# convert count to int (--fraction option in featureCounts)
dfNorm$value <- round(dfNorm$value, 0)
# output: back to wide format with genes as row names
dfOut <- dplyr::ungroup(dfNorm) %>%
dplyr::select(id, sample, value) %>%
tidyr::spread(key = sample, value = value) %>%
as.data.frame() %>%
tibble::column_to_rownames("id")
# reorder columns to the original file order
origin_header <- names(df_exp)[-1]
dfOut <- dfOut[, origin_header]
# rename colnames: strip the longest common suffix (e.g. a shared extension)
smps <- basename(colnames(dfOut))
ext <- str_common(smps, suffix = TRUE)
colnames(dfOut) <- gsub(ext, "", smps)
# rename rownames
# NOTE(review): the gene list is loaded and validated but the id -> name
# mapping below is commented out, so row names remain gene ids — confirm
genelist <- get_genelist(organism)
stopifnot(all(c("gene_id", "gene_name") %in% names(genelist)))
# rownames(dfOut) <- plyr::mapvalues(rownames(dfOut),
# from = genelist$gene_id,
# to = genelist$gene_name,
# warn_missing = FALSE)
# # remove temp objects
rm(df_exp)
rm(df_exp2)
rm(dfNorm)
# report
return(dfOut)
}
#' run DESeq2 analysis
#'
#' Runs the full DESeq2 pipeline on a count matrix and writes the result
#' table (transcripts_deseq2.csv) plus diagnostic figures into `outdir`.
#' Called purely for its side effects (files written to disk).
#'
#' @param ma count data in matrix
#'
#' @param coldata experiment design, in data.frame format.
#'   NOTE(review): this argument is recomputed from colnames(ma) below, so
#'   the value passed by the caller is ignored — confirm intended.
#'
#' @param outdir Directory to save the results
#'
#' @param pvalue_cutoff Cutoff to filt the records, padj for DESeq2 output,
#' default: 0.05
#'
#'
#' @import DESeq2
#' @import ggplot2
#' @import pheatmap
#' @import RColorBrewer
#' @import SummarizedExperiment
#'
#' @export
#'
deseq2_run <- function(ma, coldata, outdir, pvalue_cutoff = 0.05) {
stopifnot(is.matrix(ma))
# prepare files
# NOTE(review): de_plots[8] (volcano) is declared but never written below
de_count <- file.path(outdir, "transcripts_deseq2.csv")
de_plots <- file.path(outdir, c("figure1_MA_plot.png",
"figure2_MA_plot_LFC.png",
"figure3_sample_counts.png",
"figure4_PCA_plot.png",
"figure5_dispersion.png",
"figure6_sample_distance.png",
"figure7_top_genes.png",
"figure8_volcano.png"))
# prepare experiment design (overwrites the `coldata` argument)
countdata <- ma
tags <- gsub("\\.\\d$|\\_\\d$", "", colnames(countdata))
smp <- unique(gsub("_rep[1-9]$", "", tags, ignore.case = TRUE))
coldata <- design_example(ids = colnames(countdata), conditions = smp)
# load matrix
dds <- DESeq2::DESeqDataSetFromMatrix(countData = countdata, colData = coldata,
design = ~condition)
# Run the DESeq pipeline
dds <- DESeq2::DESeq(dds)
# Get differential expression results
res <- DESeq2::results(dds)
resLFC <- DESeq2::lfcShrink(dds, coef = 2, res = res)
rld <- DESeq2::rlogTransformation(dds, blind = FALSE)
ntd <- DESeq2::normTransform(dds)
# Order by adjusted p-value
res <- res[order(res$padj), ]
# NOTE(review): resSig is computed but never used afterwards — confirm
resSig <- subset(as.data.frame(res), padj < pvalue_cutoff)
# Normalied counts
ncount <- DESeq2::counts(dds, normalized = TRUE)
## Merge with normalized count data
resdata <- merge(as.data.frame(ncount), as.data.frame(res), by = "row.names",
sort = FALSE)
resdata <- resdata[order(resdata$padj), ]
names(resdata)[1] <- "Gene"
# save data to file
write.csv(resdata, de_count, quote = FALSE, row.names = TRUE)
# MA
png(de_plots[1], width = 1200, height = 1200, res = 300)
DESeq2::plotMA(res, ylim = c(-2, 2))
dev.off()
# MA for LFC (shrunken log2 fold changes)
png(de_plots[2], width = 1200, height = 1200, res = 300)
DESeq2::plotMA(resLFC, ylim = c(-2, 2))
dev.off()
# Sample counts for the gene with the smallest padj
png(de_plots[3], width = 1200, height = 1200, res = 300)
DESeq2::plotCounts(dds, gene = which.min(res$padj), intgroup = "condition")
dev.off()
# PCA
png(de_plots[4], width = 2000, height = 2000, res = 300)
print(DESeq2::plotPCA(rld, intgroup = c("condition")))
dev.off()
# Dispersion
png(de_plots[5], width = 1500, height = 1500, res = 300)
DESeq2::plotDispEsts(dds)
dev.off()
# Sample distance heatmap (Euclidean distance on rlog values)
png(de_plots[6], width = 1000, height = 1000, res = 300)
sampleDists <- dist(t(SummarizedExperiment::assay(rld)))
sampleDistMatrix <- as.matrix(sampleDists)
rownames(sampleDistMatrix) <- rld$condition
colnames(sampleDistMatrix) <- NULL
colors <- colorRampPalette( rev(RColorBrewer::brewer.pal(9, "Blues")) )(255)
pheatmap::pheatmap(sampleDistMatrix,
clustering_distance_rows = sampleDists,
clustering_distance_cols = sampleDists,
col = colors)
dev.off()
# Top genes: 30 genes with the highest mean normalized counts
png(de_plots[7], width = 1200, height = 1200, res = 300)
select <- order(rowMeans(DESeq2::counts(dds, normalized = TRUE)),
decreasing = TRUE)[1:30]
ma <- SummarizedExperiment::assay(ntd)[select, ]
df <- as.data.frame(SummarizedExperiment::colData(dds)[, c("condition")])
colnames(df) <- "condition"
rownames(df) <- colnames(ma)
pheatmap::pheatmap(SummarizedExperiment::assay(ntd)[select,],
cluster_rows = FALSE,
cluster_cols = FALSE,
show_rownames = FALSE,
annotation_col = df)
dev.off()
}
#' return the longest common string
#'
#' Finds the longest run of characters shared by all strings, anchored at the
#' start (prefix, default) or at the end (suffix) of every string.
#'
#' @param list A character vector (or list) of strings.
#'
#' @param suffix A logical value, if TRUE, return the longest common suffix
#'   of the strings instead of the longest common prefix. default: FALSE
#'
#' @return A single string; "" when nothing is shared or the input is empty.
#'
#' @export
#'
str_common <- function(list, suffix = FALSE) {
  # empty input: nothing in common (previously min() of an empty list errored)
  if (length(list) == 0) {
    return("")
  }
  # split each string into single characters; reverse for suffix mode so the
  # scan below is always a prefix comparison (no stringi dependency needed)
  chars <- lapply(list, function(s) unlist(strsplit(s, "")))
  if (isTRUE(suffix)) {
    chars <- lapply(chars, rev)
  }
  # the common run can be at most as long as the shortest string
  len_min <- min(vapply(chars, length, integer(1)))
  common <- character(0)
  for (i in seq_len(len_min)) {
    # i-th character of every string; more than one unique value ends the run
    ch <- unique(vapply(chars, function(cv) cv[i], character(1)))
    if (length(ch) > 1) {
      break
    }
    common <- c(common, ch)
  }
  # undo the reversal so the suffix reads left-to-right again
  if (isTRUE(suffix)) {
    common <- rev(common)
  }
  paste0(common, collapse = "")
}
#' create experiment design for DESeq2 analysis
#'
#' Builds a one-column data.frame (condition) suitable as DESeq2 colData,
#' assigning each condition to an equal-sized consecutive run of ids.
#'
#' @param ids A set of variables, the name of samples, default: letters[1:4]
#'
#' @param conditions A set of variables, the levels for design, default:
#' letters[1:2]
#'
#' @return A data.frame with a factor column `condition` (levels in the order
#'   of `conditions`) and `ids` as row names.
#'
#' @export
design_example <- function(ids = letters[1:4], conditions = letters[1:2]) {
  # replicates per condition; warn when ids cannot be split evenly
  # (n_rep replaces a local named `rep`, which shadowed base::rep below)
  n_rep <- length(ids) / length(conditions)
  if (floor(n_rep) < n_rep) {
    warning("Number of ids not divided by conditions")
  }
  df <- data.frame(condition = factor(rep(conditions, each = n_rep),
                                      levels = conditions))
  rownames(df) <- ids
  df
}
#' import matrix to DESeq2
#'
#' Builds a DESeq2 dataset from a count data.frame and a design table
#' (~condition) and runs the DESeq() pipeline on it.
#'
#' @param df A data.frame of count matrix; its columns must include every
#'   row name of `coldata`.
#'
#' @param coldata A data.frame of design; must contain a "condition" column,
#'   with sample names as row names.
#'
#' @return A DESeqDataSet after running DESeq().
#'
#' @export
#'
DESeq2_data_from_matrix <- function(df, coldata) {
stopifnot(is.data.frame(df))
if(! "condition" %in% colnames(coldata)) {
print(colnames(coldata))
stop("condition - not found in coldata")
}
# subset of dataframe: keep only the samples described in coldata
dfx <- dplyr::select(df, rownames(coldata))
if (length(dfx) != nrow(coldata)) {
stop("data.frame and coldata not match")
}
# load DESeq2
# NOTE(review): the full `df` is passed here, not the subset `dfx` computed
# above — confirm whether DESeqDataSetFromMatrix should receive dfx instead
dds <- DESeq2::DESeqDataSetFromMatrix(df, coldata, ~condition)
dds <- DESeq2::DESeq(dds)
return(dds)
}
#' load the gene id/name lookup table for an organism
#'
#' Loads the bundled `genelist` table shipped with the goldclipReport
#' package as extdata/genelist.<organism>.rda and returns it. Stops when
#' the file does not exist.
#'
#' @param organism A character of organism, default: dm3
#'
#' @import biomaRt
#'
#' @export
#'
#' @description to-do, support all avilable organism
#' gene_id, gene_name/external_gene_id
#'
#' load from package data,
#' optional: download from ensembl, update
get_genelist <- function(organism = "dm3") {
  print(organism)
  rda_name <- paste("genelist", organism, "rda", sep = ".")
  rda_file <- system.file("extdata", rda_name, package = "goldclipReport")
  # guard clause: bail out early when the bundled data file is missing
  if (!file.exists(rda_file)) {
    stop('genelist file not exists')
  }
  print(rda_file)
  load(rda_file)  # brings `genelist` into scope
  genelist
}
#' add per-condition mean columns to a DESeq2 result table
#' assumes 2 replicates for each condition
#'
#' @param data A data frame of DESeq2 output, first 5 columns:
#' <id> <a-rep1> <a-rep2> <b-rep1> <b-rep2> ...
#'
#' @return The input data.frame merged (by "id") with two extra columns
#'   holding the row means of columns 2:3 and 4:5.
#'
#' @export
DESeq2_add_mean <- function(data){
  # only for DESeq2 output csv: columns 2:3 and 4:5 are the replicate pairs
  stopifnot(is.data.frame(data))
  # derive condition labels from the shared prefix of each replicate pair,
  # then strip the replicate marker ("_R"/"_rep", case-insensitive)
  ctl_label <- str_common(colnames(data)[2:3])
  tre_label <- str_common(colnames(data)[4:5])
  ctl_label <- gsub("_R|_rep", "", ctl_label, ignore.case = TRUE)
  tre_label <- gsub("_R|_rep", "", tre_label, ignore.case = TRUE)
  # row-wise mean of each replicate pair
  means <- data.frame(id = data$id,
                      ctl = rowMeans(data[, 2:3]),
                      tre = rowMeans(data[, 4:5]))
  colnames(means) <- c("id", ctl_label, tre_label)
  # attach the mean columns to the original table
  merge(means, data, by = "id")
}
#' read output of DESeq2_run function
#' csv format
#'
#' Reads the transcripts_deseq2.csv table, normalises the id column, and
#' appends per-condition mean columns via DESeq2_add_mean.
#'
#' @param x Path to csv file
#'
#' @return A data.frame with an "id" column plus the mean columns added by
#'   DESeq2_add_mean.
#'
#' @import readr
#' @import dplyr
#'
#' @export
DESeq2_csv2df <- function(x) {
# read DESeq2 csv file
df <- readr::read_csv(x, col_types = readr::cols())
# colnames(df)[1] <- 'id'
# the csv is written with row.names = TRUE (see deseq2_run), so the first
# column is the row-name index — drop it and treat the next as the gene id
df[, 1] <- NULL # remove the first column
colnames(df)[1] <- "id"
df$id <- as.character(df$id)
# add mean columns
df2 <- DESeq2_add_mean(df)
return(df2)
}
#' read output of DESeq2_run function
#' xls format
#'
#' @param x Path to xls file (tab-delimited text, not binary Excel)
#'
#' @return The parsed table as returned by readr::read_delim.
#'
#' @import readr
#' @import dplyr
#'
#' @export
DESeq2_xls2df <- function(x) {
# read DESeq2 tab-delimited file
#df <- readr::read_csv(x, col_types = readr::cols())
df <- readr::read_delim(x, "\t", col_types = readr::cols())
return(df)
}
# de_deseq2_plot <- function(df, withTE = FALSE, add_sigNames = TRUE,
# extra_ids = c("CG9754", "nxf2", "piwi")) {
# s1 <- colnames(df)[2:3]
# df[, s1[1]] <- log10(df[, s1[1]])
# df[, s1[2]] <- log10(df[, s1[2]])
# # gene list
# dfGene <- filtGeneId(df, gene = TRUE, te = FALSE)
# # filter > 0
# # dfGene <- dfGene[is.finite(dfGene$shattp2) & is.finite(dfGene$shNxf2), ]
# dfGene <- dplyr::filter_(dfGene, s1[1] > 0 & s1[2] > 0)
# p1 <- de_scatter(dfGene, s1[1], s1[2],
# add_sigNames = add_sigNames,
# extra_ids = extra_ids)
# # p2 <- de_ma(dfGene, add_sigNames = add_sigNames, extra_ids = extra_ids)
# # print(p1)
# # print(p2)
# # pout <- list(p1, p2)
# pout <- list(p1)
#
# # te list
# if (isTRUE(withTE)) {
# dfTE <- filtGeneId(df, gene = FALSE, te = TRUE)
# p3 <- de_scatter(dfTE, s1[1], s1[2], extra_ids = extra_ids)
# pout <- c(pout, p3)
# # print(p3)
# }
# return(pout)
# }
#
#
#
# de_scatter <- function(df, xName, yName, add_sigNames = TRUE,
# extra_ids = c("piwi", "CG9754"),
# tt = "Differential expression", cutoff = 0.05) {
# library(ggplot2)
# library(ggrepel)
# # at most, 10 labels
# dfSig <- dplyr::filter(df, padj <= cutoff) %>%
# dplyr::arrange(padj)
# dfEx <- dplyr::filter(df, id %in% extra_ids)
# if (isTRUE(add_sigNames)) {
# dfLabel <- rbind(dfEx, dfSig)
# } else {
# dfLabel <- dfEx
# }
# if (nrow(dfLabel) > 3) dfLabel <- dfLabel[1:3, ]
# # tt = paste(xName, yName, sep = "_")
# tt = glue::glue("DE-analysis: {xName} vs {yName}")
#
# p <- ggplot(df, aes_string(xName, yName)) +
# geom_abline(slope = 1, intercept = 0, color = "grey10") +
# geom_abline(slope = 1, intercept = log10(2),
# color = "grey30", linetype = 2) +
# geom_abline(slope = 1, intercept = -log10(2),
# color = "grey30", linetype = 2) +
# geom_point(size = 1.3,
# color = ifelse(is.na(df$padj), "grey20",
# ifelse(df$padj <= cutoff, "red", "grey20"))) +
# # ggtitle(tt) +
# xlab(xName) + ylab(yName) +
# scale_x_continuous(limits = c(0, 6),
# breaks = seq(0, 6, 1),
# labels = seq(0, 6, 1),
# expand = c(0, 0, 0, 0)) +
# scale_y_continuous(limits = c(0, 6),
# breaks = seq(0, 6, 1),
# labels = seq(0, 6, 1),
# expand = c(0, 0, 0, 0)) +
# theme_classic() +
# theme(panel.border = element_rect(color = "black", fill = NA, size = .7),
# plot.title = element_text(color = "black", hjust = .5, size = 14),
# axis.title = element_text(color = "black", size = 12),
# axis.text = element_text(color = "black", size = 10))
# if (nrow(dfLabel) > 0) {
# p <- p + geom_text_repel(
# data = dfLabel,
# label = dfLabel$id,
# box.padding = 1,
# segment.size = 0.4,
# segment.color = "grey50",
# direction = "both"
# )
# }
# return(p)
# }
#
# matrix_DESeq2_batch <- function(fn, genelist, outDir) {
# # only for featureCount output
# stopifnot(file.exists(fn))
# stopifnot(is.data.frame(genelist))
# stopifnot(all(c("gene_id", "gene_name") %in% names(genelist)))
# df <- fc2df(fn)
# df <- dplyr::mutate(df,
# id = plyr::mapvalues(id,
# genelist$gene_id,
# genelist$gene_name,
# warn_missing = FALSE))
# # recursive pair
# smp_ids <- names(df)[-1]
# smp_ids <- gsub("_rep\\d+", "", smp_ids)
# smp_ids <- unique(smp_ids)
# # combinations
# cc <- combn(smp_ids, 2)
# for (i in seq(ncol(cc))) {
# smp <- cc[, i]
# smp_line <- paste(smp, collapse = "_vs_")
# g1 <- stringr::str_which(names(df), smp[1])
# g2 <- stringr::str_which(names(df), smp[2])
# ## subset of data.frame
# dfx <- dplyr::select_(df, .dots = c(g1, g2)) %>% as.data.frame()
# rownames(dfx) <- df$id
# ## make coldata
# coldata <- data.frame(row.names = colnames(dfx),
# condition = rep(c("wt", "mut"), each = 2))
# dds <- load_matrix_to_DESeq2(dfx, coldata)
# ## out_dir
# dfDir <- file.path(outDir, smp_line)
# if (! dir.exists(dfDir)) {
# dir.create(dfDir, recursive = TRUE)
# }
# oldDir <- getwd()
# setwd(dfDir)
# run_DESeq2(dds)
# setwd(oldDir)
# print(smp_line)
# }
# }
#
#
# kallistoToDESeq2Batch <- function(fn, outDir, tx2gene = NULL,
# gene_level = FALSE) {
# # only for kallisto output
# # directory structure
# # suppose 2 replicates for each sample
# # path-to-kallisto/prjname/abundance.h5
# fn <- sort(fn)
# # combination
# smp_ids <- gsub("RNAseq_|_rep\\d+", "", basename(dirname(fn)))
# smp_ids <- unique(smp_ids)
# # combinations
# cc <- combn(smp_ids, 2)
# for (i in seq(ncol(cc))) {
# smp <- cc[, i]
# smp_line <- paste(smp, collapse = "_vs_")
# g1 <- stringr::str_which(fn, smp[1])
# g2 <- stringr::str_which(fn, smp[2])
# # sub files
# fx <- fn[c(g1, g2)]
# dds <- kallistoToDESeq2(fx, tx2gene, gene_level = TRUE)
# ## out_dir
# dfDir <- file.path(outDir, smp_line)
# if (! dir.exists(dfDir)) {
# dir.create(dfDir, recursive = TRUE)
# }
# oldDir <- getwd()
# setwd(dfDir)
# run_DESeq2(dds)
# setwd(oldDir)
# print(smp_line)
# }
# }
#
# tetoolkit2DESeq2 <- function(fn) {
# # f <- "../tetranscript/dsCG9754_vs_dsPiwi/dsCG9754_vs_dsPiwi.cntTable"
# df <- readr::read_delim(fn, "\t", quote = "\"", col_types = cols())
# colnames(df)[1] <- 'id'
# dfx <- df[, -1] %>% as.data.frame()
# n <- gsub(".Aligned.sortedByCoord.out.bam|.C|.T|RNAseq_", "", basename(names(dfx)))
# rownames(dfx) <- df$id
# colnames(dfx) <- n
# coldata <- data.frame(row.names = colnames(dfx),
# condition = rep(c("mut", "wt"), each = 2))
# dds <- load_matrix_to_DESeq2(dfx, coldata)
# ## out_dir
# dfDir <- dirname(fn)
# oldDir <- getwd()
# setwd(dfDir)
# run_DESeq2(dds)
# setwd(oldDir)
# print(basename(fn))
# }
#
# fc2df <- function(fn, normalize = FALSE,
# convertLog10 = FALSE,
# convertInt = TRUE) {
# # parse featureCounts
# # convert count to INT
# # convert count to log10
# # normalize, RPM
# library(readr)
# library(dplyr)
# library(tidyr)
# df <- read_delim(fn, "\t", col_types = cols(), comment = "#") %>%
# dplyr::rename(id = Geneid)
# df1 <- df[, -c(2:6)]
#
# # normalization
# # RPM: reads per million
# if (isTRUE(normalize)) {
# df2 <- gather(df1, sample, count, -1) %>%
# dplyr::group_by(sample) %>%
# mutate(norm = count / sum(count) * 1e6) %>%
# dplyr::rename(value = norm)
# } else {
# df2 <- gather(df1, sample, count, -1) %>%
# dplyr::rename(value = count)
# }
# #
# if (isTRUE(convertLog10)) {
# df2 <- dplyr::mutate(df2, value = log10(value))
# }
#
# if(isTRUE(convertInt)) {
# df2 <- dplyr::mutate(df2, value = round(value, 0))
# }
#
# df3 <- ungroup(df2) %>%
# dplyr::select(id, sample, value) %>%
# mutate(sample = gsub("RNAseq_|.piRNA_clusters|.unique|.merged|.Aligned|.sortedByCoord|.out|.bam", "", basename(sample))) %>%
# spread(sample, value)
# return(df3)
# }
#
#
# #' import kallisto to DESeq2
# #'
# #' @param kal_files A set of files
# #'
# #' @param coldata A data.frame of experiment design
# #'
# #'
# #' @import tximport
# #'
# #' @export
# #'
# DESeq2_data_from_kallisto <- function(kal_files, coldata, t2g) {
#
# library(tximport)
# kal_files <- kal_files[file.exists(kal_files)]
# # transcript-level
# txi <- tximport(files = kal_files, type = "kallisto", txOut = TRUE,
# dropInfReps = TRUE)
# # # gene-level
# # txi <- tximport(files = kal_files, type = "kallisto", txOut = FALSE,
# # tx2gene = t2g, dropInfReps = TRUE)
# rownames(txi$counts) <- as.character(rownames(txi$counts))
# # checkout dataset
# if(length(kal_files) != nrow(coldata)) {
# stop('kallisto file not match coldata')
# }
# if(! "condition" %in% colnames(coldata)) {
# print(colnames(coldata))
# stop("condition - not found in coldata")
# }
# dds <- DESeqDataSetFromTximport(txi = txi, colData = coldata,
# design = ~condition)
# dds <- DESeq(dds)
# return(dds)
# }
#
# filtGeneId <- function(df, gene = TRUE, te = TRUE) {
# # filt output of TEtranscripts
# if (isTRUE(gene) & isTRUE(te)) {
# return(df) # do not filt
# } else if (isTRUE(gene)) {
# # id, do not contain ":"
# gids <- grepl(":", df$id)
# return(df[! gids, ])
# } else if(isTRUE(te)) {
# # id, only contain ":"
# teids <- grep(":", df$id)
# return(df[teids, ])
# } else {
# warning("do not return records")
# }
# }
#' run regular DESeq2 analysis
#'
#' @param dds A variable of dds
#'
#'
#' @import ggplot2
#' @import DESeq2
#' @import pheatmap
#' @import RColorBrewer
#' @import SummarizedExperiment
#'
#' @export
#'
# DESeq2_run <- function(dds, path_out, pval_cutoff = 0.05) {
#
# # prepare files
# file_count <- file.path(path_out, "transcripts_deseq2.csv")
# file_plot <- file.path(path_out, c("figure1_MA_plot.png",
# "figure2_MA_plot_LFC.png",
# "figure3_sample_counts.png",
# "figure4_PCA_plot.png",
# "figure5_dispersion.png",
# "figure6_sample_distance.png",
# "figure7_top_genes.png",
# "figure8_volcano.png"))
# #
# # # prepare variables
# # library(DESeq2, quietly = TRUE)
# # library("ggplot2")
# # library(pheatmap)
# # library("RColorBrewer")
#
# # start
# res <- DESeq2::results(dds)
# resOrdered <- res[order(res$padj), ]
# # print(head(resOrdered))
# # resSig <- dplyr::filter(resOrdered, padj < pval_cutoff)
# resSig <- subset(as.data.frame(resOrdered), padj < pval_cutoff)
# resLFC <- DESeq2::lfcShrink(dds, coef = 2, res = res)
# rld <- DESeq2::rlog(dds, blind = FALSE)
# #vsd <- DESeq2::varianceStabilizingTransformation(dds, blind = FALSE)
# #vsd.fast <- DESeq2::vst(dds, blind = FALSE)
# ntd <- DESeq2::normTransform(dds)
# #add normalized counts
# ncount <- DESeq2::counts(dds, normalized = TRUE)
# exp <- cbind(as.data.frame(ncount), as.data.frame(res))
# expOrdered <- exp[order(exp$padj), ]
#
# # save data to file
# write.csv(expOrdered, file_count, quote = FALSE, row.names = TRUE)
#
# # make plots
# # MA plot
# png(file_plot[1], width = 1200, height = 1200, res = 300)
# DESeq2::plotMA(res, ylim = c(-2, 2))
# dev.off()
#
# png(file_plot[2], width = 1200, height = 1200, res = 300)
# DESeq2::plotMA(resLFC, ylim = c(-2, 2))
# dev.off()
#
# png(file_plot[3], width = 1200, height = 1200, res = 300)
# DESeq2::plotCounts(dds, gene = which.min(res$padj), intgroup = "condition")
# dev.off()
#
# # PCA
# png(file_plot[4], width = 2000, height = 2000, res = 300)
# print(DESeq2::plotPCA(rld, intgroup = c("condition")))
# dev.off()
#
# png(file_plot[5], width = 1500, height = 1500, res = 300)
# DESeq2::plotDispEsts(dds)
# dev.off()
#
# # Sample distance
# png(file_plot[6], width = 1000, height = 1000, res = 300)
# sampleDists <- dist(t(SummarizedExperiment::assay(rld)))
# sampleDistMatrix <- as.matrix(sampleDists)
# rownames(sampleDistMatrix) <- rld$condition
# colnames(sampleDistMatrix) <- NULL
# colors <- colorRampPalette( rev(RColorBrewer::brewer.pal(9, "Blues")) )(255)
# pheatmap::pheatmap(sampleDistMatrix,
# clustering_distance_rows = sampleDists,
# clustering_distance_cols = sampleDists,
# col = colors)
# dev.off()
#
# # Count matrix of top genes
# png(file_plot[7], width = 1200, height = 1200, res = 300)
# select <- order(rowMeans(DESeq2::counts(dds, normalized = TRUE)),
# decreasing = TRUE)[1:30]
# ma <- SummarizedExperiment::assay(ntd)[select, ]
# df <- as.data.frame(SummarizedExperiment::colData(dds)[, c("condition")])
# colnames(df) <- "condition"
# rownames(df) <- colnames(ma)
# pheatmap::pheatmap(SummarizedExperiment::assay(ntd)[select,],
# cluster_rows = FALSE,
# cluster_cols = FALSE,
# show_rownames = FALSE,
# annotation_col = df)
# dev.off()
# }
# ------------------------------------------------------------------------------
#!/bin/Rscript
# account.snp.sample.fate.R
#
# This script accounts for the fate of each SNP and sample through the QC
# process.
library(dplyr)
library(readr)
library(purrr)
library(tidyr)
################################################################################
################ Section 1: Manifest Resolution and Liftover ###############
################################################################################
# The original ImmunoChip manifests that we received contain multiple types of
# conflicts--SNPs mapped to multiple positions across manifests, positions
# mapped to multiple SNPs, and names varied for the same SNP.
#
# Note that we did not have RA data when we performed the initial manifest
# resolution. RA is subsequently coerced to be consistent with the consensus
# derived from the original 7 datasets.
#
# The datasets that we received were mapped to hg18. In this section, we also
# account for liftover failures.
# Read manifests for original consortia:
# `original.consortia` maps each consortium code (CONS) to the file stem of
# its PLINK fileset under data/immchip/.
original.consortia <- tibble(CONS = c("ced", "ibd", "ms", "sle_g", "sle_o", "t1d", "t1d_asp"),
                             dataset_stem = c("CeD_phen", "ibdrelease5_QCI", "MS",
                                              "Genentech_phenos", "OMRF_all_chr_phenos", "UK",
                                              "ASP"))
# Read each consortium's .bim manifest into a nested list-column, then flatten
# to one row per (CONS, SNP). MANIFEST_* columns keep the as-received
# chromosome/position/alleles; CM (genetic distance) is not needed and dropped.
snp.table <- original.consortia %>%
  mutate(manifest_file = paste0("data/immchip/", dataset_stem, ".bim")) %>%
  mutate(manifest_data = map(manifest_file, ~ read_tsv(.,
                                                       col_names = c("MANIFEST_CHR", "MANIFEST_SNP",
                                                                     "CM", "MANIFEST_POS",
                                                                     "MANIFEST_A1", "MANIFEST_A2"),
                                                       col_types = "icdicc"))) %>%
  unnest() %>%
  select(-dataset_stem, -manifest_file, -CM)
# Read SNPs to remap, rename and remove:
# Each of the three tables below is read per consortium from the manifest
# resolution logs and flattened to one row per SNP.
snps.to.remap <- original.consortia %>%
  mutate(remap_file = paste0("logs/manifest/", CONS, ".snp.newpos.txt")) %>%
  mutate(remap_data = map(remap_file, ~ read_tsv(.,
                                                 col_names = c("SNP", "CHR_new", "POS_new"),
                                                 col_types = "cii"))) %>%
  unnest() %>%
  select(-dataset_stem, -remap_file)
snps.to.rename <- original.consortia %>%
  mutate(rename_file = paste0("logs/manifest/", CONS, ".snp.rename.txt")) %>%
  mutate(rename_data = map(rename_file, ~ read_tsv(.,
                                                   col_names = c("SNP_old", "SNP_new"),
                                                   col_types = "cc"))) %>%
  unnest() %>%
  select(-dataset_stem, -rename_file)
# SNPs removed during manifest resolution get a STATUS label here so the
# reason for their removal is carried through the accounting.
snps.to.remove <- original.consortia %>%
  mutate(remove_file = paste0("logs/manifest/", CONS, ".snp.remove.txt")) %>%
  mutate(remove_data = map(remove_file, ~ read_tsv(.,
                                                   col_names = c("SNP"),
                                                   col_types = "c"))) %>%
  unnest() %>%
  select(-dataset_stem, -remove_file) %>%
  mutate(STATUS = "REMOVE_MANIFEST_INCONSISTENT")
# Update original consortia to reflect manifest resolution:
# Order matters: remap coordinates first, then mark removals, then rename.
# After this step CHR/POS/SNP hold the resolved values and STATUS records
# removals; SNP is NA for removed markers.
snp.table <- snp.table %>%
  select(CONS, MANIFEST_SNP, MANIFEST_CHR, MANIFEST_POS, MANIFEST_A1, MANIFEST_A2) %>%
  # Resolve multimapping SNPs:
  left_join(snps.to.remap, by = c("CONS", "MANIFEST_SNP" = "SNP")) %>%
  mutate(CHR = ifelse(is.na(CHR_new), MANIFEST_CHR, CHR_new),
         POS = ifelse(is.na(POS_new), MANIFEST_POS, POS_new)) %>%
  # Remove redundant SNPs:
  left_join(snps.to.remove, by = c("CONS", "MANIFEST_SNP" = "SNP")) %>%
  # Rename remaining SNPs:
  left_join(snps.to.rename, by = c("CONS", "MANIFEST_SNP" = "SNP_old")) %>%
  # Do not assign new names to SNPs that have been removed:
  mutate(SNP = ifelse(!is.na(STATUS), NA,
                      ifelse(!is.na(SNP_new), SNP_new, MANIFEST_SNP))) %>%
  # group_by(STATUS, is.na(SNP)) %>% summarize(n = n()) %>% ungroup()
  # filter(CONS == "ced" & MANIFEST_CHR == 1 & MANIFEST_POS == 159785154)
  # mutate(SNP = ifelse(!is.na(SNP_new), SNP_new, MANIFEST_SNP)) %>%
  select(CONS, MANIFEST_SNP, MANIFEST_CHR, MANIFEST_POS, MANIFEST_A1, MANIFEST_A2, SNP, CHR, POS,
         STATUS)
rm(snps.to.remap, snps.to.remove, snps.to.rename)
# Expand SNP table to include all strata for original consortia:
# One stratum vector per stratified consortium; sle_o, t1d and t1d_asp are
# unstratified and get a single NA stratum each.
ced.strata <- c("British", "Dutch", "Gosias_mystery", "Indian", "Italian",
                "Polish", "Romanian", "Spanish", "Unknown")
ibd.strata <- c("Australia", "Belgium", "China", "Denmark", "Germany", "IMSGC",
                "Iran", "Italy", "Lithuania-Baltic", "Netherlands",
                "New_Zealand", "Norway", "Slovenia", "Spain", "Sweden", "UK",
                "Unknown", "USA-Canada")
ms.strata <- c("AUSNZ", "Belgium", "Denmark", "Finland", "France", "Germany",
               "Italy", "Norway", "Sweden", "UK", "Unknown", "US")
ra.strata <- c("ES", "NL", "SE-E", "SE-U", "UK", "US")
sle_g.strata <- c("AA", "EA", "Others")
cons.strata <- tibble(
  CONS = rep(c("ced", "ibd", "ms", "ra", "sle_g", "sle_o", "t1d", "t1d_asp"),
             times = c(length(ced.strata), length(ibd.strata),
                       length(ms.strata), length(ra.strata),
                       length(sle_g.strata), 1, 1, 1)),
  STRATUM = c(ced.strata, ibd.strata, ms.strata, ra.strata, sle_g.strata,
              NA, NA, NA))
rm(ced.strata, ibd.strata, ms.strata, ra.strata, sle_g.strata)
# Cross SNPs with strata; RA is QC'd separately and rejoined below, and the
# ms.Unknown stratum is appended from the raw manifest because it skipped
# manifest harmonization and liftover.
snp.table <- cons.strata %>%
  # Exclude RA (QC'd separately) and ms.Unknown (was not lifted over):
  filter(CONS != "ra" & !(CONS == "ms" & STRATUM == "Unknown")) %>%
  left_join(snp.table, by = "CONS")
# The ms.Unknown stratum was not subjected to manifest harmonization or liftover:
ms.Unknown <- read_tsv("data/immchip/MS.bim",
                       col_names = c("MANIFEST_CHR", "MANIFEST_SNP", "CM", "MANIFEST_POS",
                                     "MANIFEST_A1", "MANIFEST_A2"),
                       col_types = "icdicc") %>%
  mutate(CONS = "ms",
         STRATUM = "Unknown") %>%
  select(CONS, STRATUM, MANIFEST_SNP, MANIFEST_CHR, MANIFEST_POS, MANIFEST_A1, MANIFEST_A2) %>%
  mutate(SNP = MANIFEST_SNP, CHR = MANIFEST_CHR, POS = MANIFEST_POS)
snp.table <- bind_rows(snp.table,
                       ms.Unknown) %>%
  arrange(CONS, STRATUM, CHR, POS)
rm(ms.Unknown)
# RA was added later; it arrived with separate files for each stratum:
ra.snp.table <- tibble(CONS = c("ra", "ra", "ra", "ra", "ra", "ra"),
                       STRATUM = c("ES", "NL", "SE-E", "SE-U", "UK", "US")) %>%
  mutate(manifest_file = paste0("data/immchip/iChip_RACI_PhaseII_", STRATUM, ".bim")) %>%
  mutate(manifest_data = map(manifest_file, ~ read_tsv(.,
                                                       col_names = c("MANIFEST_CHR", "MANIFEST_SNP",
                                                                     "CM", "MANIFEST_POS",
                                                                     "MANIFEST_A1", "MANIFEST_A2"),
                                                       col_types = "icdicc"))) %>%
  unnest() %>%
  select(-manifest_file, -CM)
# RA manifest resolution was done later, with outputs stored in QC directory.
# (Plain string literals here; the previous paste0() around a single constant
# string was a no-op.)
ra.snps.to.remap <- read_tsv("logs/qc/ra/ra.snp.newpos.txt",
                             col_names = c("SNP", "CHR_new", "POS_new"),
                             col_types = "cii")
ra.snps.to.rename <- read_tsv("logs/qc/ra/ra.snp.rename.txt",
                              col_names = c("SNP_old", "SNP_new"),
                              col_types = "cc")
### Note that there are no RA SNPs to remove
# Update RA dataset to reflect manifest resolution (remap first, then rename,
# mirroring the treatment of the original consortia above):
ra.snp.table <- ra.snp.table %>%
  select(CONS, STRATUM, MANIFEST_SNP, MANIFEST_CHR, MANIFEST_POS, MANIFEST_A1, MANIFEST_A2) %>%
  # Resolve multimapping SNPs:
  left_join(ra.snps.to.remap, by = c("MANIFEST_SNP" = "SNP")) %>%
  mutate(CHR = ifelse(is.na(CHR_new), MANIFEST_CHR, CHR_new),
         POS = ifelse(is.na(POS_new), MANIFEST_POS, POS_new)) %>%
  # Rename SNPs:
  left_join(ra.snps.to.rename, by = c("MANIFEST_SNP" = "SNP_old")) %>%
  mutate(SNP = ifelse(!is.na(SNP_new), SNP_new, MANIFEST_SNP)) %>%
  select(CONS, STRATUM, MANIFEST_SNP, MANIFEST_CHR, MANIFEST_POS, MANIFEST_A1, MANIFEST_A2, SNP,
         CHR, POS)
rm(ra.snps.to.remap, ra.snps.to.rename)
# Combine RA dataset with the other consortia:
snp.table <- bind_rows(snp.table,
                       ra.snp.table)
rm(ra.snp.table)
### We now deal with liftover failures for each consortium. Note that the
### Unknown MS stratum was not subjected to liftover. So we temporarily set its
### SNP names to NA (so no errors are recorded). We will then set them back to
### the original manifest names (since these were not renamed, either).
# Read SNPs that failed liftover:
# RA logs live under logs/qc/ra/; all other consortia under logs/manifest/.
liftover.failures <- cons.strata %>%
  select(CONS) %>%
  unique() %>%
  mutate(liftover_file = ifelse(CONS == "ra",
                                paste0("logs/qc/ra/ra.liftover.hg19.removed.txt"),
                                paste0("logs/manifest/", CONS, ".liftover.hg19.removed.txt"))) %>%
  mutate(liftover_data = map(liftover_file, ~ read_tsv(.,
                                                       col_names = c("SNP"),
                                                       col_types = "c"))) %>%
  unnest() %>%
  select(-liftover_file) %>%
  mutate(LIFT_STATUS = "REMOVE_LIFTOVER_FAIL")
# Update SNP table to incorporate liftover failures:
# An existing STATUS (from manifest resolution) takes precedence over the
# liftover-failure label.
snp.table <- snp.table %>%
  mutate(SNP = ifelse((CONS == "ms" & STRATUM == "Unknown"), NA, SNP),
         CHR = ifelse((CONS == "ms" & STRATUM == "Unknown"), MANIFEST_CHR, CHR),
         POS = ifelse((CONS == "ms" & STRATUM == "Unknown"), MANIFEST_POS, POS)) %>%
  left_join(liftover.failures, by = c("CONS", "SNP")) %>%
  mutate(STATUS = ifelse(is.na(STATUS), LIFT_STATUS, STATUS)) %>%
  select(-LIFT_STATUS) %>%
  mutate(SNP = ifelse((CONS == "ms" & STRATUM == "Unknown"), MANIFEST_SNP, SNP))
rm(liftover.failures)
# Read liftover results:
# hg19 coordinates for the SNPs that lifted over successfully.
liftover.coords <- cons.strata %>%
  select(CONS) %>%
  unique() %>%
  mutate(liftover_file = ifelse(CONS == "ra",
                                paste0("logs/qc/ra/ra.liftover.out.map"),
                                paste0("logs/manifest/", CONS, ".liftover.out.map"))) %>%
  mutate(liftover_data = map(liftover_file, ~ read_tsv(.,
                                                       col_names = c("new_CHR", "SNP", "CM",
                                                                     "new_POS"),
                                                       col_types = "icdi"))) %>%
  unnest() %>%
  select(-liftover_file)
# Check that the number of SNPs agrees:
# liftover.coords %>%
#   group_by(CONS) %>%
#   summarize(n = n()) %>%
#   ungroup()
# snp.table %>%
#   filter(is.na(STATUS)) %>%
#   filter(!(CONS == "ms" & STRATUM == "Unknown")) %>%
#   select(CONS, MANIFEST_SNP, MANIFEST_CHR, MANIFEST_POS) %>%
#   unique() %>%
#   group_by(CONS) %>%
#   summarize(n = n()) %>%
#   ungroup()
# Update coordinates for liftover:
# ms.Unknown keeps its original (hg18) coordinates since it was not lifted.
snp.table <- snp.table %>%
  left_join(liftover.coords, by = c("CONS", "SNP")) %>%
  # Adjust positions for SNPs that were lifted over:
  mutate(CHR = ifelse(is.na(STATUS) & !(CONS == "ms" & STRATUM == "Unknown"), new_CHR, CHR),
         POS = ifelse(is.na(STATUS) & !(CONS == "ms" & STRATUM == "Unknown"), new_POS, POS)) %>%
  select(-new_CHR, -CM, -new_POS)
rm(liftover.coords)
################################################################################
###################    Section 2: Consortium-Level QC (SNPs)   ##################
################################################################################
# QC was performed on each consortium independently. In this section, we
# identify SNPs that were filtered for missingness or violation of
# Hardy-Weinberg equilibrium.
# NOTE(review): the four pipelines below differ only in file suffix, status
# label, and reader (read_tsv vs read_table2 -- see the sle_g.Others note).
snp.miss.05 <- cons.strata %>%
  # Files differ depending on whether the consortium has strata:
  mutate(miss_05_file = ifelse(is.na(STRATUM),
                               paste0("logs/qc/", CONS, "/", CONS, ".snp.miss.05.removed.txt"),
                               paste0("logs/qc/", CONS, "/", CONS, ".", STRATUM,
                                      ".snp.miss.05.removed.txt"))) %>%
  # Some strata did not have SNPs removed:
  filter(file.exists(miss_05_file)) %>%
  # Read SNP list for each stratum:
  mutate(miss_data = map(miss_05_file, ~ read_tsv(., col_names = "SNP", col_types = "c"))) %>%
  unnest() %>%
  select(-miss_05_file) %>%
  mutate(MISS_05_STATUS = "REMOVE_MISS_05")
snp.hwe.initial <- cons.strata %>%
  # Files differ depending on whether the consortium has strata:
  mutate(hwe_initial_file = ifelse(is.na(STRATUM),
                                   paste0("logs/qc/", CONS, "/", CONS,
                                          ".hwe.snps.removed.lenient.txt"),
                                   paste0("logs/qc/", CONS, "/", CONS, ".", STRATUM,
                                          ".hwe.snps.removed.lenient.txt"))) %>%
  # Some strata did not have SNPs removed:
  filter(file.exists(hwe_initial_file)) %>%
  # Read SNP list for each stratum:
  mutate(hwe_data = map(hwe_initial_file, ~ read_table2(., col_names = "SNP", col_types = "c"))) %>%
  unnest() %>%
  select(-hwe_initial_file) %>%
  mutate(HWE_INITIAL_STATUS = "REMOVE_HWE_INITIAL")
### Note that sle_g.Others has A TONNE of HWE failures. There is initial white
### space on each line, so we use read_table2 to parse correctly.
snp.miss.01 <- cons.strata %>%
  # Files differ depending on whether the consortium has strata:
  mutate(miss_01_file = ifelse(is.na(STRATUM),
                               paste0("logs/qc/", CONS, "/", CONS, ".snp.miss.01.removed.txt"),
                               paste0("logs/qc/", CONS, "/", CONS, ".", STRATUM,
                                      ".snp.miss.01.removed.txt"))) %>%
  # Some strata did not have SNPs removed:
  filter(file.exists(miss_01_file)) %>%
  # Read SNP list for each stratum:
  mutate(miss_data = map(miss_01_file, ~ read_tsv(., col_names = "SNP", col_types = "c"))) %>%
  unnest() %>%
  select(-miss_01_file) %>%
  mutate(MISS_01_STATUS = "REMOVE_MISS_01")
snp.hwe.strict <- cons.strata %>%
  # Files differ depending on whether the consortium has strata:
  mutate(hwe_strict_file = ifelse(is.na(STRATUM),
                                  paste0("logs/qc/", CONS, "/", CONS,
                                         ".hwe.snps.removed.strict.txt"),
                                  paste0("logs/qc/", CONS, "/", CONS, ".", STRATUM,
                                         ".hwe.snps.removed.strict.txt"))) %>%
  # Some strata did not have SNPs removed:
  filter(file.exists(hwe_strict_file)) %>%
  # Read SNP list for each stratum:
  mutate(hwe_data = map(hwe_strict_file, ~ read_tsv(., col_names = "SNP", col_types = "c"))) %>%
  unnest() %>%
  select(-hwe_strict_file) %>%
  mutate(HWE_STRICT_STATUS = "REMOVE_HWE_STRICT")
# Update SNP table to reflect QC results:
# Filters are applied in chronological QC order; the FIRST recorded STATUS
# wins, so each SNP is attributed to the earliest filter that removed it.
snp.table <- snp.table %>%
  # Initial (5%) SNP missingness:
  left_join(snp.miss.05, by = c("CONS", "STRATUM", "SNP")) %>%
  # filter(!is.na(STATUS) & !is.na(MISS_05_STATUS)) %>%
  mutate(STATUS = ifelse(is.na(STATUS), MISS_05_STATUS, STATUS)) %>%
  select(-MISS_05_STATUS) %>%
  # Lenient (0.00000001) HWE:
  left_join(snp.hwe.initial, by = c("CONS", "STRATUM", "SNP")) %>%
  mutate(STATUS = ifelse(is.na(STATUS), HWE_INITIAL_STATUS, STATUS)) %>%
  select(-HWE_INITIAL_STATUS) %>%
  # Strict (1%) SNP missingness:
  left_join(snp.miss.01, by = c("CONS", "STRATUM", "SNP")) %>%
  mutate(STATUS = ifelse(is.na(STATUS), MISS_01_STATUS, STATUS)) %>%
  select(-MISS_01_STATUS) %>%
  # Strict (0.00001) HWE:
  left_join(snp.hwe.strict, by = c("CONS", "STRATUM", "SNP")) %>%
  mutate(STATUS = ifelse(is.na(STATUS), HWE_STRICT_STATUS, STATUS)) %>%
  select(-HWE_STRICT_STATUS)
rm(snp.miss.05, snp.hwe.initial, snp.miss.01, snp.hwe.strict)
# A number of strata were excluded from analysis:
# (ced.Unknown -- ALL_PHENOS_UNKNOWN -- and ms.Unknown -- INSUFFICIENT_SUBJECTS
# -- are not listed here; they are marked separately after the manifest check
# below.)
excluded.strata <- tibble(
  CONS = c("ced", "ibd", "ibd", "sle_g", "sle_g"),
  STRATUM = c("Indian", "China", "Iran", "AA", "Others"),
  STRATUM_EXCLUDE_REASON = c(
    "INSUFFICIENT_SUBJECTS_AFTER_OUTLIER_REMOVAL",
    "INSUFFICIENT_SUBJECTS",
    "INSUFFICIENT_SUBJECTS_AFTER_OUTLIER_REMOVAL",
    "INSUFFICIENT_SUBJECTS_AFTER_OUTLIER_REMOVAL",
    "INSUFFICIENT_SUBJECTS_AFTER_OUTLIER_REMOVAL"))
# Attach the exclusion reason (NA for retained strata):
snp.table <- snp.table %>%
  left_join(excluded.strata, by = c("CONS", "STRATUM"))
# Read final post-QC manifests:
final.snp.manifest <- cons.strata %>%
  mutate(final_manifest = ifelse(is.na(STRATUM),
                                 paste0("results/qc/final_counts/", CONS, ".all.qc.bim"),
                                 paste0("results/qc/final_counts/", CONS, ".", STRATUM,
                                        ".all.qc.bim"))) %>%
  # Not all strata survived QC:
  filter(file.exists(final_manifest)) %>%
  mutate(manifest_data = map(final_manifest, ~ read_tsv(.,
                                                        col_names = c("CHR", "SNP", "CM", "POS",
                                                                      "A1", "A2"),
                                                        col_types = "icdicc"))) %>%
  unnest() %>%
  select(-final_manifest)
# Do all remaining SNPs agree with the QC'd manifests (where available)?
# Both anti_joins (and the full_join check) should return zero rows.
final.snp.manifest %>%
  anti_join(snp.table %>% filter(!grepl("^REMOVE_", STATUS) & is.na(STRATUM_EXCLUDE_REASON)),
            by = c("CONS", "STRATUM", "SNP", "CHR", "POS"))
snp.table %>%
  filter(!grepl("^REMOVE_", STATUS) & is.na(STRATUM_EXCLUDE_REASON)) %>%
  anti_join(final.snp.manifest, by = c("CONS", "STRATUM", "SNP", "CHR", "POS"))
snp.table %>%
  # Ignore SNPs or strata that were removed:
  filter(!grepl("^REMOVE_", STATUS) & is.na(STRATUM_EXCLUDE_REASON)) %>%
  # anti_join(final.snp.manifest, by = c("CONS", "STRATUM", "SNP", "CHR", "POS"))
  full_join(final.snp.manifest, by = c("CONS", "STRATUM", "SNP", "CHR", "POS")) %>%
  filter(is.na(MANIFEST_SNP) | is.na(CM))
### We have successfully accounted for SNPs up to this point
rm(final.snp.manifest)
# ced.Unknown and ms.Unknown strata were excluded from subsequent analysis:
snp.table <- snp.table %>%
  mutate(STRATUM_EXCLUDE_REASON = ifelse(CONS == "ced" & STRATUM == "Unknown",
                                         "ALL_PHENOTYPES_UNKNOWN", STRATUM_EXCLUDE_REASON)) %>%
  mutate(STRATUM_EXCLUDE_REASON = ifelse(CONS == "ms" & STRATUM == "Unknown",
                                         "INSUFFICIENT_SUBJECTS", STRATUM_EXCLUDE_REASON))
################################################################################
###############    Section 3: Consortium-Level QC (Individuals)   ###############
################################################################################
# In this section, we turn our attention to the individuals that were removed in
# the QC process.
# Read manifests for original consortia:
# One row per subject, from each consortium's PLINK .fam file.
subject.table <- original.consortia %>%
  mutate(manifest_file = paste0("data/immchip/", dataset_stem, ".fam")) %>%
  mutate(manifest_data = map(manifest_file, ~ read_table2(.,
                                                          col_names = c("FID", "IID", "FATHER",
                                                                        "MOTHER", "SEX",
                                                                        "PHENOTYPE"),
                                                          col_types = "cciiii"))) %>%
  unnest() %>%
  select(-dataset_stem, -manifest_file)
# For consortia with strata, read stratum information:
stratum.data <- original.consortia %>%
  select(CONS) %>%
  mutate(stratum_file = paste0("logs/qc/", CONS, "/", CONS, ".subjects.by.stratum.txt")) %>%
  filter(file.exists(stratum_file)) %>%
  mutate(stratum_data = map(stratum_file, ~ read_table2(.,
                                                        col_names = c("FID", "IID", "STRATUM"),
                                                        col_types = "ccc"))) %>%
  unnest() %>%
  select(-stratum_file) %>%
  # Note that a few samples appear to be duplicated!
  unique()
subject.table <- subject.table %>%
  left_join(stratum.data, by = c("CONS", "FID", "IID")) %>%
  select(CONS, STRATUM, FID, IID, FATHER, MOTHER, SEX, PHENOTYPE)
rm(stratum.data)
# RA arrived in separate files for each stratum:
ra.subject.table <- cons.strata %>%
  filter(CONS == "ra") %>%
  mutate(manifest_file = paste0("data/immchip/iChip_RACI_PhaseII_", STRATUM, ".fam")) %>%
  mutate(manifest_data = map(manifest_file, ~ read_table2(.,
                                                          col_names = c("FID", "IID", "FATHER",
                                                                        "MOTHER", "SEX",
                                                                        "PHENOTYPE"),
                                                          col_types = "cciiii"))) %>%
  unnest() %>%
  select(-manifest_file)
subject.table <- bind_rows(subject.table,
                           ra.subject.table) %>%
  arrange(CONS, STRATUM, FID, IID)
rm(ra.subject.table)
# Liftover changed unknown phenotypes to -9:
# Re-read the post-liftover .ped subjects so PHENOTYPE matches downstream files.
liftover.ped <- cons.strata %>%
  select(CONS) %>%
  unique() %>%
  mutate(liftover_ped = paste0("results/qc/manifests_post_liftover/", CONS,
                               ".liftover.out.subjects")) %>%
  mutate(liftover_data = map(liftover_ped, ~ read_table2(.,
                                                         col_names = c("FID", "IID", "FATHER",
                                                                       "MOTHER", "SEX",
                                                                       "LIFTOVER_PHENO"),
                                                         col_types = "cciiii"))) %>%
  unnest() %>%
  select(-liftover_ped)
subject.table <- subject.table %>%
  left_join(liftover.ped, by = c("CONS", "FID", "IID", "FATHER", "MOTHER", "SEX")) %>%
  mutate(PHENOTYPE = ifelse(is.na(LIFTOVER_PHENO), PHENOTYPE, LIFTOVER_PHENO)) %>%
  select(-LIFTOVER_PHENO)
# Initial missingness (10%):
sub.miss.10 <- cons.strata %>%
  # Files differ depending on whether the consortium has strata:
  mutate(miss_10_file = ifelse(is.na(STRATUM),
                               paste0("logs/qc/", CONS, "/", CONS, ".sub.miss.10.removed.txt"),
                               paste0("logs/qc/", CONS, "/", CONS, ".", STRATUM,
                                      ".sub.miss.10.removed.txt"))) %>%
  # Some strata did not have subjects removed:
  filter(file.exists(miss_10_file)) %>%
  # Read removal list for each stratum:
  mutate(miss_data = map(miss_10_file, ~ read_tsv(., col_names = c("FID", "IID"),
                                                  col_types = "cc"))) %>%
  unnest() %>%
  select(-miss_10_file) %>%
  mutate(MISS_10_STATUS = "REMOVE_MISS_10")
# Sex inconsistencies to remove:
sex.remove <- cons.strata %>%
  # Files differ depending on whether the consortium has strata:
  mutate(sex_rem_file = ifelse(is.na(STRATUM),
                               paste0("logs/qc/", CONS, "/", CONS, ".sex.prob.sub.removed.txt"),
                               paste0("logs/qc/", CONS, "/", CONS, ".", STRATUM,
                                      ".sex.prob.sub.removed.txt"))) %>%
  # Some strata did not have subjects removed:
  filter(file.exists(sex_rem_file)) %>%
  # Read removal list for each stratum:
  mutate(sex_data = map(sex_rem_file, ~ read_table2(., col_names = c("FID", "IID"),
                                                    col_types = "cc"))) %>%
  unnest() %>%
  select(-sex_rem_file) %>%
  mutate(SEX_REM_STATUS = "REMOVE_SEX_INCONSISTENT")
# Sex inconsistencies to correct:
# These subjects are kept; only their SEX field is updated (new_SEX).
sex.correct <- cons.strata %>%
  # Files differ depending on whether the consortium has strata:
  mutate(sex_corr_file = ifelse(is.na(STRATUM),
                                paste0("logs/qc/", CONS, "/", CONS, ".sex.prob.sub.corrected.txt"),
                                paste0("logs/qc/", CONS, "/", CONS, ".", STRATUM,
                                       ".sex.prob.sub.corrected.txt"))) %>%
  # Some strata did not have subjects removed:
  filter(file.exists(sex_corr_file)) %>%
  # Read removal list for each stratum:
  mutate(sex_data = map(sex_corr_file, ~ read_table2(., col_names = c("FID", "IID", "new_SEX"),
                                                     col_types = "cci"))) %>%
  unnest() %>%
  select(-sex_corr_file)
# Samples that survived population outlier removal:
# (Unlike the other tables in this section, this one lists KEPT subjects.)
eur.samples <- cons.strata %>%
  mutate(european_file = ifelse(is.na(STRATUM),
                                paste0("results/qc/population_outliers/", CONS, ".europeans.fam"),
                                paste0("results/qc/population_outliers/", CONS, ".", STRATUM,
                                       ".europeans.fam"))) %>%
  # Some consortia did not survive to the population outliers stage:
  filter(file.exists(european_file)) %>%
  mutate(european_data = map(european_file, ~ read_table2(.,
                                                          col_names = c("FID", "IID", "FATHER",
                                                                        "MOTHER", "SEX", "PHENOTYPE"),
                                                          col_types = "cciiii"))) %>%
  unnest() %>%
  select(-european_file) %>%
  mutate(TYPE = "European")
# Heterozygosity outliers:
het.outliers <- cons.strata %>%
  # Files differ depending on whether the consortium has strata:
  mutate(het_file = ifelse(is.na(STRATUM),
                           paste0("logs/qc/", CONS, "/", CONS, ".het.outliers.remove.txt"),
                           paste0("logs/qc/", CONS, "/", CONS, ".", STRATUM,
                                  ".het.outliers.remove.txt"))) %>%
  # Some strata did not have subjects removed:
  filter(file.exists(het_file)) %>%
  # Read removal list for each stratum:
  mutate(het_data = map(het_file, ~ read_table2(., col_names = c("FID", "IID"),
                                                col_types = "cc"))) %>%
  unnest() %>%
  select(-het_file) %>%
  mutate(HET_REM_STATUS = "REMOVE_HETEROZYGOSITY")
# Stringent missingness (1%):
sub.miss.01 <- cons.strata %>%
  # Files differ depending on whether the consortium has strata:
  mutate(miss_01_file = ifelse(is.na(STRATUM),
                               paste0("logs/qc/", CONS, "/", CONS, ".sub.miss.01.removed.txt"),
                               paste0("logs/qc/", CONS, "/", CONS, ".", STRATUM,
                                      ".sub.miss.01.removed.txt"))) %>%
  # Some strata did not have subjects removed:
  filter(file.exists(miss_01_file)) %>%
  # Read removal list for each stratum:
  mutate(miss_data = map(miss_01_file, ~ read_tsv(., col_names = c("FID", "IID"),
                                                  col_types = "cc"))) %>%
  unnest() %>%
  select(-miss_01_file) %>%
  mutate(MISS_01_STATUS = "REMOVE_MISS_01")
# Duplicates to remove:
dups.to.remove <- cons.strata %>%
  # Files differ depending on whether the consortium has strata:
  mutate(dup_file = ifelse(is.na(STRATUM),
                           paste0("logs/qc/", CONS, "/", CONS, ".duplicates.to.remove.txt"),
                           paste0("logs/qc/", CONS, "/", CONS, ".", STRATUM,
                                  ".duplicates.to.remove.txt"))) %>%
  # Some strata did not have subjects removed:
  filter(file.exists(dup_file)) %>%
  # Read subjects to remove for each stratum:
  mutate(dup_data = map(dup_file, ~ read_table2(., col_names = c("FID", "IID"),
                                                col_types = "cc"))) %>%
  unnest() %>%
  select(-dup_file) %>%
  mutate(DUP_STATUS = "REMOVE_DUPLICATE")
# Update sample table to reflect QC results. Each left_join attaches one
# stratum-level removal list; STATUS records the FIRST applicable removal
# reason (later reasons never overwrite an existing STATUS).
subject.table <- subject.table %>%
  # Initial (10%) subject missingness:
  left_join(sub.miss.10, by = c("CONS", "STRATUM", "FID", "IID")) %>%
  rename(STATUS = MISS_10_STATUS) %>%
  # Sex inconsistencies to remove:
  left_join(sex.remove, by = c("CONS", "STRATUM", "FID", "IID")) %>%
  mutate(STATUS = ifelse(is.na(STATUS), SEX_REM_STATUS, STATUS)) %>%
  select(-SEX_REM_STATUS) %>%
  # Sex inconsistencies to correct (updates SEX in place; no removal):
  left_join(sex.correct, by = c("CONS", "STRATUM", "FID", "IID")) %>%
  mutate(SEX = ifelse(!is.na(new_SEX), new_SEX, SEX)) %>%
  select(-new_SEX) %>%
  # Population outliers: any subject absent from the European manifests is
  # flagged, EXCEPT ibd.China (which did not go through this stage):
  left_join(eur.samples, by = c("CONS", "STRATUM", "FID", "IID", "FATHER", "MOTHER", "SEX",
                                "PHENOTYPE")) %>%
  mutate(STATUS = ifelse(!(CONS == "ibd" & STRATUM == "China") & is.na(STATUS) & is.na(TYPE),
                         "REMOVE_POPULATION_OUTLIER", STATUS)) %>%
  select(-TYPE) %>%
  # Heterozygosity outliers:
  left_join(het.outliers, by = c("CONS", "STRATUM", "FID", "IID")) %>%
  mutate(STATUS = ifelse(is.na(STATUS), HET_REM_STATUS, STATUS)) %>%
  select(-HET_REM_STATUS) %>%
  # Stringent missingness:
  left_join(sub.miss.01, by = c("CONS", "STRATUM", "FID", "IID")) %>%
  mutate(STATUS = ifelse(is.na(STATUS), MISS_01_STATUS, STATUS)) %>%
  select(-MISS_01_STATUS) %>%
  # Remove duplicates:
  left_join(dups.to.remove, by = c("CONS", "STRATUM", "FID", "IID")) %>%
  mutate(STATUS = ifelse(is.na(STATUS), DUP_STATUS, STATUS)) %>%
  select(-DUP_STATUS)
# NOTE(review): liftover.ped is not defined in the visible portion of this
# script -- confirm it exists at this point, otherwise rm() will warn.
rm(sub.miss.10, sex.remove, sex.correct, eur.samples, het.outliers, sub.miss.01, dups.to.remove, liftover.ped)
# Indicate which strata were excluded:
subject.table <- subject.table %>%
  left_join(excluded.strata, by = c("CONS", "STRATUM"))
# Read final post-QC manifests (.fam files written after all sample QC):
final.sub.manifest <- cons.strata %>%
  mutate(final_manifest = ifelse(is.na(STRATUM),
                                 paste0("results/qc/final_counts/", CONS, ".all.qc.fam"),
                                 paste0("results/qc/final_counts/", CONS, ".", STRATUM,
                                        ".all.qc.fam"))) %>%
  # Not all strata survived QC:
  filter(file.exists(final_manifest)) %>%
  mutate(manifest_data = map(final_manifest, ~ read_table2(.,
                                                           col_names = c("FID", "IID", "FATHER",
                                                                         "MOTHER", "SEX",
                                                                         "PHENOTYPE"),
                                                           col_types = "cciiii"))) %>%
  unnest() %>%
  select(-final_manifest)
# Do all remaining subjects agree with the QC'd manifests (where available)?
# Sanity check: the full_join below should print zero rows when the status
# table and the on-disk manifests agree exactly.
subject.table %>%
  # Ignore subjects or strata that were removed:
  filter(!grepl("^REMOVE_", STATUS) & is.na(STRATUM_EXCLUDE_REASON)) %>%
  mutate(TABLE = "Table") %>%
  full_join(final.sub.manifest %>% mutate(MANIFEST = "Manifest"),
            by = c("CONS", "STRATUM", "FID", "IID", "FATHER", "MOTHER", "SEX", "PHENOTYPE")) %>%
  filter(is.na(TABLE) | is.na(MANIFEST))
### We have successfully accounted for subjects up to this point
rm(final.sub.manifest, excluded.strata)
# Relatives to remove. Relatedness filtering was applied after the final QC
# manifests were written, hence its position after the manifest check above.
rels.to.remove <- cons.strata %>%
  # Files differ depending on whether the consortium has strata:
  mutate(rel_file = ifelse(is.na(STRATUM),
                           paste0("logs/qc/", CONS, "/", CONS, ".relatives.to.remove.txt"),
                           paste0("logs/qc/", CONS, "/", CONS, ".", STRATUM,
                                  ".relatives.to.remove.txt"))) %>%
  # Some strata did not have subjects removed:
  filter(file.exists(rel_file)) %>%
  # Read subjects to remove for each stratum:
  mutate(rel_data = map(rel_file, ~ read_table2(., col_names = c("FID", "IID"),
                                                col_types = "cc"))) %>%
  unnest() %>%
  select(-rel_file) %>%
  mutate(REL_STATUS = "REMOVE_RELATIVE")
# Update subject table with relatives to remove:
subject.table <- subject.table %>%
  left_join(rels.to.remove, by = c("CONS", "STRATUM", "FID", "IID")) %>%
  mutate(STATUS = ifelse(is.na(STATUS), REL_STATUS, STATUS)) %>%
  select(-REL_STATUS)
rm(rels.to.remove)
################################################################################
############# Section 4: Combine Consortia and Recode Subjects #############
################################################################################
# After completing QC, we reorganized our datasets into disease-level consortia
# (i.e. SLE Genentech and SLE OMRF, and T1D and T1D_ASP were combined). We also
# recoded subjects to ensure that FID and IID are unique across all strata.
# In this section, we also remove relatives and duplicates that are shared
# across datasets.
# Define disease-level datasets:
# - sle_g + sle_o merge into "sle"; t1d + t1d_asp merge into "t1d".
# - Strata are renamed so they stay unique within each merged consortium.
new.cons.strata <- cons.strata %>%
  mutate(NEW_CONS = ifelse(CONS == "sle_g" | CONS == "sle_o", "sle",
                           ifelse(CONS == "t1d" | CONS == "t1d_asp", "t1d", CONS)),
         NEW_STRATUM = ifelse(CONS == "sle_g", paste(CONS, STRATUM, sep = "."),
                              ifelse(CONS == "sle_o", CONS,
                                     ifelse(CONS == "t1d", "GRID",
                                            ifelse(CONS == "t1d_asp", "ASP", STRATUM)))))
# Add new dataset names to SNP and subject tables:
snp.table <- snp.table %>%
  left_join(new.cons.strata, by = c("CONS", "STRATUM"))
subject.table <- subject.table %>%
  left_join(new.cons.strata, by = c("CONS", "STRATUM"))
# Read subject recoding: per merged consortium, the map from original
# (FID, IID) to globally unique (NEW_FID, NEW_IID):
new.fid.iid <- new.cons.strata %>%
  select(NEW_CONS) %>%
  unique() %>%
  mutate(recode_file = paste0("logs/manifest/", NEW_CONS, ".recoding.txt")) %>%
  filter(file.exists(recode_file)) %>%
  mutate(recode_data = map(recode_file, ~ read_table2(.,
                                                      col_names = c("FID", "IID", "NEW_FID",
                                                                    "NEW_IID"),
                                                      col_types = "cccc"))) %>%
  unnest() %>%
  select(-recode_file)
# Add recoded FID and IID:
subject.table <- subject.table %>%
  left_join(new.fid.iid, by = c("NEW_CONS", "FID", "IID"))
rm(new.fid.iid)
################################################################################
#################### Section 5: Pre-Imputation Filtering ###################
################################################################################
# In this section, we deal with SNPs that were filtered prior to imputation
# Indels that were excluded prior to imputation:
exclude.indels <- new.cons.strata %>%
  mutate(indel_file = paste0("logs/imputation/", NEW_CONS, "/", NEW_STRATUM, "/", NEW_CONS, ".",
                             NEW_STRATUM, ".indels.txt")) %>%
  filter(file.exists(indel_file)) %>%
  mutate(indel_data = map(indel_file, ~ read_table2(., col_names = c("SNP"),
                                                    col_types = "c"))) %>%
  unnest() %>%
  select(-CONS, -STRATUM, -indel_file) %>%
  mutate(INDEL_STATUS = "EXCLUDE_INDEL")
# SNPs that were excluded from imputation for MAF < 0.05:
exclude.maf.05 <- new.cons.strata %>%
  mutate(maf_file = paste0("logs/imputation/", NEW_CONS, "/", NEW_STRATUM, "/", NEW_CONS, ".",
                           NEW_STRATUM, ".maf_0.05.txt")) %>%
  filter(file.exists(maf_file)) %>%
  mutate(maf_data = map(maf_file, ~ read_table2(., col_names = c("SNP"),
                                                col_types = "c"))) %>%
  unnest() %>%
  select(-CONS, -STRATUM, -maf_file) %>%
  mutate(MAF_STATUS = "EXCLUDE_MAF_0.05")
# SNPs that were excluded for differential missingness (P < 0.00001):
exclude.diff.missing <- new.cons.strata %>%
  mutate(diff_missing_file = paste0("logs/assoc_test/", NEW_CONS, "/", NEW_CONS, ".", NEW_STRATUM,
                                    ".diff.miss.snps.to.remove.txt")) %>%
  filter(file.exists(diff_missing_file)) %>%
  mutate(diff_missing_data = map(diff_missing_file, ~ read_table2(., col_names = c("SNP"),
                                                                  col_types = "c"))) %>%
  unnest() %>%
  select(-CONS, -STRATUM, -diff_missing_file) %>%
  mutate(DIFF_MISSING_STATUS = "EXCLUDE_DIFF_MISSING_0.00001")
# Subsequent filtering is done per chromosome, so we add chromosomes to stratum
# data. The constant "dummy" column implements a cross join: every stratum is
# paired with CHR 1-22.
cons.strata.chr <- new.cons.strata %>%
  mutate(dummy = "join") %>%
  left_join(tibble(dummy = rep("join", 22),
                   CHR = 1:22),
            by = "dummy") %>%
  select(-dummy)
# SNPs that were missing from the 1,000 Genomes reference panel (per-chromosome
# logs, hence cons.strata.chr):
exclude.ref.missing <- cons.strata.chr %>%
  mutate(ref_missing_file = paste0("logs/imputation/", NEW_CONS, "/", NEW_STRATUM, "/", NEW_CONS,
                                   ".", NEW_STRATUM, ".chr_", CHR, ".1kg.missing.snps.txt")) %>%
  filter(file.exists(ref_missing_file)) %>%
  mutate(ref_missing_data = map(ref_missing_file, ~ read_table2(., col_names = c("SNP"),
                                                                col_types = "c"))) %>%
  unnest() %>%
  select(-CONS, -STRATUM, -ref_missing_file) %>%
  mutate(REF_MISSING_STATUS = "EXCLUDE_MISSING_1KG_REFERENCE")
# SNPs that disagree with 1,000 Genomes reference panel:
exclude.ref.inconsistent <- cons.strata.chr %>%
  mutate(ref_inconsistent_file = paste0("logs/imputation/", NEW_CONS, "/", NEW_STRATUM, "/", NEW_CONS,
                                        ".", NEW_STRATUM, ".chr_", CHR, ".1kg.inconsistent.snps.txt")) %>%
  filter(file.exists(ref_inconsistent_file)) %>%
  mutate(ref_inconsistent_data = map(ref_inconsistent_file, ~ read_table2(., col_names = c("SNP"),
                                                                          col_types = "c"))) %>%
  unnest() %>%
  select(-CONS, -STRATUM, -ref_inconsistent_file) %>%
  mutate(REF_INCONSISTENT_STATUS = "EXCLUDE_INCONSISTENT_1KG_REFERENCE")
# Update SNP table to reflect pre-imputation filtering. As with the subject
# table, IMPUTE_STATUS keeps the FIRST applicable exclusion reason:
snp.table <- snp.table %>%
  # Exclude non-autosomal SNPs (CHR codes 23-26 map to X, Y, XY, MT):
  left_join(tibble(
    CHR = c(23,24,25,26),
    IMPUTE_STATUS = c("EXCLUDE_CHR_X", "EXCLUDE_CHR_Y", "EXCLUDE_CHR_XY", "EXCLUDE_CHR_MT")), by = "CHR") %>%
  # Exclude indels:
  left_join(exclude.indels, by = c("NEW_CONS", "NEW_STRATUM", "SNP")) %>%
  mutate(IMPUTE_STATUS = ifelse(is.na(IMPUTE_STATUS), INDEL_STATUS, IMPUTE_STATUS)) %>%
  select(-INDEL_STATUS) %>%
  # Exclude minor allele frequency < 0.05 (note that some of these are also indels):
  left_join(exclude.maf.05, by = c("NEW_CONS", "NEW_STRATUM", "SNP")) %>%
  mutate(IMPUTE_STATUS = ifelse(is.na(IMPUTE_STATUS), MAF_STATUS, IMPUTE_STATUS)) %>%
  select(-MAF_STATUS) %>%
  # Differential missingness P < 0.00001:
  left_join(exclude.diff.missing, by = c("NEW_CONS", "NEW_STRATUM", "SNP")) %>%
  mutate(IMPUTE_STATUS = ifelse(is.na(IMPUTE_STATUS), DIFF_MISSING_STATUS, IMPUTE_STATUS)) %>%
  select(-DIFF_MISSING_STATUS) %>%
  # Exclude SNPs not found on 1,000 Genomes reference:
  left_join(exclude.ref.missing, by = c("NEW_CONS", "NEW_STRATUM", "CHR", "SNP")) %>%
  mutate(IMPUTE_STATUS = ifelse(is.na(IMPUTE_STATUS), REF_MISSING_STATUS, IMPUTE_STATUS)) %>%
  select(-REF_MISSING_STATUS) %>%
  # Exclude SNPs inconsistent with 1,000 Genomes reference:
  left_join(exclude.ref.inconsistent, by = c("NEW_CONS", "NEW_STRATUM", "CHR", "SNP")) %>%
  mutate(IMPUTE_STATUS = ifelse(is.na(IMPUTE_STATUS), REF_INCONSISTENT_STATUS, IMPUTE_STATUS)) %>%
  select(-REF_INCONSISTENT_STATUS) # %>%
  # group_by(STATUS, STRATUM_EXCLUDE_REASON, IMPUTE_STATUS) # %>%
  # summarize(n = n()) %>%
  # ungroup()
# How many indels are non-autosomal?
# exclude.indels %>%
#   left_join(snp.table, by = c("NEW_CONS", "NEW_STRATUM", "SNP")) %>%
#   filter(CHR > 22) %>%
#   group_by(CHR) %>%
#   summarize(n = n()) %>%
#   ungroup()
# How many MAF are not indels?
# anti_join(exclude.maf.05, exclude.indels, by = c("NEW_CONS", "NEW_STRATUM", "SNP")) %>% nrow()
# How many diff missing are not either indels or MAF?
# exclude.diff.missing %>%
#   anti_join(exclude.indels, by = c("NEW_CONS", "NEW_STRATUM", "SNP")) %>%
#   anti_join(exclude.maf.05, by = c("NEW_CONS", "NEW_STRATUM", "SNP")) %>% nrow()
rm(exclude.indels, exclude.maf.05, exclude.diff.missing, exclude.ref.missing, exclude.ref.inconsistent)
# Read pre-imputation manifests (all strata concatenated, gzipped):
pre.imputation.manifests <- read_table2("logs/imputation/pre.phasing.manifests.txt.gz",
                                        col_names = TRUE, col_types = "ccciicc")
# Check that the SNP table corresponds to the pre-imputation manifests
# (both anti_joins below should print zero rows):
snp.table %>%
  filter(is.na(STATUS) & is.na(STRATUM_EXCLUDE_REASON) & is.na(IMPUTE_STATUS)) %>%
  anti_join(pre.imputation.manifests, by = c("NEW_CONS" = "CONS", "NEW_STRATUM" = "STRATUM", "SNP", "CHR", "POS"))
pre.imputation.manifests %>%
  anti_join(snp.table %>%
              filter(is.na(STATUS) & is.na(STRATUM_EXCLUDE_REASON) & is.na(IMPUTE_STATUS)),
            by = c("CONS" = "NEW_CONS", "STRATUM" = "NEW_STRATUM", "SNP", "CHR", "POS")) %>%
  head()
### Yes, they agree
rm(pre.imputation.manifests)
################################################################################
################## Section 6: Second Imputation Filtering ##################
################################################################################
# After the first round of imputation, we identified a number of spurious SNPs
# and a number of SNPs that were poorly concordant with the reference when
# imputed from their neighbours. These likely interfere with the overall quality
# of the imputation, so we repeated the imputation after removing these SNPs.
# Here, we account for these variants.
# Exclude SNPs that were missing in more than 1% of individuals:
impute.missing.snps <- read_table2("logs/imputation/second.imputation.all.strata.snp.miss.txt.gz") %>%
  filter(F_MISS > 0.01) %>%
  select(CONS, STRATUM, SNP) %>%
  mutate(POSTIMPUTE_STATUS = "EXCLUDE_MISSING_0.01")
# Exclude SNPs with MAF < 5%:
impute.rare.snps <- read_table2("logs/imputation/second.imputation.all.strata.maf.txt.gz") %>%
  filter(MAF < 0.05) %>%
  select(CONS, STRATUM, SNP) %>%
  mutate(POSTIMPUTE_STATUS = "EXCLUDE_MAF_0.05")
# Exclude SNPs that exhibited extreme deviation from Hardy-Weinberg equilibrium
# (considering only the test computed in unaffecteds, TEST == "UNAFF"):
impute.hwe.snps <- read_table2("logs/imputation/second.imputation.all.strata.hwe.txt.gz") %>%
  filter(P_HWE < 0.00001 & TEST == "UNAFF") %>%
  select(CONS, STRATUM, SNP, CHR) %>%
  mutate(POSTIMPUTE_STATUS = "EXCLUDE_HWE_0.00001")
# Exclude SNPs that exhibited extreme differential missingness:
impute.diff.missing.snps <- read_table2("logs/imputation/second.imputation.all.strata.diff.miss.txt.gz") %>%
  filter(P_MISS < 0.00001) %>%
  select(CONS, STRATUM, SNP, CHR) %>%
  mutate(POSTIMPUTE_STATUS = "EXCLUDE_DIFF_MISSING_0.00001")
# Exclude SNPs that are poorly concordant with imputed genotypes when masked and
# imputed from neighbours (i.e. info_type0 > 0.8 and concord_type0 < 0.75):
impute.nonconcordant.snps <- read_table2("results/imputation/info.scores.txt.gz", col_types = "ccicciccdddiddd") %>%
  filter(type == 2 & info_type0 > 0.8 & concord_type0 < 0.75) %>%
  select(CONS = cons, STRATUM = stratum, SNP = rs_id, CHR = snp_id, POS = position) %>%
  # CHR comes from the character snp_id column; coerce to match snp.table:
  mutate(CHR = as.integer(CHR)) %>%
  mutate(POSTIMPUTE_STATUS = "EXCLUDE_IMPUTATION_CONCORDANCE")
# Exclude spurious SNPs that were identified as possibly mismapped:
impute.mismapped.snps <- read_table2("results/imputation/all.strata.mismapped.snps.txt",
                                     col_names = c("CONS", "STRATUM", "SNP")) %>%
  # Exclude these from ALL strata of an affected disease:
  select(-STRATUM) %>%
  unique() %>%
  mutate(POSTIMPUTE_STATUS = "EXCLUDE_MISMAPPED")
# Update SNP table to reflect second imputation filtering (again keeping the
# first applicable exclusion reason in IMPUTE_STATUS):
snp.table <- snp.table %>%
  # Remove SNPs missing in >1% of individuals (relatives removed):
  left_join(impute.missing.snps, by = c("NEW_CONS" = "CONS", "NEW_STRATUM" = "STRATUM", "SNP")) %>%
  mutate(IMPUTE_STATUS = ifelse(is.na(IMPUTE_STATUS), POSTIMPUTE_STATUS, IMPUTE_STATUS)) %>%
  select(-POSTIMPUTE_STATUS) %>%
  # Remove SNPs with MAF <5% (relatives removed):
  left_join(impute.rare.snps, by = c("NEW_CONS" = "CONS", "NEW_STRATUM" = "STRATUM", "SNP")) %>%
  mutate(IMPUTE_STATUS = ifelse(is.na(IMPUTE_STATUS), POSTIMPUTE_STATUS, IMPUTE_STATUS)) %>%
  select(-POSTIMPUTE_STATUS) %>%
  # Remove SNPs that exhibited extreme Hardy-Weinberg disequilibrium (relatives removed):
  left_join(impute.hwe.snps, by = c("NEW_CONS" = "CONS", "NEW_STRATUM" = "STRATUM", "SNP", "CHR")) %>%
  mutate(IMPUTE_STATUS = ifelse(is.na(IMPUTE_STATUS), POSTIMPUTE_STATUS, IMPUTE_STATUS)) %>%
  select(-POSTIMPUTE_STATUS) %>%
  # Remove extreme differentially missing SNPs:
  left_join(impute.diff.missing.snps, by = c("NEW_CONS" = "CONS", "NEW_STRATUM" = "STRATUM", "SNP", "CHR")) %>%
  mutate(IMPUTE_STATUS = ifelse(is.na(IMPUTE_STATUS), POSTIMPUTE_STATUS, IMPUTE_STATUS)) %>%
  select(-POSTIMPUTE_STATUS) %>%
  # Remove SNPs that were not concordant when imputed:
  left_join(impute.nonconcordant.snps, by = c("NEW_CONS" = "CONS", "NEW_STRATUM" = "STRATUM", "SNP", "CHR", "POS")) %>%
  mutate(IMPUTE_STATUS = ifelse(is.na(IMPUTE_STATUS), POSTIMPUTE_STATUS, IMPUTE_STATUS)) %>%
  select(-POSTIMPUTE_STATUS) %>%
  # Remove SNPs that were flagged as possibly mismapped
  # (joined by CONS only, so they drop from every stratum of the disease):
  left_join(impute.mismapped.snps, by = c("NEW_CONS" = "CONS", "SNP")) %>%
  mutate(IMPUTE_STATUS = ifelse(is.na(IMPUTE_STATUS), POSTIMPUTE_STATUS, IMPUTE_STATUS)) %>%
  select(-POSTIMPUTE_STATUS) # %>%
  # group_by(IMPUTE_STATUS, POSTIMPUTE_STATUS) %>%
  # summarise(n = n()) %>% data.frame()
rm(impute.missing.snps, impute.rare.snps, impute.hwe.snps, impute.diff.missing.snps, impute.nonconcordant.snps, impute.mismapped.snps)
# Read final imputation manifests:
second.imputation.manifest <- read_table2("logs/imputation/second.imputation.manifests.txt.gz", col_types = "cccii")
### Check that SNP table agrees with preimputation manifests
### (both anti_joins below should print zero rows):
snp.table %>%
  filter(is.na(STATUS) & is.na(STRATUM_EXCLUDE_REASON) & is.na(IMPUTE_STATUS)) %>%
  anti_join(second.imputation.manifest, by = c("NEW_CONS" = "CONS", "NEW_STRATUM" = "STRATUM", "SNP", "CHR", "POS" = "BP"))
second.imputation.manifest %>%
  anti_join(snp.table %>%
              filter(is.na(STATUS) & is.na(STRATUM_EXCLUDE_REASON) & is.na(IMPUTE_STATUS)),
            by = c("CONS" = "NEW_CONS", "STRATUM" = "NEW_STRATUM", "SNP", "CHR", "BP" = "POS"))
rm(second.imputation.manifest)
# Write SNP and sample tables to file:
write_tsv(snp.table, "logs/snp.status.table.txt.gz")
write_tsv(subject.table, "logs/subject.status.table.txt.gz")
### Still need to deal with trans-consortium duplicates (?between every pair
### of traits?)
|
/src/account.snp.sample.fate.R
|
no_license
|
cotsapaslab/CrossDiseaseImmunochip
|
R
| false
| false
| 46,917
|
r
|
#!/bin/Rscript
# account.snp.sample.fate.R
#
# This script accounts for the fate of each SNP and sample through the QC
# process.
library(dplyr)
library(readr)
library(purrr)
library(tidyr)
################################################################################
################ Section 1: Manifest Resolution and Liftover ###############
################################################################################
# The original ImmunoChip manifests that we received contain multiple types of
# conflicts--SNPs mapped to multiple positions across manifests, positions
# mapped to multiple SNPs, and names varied for the same SNP.
#
# Note that we did not have RA data when we performed the initial manifest
# resolution. RA is subsequently coerced to be consistent with the consensus
# derived from the original 7 datasets.
#
# The datasets that we received were mapped to hg18. In this section, we also
# account for liftover failures.
# Read manifests for original consortia:
# Map each original consortium code to the file stem of its raw dataset.
dataset.stems <- c(ced     = "CeD_phen",
                   ibd     = "ibdrelease5_QCI",
                   ms      = "MS",
                   sle_g   = "Genentech_phenos",
                   sle_o   = "OMRF_all_chr_phenos",
                   t1d     = "UK",
                   t1d_asp = "ASP")
original.consortia <- tibble(CONS = names(dataset.stems),
                             dataset_stem = unname(dataset.stems))
rm(dataset.stems)
# Build the master SNP table from each consortium's original .bim manifest
# (one row per SNP per consortium; centimorgan column dropped):
snp.table <- original.consortia %>%
  mutate(manifest_file = paste0("data/immchip/", dataset_stem, ".bim")) %>%
  mutate(manifest_data = map(manifest_file, ~ read_tsv(.,
                                                       col_names = c("MANIFEST_CHR", "MANIFEST_SNP",
                                                                     "CM", "MANIFEST_POS",
                                                                     "MANIFEST_A1", "MANIFEST_A2"),
                                                       col_types = "icdicc"))) %>%
  unnest() %>%
  select(-dataset_stem, -manifest_file, -CM)
# Read SNPs to remap, rename and remove (outputs of manifest resolution; one
# file of each kind per consortium):
snps.to.remap <- original.consortia %>%
  mutate(remap_file = paste0("logs/manifest/", CONS, ".snp.newpos.txt")) %>%
  mutate(remap_data = map(remap_file, ~ read_tsv(.,
                                                 col_names = c("SNP", "CHR_new", "POS_new"),
                                                 col_types = "cii"))) %>%
  unnest() %>%
  select(-dataset_stem, -remap_file)
snps.to.rename <- original.consortia %>%
  mutate(rename_file = paste0("logs/manifest/", CONS, ".snp.rename.txt")) %>%
  mutate(rename_data = map(rename_file, ~ read_tsv(.,
                                                   col_names = c("SNP_old", "SNP_new"),
                                                   col_types = "cc"))) %>%
  unnest() %>%
  select(-dataset_stem, -rename_file)
snps.to.remove <- original.consortia %>%
  mutate(remove_file = paste0("logs/manifest/", CONS, ".snp.remove.txt")) %>%
  mutate(remove_data = map(remove_file, ~ read_tsv(.,
                                                   col_names = c("SNP"),
                                                   col_types = "c"))) %>%
  unnest() %>%
  select(-dataset_stem, -remove_file) %>%
  mutate(STATUS = "REMOVE_MANIFEST_INCONSISTENT")
# Update original consortia to reflect manifest resolution. CHR/POS hold the
# resolved coordinates; SNP holds the resolved name (NA for removed SNPs):
snp.table <- snp.table %>%
  select(CONS, MANIFEST_SNP, MANIFEST_CHR, MANIFEST_POS, MANIFEST_A1, MANIFEST_A2) %>%
  # Resolve multimapping SNPs:
  left_join(snps.to.remap, by = c("CONS", "MANIFEST_SNP" = "SNP")) %>%
  mutate(CHR = ifelse(is.na(CHR_new), MANIFEST_CHR, CHR_new),
         POS = ifelse(is.na(POS_new), MANIFEST_POS, POS_new)) %>%
  # Remove redundant SNPs:
  left_join(snps.to.remove, by = c("CONS", "MANIFEST_SNP" = "SNP")) %>%
  # Rename remaining SNPs:
  left_join(snps.to.rename, by = c("CONS", "MANIFEST_SNP" = "SNP_old")) %>%
  # Do not assign new names to SNPs that have been removed:
  mutate(SNP = ifelse(!is.na(STATUS), NA,
                      ifelse(!is.na(SNP_new), SNP_new, MANIFEST_SNP))) %>%
  # group_by(STATUS, is.na(SNP)) %>% summarize(n = n()) %>% ungroup()
  # filter(CONS == "ced" & MANIFEST_CHR == 1 & MANIFEST_POS == 159785154)
  # mutate(SNP = ifelse(!is.na(SNP_new), SNP_new, MANIFEST_SNP)) %>%
  select(CONS, MANIFEST_SNP, MANIFEST_CHR, MANIFEST_POS, MANIFEST_A1, MANIFEST_A2, SNP, CHR, POS,
         STATUS)
rm(snps.to.remap, snps.to.remove, snps.to.rename)
# Expand SNP table to include all strata for original consortia.
# Enumerate every (consortium, stratum) combination in the study; consortia
# without population strata (sle_o, t1d, t1d_asp) get a single NA stratum.
strata.by.cons <- list(
  ced = c("British", "Dutch", "Gosias_mystery", "Indian", "Italian", "Polish",
          "Romanian", "Spanish", "Unknown"),
  ibd = c("Australia", "Belgium", "China", "Denmark", "Germany", "IMSGC",
          "Iran", "Italy", "Lithuania-Baltic", "Netherlands", "New_Zealand",
          "Norway", "Slovenia", "Spain", "Sweden", "UK", "Unknown",
          "USA-Canada"),
  ms = c("AUSNZ", "Belgium", "Denmark", "Finland", "France", "Germany",
         "Italy", "Norway", "Sweden", "UK", "Unknown", "US"),
  ra = c("ES", "NL", "SE-E", "SE-U", "UK", "US"),
  sle_g = c("AA", "EA", "Others"),
  sle_o = NA_character_,
  t1d = NA_character_,
  t1d_asp = NA_character_
)
cons.strata <- tibble(CONS = rep(names(strata.by.cons), lengths(strata.by.cons)),
                      STRATUM = unname(unlist(strata.by.cons)))
rm(strata.by.cons)
# Replicate the resolved SNP table across every stratum of each consortium:
snp.table <- cons.strata %>%
  # Exclude RA (QC'd separately) and ms.Unknown (was not lifted over):
  filter(CONS != "ra" & !(CONS == "ms" & STRATUM == "Unknown")) %>%
  left_join(snp.table, by = "CONS")
# The ms.Unknown stratum was not subjected to manifest harmonization or
# liftover, so its SNP/CHR/POS come straight from the raw manifest:
ms.Unknown <- read_tsv("data/immchip/MS.bim",
                       col_names = c("MANIFEST_CHR", "MANIFEST_SNP", "CM", "MANIFEST_POS",
                                     "MANIFEST_A1", "MANIFEST_A2"),
                       col_types = "icdicc") %>%
  mutate(CONS = "ms",
         STRATUM = "Unknown") %>%
  select(CONS, STRATUM, MANIFEST_SNP, MANIFEST_CHR, MANIFEST_POS, MANIFEST_A1, MANIFEST_A2) %>%
  mutate(SNP = MANIFEST_SNP, CHR = MANIFEST_CHR, POS = MANIFEST_POS)
snp.table <- bind_rows(snp.table,
                       ms.Unknown) %>%
  arrange(CONS, STRATUM, CHR, POS)
rm(ms.Unknown)
# RA was added later; it arrived with separate files for each stratum, so its
# manifests are read per stratum rather than per consortium:
ra.snp.table <- tibble(CONS = c("ra", "ra", "ra", "ra", "ra", "ra"),
                       STRATUM = c("ES", "NL", "SE-E", "SE-U", "UK", "US")) %>%
  mutate(manifest_file = paste0("data/immchip/iChip_RACI_PhaseII_", STRATUM, ".bim")) %>%
  mutate(manifest_data = map(manifest_file, ~ read_tsv(.,
                                                       col_names = c("MANIFEST_CHR", "MANIFEST_SNP",
                                                                     "CM", "MANIFEST_POS",
                                                                     "MANIFEST_A1", "MANIFEST_A2"),
                                                       col_types = "icdicc"))) %>%
  unnest() %>%
  select(-manifest_file, -CM)
# RA manifest resolution was done later, with outputs stored in QC directory.
# (Cleanup: dropped the redundant single-argument paste0() wrappers around
# these literal file paths.)
ra.snps.to.remap <- read_tsv("logs/qc/ra/ra.snp.newpos.txt",
                             col_names = c("SNP", "CHR_new", "POS_new"),
                             col_types = "cii")
ra.snps.to.rename <- read_tsv("logs/qc/ra/ra.snp.rename.txt",
                              col_names = c("SNP_old", "SNP_new"),
                              col_types = "cc")
### Note that there are no RA SNPs to remove
# Update RA dataset to reflect manifest resolution (same procedure as the
# original consortia above, but there are no SNPs to remove):
ra.snp.table <- ra.snp.table %>%
  select(CONS, STRATUM, MANIFEST_SNP, MANIFEST_CHR, MANIFEST_POS, MANIFEST_A1, MANIFEST_A2) %>%
  # Resolve multimapping SNPs:
  left_join(ra.snps.to.remap, by = c("MANIFEST_SNP" = "SNP")) %>%
  mutate(CHR = ifelse(is.na(CHR_new), MANIFEST_CHR, CHR_new),
         POS = ifelse(is.na(POS_new), MANIFEST_POS, POS_new)) %>%
  # Rename SNPs:
  left_join(ra.snps.to.rename, by = c("MANIFEST_SNP" = "SNP_old")) %>%
  mutate(SNP = ifelse(!is.na(SNP_new), SNP_new, MANIFEST_SNP)) %>%
  select(CONS, STRATUM, MANIFEST_SNP, MANIFEST_CHR, MANIFEST_POS, MANIFEST_A1, MANIFEST_A2, SNP,
         CHR, POS)
rm(ra.snps.to.remap, ra.snps.to.rename)
# Combine RA dataset with the other consortia:
snp.table <- bind_rows(snp.table,
                       ra.snp.table)
rm(ra.snp.table)
### We now deal with liftover failures for each consortium. Note that the
### Unknown MS stratum was not subjected to liftover. So we temporarily set its
### SNP names to NA (so no errors are recorded). We will then set them back to
### the original manifest names (since these were not renamed, either).
# Read SNPs that failed liftover. RA's log lives in its QC directory.
# (Cleanup: dropped the redundant single-argument paste0() on the RA branch.)
liftover.failures <- cons.strata %>%
  select(CONS) %>%
  unique() %>%
  mutate(liftover_file = ifelse(CONS == "ra",
                                "logs/qc/ra/ra.liftover.hg19.removed.txt",
                                paste0("logs/manifest/", CONS, ".liftover.hg19.removed.txt"))) %>%
  mutate(liftover_data = map(liftover_file, ~ read_tsv(.,
                                                       col_names = c("SNP"),
                                                       col_types = "c"))) %>%
  unnest() %>%
  select(-liftover_file) %>%
  mutate(LIFT_STATUS = "REMOVE_LIFTOVER_FAIL")
# Update SNP table to incorporate liftover failures. ms.Unknown never went
# through liftover, so its SNP names are temporarily NA'd (no failure can
# match them) and restored from the manifest afterwards:
snp.table <- snp.table %>%
  mutate(SNP = ifelse((CONS == "ms" & STRATUM == "Unknown"), NA, SNP),
         CHR = ifelse((CONS == "ms" & STRATUM == "Unknown"), MANIFEST_CHR, CHR),
         POS = ifelse((CONS == "ms" & STRATUM == "Unknown"), MANIFEST_POS, POS)) %>%
  left_join(liftover.failures, by = c("CONS", "SNP")) %>%
  mutate(STATUS = ifelse(is.na(STATUS), LIFT_STATUS, STATUS)) %>%
  select(-LIFT_STATUS) %>%
  mutate(SNP = ifelse((CONS == "ms" & STRATUM == "Unknown"), MANIFEST_SNP, SNP))
rm(liftover.failures)
# Read liftover results: hg19 coordinates for successfully lifted SNPs.
# (Cleanup: dropped the redundant single-argument paste0() on the RA branch.)
liftover.coords <- cons.strata %>%
  select(CONS) %>%
  unique() %>%
  mutate(liftover_file = ifelse(CONS == "ra",
                                "logs/qc/ra/ra.liftover.out.map",
                                paste0("logs/manifest/", CONS, ".liftover.out.map"))) %>%
  mutate(liftover_data = map(liftover_file, ~ read_tsv(.,
                                                       col_names = c("new_CHR", "SNP", "CM",
                                                                     "new_POS"),
                                                       col_types = "icdi"))) %>%
  unnest() %>%
  select(-liftover_file)
# Check that the number of SNPs agrees:
# liftover.coords %>%
#   group_by(CONS) %>%
#   summarize(n = n()) %>%
#   ungroup()
# snp.table %>%
#   filter(is.na(STATUS)) %>%
#   filter(!(CONS == "ms" & STRATUM == "Unknown")) %>%
#   select(CONS, MANIFEST_SNP, MANIFEST_CHR, MANIFEST_POS) %>%
#   unique() %>%
#   group_by(CONS) %>%
#   summarize(n = n()) %>%
#   ungroup()
# Update coordinates for liftover (ms.Unknown keeps its original coordinates):
snp.table <- snp.table %>%
  left_join(liftover.coords, by = c("CONS", "SNP")) %>%
  # Adjust positions for SNPs that were lifted over:
  mutate(CHR = ifelse(is.na(STATUS) & !(CONS == "ms" & STRATUM == "Unknown"), new_CHR, CHR),
         POS = ifelse(is.na(STATUS) & !(CONS == "ms" & STRATUM == "Unknown"), new_POS, POS)) %>%
  select(-new_CHR, -CM, -new_POS)
rm(liftover.coords)
################################################################################
################### Section 2: Consortium-Level QC (SNPs) ##################
################################################################################
# QC was performed on each consortium independently. In this section, we
# identify SNPs that were filtered for missingness or violation of
# Hardy-Weinberg equilibrium.
# Lenient (5%) SNP missingness removals:
snp.miss.05 <- cons.strata %>%
  # Files differ depending on whether the consortium has strata:
  mutate(miss_05_file = ifelse(is.na(STRATUM),
                               paste0("logs/qc/", CONS, "/", CONS, ".snp.miss.05.removed.txt"),
                               paste0("logs/qc/", CONS, "/", CONS, ".", STRATUM,
                                      ".snp.miss.05.removed.txt"))) %>%
  # Some strata did not have SNPs removed:
  filter(file.exists(miss_05_file)) %>%
  # Read SNP list for each stratum:
  mutate(miss_data = map(miss_05_file, ~ read_tsv(., col_names = "SNP", col_types = "c"))) %>%
  unnest() %>%
  select(-miss_05_file) %>%
  mutate(MISS_05_STATUS = "REMOVE_MISS_05")
# Lenient Hardy-Weinberg removals:
snp.hwe.initial <- cons.strata %>%
  # Files differ depending on whether the consortium has strata:
  mutate(hwe_initial_file = ifelse(is.na(STRATUM),
                                   paste0("logs/qc/", CONS, "/", CONS,
                                          ".hwe.snps.removed.lenient.txt"),
                                   paste0("logs/qc/", CONS, "/", CONS, ".", STRATUM,
                                          ".hwe.snps.removed.lenient.txt"))) %>%
  # Some strata did not have SNPs removed:
  filter(file.exists(hwe_initial_file)) %>%
  # Read SNP list for each stratum:
  mutate(hwe_data = map(hwe_initial_file, ~ read_table2(., col_names = "SNP", col_types = "c"))) %>%
  unnest() %>%
  select(-hwe_initial_file) %>%
  mutate(HWE_INITIAL_STATUS = "REMOVE_HWE_INITIAL")
### Note that sle_g.Others has A TONNE of HWE failures. There is initial white
### space on each line, so we use read_table2 to parse correctly.
# Strict (1%) SNP missingness removals:
snp.miss.01 <- cons.strata %>%
  # Files differ depending on whether the consortium has strata:
  mutate(miss_01_file = ifelse(is.na(STRATUM),
                               paste0("logs/qc/", CONS, "/", CONS, ".snp.miss.01.removed.txt"),
                               paste0("logs/qc/", CONS, "/", CONS, ".", STRATUM,
                                      ".snp.miss.01.removed.txt"))) %>%
  # Some strata did not have SNPs removed:
  filter(file.exists(miss_01_file)) %>%
  # Read SNP list for each stratum:
  mutate(miss_data = map(miss_01_file, ~ read_tsv(., col_names = "SNP", col_types = "c"))) %>%
  unnest() %>%
  select(-miss_01_file) %>%
  mutate(MISS_01_STATUS = "REMOVE_MISS_01")
# Strict Hardy-Weinberg removals:
snp.hwe.strict <- cons.strata %>%
  # Files differ depending on whether the consortium has strata:
  mutate(hwe_strict_file = ifelse(is.na(STRATUM),
                                  paste0("logs/qc/", CONS, "/", CONS,
                                         ".hwe.snps.removed.strict.txt"),
                                  paste0("logs/qc/", CONS, "/", CONS, ".", STRATUM,
                                         ".hwe.snps.removed.strict.txt"))) %>%
  # Some strata did not have SNPs removed:
  filter(file.exists(hwe_strict_file)) %>%
  # Read SNP list for each stratum:
  mutate(hwe_data = map(hwe_strict_file, ~ read_tsv(., col_names = "SNP", col_types = "c"))) %>%
  unnest() %>%
  select(-hwe_strict_file) %>%
  mutate(HWE_STRICT_STATUS = "REMOVE_HWE_STRICT")
# Update SNP table to reflect QC results. STATUS keeps the FIRST applicable
# removal reason; later joins never overwrite an existing STATUS:
snp.table <- snp.table %>%
  # Initial (5%) SNP missingness:
  left_join(snp.miss.05, by = c("CONS", "STRATUM", "SNP")) %>%
  # filter(!is.na(STATUS) & !is.na(MISS_05_STATUS)) %>%
  mutate(STATUS = ifelse(is.na(STATUS), MISS_05_STATUS, STATUS)) %>%
  select(-MISS_05_STATUS) %>%
  # Lenient (0.00000001) HWE:
  left_join(snp.hwe.initial, by = c("CONS", "STRATUM", "SNP")) %>%
  mutate(STATUS = ifelse(is.na(STATUS), HWE_INITIAL_STATUS, STATUS)) %>%
  select(-HWE_INITIAL_STATUS) %>%
  # Strict (1%) SNP missingness:
  left_join(snp.miss.01, by = c("CONS", "STRATUM", "SNP")) %>%
  mutate(STATUS = ifelse(is.na(STATUS), MISS_01_STATUS, STATUS)) %>%
  select(-MISS_01_STATUS) %>%
  # Strict (0.00001) HWE:
  left_join(snp.hwe.strict, by = c("CONS", "STRATUM", "SNP")) %>%
  mutate(STATUS = ifelse(is.na(STATUS), HWE_STRICT_STATUS, STATUS)) %>%
  select(-HWE_STRICT_STATUS)
rm(snp.miss.05, snp.hwe.initial, snp.miss.01, snp.hwe.strict)
# A number of strata were excluded from analysis in their entirety. (The
# ced.Unknown and ms.Unknown strata are handled separately further below.)
excluded.strata <- tibble(
  CONS    = c("ced",    "ibd",   "ibd",  "sle_g", "sle_g"),
  STRATUM = c("Indian", "China", "Iran", "AA",    "Others"),
  STRATUM_EXCLUDE_REASON = c(
    "INSUFFICIENT_SUBJECTS_AFTER_OUTLIER_REMOVAL",  # ced.Indian
    "INSUFFICIENT_SUBJECTS",                        # ibd.China
    "INSUFFICIENT_SUBJECTS_AFTER_OUTLIER_REMOVAL",  # ibd.Iran
    "INSUFFICIENT_SUBJECTS_AFTER_OUTLIER_REMOVAL",  # sle_g.AA
    "INSUFFICIENT_SUBJECTS_AFTER_OUTLIER_REMOVAL"   # sle_g.Others
  )
)
snp.table <- snp.table %>%
left_join(excluded.strata, by = c("CONS", "STRATUM"))
# Read final post-QC manifests:
# One PLINK .bim per surviving consortium/stratum, combined into one long table.
final.snp.manifest <- cons.strata %>%
  mutate(final_manifest = ifelse(is.na(STRATUM),
                                 paste0("results/qc/final_counts/", CONS, ".all.qc.bim"),
                                 paste0("results/qc/final_counts/", CONS, ".", STRATUM,
                                        ".all.qc.bim"))) %>%
  # Not all strata survived QC:
  filter(file.exists(final_manifest)) %>%
  mutate(manifest_data = map(final_manifest, ~ read_tsv(.,
                                                        col_names = c("CHR", "SNP", "CM", "POS",
                                                                      "A1", "A2"),
                                                        col_types = "icdicc"))) %>%
  unnest() %>%
  select(-final_manifest)
# Do all remaining SNPs agree with the QC'd manifests (where available)?
# Manifest rows with no surviving counterpart in snp.table (expect zero rows):
final.snp.manifest %>%
  anti_join(snp.table %>% filter(!grepl("^REMOVE_", STATUS) & is.na(STRATUM_EXCLUDE_REASON)),
            by = c("CONS", "STRATUM", "SNP", "CHR", "POS"))
# Surviving snp.table rows with no manifest counterpart (expect zero rows):
snp.table %>%
  filter(!grepl("^REMOVE_", STATUS) & is.na(STRATUM_EXCLUDE_REASON)) %>%
  anti_join(final.snp.manifest, by = c("CONS", "STRATUM", "SNP", "CHR", "POS"))
# Same comparison as a full join, keeping only rows present on one side.
# NOTE(review): MANIFEST_SNP is not created in this chunk — presumably a
# column added to snp.table above the visible region; confirm.
snp.table %>%
  # Ignore SNPs or strata that were removed:
  filter(!grepl("^REMOVE_", STATUS) & is.na(STRATUM_EXCLUDE_REASON)) %>%
  # anti_join(final.snp.manifest, by = c("CONS", "STRATUM", "SNP", "CHR", "POS"))
  full_join(final.snp.manifest, by = c("CONS", "STRATUM", "SNP", "CHR", "POS")) %>%
  filter(is.na(MANIFEST_SNP) | is.na(CM))
### We have successfully accounted for SNPs up to this point
rm(final.snp.manifest)
# ced.Unknown and ms.Unknown strata were excluded from subsequent analysis.
# NOTE: the conditions are guarded with !is.na(STRATUM) because ifelse()
# returns NA — not the `no` value — when its test is NA. Without the guard,
# every row from an unstratified consortium (STRATUM == NA) would have its
# existing STRATUM_EXCLUDE_REASON silently overwritten with NA.
snp.table <- snp.table %>%
  mutate(STRATUM_EXCLUDE_REASON = ifelse(!is.na(STRATUM) & CONS == "ced" & STRATUM == "Unknown",
                                         "ALL_PHENOTYPES_UNKNOWN", STRATUM_EXCLUDE_REASON)) %>%
  mutate(STRATUM_EXCLUDE_REASON = ifelse(!is.na(STRATUM) & CONS == "ms" & STRATUM == "Unknown",
                                         "INSUFFICIENT_SUBJECTS", STRATUM_EXCLUDE_REASON))
################################################################################
############### Section 3: Consortium-Level QC (Individuals) ###############
################################################################################
# In this section, we turn our attention to the individuals that were removed in
# the QC process.
# Read manifests for original consortia:
# One PLINK .fam per original consortium, combined into one long table.
subject.table <- original.consortia %>%
  mutate(manifest_file = paste0("data/immchip/", dataset_stem, ".fam")) %>%
  mutate(manifest_data = map(manifest_file, ~ read_table2(.,
                                                          col_names = c("FID", "IID", "FATHER",
                                                                        "MOTHER", "SEX",
                                                                        "PHENOTYPE"),
                                                          col_types = "cciiii"))) %>%
  unnest() %>%
  select(-dataset_stem, -manifest_file)
# For consortia with strata, read stratum information:
stratum.data <- original.consortia %>%
  select(CONS) %>%
  mutate(stratum_file = paste0("logs/qc/", CONS, "/", CONS, ".subjects.by.stratum.txt")) %>%
  filter(file.exists(stratum_file)) %>%
  mutate(stratum_data = map(stratum_file, ~ read_table2(.,
                                                        col_names = c("FID", "IID", "STRATUM"),
                                                        col_types = "ccc"))) %>%
  unnest() %>%
  select(-stratum_file) %>%
  # Note that a few samples appear to be duplicated!
  # unique() collapses those duplicated rows so the join below does not fan out.
  unique()
# Attach the stratum label (NA for unstratified consortia) and fix column order:
subject.table <- subject.table %>%
  left_join(stratum.data, by = c("CONS", "FID", "IID")) %>%
  select(CONS, STRATUM, FID, IID, FATHER, MOTHER, SEX, PHENOTYPE)
rm(stratum.data)
# RA arrived in separate files for each stratum:
ra.subject.table <- cons.strata %>%
  filter(CONS == "ra") %>%
  mutate(manifest_file = paste0("data/immchip/iChip_RACI_PhaseII_", STRATUM, ".fam")) %>%
  mutate(manifest_data = map(manifest_file, ~ read_table2(.,
                                                          col_names = c("FID", "IID", "FATHER",
                                                                        "MOTHER", "SEX",
                                                                        "PHENOTYPE"),
                                                          col_types = "cciiii"))) %>%
  unnest() %>%
  select(-manifest_file)
# Append RA subjects to the main subject table:
subject.table <- bind_rows(subject.table,
                           ra.subject.table) %>%
  arrange(CONS, STRATUM, FID, IID)
rm(ra.subject.table)
# Liftover changed unknown phenotypes to -9:
# Read the post-liftover manifests so we can pick up the updated phenotypes.
liftover.ped <- cons.strata %>%
  select(CONS) %>%
  unique() %>%
  mutate(liftover_ped = paste0("results/qc/manifests_post_liftover/", CONS,
                               ".liftover.out.subjects")) %>%
  mutate(liftover_data = map(liftover_ped, ~ read_table2(.,
                                                         col_names = c("FID", "IID", "FATHER",
                                                                       "MOTHER", "SEX",
                                                                       "LIFTOVER_PHENO"),
                                                         col_types = "cciiii"))) %>%
  unnest() %>%
  select(-liftover_ped)
# Prefer the liftover phenotype wherever one was recorded:
subject.table <- subject.table %>%
  left_join(liftover.ped, by = c("CONS", "FID", "IID", "FATHER", "MOTHER", "SEX")) %>%
  mutate(PHENOTYPE = ifelse(is.na(LIFTOVER_PHENO), PHENOTYPE, LIFTOVER_PHENO)) %>%
  select(-LIFTOVER_PHENO)
# Initial missingness (10%):
# (Each loader below follows the same pattern: build the per-consortium or
# per-stratum log path, keep strata whose log exists, read the log into a
# nested list-column, then unnest and tag with a status label.)
sub.miss.10 <- cons.strata %>%
  # Files differ depending on whether the consortium has strata:
  mutate(miss_10_file = ifelse(is.na(STRATUM),
                               paste0("logs/qc/", CONS, "/", CONS, ".sub.miss.10.removed.txt"),
                               paste0("logs/qc/", CONS, "/", CONS, ".", STRATUM,
                                      ".sub.miss.10.removed.txt"))) %>%
  # Some strata did not have subjects removed:
  filter(file.exists(miss_10_file)) %>%
  # Read removal list for each stratum:
  mutate(miss_data = map(miss_10_file, ~ read_tsv(., col_names = c("FID", "IID"),
                                                  col_types = "cc"))) %>%
  unnest() %>%
  select(-miss_10_file) %>%
  mutate(MISS_10_STATUS = "REMOVE_MISS_10")
# Sex inconsistencies to remove:
sex.remove <- cons.strata %>%
  # Files differ depending on whether the consortium has strata:
  mutate(sex_rem_file = ifelse(is.na(STRATUM),
                               paste0("logs/qc/", CONS, "/", CONS, ".sex.prob.sub.removed.txt"),
                               paste0("logs/qc/", CONS, "/", CONS, ".", STRATUM,
                                      ".sex.prob.sub.removed.txt"))) %>%
  # Some strata did not have subjects removed:
  filter(file.exists(sex_rem_file)) %>%
  # Read removal list for each stratum:
  mutate(sex_data = map(sex_rem_file, ~ read_table2(., col_names = c("FID", "IID"),
                                                    col_types = "cc"))) %>%
  unnest() %>%
  select(-sex_rem_file) %>%
  mutate(SEX_REM_STATUS = "REMOVE_SEX_INCONSISTENT")
# Sex inconsistencies to correct:
# (No status label here: these subjects are KEPT, with SEX updated later.)
sex.correct <- cons.strata %>%
  # Files differ depending on whether the consortium has strata:
  mutate(sex_corr_file = ifelse(is.na(STRATUM),
                                paste0("logs/qc/", CONS, "/", CONS, ".sex.prob.sub.corrected.txt"),
                                paste0("logs/qc/", CONS, "/", CONS, ".", STRATUM,
                                       ".sex.prob.sub.corrected.txt"))) %>%
  # Some strata did not have subjects corrected:
  filter(file.exists(sex_corr_file)) %>%
  # Read correction list (FID, IID, corrected sex) for each stratum:
  mutate(sex_data = map(sex_corr_file, ~ read_table2(., col_names = c("FID", "IID", "new_SEX"),
                                                     col_types = "cci"))) %>%
  unnest() %>%
  select(-sex_corr_file)
# Samples that survived population outlier removal:
eur.samples <- cons.strata %>%
  mutate(european_file = ifelse(is.na(STRATUM),
                                paste0("results/qc/population_outliers/", CONS, ".europeans.fam"),
                                paste0("results/qc/population_outliers/", CONS, ".", STRATUM,
                                       ".europeans.fam"))) %>%
  # Some consortia did not survive to the population outliers stage:
  filter(file.exists(european_file)) %>%
  mutate(european_data = map(european_file, ~ read_table2(.,
                                                          col_names = c("FID", "IID", "FATHER",
                                                                        "MOTHER", "SEX", "PHENOTYPE"),
                                                          col_types = "cciiii"))) %>%
  unnest() %>%
  select(-european_file) %>%
  # TYPE marks subjects present in the post-outlier-removal sample lists:
  mutate(TYPE = "European")
# Heterozygosity outliers:
het.outliers <- cons.strata %>%
  # Files differ depending on whether the consortium has strata:
  mutate(het_file = ifelse(is.na(STRATUM),
                           paste0("logs/qc/", CONS, "/", CONS, ".het.outliers.remove.txt"),
                           paste0("logs/qc/", CONS, "/", CONS, ".", STRATUM,
                                  ".het.outliers.remove.txt"))) %>%
  # Some strata did not have subjects removed:
  filter(file.exists(het_file)) %>%
  # Read removal list for each stratum:
  mutate(het_data = map(het_file, ~ read_table2(., col_names = c("FID", "IID"),
                                                col_types = "cc"))) %>%
  unnest() %>%
  select(-het_file) %>%
  mutate(HET_REM_STATUS = "REMOVE_HETEROZYGOSITY")
# Stringent missingness (1%):
sub.miss.01 <- cons.strata %>%
  # Files differ depending on whether the consortium has strata:
  mutate(miss_01_file = ifelse(is.na(STRATUM),
                               paste0("logs/qc/", CONS, "/", CONS, ".sub.miss.01.removed.txt"),
                               paste0("logs/qc/", CONS, "/", CONS, ".", STRATUM,
                                      ".sub.miss.01.removed.txt"))) %>%
  # Some strata did not have subjects removed:
  filter(file.exists(miss_01_file)) %>%
  # Read removal list for each stratum:
  mutate(miss_data = map(miss_01_file, ~ read_tsv(., col_names = c("FID", "IID"),
                                                  col_types = "cc"))) %>%
  unnest() %>%
  select(-miss_01_file) %>%
  mutate(MISS_01_STATUS = "REMOVE_MISS_01")
# Duplicate subjects flagged for removal: one log file per consortium, or
# per stratum when the consortium is stratified.
dups.to.remove <- cons.strata %>%
  # Build the log path; stratified consortia embed ".STRATUM" in the name:
  mutate(dup_file = paste0(
    "logs/qc/", CONS, "/", CONS,
    ifelse(is.na(STRATUM), "", paste0(".", STRATUM)),
    ".duplicates.to.remove.txt")) %>%
  # Keep only strata whose log exists (some strata removed no subjects):
  filter(file.exists(dup_file)) %>%
  # Read each stratum's removal list into a nested list-column:
  mutate(dup_data = map(dup_file, ~ read_table2(., col_names = c("FID", "IID"),
                                                col_types = "cc"))) %>%
  unnest() %>%
  select(-dup_file) %>%
  mutate(DUP_STATUS = "REMOVE_DUPLICATE")
# Update sample table to reflect QC results:
# As with the SNP table, STATUS records the FIRST QC step that removed each
# subject (only rows where STATUS is still NA are overwritten at each step).
subject.table <- subject.table %>%
  # Initial (10%) subject missingness:
  left_join(sub.miss.10, by = c("CONS", "STRATUM", "FID", "IID")) %>%
  rename(STATUS = MISS_10_STATUS) %>%
  # Sex inconsistencies to remove:
  left_join(sex.remove, by = c("CONS", "STRATUM", "FID", "IID")) %>%
  mutate(STATUS = ifelse(is.na(STATUS), SEX_REM_STATUS, STATUS)) %>%
  select(-SEX_REM_STATUS) %>%
  # Sex inconsistencies to correct (subjects kept, SEX replaced):
  left_join(sex.correct, by = c("CONS", "STRATUM", "FID", "IID")) %>%
  mutate(SEX = ifelse(!is.na(new_SEX), new_SEX, SEX)) %>%
  select(-new_SEX) %>%
  # Population outliers: subjects absent from the European sample lists are
  # flagged, except the ibd/China stratum, which is exempted here —
  # TODO(review): confirm why ibd/China is special-cased.
  left_join(eur.samples, by = c("CONS", "STRATUM", "FID", "IID", "FATHER", "MOTHER", "SEX",
                                "PHENOTYPE")) %>%
  mutate(STATUS = ifelse(!(CONS == "ibd" & STRATUM == "China") & is.na(STATUS) & is.na(TYPE),
                         "REMOVE_POPULATION_OUTLIER", STATUS)) %>%
  select(-TYPE) %>%
  # Heterozygosity outliers:
  left_join(het.outliers, by = c("CONS", "STRATUM", "FID", "IID")) %>%
  mutate(STATUS = ifelse(is.na(STATUS), HET_REM_STATUS, STATUS)) %>%
  select(-HET_REM_STATUS) %>%
  # Stringent missingness:
  left_join(sub.miss.01, by = c("CONS", "STRATUM", "FID", "IID")) %>%
  mutate(STATUS = ifelse(is.na(STATUS), MISS_01_STATUS, STATUS)) %>%
  select(-MISS_01_STATUS) %>%
  # Remove duplicates:
  left_join(dups.to.remove, by = c("CONS", "STRATUM", "FID", "IID")) %>%
  mutate(STATUS = ifelse(is.na(STATUS), DUP_STATUS, STATUS)) %>%
  select(-DUP_STATUS)
rm(sub.miss.10, sex.remove, sex.correct, eur.samples, het.outliers, sub.miss.01, dups.to.remove, liftover.ped)
# Indicate which strata were excluded:
subject.table <- subject.table %>%
  left_join(excluded.strata, by = c("CONS", "STRATUM"))
# Read final post-QC manifests:
# One PLINK .fam per surviving consortium/stratum, combined into one long table.
final.sub.manifest <- cons.strata %>%
  mutate(final_manifest = ifelse(is.na(STRATUM),
                                 paste0("results/qc/final_counts/", CONS, ".all.qc.fam"),
                                 paste0("results/qc/final_counts/", CONS, ".", STRATUM,
                                        ".all.qc.fam"))) %>%
  # Not all strata survived QC:
  filter(file.exists(final_manifest)) %>%
  mutate(manifest_data = map(final_manifest, ~ read_table2(.,
                                                           col_names = c("FID", "IID", "FATHER",
                                                                         "MOTHER", "SEX",
                                                                         "PHENOTYPE"),
                                                           col_types = "cciiii"))) %>%
  unnest() %>%
  select(-final_manifest)
# Do all remaining subjects agree with the QC'd manifests (where available)?
# The full join flags rows present on only one side; an empty result means
# the subject table and the manifests agree.
subject.table %>%
  # Ignore subjects or strata that were removed:
  filter(!grepl("^REMOVE_", STATUS) & is.na(STRATUM_EXCLUDE_REASON)) %>%
  mutate(TABLE = "Table") %>%
  full_join(final.sub.manifest %>% mutate(MANIFEST = "Manifest"),
            by = c("CONS", "STRATUM", "FID", "IID", "FATHER", "MOTHER", "SEX", "PHENOTYPE")) %>%
  filter(is.na(TABLE) | is.na(MANIFEST))
### We have successfully accounted for subjects up to this point
rm(final.sub.manifest, excluded.strata)
# Relatives to remove:
rels.to.remove <- cons.strata %>%
  # Files differ depending on whether the consortium has strata:
  mutate(rel_file = ifelse(is.na(STRATUM),
                           paste0("logs/qc/", CONS, "/", CONS, ".relatives.to.remove.txt"),
                           paste0("logs/qc/", CONS, "/", CONS, ".", STRATUM,
                                  ".relatives.to.remove.txt"))) %>%
  # Some strata did not have subjects removed:
  filter(file.exists(rel_file)) %>%
  # Read subjects to remove for each stratum:
  mutate(rel_data = map(rel_file, ~ read_table2(., col_names = c("FID", "IID"),
                                                col_types = "cc"))) %>%
  unnest() %>%
  select(-rel_file) %>%
  mutate(REL_STATUS = "REMOVE_RELATIVE")
# Update subject table with relatives to remove (again, only rows with no
# earlier removal status are overwritten):
subject.table <- subject.table %>%
  left_join(rels.to.remove, by = c("CONS", "STRATUM", "FID", "IID")) %>%
  mutate(STATUS = ifelse(is.na(STATUS), REL_STATUS, STATUS)) %>%
  select(-REL_STATUS)
rm(rels.to.remove)
################################################################################
############# Section 4: Combine Consortia and Recode Subjects #############
################################################################################
# After completing QC, we reorganized our datasets into disease-level consortia
# (i.e. SLE Genentech and SLE OMRF were combined, as were T1D and T1D_ASP). We also
# recoded subjects to ensure that FID and IID are unique across all strata.
# In this section, we also remove relatives and duplicates that are shared
# across datasets.
# Define disease-level datasets: the two SLE datasets merge into "sle" and
# the two T1D datasets merge into "t1d"; stratum labels are remapped so they
# remain unique within each merged consortium.
new.cons.strata <- cons.strata %>%
  mutate(
    NEW_CONS = case_when(
      CONS %in% c("sle_g", "sle_o") ~ "sle",
      CONS %in% c("t1d", "t1d_asp") ~ "t1d",
      TRUE ~ CONS
    ),
    NEW_STRATUM = case_when(
      CONS == "sle_g" ~ paste(CONS, STRATUM, sep = "."),
      CONS == "sle_o" ~ CONS,
      CONS == "t1d" ~ "GRID",
      CONS == "t1d_asp" ~ "ASP",
      TRUE ~ STRATUM
    )
  )
# Add new dataset names to SNP and subject tables:
snp.table <- snp.table %>%
  left_join(new.cons.strata, by = c("CONS", "STRATUM"))
subject.table <- subject.table %>%
  left_join(new.cons.strata, by = c("CONS", "STRATUM"))
# Read subject recoding:
# Maps each original (FID, IID) to the unique (NEW_FID, NEW_IID) assigned
# when the consortia were merged.
new.fid.iid <- new.cons.strata %>%
  select(NEW_CONS) %>%
  unique() %>%
  mutate(recode_file = paste0("logs/manifest/", NEW_CONS, ".recoding.txt")) %>%
  filter(file.exists(recode_file)) %>%
  mutate(recode_data = map(recode_file, ~ read_table2(.,
                                                      col_names = c("FID", "IID", "NEW_FID",
                                                                    "NEW_IID"),
                                                      col_types = "cccc"))) %>%
  unnest() %>%
  select(-recode_file)
# Add recoded FID and IID:
subject.table <- subject.table %>%
  left_join(new.fid.iid, by = c("NEW_CONS", "FID", "IID"))
rm(new.fid.iid)
################################################################################
#################### Section 5: Pre-Imputation Filtering ###################
################################################################################
# In this section, we deal with SNPs that were filtered prior to imputation.
# Indels that were excluded:
exclude.indels <- new.cons.strata %>%
  mutate(indel_file = paste0("logs/imputation/", NEW_CONS, "/", NEW_STRATUM, "/", NEW_CONS, ".",
                             NEW_STRATUM, ".indels.txt")) %>%
  filter(file.exists(indel_file)) %>%
  mutate(indel_data = map(indel_file, ~ read_table2(., col_names = c("SNP"),
                                                    col_types = "c"))) %>%
  unnest() %>%
  # Drop the pre-merge labels; these tables are keyed by NEW_CONS/NEW_STRATUM:
  select(-CONS, -STRATUM, -indel_file) %>%
  mutate(INDEL_STATUS = "EXCLUDE_INDEL")
# SNPs that were excluded from imputation for MAF < 0.05:
exclude.maf.05 <- new.cons.strata %>%
  mutate(maf_file = paste0("logs/imputation/", NEW_CONS, "/", NEW_STRATUM, "/", NEW_CONS, ".",
                           NEW_STRATUM, ".maf_0.05.txt")) %>%
  filter(file.exists(maf_file)) %>%
  mutate(maf_data = map(maf_file, ~ read_table2(., col_names = c("SNP"),
                                                col_types = "c"))) %>%
  unnest() %>%
  select(-CONS, -STRATUM, -maf_file) %>%
  mutate(MAF_STATUS = "EXCLUDE_MAF_0.05")
# SNPs that were excluded for differential missingness (P < 0.00001):
exclude.diff.missing <- new.cons.strata %>%
  mutate(diff_missing_file = paste0("logs/assoc_test/", NEW_CONS, "/", NEW_CONS, ".", NEW_STRATUM,
                                    ".diff.miss.snps.to.remove.txt")) %>%
  filter(file.exists(diff_missing_file)) %>%
  mutate(diff_missing_data = map(diff_missing_file, ~ read_table2(., col_names = c("SNP"),
                                                                  col_types = "c"))) %>%
  unnest() %>%
  select(-CONS, -STRATUM, -diff_missing_file) %>%
  mutate(DIFF_MISSING_STATUS = "EXCLUDE_DIFF_MISSING_0.00001")
# Subsequent filtering is done per chromosome, so we add chromosomes to stratum data:
# (Cross join via a constant dummy key: every stratum x chromosomes 1-22.)
cons.strata.chr <- new.cons.strata %>%
  mutate(dummy = "join") %>%
  left_join(tibble(dummy = rep("join", 22),
                   CHR = 1:22),
            by = "dummy") %>%
  select(-dummy)
# SNPs that were missing from the 1,000 Genomes reference panel:
# (Logs are per chromosome, hence the cons.strata.chr starting table.)
exclude.ref.missing <- cons.strata.chr %>%
  mutate(ref_missing_file = paste0("logs/imputation/", NEW_CONS, "/", NEW_STRATUM, "/", NEW_CONS,
                                   ".", NEW_STRATUM, ".chr_", CHR, ".1kg.missing.snps.txt")) %>%
  filter(file.exists(ref_missing_file)) %>%
  mutate(ref_missing_data = map(ref_missing_file, ~ read_table2(., col_names = c("SNP"),
                                                                col_types = "c"))) %>%
  unnest() %>%
  select(-CONS, -STRATUM, -ref_missing_file) %>%
  mutate(REF_MISSING_STATUS = "EXCLUDE_MISSING_1KG_REFERENCE")
# SNPs that disagree with 1,000 Genomes reference panel:
exclude.ref.inconsistent <- cons.strata.chr %>%
  mutate(ref_inconsistent_file = paste0("logs/imputation/", NEW_CONS, "/", NEW_STRATUM, "/", NEW_CONS,
                                        ".", NEW_STRATUM, ".chr_", CHR, ".1kg.inconsistent.snps.txt")) %>%
  filter(file.exists(ref_inconsistent_file)) %>%
  mutate(ref_inconsistent_data = map(ref_inconsistent_file, ~ read_table2(., col_names = c("SNP"),
                                                                          col_types = "c"))) %>%
  unnest() %>%
  select(-CONS, -STRATUM, -ref_inconsistent_file) %>%
  mutate(REF_INCONSISTENT_STATUS = "EXCLUDE_INCONSISTENT_1KG_REFERENCE")
# Update SNP table to reflect pre-imputation filtering:
# IMPUTE_STATUS records the FIRST pre-imputation filter that excluded each
# SNP (only rows where IMPUTE_STATUS is still NA are overwritten).
snp.table <- snp.table %>%
  # Exclude non-autosomal SNPs (PLINK chromosome codes 23-26):
  left_join(tibble(
    CHR = c(23,24,25,26),
    IMPUTE_STATUS = c("EXCLUDE_CHR_X", "EXCLUDE_CHR_Y", "EXCLUDE_CHR_XY", "EXCLUDE_CHR_MT")), by = "CHR") %>%
  # Exclude indels:
  left_join(exclude.indels, by = c("NEW_CONS", "NEW_STRATUM", "SNP")) %>%
  mutate(IMPUTE_STATUS = ifelse(is.na(IMPUTE_STATUS), INDEL_STATUS, IMPUTE_STATUS)) %>%
  select(-INDEL_STATUS) %>%
  # Exclude minor allele frequency < 0.05 (note that some of these are also indels):
  left_join(exclude.maf.05, by = c("NEW_CONS", "NEW_STRATUM", "SNP")) %>%
  mutate(IMPUTE_STATUS = ifelse(is.na(IMPUTE_STATUS), MAF_STATUS, IMPUTE_STATUS)) %>%
  select(-MAF_STATUS) %>%
  # Differential missingness P < 0.00001:
  left_join(exclude.diff.missing, by = c("NEW_CONS", "NEW_STRATUM", "SNP")) %>%
  mutate(IMPUTE_STATUS = ifelse(is.na(IMPUTE_STATUS), DIFF_MISSING_STATUS, IMPUTE_STATUS)) %>%
  select(-DIFF_MISSING_STATUS) %>%
  # Exclude SNPs not found on 1,000 Genomes reference:
  left_join(exclude.ref.missing, by = c("NEW_CONS", "NEW_STRATUM", "CHR", "SNP")) %>%
  mutate(IMPUTE_STATUS = ifelse(is.na(IMPUTE_STATUS), REF_MISSING_STATUS, IMPUTE_STATUS)) %>%
  select(-REF_MISSING_STATUS) %>%
  # Exclude SNPs inconsistent with 1,000 Genomes reference:
  left_join(exclude.ref.inconsistent, by = c("NEW_CONS", "NEW_STRATUM", "CHR", "SNP")) %>%
  mutate(IMPUTE_STATUS = ifelse(is.na(IMPUTE_STATUS), REF_INCONSISTENT_STATUS, IMPUTE_STATUS)) %>%
  select(-REF_INCONSISTENT_STATUS) # %>%
  # group_by(STATUS, STRATUM_EXCLUDE_REASON, IMPUTE_STATUS) # %>%
  # summarize(n = n()) %>%
  # ungroup()
# How many indels are non-autosomal?
# exclude.indels %>%
#   left_join(snp.table, by = c("NEW_CONS", "NEW_STRATUM", "SNP")) %>%
#   filter(CHR > 22) %>%
#   group_by(CHR) %>%
#   summarize(n = n()) %>%
#   ungroup()
# How many MAF are not indels?
# anti_join(exclude.maf.05, exclude.indels, by = c("NEW_CONS", "NEW_STRATUM", "SNP")) %>% nrow()
# How many diff missing are not either indels or MAF?
# exclude.diff.missing %>%
#   anti_join(exclude.indels, by = c("NEW_CONS", "NEW_STRATUM", "SNP")) %>%
#   anti_join(exclude.maf.05, by = c("NEW_CONS", "NEW_STRATUM", "SNP")) %>% nrow()
rm(exclude.indels, exclude.maf.05, exclude.diff.missing, exclude.ref.missing, exclude.ref.inconsistent)
# Read pre-imputation manifests:
pre.imputation.manifests <- read_table2("logs/imputation/pre.phasing.manifests.txt.gz",
                                        col_names = TRUE, col_types = "ccciicc")
# Check that the SNP table corresponds to the pre-imputation manifests:
# Surviving table rows with no manifest counterpart (expect zero rows):
snp.table %>%
  filter(is.na(STATUS) & is.na(STRATUM_EXCLUDE_REASON) & is.na(IMPUTE_STATUS)) %>%
  anti_join(pre.imputation.manifests, by = c("NEW_CONS" = "CONS", "NEW_STRATUM" = "STRATUM", "SNP", "CHR", "POS"))
# Manifest rows with no surviving table counterpart (expect zero rows):
pre.imputation.manifests %>%
  anti_join(snp.table %>%
              filter(is.na(STATUS) & is.na(STRATUM_EXCLUDE_REASON) & is.na(IMPUTE_STATUS)),
            by = c("CONS" = "NEW_CONS", "STRATUM" = "NEW_STRATUM", "SNP", "CHR", "POS")) %>%
  head()
### Yes, they agree
rm(pre.imputation.manifests)
################################################################################
################## Section 6: Second Imputation Filtering ##################
################################################################################
# After the first round of imputation, we identified a number of spurious SNPs
# and a number of SNPs that were poorly concordant with the reference when
# imputed from their neighbours. These likely interfere with the overall quality
# of the imputation, so we repeated the imputation after removing these SNPs.
# Here, we account for these variants.
# Exclude SNPs that were missing in more than 1% of individuals:
impute.missing.snps <- read_table2("logs/imputation/second.imputation.all.strata.snp.miss.txt.gz") %>%
  filter(F_MISS > 0.01) %>%
  select(CONS, STRATUM, SNP) %>%
  mutate(POSTIMPUTE_STATUS = "EXCLUDE_MISSING_0.01")
# Exclude SNPs with MAF < 5%:
impute.rare.snps <- read_table2("logs/imputation/second.imputation.all.strata.maf.txt.gz") %>%
  filter(MAF < 0.05) %>%
  select(CONS, STRATUM, SNP) %>%
  mutate(POSTIMPUTE_STATUS = "EXCLUDE_MAF_0.05")
# Exclude SNPs that exhibited extreme deviation from Hardy-Weinberg
# equilibrium (tested among unaffected subjects only):
impute.hwe.snps <- read_table2("logs/imputation/second.imputation.all.strata.hwe.txt.gz") %>%
  filter(P_HWE < 0.00001 & TEST == "UNAFF") %>%
  select(CONS, STRATUM, SNP, CHR) %>%
  mutate(POSTIMPUTE_STATUS = "EXCLUDE_HWE_0.00001")
# Exclude SNPs that exhibited extreme differential missingness:
impute.diff.missing.snps <- read_table2("logs/imputation/second.imputation.all.strata.diff.miss.txt.gz") %>%
  filter(P_MISS < 0.00001) %>%
  select(CONS, STRATUM, SNP, CHR) %>%
  mutate(POSTIMPUTE_STATUS = "EXCLUDE_DIFF_MISSING_0.00001")
# Exclude SNPs that are poorly concordant with imputed genotypes when masked and
# imputed from neighbours (i.e. info_type0 > 0.8 and concord_type0 < 0.75):
impute.nonconcordant.snps <- read_table2("results/imputation/info.scores.txt.gz", col_types = "ccicciccdddiddd") %>%
  filter(type == 2 & info_type0 > 0.8 & concord_type0 < 0.75) %>%
  select(CONS = cons, STRATUM = stratum, SNP = rs_id, CHR = snp_id, POS = position) %>%
  # snp_id carries the chromosome in this file; coerce for the CHR joins below:
  mutate(CHR = as.integer(CHR)) %>%
  mutate(POSTIMPUTE_STATUS = "EXCLUDE_IMPUTATION_CONCORDANCE")
# Exclude spurious SNPs that were identified as possibly mismapped:
impute.mismapped.snps <- read_table2("results/imputation/all.strata.mismapped.snps.txt",
                                     col_names = c("CONS", "STRATUM", "SNP")) %>%
  # Exclude these from ALL strata of an affected disease:
  select(-STRATUM) %>%
  unique() %>%
  mutate(POSTIMPUTE_STATUS = "EXCLUDE_MISMAPPED")
# Update SNP table to reflect second imputation filtering:
# As before, IMPUTE_STATUS keeps the FIRST applicable exclusion reason.
snp.table <- snp.table %>%
  # Remove SNPs missing in >1% of individuals (relatives removed):
  left_join(impute.missing.snps, by = c("NEW_CONS" = "CONS", "NEW_STRATUM" = "STRATUM", "SNP")) %>%
  mutate(IMPUTE_STATUS = ifelse(is.na(IMPUTE_STATUS), POSTIMPUTE_STATUS, IMPUTE_STATUS)) %>%
  select(-POSTIMPUTE_STATUS) %>%
  # Remove SNPs with MAF <5% (relatives removed):
  left_join(impute.rare.snps, by = c("NEW_CONS" = "CONS", "NEW_STRATUM" = "STRATUM", "SNP")) %>%
  mutate(IMPUTE_STATUS = ifelse(is.na(IMPUTE_STATUS), POSTIMPUTE_STATUS, IMPUTE_STATUS)) %>%
  select(-POSTIMPUTE_STATUS) %>%
  # Remove SNPs that exhibited extreme Hardy-Weinberg disequilibrium (relatives removed):
  left_join(impute.hwe.snps, by = c("NEW_CONS" = "CONS", "NEW_STRATUM" = "STRATUM", "SNP", "CHR")) %>%
  mutate(IMPUTE_STATUS = ifelse(is.na(IMPUTE_STATUS), POSTIMPUTE_STATUS, IMPUTE_STATUS)) %>%
  select(-POSTIMPUTE_STATUS) %>%
  # Remove extreme differentially missing SNPs:
  left_join(impute.diff.missing.snps, by = c("NEW_CONS" = "CONS", "NEW_STRATUM" = "STRATUM", "SNP", "CHR")) %>%
  mutate(IMPUTE_STATUS = ifelse(is.na(IMPUTE_STATUS), POSTIMPUTE_STATUS, IMPUTE_STATUS)) %>%
  select(-POSTIMPUTE_STATUS) %>%
  # Remove SNPs that were not concordant when imputed:
  left_join(impute.nonconcordant.snps, by = c("NEW_CONS" = "CONS", "NEW_STRATUM" = "STRATUM", "SNP", "CHR", "POS")) %>%
  mutate(IMPUTE_STATUS = ifelse(is.na(IMPUTE_STATUS), POSTIMPUTE_STATUS, IMPUTE_STATUS)) %>%
  select(-POSTIMPUTE_STATUS) %>%
  # Remove SNPs that were flagged as possibly mismapped (applies to all strata
  # of the affected disease, hence no stratum in the join key):
  left_join(impute.mismapped.snps, by = c("NEW_CONS" = "CONS", "SNP")) %>%
  mutate(IMPUTE_STATUS = ifelse(is.na(IMPUTE_STATUS), POSTIMPUTE_STATUS, IMPUTE_STATUS)) %>%
  select(-POSTIMPUTE_STATUS) # %>%
  # group_by(IMPUTE_STATUS, POSTIMPUTE_STATUS) %>%
  # summarise(n = n()) %>% data.frame()
rm(impute.missing.snps, impute.rare.snps, impute.hwe.snps, impute.diff.missing.snps, impute.nonconcordant.snps, impute.mismapped.snps)
# Read final imputation manifests:
second.imputation.manifest <- read_table2("logs/imputation/second.imputation.manifests.txt.gz", col_types = "cccii")
### Check that SNP table agrees with preimputation manifests:
# Surviving table rows with no manifest counterpart (expect zero rows):
snp.table %>%
  filter(is.na(STATUS) & is.na(STRATUM_EXCLUDE_REASON) & is.na(IMPUTE_STATUS)) %>%
  anti_join(second.imputation.manifest, by = c("NEW_CONS" = "CONS", "NEW_STRATUM" = "STRATUM", "SNP", "CHR", "POS" = "BP"))
# Manifest rows with no surviving table counterpart (expect zero rows):
second.imputation.manifest %>%
  anti_join(snp.table %>%
              filter(is.na(STATUS) & is.na(STRATUM_EXCLUDE_REASON) & is.na(IMPUTE_STATUS)),
            by = c("CONS" = "NEW_CONS", "STRATUM" = "NEW_STRATUM", "SNP", "CHR", "BP" = "POS"))
rm(second.imputation.manifest)
# Write SNP and sample tables to file:
write_tsv(snp.table, "logs/snp.status.table.txt.gz")
write_tsv(subject.table, "logs/subject.status.table.txt.gz")
### Still need to deal with trans-consortium duplicates (?between every pair
### of traits?)
# ---- (stray "|" removed: concatenation artifact between the ImmunoChip QC script above and the NYC taxi tutorial below; a bare "|" is a syntax error in R) ----
### LOADING THE DATA ###
# Let's read in the dataset describing NYC taxi trips on May 14, 2013.
# Don't forget to start by navigating to the directory where you've saved the CSV file, using Session -> Set Working Directory, or setwd("YOUR_PATH_HERE")
# We set stringsAsFactors to FALSE just to make sure nothing gets converted to a factor variable that we don't want.
# (Spelled out as FALSE rather than F: T and F are ordinary variables that can be reassigned, so relying on them is unsafe.)
trips <- read.csv("2013-05-14_neighborhoods.csv", stringsAsFactors = FALSE)
# This might take a few seconds since it's a fairly large file.
# As usual, the first thing we want to do any time we load a new dataset is look at what we just loaded. The Environment pane in RStudio is a great way to do this, as is the str() function.
str(trips)
# We have 490,347 observations of 21 variables.
# Now that we've examined the variables, there are a few variables we should probably convert to factors:
trips$vendor_id <- factor(trips$vendor_id)
trips$rate_code <- factor(trips$rate_code)
trips$store_and_fwd_flag <- factor(trips$store_and_fwd_flag)
trips$payment_type <- factor(trips$payment_type)
# We'll deal with the datetime variables in one of the exercises.
# Now let's check out str() again to see what changed.
str(trips)
### 0. CHAINING AND OTHER PRELIMINARIES ###
### ___________________________________ ###
# Before we get to wrangling our data, let's start by learning a few tricks built in to the dplyr package that will make all of our data wrangling tasks easier.
# First we'll need to load the dplyr package.
# You should have already installed the dplyr and tidyr packages. If not, run the commands below.
# (Guarded with requireNamespace() so each package is only downloaded when it is
# actually missing — an unconditional install.packages() would re-install on every run.)
if (!requireNamespace("dplyr", quietly = TRUE)) install.packages("dplyr")
if (!requireNamespace("tidyr", quietly = TRUE)) install.packages("tidyr")
# Then load the package
library(dplyr)
# The first trick that will make our lives a bit easier is to convert our data frame to a special kind of data frame called a "tbl_df"
trips <- tbl_df(trips)
# tbl_df's operate in exactly the same way as regular data frames. The advantage is that they display in the console in an abbreviated format. This allows us to easily peek at our data along the way, without R attempting to print out the entire data frame. Anyone who doesn't want to follow along for the next couple minutes can skip this command and type trips into the console to see what we mean.
# Now we can just run trips any time we want to take a look at what we're working with.
# The second trick that we'll use throughout the rest of today's session is chaining. Chaining helps us keep our code and workspace clean. Let's learn how it works.
###-------------
# Let's look at an example of how chaining can make our code more legible.
# Say we want to calculate the standard deviation of the taxi fare amount in our data.
# We know how to calculate the standard deviation using base R.
# Remember that the standard deviation is the root mean square deviation from the mean.
# (Let's pretend for now that R doesn't provide the sd() function for this very purpose)
sqrt(mean((trips$fare_amount - mean(trips$fare_amount))^2))
# But that's a bit of a mess, with lots of open and closed parentheses to keep track of. Notice that I had to code 'inside out' as I wrote this.
# We could use chaining to make this code more legible and write-able.
# We'll start with the squared differences from the mean.
(trips$fare_amount - mean(trips$fare_amount))^2
# Then we'll take the mean of that expression
(trips$fare_amount - mean(trips$fare_amount))^2 %>% mean()
# Finally we'll take that entire expression and take the square root of it.
(trips$fare_amount - mean(trips$fare_amount))^2 %>% mean() %>% sqrt()
# Now we have more legible code that doesn't require any intermediate variables stored in memory.
###-------------
# Here's another example.
# Suppose we want to compute a histogram of the number of passengers on each trip.
# In words, we want to:
## Take the passenger_count column of trips.
trips$passenger_count
## Then use the table() function to count the number of trips for each count.
trips$passenger_count %>% table()
## Then plot() the result.
trips$passenger_count %>% table() %>% plot()
# Chaining makes it easy to complete these steps in a legible way without storing intermediate objects. Again, we could make our code even more terse by dropping parentheses:
trips$passenger_count %>% table %>% plot
## Side note: We could also use the hist() function
trips$passenger_count %>% hist()
# Without chaining, if we first looked at the table and only then decided to plot it, we would have to wrap plot() "around" our original expression, like this:
plot(table(trips$passenger_count))
# With chaining, we instead just tack %>% plot() onto the end of what we already wrote.
# Summing up: chaining sometimes requires slightly more typing, but it is much more writable and readable, and helps keep your virtual workspace clean by avoiding storing intermediate objects in memory. It's a powerful tool that we'll use throughout this session. And on that high note, Alex is going to get us started as we wrangle some data!
# OK, now let's wrangle some data.
### 1. EXPLORING AND SUMMARIZING DATA SET ###
### _____________________________________ ###
# The first thing we typically want to do with any dataset is data exploration.
# We saw on Tuesday how the summary() function can provide some useful summary statistics for any data frame.
summary(trips)
# That's pretty helpful. But what if we want to compute aggregate statistics at a more granular level?
# How can we answer the question, "What are the mean and median fare amounts by number of passengers?"
# To answer that question, we'll need to learn our first set of dplyr verbs: group_by and summarize.
###-------------
# Ok, now we're ready to go back and answer our question: what are the mean and median fare by number of passengers?
# First, we can find the mean.
# We take trips, group it by passenger_count, and then summarize the mean fare_amount within each group.
trips %>%
  group_by(passenger_count) %>%
  summarize(fare_mean = mean(fare_amount))
# Note that this chained set of commands creates a new data frame that gets printed to the console, but doesn't get stored.
# We could choose to assign this object to a name.
# Store the grouped means under a name (using <-, the idiomatic R
# assignment operator, rather than =):
mean_fare_by_passenger_count <- trips %>%
  group_by(passenger_count) %>%
  summarize(fare_mean = mean(fare_amount))
mean_fare_by_passenger_count
# When doing data exploration, we may choose not to store the object. It may be sufficient to print it to the console.
# Just out of curiosity, what happens if we use group_by without summarize?
trips %>%
  group_by(passenger_count)
# Interesting, it looks the same as trips, but it is a grouped data frame.
# We could save this as its own object.
grouped_trips = trips %>%
group_by(passenger_count)
# Then if we apply summarize to the new object, and it returns aggregate statistics for each group.
grouped_trips %>%
summarize(fare_mean = mean(fare_amount))
# Another question: What if we use summarize without group_by?
trips %>%
summarize(fare_mean = mean(fare_amount))
# Cool. It just yields an overall aggregate statistic on the un-grouped data frame.
# Something interesting just happened: When we apply summarize to trips, it yields the overall mean. But when we apply the same summarize statement to grouped_trips, it yields grouped means.
# What if we want to get the overall fare mean from grouped_trips? We can use ungroup().
grouped_trips %>%
ungroup()
# Ungroup removes all groups. Now we can calculate the overall mean with summarize.
grouped_trips %>%
ungroup() %>%
summarize(fare_mean = mean(fare_amount))
# Ok, back to the task at hand:
# So far we've only calculated mean, but we also wanted median.
# We can just add another argument within summarize, separated by commas, to create another column.
# I like to write each new column on a separate line of code.
# Each named argument to summarize() becomes a column in the result.
trips %>%
group_by(passenger_count) %>%
summarize(
fare_mean = mean(fare_amount),
fare_median = median(fare_amount)
)
# Notice anything weird about the mean and median?
# It seems a lot higher for trips with 0 passengers.
# But what is a trip with 0 passengers?
# Let's add a column "n" to our data frame showing how many trips are in dataset for each passenger count.
# We can use the n() function which is a helper verb for summarize that reports the "n" of each group.
# n() takes no arguments; it simply counts the rows in the current group.
trips %>%
group_by(passenger_count) %>%
summarize(
fare_mean = mean(fare_amount),
fare_median = median(fare_amount),
n = n()
)
# Ok, that explains it. There are only 3 trips with 0 passengers. Maybe they were coding errors.
# What if don't want to include these weird trips in our summary table?
# We learned one way to do this on Tuesday using the subset() function. dplyr provides a similar function called filter().
# Let's explore these functions
###-------------
# Ok, let's filter trips before computing our summary stats.
# We'll include only trips where passenger_count is greater than zero.
# Keep only trips where passenger_count is greater than zero.
# Note: the result is not assigned, so trips itself is unchanged.
trips %>%
filter(passenger_count > 0)
# Good - our new data frame has 3 fewer rows.
# Let's double-check it did what we wanted.
# FIX: the original ran summary(trips$passenger_count), but trips was never
# reassigned above, so that summary still includes the 0-passenger rows and
# verifies nothing. Summarize the filtered column instead, so the minimum
# shown is actually the post-filter minimum.
summary(filter(trips, passenger_count > 0)$passenger_count)
# Now let's calculate our summary table for this subset of the data. We'll just tack on the same commands we used before.
trips %>%
filter(passenger_count > 0) %>%
group_by(passenger_count) %>%
summarize(
fare_mean = mean(fare_amount),
fare_median = median(fare_amount),
n = n()
)
# In this case, because we are only filtering based on one condition, we could have also used subset.
# subset() comes from base R, but it drops into a dplyr chain just like filter().
trips %>%
subset(passenger_count > 0) %>%
group_by(passenger_count) %>%
summarize(
fare_mean = mean(fare_amount),
fare_median = median(fare_amount),
n = n()
)
# Also, we chose to filter out the 0's before we calculated our summary stats.
# But we could have calculated summary stats and then filtered, with the same results (Warning: may not always be the case)
# Notice how easy it is to slide the filter statement "down the chain"
# Here the filter runs on the *summary table*, removing its passenger_count == 0 row.
trips %>%
group_by(passenger_count) %>%
summarize(
fare_mean = mean(fare_amount),
fare_median = median(fare_amount),
n = n()
) %>%
filter(passenger_count > 0)
# Now that we've got some summary stats, we might have some other questions as we continue our data exploration.
# What number of passengers tend to give the largest fares?
# To answer this, we need to learn how to sort our data frame.
# To sort, we use the arrange() verb from dplyr.
###-------------
# Let's sort our summary table by fare_mean
# We'll just add an arrange clause at the end of the chain.
# arrange() sorts rows; the default order is ascending.
trips %>%
filter(passenger_count > 0) %>%
group_by(passenger_count) %>%
summarize(
fare_mean = mean(fare_amount),
fare_median = median(fare_amount),
n = n()
) %>%
arrange(fare_mean)
# This sorted our summary data in ascending order of fare_mean.
# We can see that trips with 1 passenger have the lowest average fare.
# It might be convenient at this point to save our summary stats as an object since we are just planning to sort the data frame by various columns.
trips_summary = trips %>%
filter(passenger_count > 0) %>%
group_by(passenger_count) %>%
summarize(
fare_mean = mean(fare_amount),
fare_median = median(fare_amount),
n = n()
)
trips_summary %>%
arrange(fare_mean)
# What if we wanted to sort in descending order of fare_mean?
# We can use desc()
# desc() wraps a column so arrange() sorts it in descending order.
trips_summary %>%
arrange(desc(fare_mean))
# Since we observe that the fare median is often 9.5, we could sort by fare median and then by descending order of n.
# Multiple arguments to arrange() sort by the first column, breaking ties with the later ones.
trips_summary %>%
arrange(fare_median, desc(n))
# Ok, we've learned a bunch of dplyr verbs that helped us compute aggregate statistics (group_by, summarize), filter our data (filter), and sort our data (arrange).
# Let's practice!
# Open up exercises.R and work your way through Exercise 1. Feel free to discuss with your neighbors as you go along.
### 2. PREPPING DATA FOR ANALYSIS ###
### ----------------------------- ###
# ______
# But first let's learn how we can derive new columns/covariates.
# Let's say we want to build a linear regression model to see if we can predict tip percentage based on passenger count and fare amount
# Right now we have 21 columns. We only need two of them (passenger_count and fare_amount). We also need to create a tip percentage column.
# Since we only need a few of our existing columns, let's just work with those ones - the full dataset is a bit unwieldy.
# We can use the select() function.
# ______
# Ok, let's select just the passenger count, fare amount, and also tip amount, which we'll need to calculate the tip percentage.
# select() keeps only the named columns (tip_amount is needed to derive tip percentage).
trips %>%
select(passenger_count,fare_amount,tip_amount)
# Now we can see all three columns we are dealing with.
# Ok, we are almost ready to supply this data frame to lm().
# But we're missing our dependent variable!
# We know we want a new column that calculates tip percentage by dividing tip amount by fare amount.
# We could do this using baseR
trips$tip_percent = trips$tip_amount / trips$fare_amount
# But dplyr provides the mutate() verb to create new columns.
# mutate() has some advantages over the baseR approach.
# Before we explore those advantages, let's use select to remove that tip_percent column we just created.
# A minus sign in select() drops the named column and keeps all the rest.
trips = trips %>%
select(-tip_percent)
# ______
# Let's use dplyr to create a new tip_percent column.
# mutate() adds the derived tip_percent column without touching the original trips object.
trips %>%
select(passenger_count,fare_amount,tip_amount) %>%
mutate(tip_percent = tip_amount / fare_amount)
# Let's see what the new column looks like
trips %>%
select(passenger_count,fare_amount,tip_amount) %>%
mutate(tip_percent = tip_amount / fare_amount) %>%
summary()
# Ok, there's a bit of a weird outlier, but everything else looks good.
# Let's save our linear regression data and remove tip_amount, which we don't need anymore.
linregdata = trips %>%
select(passenger_count,fare_amount,tip_amount) %>%
mutate(tip_percent = tip_amount / fare_amount) %>%
select(-tip_amount)
# Now we'll run the linear regression and see how it looks.
# The formula "tip_percent ~ ." regresses tip_percent on every other column of linregdata.
mod = lm(tip_percent ~ ., data=linregdata)
summary(mod)
# 3. ORIGIN-DESTINATION MATRIX --------------------------------------------
# One good question for a data set like this is: what 'trip patterns' tend to be most common? One answer to this question is an origin-destination (OD) matrix, in which the ij-th entry counts the number of trips from origin i to destination j. Let's explore how to create an OD matrix using functions from tidyr, a great companion to dplyr for reshaping data sets.
### 3. ORIGIN-DESTINATION MATRIX ###
### ----------------------------- ###
# One good question for a data set like this is: what 'trip patterns' tend to be most common? One answer to this question is an origin-destination (O-D) matrix, in which the ij-th entry counts the number of trips from origin i to destination j. Let's explore how to create an O-D matrix using functions from tidyr, a great companion to dplyr for reshaping data sets.
# First, we'll create an OD matrix on the district-number level. We'll use the pdistrict and ddistrict columns. Let's take a look:
trips %>% select(pdistrict, ddistrict)
# Each of these columns is an integer ID that labels the analysis district in which the trip started (pdistrict) or ended (ddistrict). Looks like we've got some NAs; how many? We can find out using our old friends filter() and summarise(). Note the use of the 'or' operator | in the filter call.
# is.na() is the correct NA test; "== NA" would itself return NA.
trips %>% select(pdistrict, ddistrict) %>%
filter(is.na(pdistrict) | is.na(ddistrict)) %>%
summarise(n = n())
# So, there are about 16K NA missing data rows out of almost 500K observations; not too bad. We'll need to filter them out later.
# Ok, now we're ready to make that matrix. The first tool we'll want is the count() function, which is a convenient shortcut for situations where you would use groupby() and n() to count objects by category. To illustrate, note that we could rewrite the last block of code using count:
trips %>% select(pdistrict, ddistrict) %>%
filter(is.na(pdistrict) | is.na(ddistrict)) %>%
count()
# For a more interesting usage, let's look at how many trips there were for each O-D pair
# count(a, b) is shorthand for group_by(a, b) %>% summarise(n = n()).
trips %>% count(pdistrict,ddistrict)
# Not bad, but let's filter out those NAs. If you know you want to filter out some data, it's usually safest to do this first, so I'll put this at the beginning of the chain.
trips %>%
filter(!is.na(pdistrict),
!is.na(ddistrict)) %>%
count(pdistrict, ddistrict)
# Our data is in 'long' or 'tidy' format, where each possible value of pdistrict and ddistrict has its own row. A row in this table is a count of the number of trips from pdistrict to ddistrict. This is great in many applications because there are few columns, filtering is easy, etc. But we wanted a matrix! What do we want to do to make this data matrix-shaped?
# Right--we want to generate a separate column for each value of ddistrict, and we want the counts to follow along. This is a version of 'wide format'. The tidyr package supplies two verbs, spread() and gather(), for converting between wide and long format. Since we need to make our data wider, let's talk about spread().
# -------
library(tidyr)
# spread() pivots the ddistrict values into columns, with the n counts as cell values.
# (In tidyr >= 1.0, spread()/gather() are superseded by pivot_wider()/pivot_longer(),
# but they still work and are used throughout this session.)
trips %>%
filter(!is.na(pdistrict),
!is.na(ddistrict)) %>%
count(pdistrict, ddistrict) %>%
spread(key = ddistrict, value = n)
# Nice -- we're matrix-shaped, just like we wanted. Each row corresponds to an origin, each column to a destination, and the corresponding entry is the number of trips between them.
# Note that spread() filled in NAs whenever it couldn't find a pdistrict-ddistrict pair. That's often correct behavior, but in this context, a missing pair just means that there were zero trips from that pdistrict to that ddistrict. So, we should fill our matrix with 0s instead of NAs. spread() gives us the fill parameter to accomplish just that.
trips %>%
filter(!is.na(pdistrict),
!is.na(ddistrict)) %>%
count(pdistrict, ddistrict) %>%
spread(key = ddistrict,value = n, fill = 0)
# This is a good start, but we have a data overload problem: there's just too much here to handle! It would be more interpretable if we could group by borough instead of by individual district. To do that, we'll need to add two columns for the pickup borough and dropoff borough. This information is coded in pdistrict and ddistrict, but we need to access it by referring to another table. Let's do that using joins.
# --------------------------------------------------------------------------
# First, let's load in the table that maps district codes to boroughs:
# Load the lookup table that maps district codes to boroughs.
# FIX: spell out FALSE rather than F -- T and F are ordinary variables that
# can be reassigned, while TRUE and FALSE are reserved words. Keeping
# stringsAsFactors = FALSE leaves borough names as character strings.
areas = read.csv('area_info.csv', stringsAsFactors = FALSE) %>% tbl_df
# What do we have here?
areas
# Each row maps a district id number to a borough, just as we'd expect.
# Ok, now let's grab just the columns we need from trips:
trips %>%
select(pdistrict, ddistrict)
# We'd like to join with the areas table, but we have a problem: should we join by pdistrict or ddistrict? One good answer is 'both' -- but how should that work? We'll solve this problem by combining pdistrict and ddistrict into a single column, and then joining on the new column. To do so, we'll use gather() from tidyr, which is the inverse of spread(). First, since we are about to combine the two columns, we need a unique trip_id to keep track of which pdistricts and ddistricts should go together. An easy way to make a unique ID is the row_number() function, which does exactly what you think it does (on ungrouped data).
# row_number() gives each trip a unique ID so pickup/dropoff rows can be re-paired after gather().
trips %>%
select(pdistrict, ddistrict) %>%
mutate(trip_id = row_number())
# Now we're ready to use gather(). We'd like to get a new df where each row is either a pickup or a dropoff district, the first column tells us whether the row is a pickup or a dropoff, the second tells us the trip_id, and the third gives the district number.
# This doubles the row count: one row per trip becomes one row per (trip, endpoint).
trips %>%
select(pdistrict, ddistrict) %>%
mutate(trip_id = row_number()) %>%
gather(key = p_or_d, value = district, pdistrict, ddistrict)
# Nice, this is what we wanted. What did we do with that gather() call?
# - "Create a key (label) new column and call it p_or_d"
# - "Create a new values column and call it district"
# - "Take the column NAMES of pdistrict and ddistrict and gather them into the new p_or_d column"
# - "Take the corresponding column VALUES of pdistrict and ddistrict" and gather them into the new district column"
# So, what does our new data frame look like? We can see what's going on a bit better if we sort the data on trip_id:
trips %>%
select(pdistrict, ddistrict) %>%
mutate(trip_id = row_number()) %>%
gather(key = p_or_d, value = district, pdistrict, ddistrict) %>%
arrange(trip_id)
# Each trip has a row corresponding to pickup and another corresponding to dropoff.
# Now have just one column with district IDs in it, so we are ready to join. We'll use dplyr's left_join(), and we'll tell it that the district column of trips is supposed to match the id column of areas.
# left_join keeps every row of the left table, filling unmatched rows with NA.
trips %>%
select(pdistrict, ddistrict) %>%
mutate(trip_id = row_number()) %>%
gather(key = p_or_d, value = district, pdistrict, ddistrict) %>%
left_join(areas, by = c('district' = 'id'))
# Looks like we got a bunch of NAs, which makes sense, since we had a bunch of NAs in the district column. That's ok for now. If we wanted to drop all entries with NAs in either trips or areas, we could use an inner_join instead.
# So we've done our join, and we want to get our data back into a more familiar
# format. First, let's take out the trash: we don't need district anymore, so let's drop that column using our old friend select():
# Drop the raw district IDs -- after the join, borough carries the information we need.
trips %>%
select(pdistrict, ddistrict) %>%
mutate(trip_id = row_number()) %>%
gather(key = p_or_d, value = district, pdistrict, ddistrict) %>%
left_join(areas, by = c('district' = 'id')) %>%
select(-district)
# Now we're ready to spread(). We want to use the values of p_or_d to code two new columns, with values that come from borough.
# Since we have the trip_id column, spread() knows to keep those entries together -- that's why we needed it in the first place.
trips %>%
select(pdistrict, ddistrict) %>%
mutate(trip_id = row_number()) %>%
gather(key = p_or_d, value = district, pdistrict, ddistrict) %>%
left_join(areas, by = c('district' = 'id')) %>%
select(-district) %>%
spread(key = p_or_d, value = borough)
# How to read the spread() call:
# - "Create new columns labelled with the values of p_or_d."
# - "In those new columns, put as values the values of the borough column."
# - "Don't do anything with trip_id."
# Since trip_id has to match, we ensure that pdistrict and ddistrict will line up like they should.
# Great, now we are ready to count again. However, our column names are out of date (they're not districts anymore), so let's rename them using an intuitive function:
# rename(new_name = old_name) changes column names without touching the data.
trips %>%
select(pdistrict, ddistrict) %>%
mutate(trip_id = row_number()) %>%
gather(key = p_or_d, value = district, pdistrict, ddistrict) %>%
left_join(areas, by = c('district' = 'id')) %>%
select(-district) %>%
spread(key = p_or_d, value = borough) %>%
rename(pborough = pdistrict, dborough = ddistrict)
# Just like last time, our final steps are to count() and spread() again.
trips %>%
select(pdistrict, ddistrict) %>%
mutate(trip_id = row_number()) %>%
gather(key = p_or_d, value = district, pdistrict, ddistrict) %>%
left_join(areas, by = c('district' = 'id')) %>%
select(-district) %>%
spread(key = p_or_d, value = borough) %>%
rename(pborough = pdistrict, dborough = ddistrict) %>%
count(pborough, dborough)
# Finally, we can get this into 'matrix form' by spreading again. We want dborough to be the columns, and the values to be n.
# Note that we still have some NAs. We could filter them out using familiar methods, or we could use the 'fill' parameter of spread to write 'Unknown' there instead:
# NOTE: this chain fails on purpose (teaching moment) -- the NAs in dborough
# make spread() try to create a column literally named NA.
trips %>%
select(pdistrict, ddistrict) %>%
mutate(trip_id = row_number()) %>%
gather(key = p_or_d, value = district, pdistrict, ddistrict) %>%
left_join(areas, by = c('district' = 'id')) %>%
select(-district) %>%
spread(key = p_or_d, value = borough) %>%
rename(pborough = pdistrict, dborough = ddistrict) %>%
count(pborough, dborough) %>%
spread(key = dborough, value = n, fill = 0)
# Wait, we got an error! What happened?
# Right, we have some NAs in here, and R isn't letting us use NA for a column name. Just like last time, the NAs are coming from spread() (the first call), and just like last time, we can use the fill parameter to do something else with them. I am going to call them 'Unknown':
# The fix: fill = 'Unknown' in the *first* spread(), so unmatched boroughs become a real label.
trips %>%
select(pdistrict, ddistrict) %>%
mutate(trip_id = row_number()) %>%
gather(key = p_or_d, value = district, pdistrict, ddistrict) %>%
left_join(areas, by = c('district' = 'id')) %>%
select(-district) %>%
spread(key = p_or_d, value = borough, fill = 'Unknown') %>%
rename(pborough = pdistrict, dborough = ddistrict) %>%
count(pborough, dborough) %>%
spread(key = dborough, value = n, fill = 0)
# That's what we wanted! Again, each row corresponds to an origin and each column to a destination. For the sake of visualization, we'll save this as an
# object m:
# Save the finished borough-level O-D table as m for plotting.
m = trips %>%
select(pdistrict, ddistrict) %>%
mutate(trip_id = row_number()) %>%
gather(key = p_or_d, value = district, pdistrict, ddistrict) %>%
left_join(areas, by = c('district' = 'id')) %>%
select(-district) %>%
spread(key = p_or_d, value = borough, fill = 'Unknown') %>%
rename(pborough = pdistrict, dborough = ddistrict) %>%
count(pborough, dborough) %>%
spread(key = dborough, value = n, fill = 0)
# And then we'll use the code below to draw a heatmap.
# Drop the label column and coerce to a numeric matrix so heatmap() can consume it.
m = m %>% select(-pborough) %>% data.matrix
# Borough names label both axes; rows and columns are in the same order.
rownames(m) = colnames(m)
# symm = TRUE treats the matrix as symmetric-shaped; scale = 'row' normalizes each row.
heatmap(m, symm = TRUE, scale = 'row')
# Each row corresponds to an origin, and each column to a destination. We see that
# most trips tend to stay within the same borough, but that almost all
# have a high likelihood to end in Manhattan.
|
/2-data-wrangling/script.R
|
no_license
|
joehuchette/OR-software-tools-2016
|
R
| false
| false
| 25,895
|
r
|
### LOADING THE DATA ###
# Let's read in the dataset describing NYC taxi trips on May 14, 2013.
# Don't forget to start by navigating to the directory where you've saved the CSV file, using Session -> Set Working Directory, or setwd("YOUR_PATH_HERE")
# We'll set stringsAsFactors to FALSE just to make sure nothing gets converted to a factor variable that we don't want.
# FIX: spell out FALSE rather than F -- T and F are ordinary variables that
# can be reassigned, while TRUE and FALSE are reserved words.
trips = read.csv("2013-05-14_neighborhoods.csv", stringsAsFactors = FALSE)
# This might take a few seconds since it's a fairly large file.
# As usual, the first thing we want to do any time we load a new dataset is look at what we just loaded. The Environment pane in RStudio is a great way to do this, as is the str() function.
str(trips)
# We have 490,347 observations of 21 variables.
# Now that we've examined the variables, there are a few variables we should probably convert to factors
trips$vendor_id = factor(trips$vendor_id)
trips$rate_code = factor(trips$rate_code)
trips$store_and_fwd_flag = factor(trips$store_and_fwd_flag)
trips$payment_type = factor(trips$payment_type)
# We'll deal with the datetime variables in one of the exercises.
# Now let's check out str() again to see what changed.
str(trips)
### 0. CHAINING AND OTHER PRELIMINARIES ###
### ___________________________________ ###
# Before we get to wrangling our data, let's start by learning a few tricks built in to the dplyr package that will make all of our data wrangling tasks easier.
# First we'll need to load the dplyr package
# You should have already installed the dplyr and tidyr packages. If not, bow your head in shame and run the following commands:
# FIX: only install when a package is actually missing -- an unconditional
# install.packages() call re-downloads and re-installs the packages on every
# run of the script. requireNamespace() checks availability without attaching.
if (!requireNamespace("dplyr", quietly = TRUE)) install.packages("dplyr")
if (!requireNamespace("tidyr", quietly = TRUE)) install.packages("tidyr")
# Then load the package
library(dplyr)
# The first trick that will make our lives a bit easier is to convert our data frame to a special kind of data frame called a "tbl_df"
trips = tbl_df(trips)
# tbl_df's operate in exactly the same way as regular data frames. The advantage is that they display in the console in an abbreviated format. This allows us to easily peek at our data along the way, without R attempting to print out the entire data frame. Anyone who doesn't want to follow along for the next couple minutes can skip this command and type trips into the console to see what we mean.
# Now we can just run trips any time we want to take a look at what we're working with.
# The second trick that we'll use throughout the rest of today's session is chaining. Chaining helps us keep our code and workspace clean. Let's learn how it works.
###-------------
# Let's look at an example of how chaining can make our code more legible.
# Say we want to calculate the standard deviation of the taxi fare amount in our data.
# We know how to calculate the standard deviation using baseR.
# Remember that the standard deviation is the root mean square deviation from the mean.
# (Let's pretend for now that R doesn't provide the sd() function for this very purpose)
# Nested ("inside out") version: square the deviations, average them, take the square root.
sqrt(mean((trips$fare_amount - mean(trips$fare_amount))^2))
# But that's a bit of a mess, with lots of open and closed parentheses to keep track of. Notice that I had to code 'inside out' as I wrote this.
# We could use chaining to make this code more legible and write-able.
# We'll start with the squared differences from the mean.
(trips$fare_amount - mean(trips$fare_amount))^2
# Then we'll take the mean of that expression
(trips$fare_amount - mean(trips$fare_amount))^2 %>% mean()
# Finally we'll take that entire expression and take the square root of it.
# The chained version reads left-to-right in the same order the computation happens.
(trips$fare_amount - mean(trips$fare_amount))^2 %>% mean() %>% sqrt()
# Now we have more legible code that doesn't require any intermediate variables stored in memory.
###-------------
# Here's another example.
# Suppose we want to compute a histogram of the number of passengers on each trip.
# In words, we want to:
## Take the passenger_count column of trips.
trips$passenger_count
## Then use the table() function to count the number of trips for each count.
trips$passenger_count %>% table()
## Then plot() the result.
trips$passenger_count %>% table() %>% plot()
# Chaining makes it easy to complete these steps in a legible way without storing intermediate objects. Again, we could make our code even more terse by dropping parentheses:
# (magrittr allows omitting () on zero-argument pipe targets.)
trips$passenger_count %>% table %>% plot
## Side note: We could also use the hist() function
trips$passenger_count %>% hist()
# Furthermore, if we first looked at the table and then decided we want to plot it, we can just use chaining to tack on the plot() function, rather than having to add it "around" our original expression.
plot(table(trips$passenger_count))
# Summing up: chaining sometimes requires slightly more typing, but it is much more writable and readable, and helps keep your virtual workspace clean by avoiding storing intermediate objects in memory. It's a powerful tool that we'll use throughout this session. And on that high note, Alex is going to get us started as we wrangle some data!
# OK, now let's wrangle some data.
### 1. EXPLORING AND SUMMARIZING DATA SET ###
### _____________________________________ ###
# The first thing we typically want to do with any dataset is data exploration.
# We saw on Tuesday how the summary() function can provide some useful summary statistics for any data frame.
summary(trips)
# That's pretty helpful. But what if we want to compute aggregate statistics at a more granular level?
# How can we answer the question, "What are the mean and median fare amounts by number of passengers?"
# To answer that question, we'll need to learn our first set of dplyr verbs: group_by and summarize.
###-------------
# Ok, now we're ready to go back and answer our question: what is the mean and median fare by number of passengers.
# First, we can find the mean.
# We take trips, group it by passenger_count, and then summarize the mean fare_amount within each group.
# group_by() only attaches grouping metadata; summarize() then collapses each group to one row.
trips %>%
group_by(passenger_count) %>%
summarize(fare_mean = mean(fare_amount))
# Note that this chained set of commands creates a new data frame that gets printed to the console, but doesn't get stored.
# We could choose to assign this object to a name.
mean_fare_by_passenger_count = trips %>%
group_by(passenger_count) %>%
summarize(fare_mean = mean(fare_amount))
mean_fare_by_passenger_count
# When doing data exploration, we may choose not to store the object. It may be sufficient to print it to the console.
# Just out of curiosity, what happens if we use group_by without summarize?
trips %>%
group_by(passenger_count)
# Interesting, it looks the same as trips, but it is a grouped data frame.
# We could save this as its own object.
grouped_trips = trips %>%
group_by(passenger_count)
# Then if we apply summarize to the new object, it returns aggregate statistics for each group.
grouped_trips %>%
summarize(fare_mean = mean(fare_amount))
# Another question: What if we use summarize without group_by?
trips %>%
summarize(fare_mean = mean(fare_amount))
# Cool. It just yields an overall aggregate statistic on the un-grouped data frame.
# Something interesting just happened: When we apply summarize to trips, it yields the overall mean. But when we apply the same summarize statement to grouped_trips, it yields grouped means.
# What if we want to get the overall fare mean from grouped_trips? We can use ungroup().
grouped_trips %>%
ungroup()
# Ungroup removes all groups. Now we can calculate the overall mean with summarize.
grouped_trips %>%
ungroup() %>%
summarize(fare_mean = mean(fare_amount))
# Ok, back to the task at hand:
# So far we've only calculated mean, but we also wanted median.
# We can just add another argument within summarize, separated by commas, to create another column.
# I like to write each new column on a separate line of code.
# Each named argument to summarize() becomes a column in the result.
trips %>%
group_by(passenger_count) %>%
summarize(
fare_mean = mean(fare_amount),
fare_median = median(fare_amount)
)
# Notice anything weird about the mean and median?
# It seems a lot higher for trips with 0 passengers.
# But what is a trip with 0 passengers?
# Let's add a column "n" to our data frame showing how many trips are in dataset for each passenger count.
# We can use the n() function which is a helper verb for summarize that reports the "n" of each group.
# n() takes no arguments; it simply counts the rows in the current group.
trips %>%
group_by(passenger_count) %>%
summarize(
fare_mean = mean(fare_amount),
fare_median = median(fare_amount),
n = n()
)
# Ok, that explains it. There are only 3 trips with 0 passengers. Maybe they were coding errors.
# What if don't want to include these weird trips in our summary table?
# We learned one way to do this on Tuesday using the subset() function. dplyr provides a similar function called filter().
# Let's explore these functions
###-------------
# Ok, let's filter trips before computing our summary stats.
# We'll include only trips where passenger_count is greater than zero.
# Keep only trips where passenger_count is greater than zero.
# Note: the result is not assigned, so trips itself is unchanged.
trips %>%
filter(passenger_count > 0)
# Good - our new data frame has 3 fewer rows.
# Let's double-check it did what we wanted.
# FIX: the original ran summary(trips$passenger_count), but trips was never
# reassigned above, so that summary still includes the 0-passenger rows and
# verifies nothing. Summarize the filtered column instead, so the minimum
# shown is actually the post-filter minimum.
summary(filter(trips, passenger_count > 0)$passenger_count)
# Now let's calculate our summary table for this subset of the data. We'll just tack on the same commands we used before.
trips %>%
filter(passenger_count > 0) %>%
group_by(passenger_count) %>%
summarize(
fare_mean = mean(fare_amount),
fare_median = median(fare_amount),
n = n()
)
# In this case, because we are only filtering based on one condition, we could have also used subset.
# subset() comes from base R, but it drops into a dplyr chain just like filter().
trips %>%
subset(passenger_count > 0) %>%
group_by(passenger_count) %>%
summarize(
fare_mean = mean(fare_amount),
fare_median = median(fare_amount),
n = n()
)
# Also, we chose to filter out the 0's before we calculated our summary stats.
# But we could have calculated summary stats and then filtered, with the same results (Warning: may not always be the case)
# Notice how easy it is to slide the filter statement "down the chain"
# Here the filter runs on the *summary table*, removing its passenger_count == 0 row.
trips %>%
group_by(passenger_count) %>%
summarize(
fare_mean = mean(fare_amount),
fare_median = median(fare_amount),
n = n()
) %>%
filter(passenger_count > 0)
# Now that we've got some summary stats, we might have some other questions as we continue our data exploration.
# What number of passengers tend to give the largest fares?
# To answer this, we need to learn how to sort our data frame.
# To sort, we use the arrange() verb from dplyr.
###-------------
# Let's sort our summary table by fare_mean
# We'll just add an arrange clause at the end of the chain.
# arrange() sorts rows; the default order is ascending.
trips %>%
filter(passenger_count > 0) %>%
group_by(passenger_count) %>%
summarize(
fare_mean = mean(fare_amount),
fare_median = median(fare_amount),
n = n()
) %>%
arrange(fare_mean)
# This sorted our summary data in ascending order of fare_mean.
# We can see that trips with 1 passenger have the lowest average fare.
# It might be convenient at this point to save our summary stats as an object since we are just planning to sort the data frame by various columns.
trips_summary = trips %>%
filter(passenger_count > 0) %>%
group_by(passenger_count) %>%
summarize(
fare_mean = mean(fare_amount),
fare_median = median(fare_amount),
n = n()
)
trips_summary %>%
arrange(fare_mean)
# What if we wanted to sort in descending order of fare_mean?
# We can use desc()
# desc() wraps a column so arrange() sorts it in descending order.
trips_summary %>%
arrange(desc(fare_mean))
# Since we observe that the fare median is often 9.5, we could sort by fare median and then by descending order of n.
# Multiple arguments to arrange() sort by the first column, breaking ties with the later ones.
trips_summary %>%
arrange(fare_median, desc(n))
# Ok, we've learned a bunch of dplyr verbs that helped us compute aggregate statistics (group_by, summarize), filter our data (filter), and sort our data (arrange).
# Let's practice!
# Open up exercises.R and work your way through Exercise 1. Feel free to discuss with your neighbors as you go along.
### 2. PREPPING DATA FOR ANALYSIS ###
### ----------------------------- ###
# ______
# But first let's learn how we can derive new columns/covariates.
# Let's say we want to build a linear regression model to see if we can predict tip percentage based on passenger count and fare amount
# Right now we have 21 columns. We only need two of them (passenger_count and fare_amount). We also need to create a tip percentage column.
# Since we only need a few of our existing columns, let's just work with those ones - the full dataset is a bit unwieldy.
# We can use the select() function.
# ______
# Ok, let's select just the passenger count, fare amount, and also tip amount, which we'll need to calculate the tip percentage.
# select() keeps only the named columns (tip_amount is needed to derive tip percentage).
trips %>%
select(passenger_count,fare_amount,tip_amount)
# Now we can see all three columns we are dealing with.
# Ok, we are almost ready to supply this data frame to lm().
# But we're missing our dependent variable!
# We know we want a new column that calculates tip percentage by dividing tip amount by fare amount.
# We could do this using baseR
trips$tip_percent = trips$tip_amount / trips$fare_amount
# But dplyr provides the mutate() verb to create new columns.
# mutate() has some advantages over the baseR approach.
# Before we explore those advantages, let's use select to remove that tip_percent column we just created.
# A minus sign in select() drops the named column and keeps all the rest.
trips = trips %>%
select(-tip_percent)
# ______
# Let's use dplyr to create a new tip_percent column.
trips %>%
select(passenger_count,fare_amount,tip_amount) %>%
mutate(tip_percent = tip_amount / fare_amount)
# Let's see what the new column looks like
trips %>%
select(passenger_count,fare_amount,tip_amount) %>%
mutate(tip_percent = tip_amount / fare_amount) %>%
summary()
# Ok, there's a bit of a weird outlier, but everything else looks good.
# Let's save our linear regression data and remove tip_amount, which we don't need anymore.
linregdata = trips %>%
select(passenger_count,fare_amount,tip_amount) %>%
mutate(tip_percent = tip_amount / fare_amount) %>%
select(-tip_amount)
# Now we'll run the linear regression and see how it looks.
# The formula "tip_percent ~ ." regresses tip_percent on every other column of linregdata.
mod = lm(tip_percent ~ ., data=linregdata)
summary(mod)
# 3. ORIGIN-DESTINATION MATRIX --------------------------------------------
# One good question for a data set like this is: what 'trip patterns' tend to be most common? One answer to this question is an origin-destination (OD) matrix, in which the ij-th entry counts the number of trips from origin i to destination j. Let's explore how to create an OD matrix using functions from tidyr, a great companion to dplyr for reshaping data sets.
### 3. ORIGIN-DESTINATION MATRIX ###
### ----------------------------- ###
# One good question for a data set like this is: what 'trip patterns' tend to be most common? One answer to this question is an origin-destination (O-D) matrix, in which the ij-th entry counts the number of trips from origin i to destination j. Let's explore how to create an O-D matrix using functions from tidyr, a great companion to dplyr for reshaping data sets.
# First, we'll create an OD matrix on the district-number level. We'll use the pdistrict and ddistrict columns. Let's take a look:
trips %>% select(pdistrict, ddistrict)
# Each of these columns is an integer ID that labels the analysis district in which the trip started (pdistrict) or ended (ddistrict). Looks like we've got some NAs; how many? We can find out using our old friends filter() and summarise(). Note the use of the 'or' operator | in the filter call.
trips %>% select(pdistrict, ddistrict) %>%
filter(is.na(pdistrict) | is.na(ddistrict)) %>%
summarise(n = n())
# So, there are about 16K NA missing data rows out of almost 500K observations; not too bad. We'll need to filter them out later.
# Ok, now we're ready to make that matrix. The first tool we'll want is the count() function, which is a convenient shortcut for situations where you would use groupby() and n() to count objects by category. To illustrate, note that we could rewrite the last block of code using count:
trips %>% select(pdistrict, ddistrict) %>%
filter(is.na(pdistrict) | is.na(ddistrict)) %>%
count()
# For a more interesting usage, let's look at how many trips there were for each O-D pair
trips %>% count(pdistrict,ddistrict)
# Not bad, but let's filter out those NAs. If you know you want to filter out some data, it's usually safest to do this first, so I'll put this at the beginning of the chain.
trips %>%
filter(!is.na(pdistrict),
!is.na(ddistrict)) %>%
count(pdistrict, ddistrict)
# Our data is in 'long' or 'tidy' format, where each possible value of pdistrict and ddistrict has its own row. A row in this table is a count of the number of trips from pdistrict to ddistrict. This is great in many applications because there are few columns, filtering is easy, etc. But we wanted a matrix! What do we want to do to make this data matrix-shaped?
# Right--we want to generate a separate column for each value of ddistrict, and we want the counts to follow along. This is a version of 'wide format'. The tidyr package supplies two verbs, spread() and gather(), for converting between wide and long format. Since we need to make our data wider, let's talk about spread().
# -------
library(tidyr)
trips %>%
filter(!is.na(pdistrict),
!is.na(ddistrict)) %>%
count(pdistrict, ddistrict) %>%
spread(key = ddistrict, value = n)
# Nice -- we're matrix-shaped, just like we wanted. Each row corresponds to an origin, each column to a destination, and the corresponding entry is the number of trips between them.
# Note that spread() filled in NAs whenever it couldn't find a pdistrict-ddistrict pair. That's often correct behavior, but in this context, a missing pair just means that there were zero trips from that pdistrict to that ddistrict. So, we should fill our matrix with 0s instead of NAs. spread() gives us the fill parameter to accomplish just that.
trips %>%
filter(!is.na(pdistrict),
!is.na(ddistrict)) %>%
count(pdistrict, ddistrict) %>%
spread(key = ddistrict,value = n, fill = 0)
# This is a good start, but we have a data overload problem: there's just too much here to handle! It would be more interpretable if we could group by borough instead of by individual district. To do that, we'll need to add two columns for the pickup borough and dropoff borough. This information is coded in pdistrict and ddistrict, but we need to access it by referring to another table. Let's do that using joins.
# --------------------------------------------------------------------------
# First, let's load in the table that maps district codes to boroughs:
areas = read.csv('area_info.csv', stringsAsFactors=F) %>% tbl_df
# What do we have here?
areas
# Each row maps a district id number to a borough, just as we'd expect.
# Ok, now let's grab just the columns we need from trips:
trips %>%
select(pdistrict, ddistrict)
# We'd like to join with the areas table, but we have a problem: should we join by pdistrict or ddistrict? One good answer is 'both' -- but how should that work? We'll solve this problem by combining pdistrict and ddistrict into a single column, and then joining on the new column. To do so, we'll use gather() from tidyr, which is the inverse of spread(). First, since we are about to combine the two columns, we need a unique trip_id to keep track of which pdistricts and ddistricts should go together. An easy way to make a unique ID is the row_number() function, which does exactly what you think it does (on ungrouped data).
trips %>%
select(pdistrict, ddistrict) %>%
mutate(trip_id = row_number())
# Now we're ready to use gather(). We'd like to get a new df where each row is either a pickup or a dropoff district, the first column tells us whether the row is a pickup or a dropoff, the second tells us the trip_id, and the third gives the district number.
trips %>%
select(pdistrict, ddistrict) %>%
mutate(trip_id = row_number()) %>%
gather(key = p_or_d, value = district, pdistrict, ddistrict)
# Nice, this is what we wanted. What did we do with that gather() call?
# - "Create a key (label) new column and call it p_or_d"
# - "Create a new values column and call it district"
# - "Take the column NAMES of pdistrict and ddistrict and gather them into the new p_or_d column"
# - "Take the corresponding column VALUES of pdistrict and ddistrict" and gather them into the new district column"
# So, what does our new data frame look like? We can see what's going on a bit better if we sort the data on trip_id:
trips %>%
select(pdistrict, ddistrict) %>%
mutate(trip_id = row_number()) %>%
gather(key = p_or_d, value = district, pdistrict, ddistrict) %>%
arrange(trip_id)
# Each trip has a row corresponding to pickup and another corresponding to dropoff.
# Now have just one column with district IDs in it, so we are ready to join. We'll use dplyr's left_join(), and we'll tell it that the district column of trips is supposed to match the id column of areas.
trips %>%
select(pdistrict, ddistrict) %>%
mutate(trip_id = row_number()) %>%
gather(key = p_or_d, value = district, pdistrict, ddistrict) %>%
left_join(areas, by = c('district' = 'id'))
# Looks like we got a bunch of NAs, which makes sense, since we had a bunch of NAs in the district column. That's ok for now. If we wanted to drop all entries with NAs in either trips or areas, we could use an inner_join instead.
# So we've done our join, and we want to get our data back into a more familiar
# format. First, let's take out the trash: we don't need district anymore, so let's drop that column using our old friend select():
trips %>%
select(pdistrict, ddistrict) %>%
mutate(trip_id = row_number()) %>%
gather(key = p_or_d, value = district, pdistrict, ddistrict) %>%
left_join(areas, by = c('district' = 'id')) %>%
select(-district)
# Now we're ready to spread(). We want to use the values of p_or_d to code two new columns, with values that come from borough.
# Since we have the trip_id column, spread() knows to keep those entries together -- that's why we needed it in the first place.
trips %>%
select(pdistrict, ddistrict) %>%
mutate(trip_id = row_number()) %>%
gather(key = p_or_d, value = district, pdistrict, ddistrict) %>%
left_join(areas, by = c('district' = 'id')) %>%
select(-district) %>%
spread(key = p_or_d, value = borough)
# How to read the spread() call:
# - "Create new columns labelled with the values of p_or_d."
# - "In those new columns, put as values the values of the borough column."
# - "Don't do anything with trip_id."
# Since trip_id has to match, we ensure that pdistrict and ddistrict will line up like they should.
# Great, now we are ready to count again. However, our column names are out of date (they're not districts anymore), so let's rename them using an intuitive function:
trips %>%
select(pdistrict, ddistrict) %>%
mutate(trip_id = row_number()) %>%
gather(key = p_or_d, value = district, pdistrict, ddistrict) %>%
left_join(areas, by = c('district' = 'id')) %>%
select(-district) %>%
spread(key = p_or_d, value = borough) %>%
rename(pborough = pdistrict, dborough = ddistrict)
# Just like last time, our final steps are to count() and spread() again.
trips %>%
select(pdistrict, ddistrict) %>%
mutate(trip_id = row_number()) %>%
gather(key = p_or_d, value = district, pdistrict, ddistrict) %>%
left_join(areas, by = c('district' = 'id')) %>%
select(-district) %>%
spread(key = p_or_d, value = borough) %>%
rename(pborough = pdistrict, dborough = ddistrict) %>%
count(pborough, dborough)
# Finally, we can get this into 'matrix form' by spreading again. We want dborough to be the columns, and the values to be n.
# Note that we still have some NAs. We could filter them out using familiar methods, or we could use the 'fill' parameter of spread to write 'Unknown' there instead:
trips %>%
select(pdistrict, ddistrict) %>%
mutate(trip_id = row_number()) %>%
gather(key = p_or_d, value = district, pdistrict, ddistrict) %>%
left_join(areas, by = c('district' = 'id')) %>%
select(-district) %>%
spread(key = p_or_d, value = borough) %>%
rename(pborough = pdistrict, dborough = ddistrict) %>%
count(pborough, dborough) %>%
spread(key = dborough, value = n, fill = 0)
# Wait, we got an error! What happened?
# Right, we have some NAs in here, and R isn't letting us use NA for a column name. Just like last time, the NAs are coming from spread() (the first call), and just like last time, we can use the fill parameter to do something else with them. I am going to call them 'Unknown':
trips %>%
select(pdistrict, ddistrict) %>%
mutate(trip_id = row_number()) %>%
gather(key = p_or_d, value = district, pdistrict, ddistrict) %>%
left_join(areas, by = c('district' = 'id')) %>%
select(-district) %>%
spread(key = p_or_d, value = borough, fill = 'Unknown') %>%
rename(pborough = pdistrict, dborough = ddistrict) %>%
count(pborough, dborough) %>%
spread(key = dborough, value = n, fill = 0)
# That's what we wanted! Again, each row corresponds to an origin and each column to a destination. For the sake of visualization, we'll save this as an
# object m:
m = trips %>%
select(pdistrict, ddistrict) %>%
mutate(trip_id = row_number()) %>%
gather(key = p_or_d, value = district, pdistrict, ddistrict) %>%
left_join(areas, by = c('district' = 'id')) %>%
select(-district) %>%
spread(key = p_or_d, value = borough, fill = 'Unknown') %>%
rename(pborough = pdistrict, dborough = ddistrict) %>%
count(pborough, dborough) %>%
spread(key = dborough, value = n, fill = 0)
# And then we'll use the code below to draw a heatmap.
# Drop the origin label column and coerce the counts to a numeric matrix.
m = m %>% select(-pborough) %>% data.matrix
# NOTE(review): this assumes rows and columns list the boroughs in the same
# order -- count() and spread() both sort alphabetically, so this should hold,
# but verify if the upstream chain changes.
rownames(m) = colnames(m)
# symm = TRUE treats m as a symmetric-style OD matrix (same labels both ways);
# scale = 'row' standardizes each row before coloring, so colors compare
# destinations within each origin.
heatmap(m, symm = TRUE, scale = 'row')
# Each row corresponds to an origin, and each column to a destination. We see that
# most trips tend to stay within the same borough, but that almost all
# have a high likelihood to end in Manhattan.
|
\name{radialtext}
\alias{radialtext}
\title{Display text in a radial line}
\description{
Displays a string in a radial line, rotating it to flow in the radial
direction and optionally scaling each letter's size according to its
distance from the center.
}
\usage{
radialtext(x, center=c(0,0), start=NA, middle=1, end=NA, angle=0,
deg=NA, expand=0, stretch=1, nice=TRUE, cex=NA, ...)
}
\arguments{
\item{x}{A character string.}
\item{center}{The center of the circular area in x/y user units.}
\item{start}{The starting distance of the string from the center in
x/y user units.}
\item{middle}{The middle distance of the string from the center in
x/y user units.}
\item{end}{The ending distance of the string from the center in
x/y user units.}
\item{angle}{The angular position of the string in radians.}
\item{deg}{The angular position of the string in degrees
(takes precedence if not NA).}
\item{expand}{Size expansion factor for characters, used only if
\samp{start} specified.}
\item{stretch}{How much to stretch the string for appearance, 1 for none.}
\item{nice}{TRUE to auto-flip text to keep it upright, FALSE to let
it be upside down.}
\item{cex}{The overall character expansion factor, NA for par("cex").}
\item{...}{Additional arguments passed to \samp{text}.}
}
\value{nil}
\details{
This may not work on all devices, as not all graphic devices can rotate text to
arbitrary angles. The output looks best on a Postscript or similar device that can
rotate text without distortion. Rotated text often looks very ragged on small bitmaps.
If the user passes a value for \samp{start}, this will override a value for
\samp{middle} or \samp{end}. Likewise, a value for \samp{end} will override a
value for \samp{middle}. Also, a value for \samp{deg} overrides any value passed
to \samp{angle}. If \samp{expand} is 0, all characters will be the same size,
while a value of 1 will scale characters so that one that is twice
as far from the center will be twice as large. Negative values are permitted too,
but \samp{expand} is only used if \samp{start} was specified.
}
\author{Ted Toal}
\seealso{\link{text}, \link{arctext}}
\examples{
plot(0, xlim=c(1,5), ylim=c(1,5), main="Test of radialtext",
xlab="", ylab="", type="n")
points(3, 3, pch=20)
radialtext("uncooked spaghetti", center=c(3,3),
col="blue")
radialtext("uncooked spaghetti", center=c(3,3),
start=1.2, angle=pi/4, cex=0.8)
radialtext("uncooked spaghetti", center=c(3,3),
middle=1.2, angle=pi/4+0.1, cex=0.8)
radialtext("uncooked spaghetti", center=c(3,3),
end=1.2, angle=pi/4+0.2, cex=0.8)
radialtext("uncooked spaghetti", center=c(3,3),
start=0.5, deg=135, cex=0.8, col="green")
radialtext("uncooked spaghetti", center=c(3,3),
start=0.5, deg=145, cex=0.8, stretch=2)
radialtext("uncooked spaghetti", center=c(3,3),
start=0.5, deg=20, expand=0, col="red")
radialtext("uncooked spaghetti", center=c(3,3),
start=0.5, deg=250, expand=0.35)
radialtext("uncooked spaghetti", center=c(3,3),
start=0.75, deg=225, expand=1, col="gold")
radialtext("uncooked spaghetti", center=c(3,3),
start=0.5, deg=325, expand=-0.25, cex=2)
}
\keyword{misc}
|
/man/radialtext.Rd
|
no_license
|
plotrix/plotrix
|
R
| false
| false
| 3,174
|
rd
|
\name{radialtext}
\alias{radialtext}
\title{Display text in a radial line}
\description{
Displays a string in a radial line, rotating it to flow in the radial
direction and optionally scaling each letter's size according to its
distance from the center.
}
\usage{
radialtext(x, center=c(0,0), start=NA, middle=1, end=NA, angle=0,
deg=NA, expand=0, stretch=1, nice=TRUE, cex=NA, ...)
}
\arguments{
\item{x}{A character string.}
\item{center}{The center of the circular area in x/y user units.}
\item{start}{The starting distance of the string from the center in
x/y user units.}
\item{middle}{The middle distance of the string from the center in
x/y user units.}
\item{end}{The ending distance of the string from the center in
x/y user units.}
\item{angle}{The angular position of the string in radians.}
\item{deg}{The angular position of the string in degrees
(takes precedence if not NA).}
\item{expand}{Size expansion factor for characters, used only if
\samp{start} specified.}
\item{stretch}{How much to stretch the string for appearance, 1 for none.}
\item{nice}{TRUE to auto-flip text to keep it upright, FALSE to let
it be upside down.}
\item{cex}{The overall character expansion factor, NA for par("cex").}
\item{...}{Additional arguments passed to \samp{text}.}
}
\value{nil}
\details{
This may not work on all devices, as not all graphic devices can rotate text to
arbitrary angles. The output looks best on a Postscript or similar device that can
rotate text without distortion. Rotated text often looks very ragged on small bitmaps.
If the user passes a value for \samp{start}, this will override a value for
\samp{middle} or \samp{end}. Likewise, a value for \samp{end} will override a
value for \samp{middle}. Also, a value for \samp{deg} overrides any value passed
to \samp{angle}. If \samp{expand} is 0, all characters will be the same size,
while a value of 1 will scale characters so that one that is twice
as far from the center will be twice as large. Negative values are permitted too,
but \samp{expand} is only used if \samp{start} was specified.
}
\author{Ted Toal}
\seealso{\link{text}, \link{arctext}}
\examples{
plot(0, xlim=c(1,5), ylim=c(1,5), main="Test of radialtext",
xlab="", ylab="", type="n")
points(3, 3, pch=20)
radialtext("uncooked spaghetti", center=c(3,3),
col="blue")
radialtext("uncooked spaghetti", center=c(3,3),
start=1.2, angle=pi/4, cex=0.8)
radialtext("uncooked spaghetti", center=c(3,3),
middle=1.2, angle=pi/4+0.1, cex=0.8)
radialtext("uncooked spaghetti", center=c(3,3),
end=1.2, angle=pi/4+0.2, cex=0.8)
radialtext("uncooked spaghetti", center=c(3,3),
start=0.5, deg=135, cex=0.8, col="green")
radialtext("uncooked spaghetti", center=c(3,3),
start=0.5, deg=145, cex=0.8, stretch=2)
radialtext("uncooked spaghetti", center=c(3,3),
start=0.5, deg=20, expand=0, col="red")
radialtext("uncooked spaghetti", center=c(3,3),
start=0.5, deg=250, expand=0.35)
radialtext("uncooked spaghetti", center=c(3,3),
start=0.75, deg=225, expand=1, col="gold")
radialtext("uncooked spaghetti", center=c(3,3),
start=0.5, deg=325, expand=-0.25, cex=2)
}
\keyword{misc}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chspider.batch.annotation.child.R
\name{chspider.batch.annotation.child}
\alias{chspider.batch.annotation.child}
\title{Batch annotation of m/z values against ChemSpider data sources
(e.g. HMDB, ChEMBL, ChEBI, NIAID, SMPDB, MeSH, LipidMAPS, ChemBank, BioCyc)}
\usage{
chspider.batch.annotation.child(mz.val, max.mz.diff, adductname,
datasources = c("KEGG"), tokenstr, maxhits = 30, syssleep = 10,
adduct_table)
}
\description{
Human Metabolome Database', 'ChEMBL', 'ChEBI', 'NIAID','Pesticide Common Names','SMPDB Small Molecule Pathway Database',
MeSH','LipidMAPS','ChemBank','BioCyc'),token=tokenstr)
}
|
/man/chspider.batch.annotation.child.Rd
|
no_license
|
stolltho/xMSannotator
|
R
| false
| true
| 724
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chspider.batch.annotation.child.R
\name{chspider.batch.annotation.child}
\alias{chspider.batch.annotation.child}
\title{Batch annotation of m/z values against ChemSpider data sources
(e.g. HMDB, ChEMBL, ChEBI, NIAID, SMPDB, MeSH, LipidMAPS, ChemBank, BioCyc)}
\usage{
chspider.batch.annotation.child(mz.val, max.mz.diff, adductname,
datasources = c("KEGG"), tokenstr, maxhits = 30, syssleep = 10,
adduct_table)
}
\description{
Human Metabolome Database', 'ChEMBL', 'ChEBI', 'NIAID','Pesticide Common Names','SMPDB Small Molecule Pathway Database',
MeSH','LipidMAPS','ChemBank','BioCyc'),token=tokenstr)
}
|
# Simulate 1000 draws from a two-component Gaussian mixture (means 1 and 7,
# unit sd, mixing weights 0.25/0.75) and recover the parameters with a
# hand-written EM algorithm.
set.seed(123)
tau_1_tru <- 0.25
x <- y <- rep(0,1000)
# Draw each observation: with probability tau_1_tru from N(1, 1) ("heads"),
# otherwise from N(7, 1) ("tails").
for ( i in 1:1000){
if( runif(1) < tau_1_tru ) {
x[i] <- rnorm(1, mean=1)
y[i] <- "heads"
}else {
x[i] <- rnorm(1, mean=7)
y[i] <- "tails"
}
}
# NOTE(review): densityPlot() comes from the 'car' package, which is not
# loaded in this script -- assumes it is attached elsewhere; confirm.
densityPlot(~x, col=as.factor(y))
print( x[1] )
dnorm( x[1], mean=0)
# Initial guesses for the component means and mixing weights.
mu_1 <- 0
mu_2 <- 1
tau_1 <- 0.5
tau_2 <- 0.5
# Run 10 EM iterations (variances are fixed at 1 and never re-estimated).
for ( i in 1:10) {
# E-step: weighted component densities for every point ...
T_1 <- tau_1 * dnorm( x, mu_1)
T_2 <- tau_2 * dnorm( x, mu_2)
# ... normalized into per-point responsibilities.
P_1 <- T_1 / (T_1 + T_2)
P_2 <- T_2 / (T_1 + T_2)
# M-step: update mixing weights and responsibility-weighted means.
tau_1 <- mean(P_1)
tau_2 <- mean(P_2)
mu_1 <- sum( P_1 * x) / sum(P_1)
mu_2 <- sum( P_2 * x) / sum(P_2)
# Track convergence of (mu_1, mu_2, tau_1) per iteration.
print( c(mu_1, mu_2, mean(P_1)) )
}
|
/EM.R
|
no_license
|
rafael-bianchi/MAI-BDA
|
R
| false
| false
| 616
|
r
|
# Simulate 1000 draws from a two-component Gaussian mixture and recover the
# component means and mixing weights with a hand-rolled EM algorithm.
set.seed(123)
true_weight_1 <- 0.25
x <- y <- rep(0, 1000)
# One runif() draw picks the component, one rnorm() draw generates the value;
# the call order per iteration matches the seeded RNG stream exactly.
for (obs_i in seq_len(1000)) {
  coin <- runif(1)
  if (coin < true_weight_1) {
    x[obs_i] <- rnorm(1, mean = 1)
    y[obs_i] <- "heads"
  } else {
    x[obs_i] <- rnorm(1, mean = 7)
    y[obs_i] <- "tails"
  }
}
# Visualize the mixture, colored by the true component label.
densityPlot(~x, col = as.factor(y))
print(x[1])
dnorm(x[1], mean = 0)
# Starting values for the EM parameter estimates.
mu_1 <- 0
mu_2 <- 1
tau_1 <- 0.5
tau_2 <- 0.5
# Ten EM iterations; unit variances are assumed throughout.
for (em_iter in seq_len(10)) {
  # E-step: weighted likelihoods, then per-point responsibilities.
  lik1 <- tau_1 * dnorm(x, mu_1)
  lik2 <- tau_2 * dnorm(x, mu_2)
  resp1 <- lik1 / (lik1 + lik2)
  resp2 <- lik2 / (lik1 + lik2)
  # M-step: refresh the mixing weights and the weighted means.
  tau_1 <- mean(resp1)
  tau_2 <- mean(resp2)
  mu_1 <- sum(resp1 * x) / sum(resp1)
  mu_2 <- sum(resp2 * x) / sum(resp2)
  print(c(mu_1, mu_2, mean(resp1)))
}
|
# IMPLEMENTATION OF A DECISION TREE
# PACKAGES REQUIRED
# -> RPART
# -> RPART.PLOT
# STEPS:
# 1. Import Dataset
# 2. Split Training and Testing Data
# 3. Build Tree
# 4. Acquire Tree Information -> mytree and printcp(mytree)
# 5. Plot Tree
# 6. Prune the Decision Tree
# 7. Again Acquire Tree Information -> mytree and printcp(mytree)
# 8. Again Plot Tree
# 9. Use Test Set for Predicting Results
# Install the packages only when they are missing, instead of
# unconditionally reinstalling them on every run.
if (!requireNamespace("rpart", quietly = TRUE)) install.packages("rpart")
if (!requireNamespace("rpart.plot", quietly = TRUE)) install.packages("rpart.plot")
library(rpart)
library(rpart.plot)
# Getting Data (semicolon-separated export)
my_data <- read.csv("/Users/mabook/Documents/German_state_results_New.csv",sep = ";")
head(my_data)
# Breaking into Training and Testing Sets (~75% / 25%)
# Fix the RNG seed so the random split -- and every downstream result --
# is reproducible across runs.
set.seed(123)
n = nrow(my_data)
split = sample(c(TRUE, FALSE), n, replace=TRUE, prob=c(0.75, 0.25))
TrainingSet = my_data[split, ]
TestingSet = my_data[!split, ]
View(TrainingSet)
View(TestingSet)
# Using rpart Function for Making the Tree; 'Result' is the target, the
# remaining columns are predictors. (The commented-out call shows an
# alternative specification on the full data set.)
#mytree <- rpart(Result ~ Wealth + Biology + History + Litrature + State + City , data = my_data, method = "class", split = "information gain" )
mytree <- rpart(Result ~ Mathematics + Biology + History + Litrature + State + City + Wealth , data = TrainingSet)
mytree
# Plotting the Tree
rpart.plot(mytree, extra = 4)
# Printing Complexity Parameter
printcp(mytree)
# Plotting Complexity Parameter
plotcp(mytree)
# Measuring Importance of Variables
mytree$variable.importance
# Pruning the tree (cp = 0.21) to Reduce Overfitting
mytree <- prune(mytree, cp = 0.21)
rpart.plot(mytree, extra = 4)
mytree
printcp(mytree)
# Predicting class labels and class probabilities on the hold-out set
TestingSet$PassClass <- predict(mytree, newdata = TestingSet, type = "class")
TestingSet$Prob <- predict(mytree, newdata = TestingSet, type = "prob")
TestingSet
|
/RScripts/DecisionTree/DecisionTree.R
|
no_license
|
sbalci/Statistical-Methods-and-Machine-Learning-in-R
|
R
| false
| false
| 1,673
|
r
|
# IMPLEMENTATION OF A DECISION TREE
# PACKAGES REQUIRED
# -> RPART
# -> RPART.PLOT
# STEPS:
# 1. Import Dataset
# 2. Split Training and Testing Data
# 3. Build Tree
# 4. Acquire Tree Information -> mytree and printcp(mytree)
# 5. Plot Tree
# 6. Prune the Decision Tree
# 7. Again Acquire Tree Information -> mytree and printcp(mytree)
# 8. Again Plot Tree
# 9. Use Test Set for Predicting Results
# NOTE(review): install.packages() runs unconditionally on every execution;
# consider guarding with requireNamespace().
install.packages("rpart")
install.packages("rpart.plot")
library(rpart)
library(rpart.plot)
# Getting Data (semicolon-separated export)
my_data<- read.csv("/Users/mabook/Documents/German_state_results_New.csv",sep = ";")
head(my_data)
# Breaking into Training and Testing Sets (~75% / 25% random split).
# NOTE(review): no set.seed(), so the split differs on every run.
n = nrow(my_data)
split = sample(c(TRUE, FALSE), n, replace=TRUE, prob=c(0.75, 0.25))
TrainingSet = my_data[split, ]
TestingSet = my_data[!split, ]
View(TrainingSet)
View(TestingSet)
# Using rpart Function for Making the Tree; 'Result' is the target and the
# listed columns are predictors.
#mytree <- rpart(Result ~ Wealth + Biology + History + Litrature + State + City , data = my_data, method = "class", split = "information gain" )
mytree <- rpart(Result ~ Mathematics + Biology + History + Litrature + State + City + Wealth , data = TrainingSet)
mytree
# Plotting the Tree
rpart.plot(mytree, extra = 4)
# Printing Complexity Parameter
printcp(mytree)
# Plotting Complexity Parameter
plotcp(mytree)
# Measuring Importance of Variables
mytree$variable.importance
# Pruning the tree (cp = 0.21) to Reduce Overfitting
mytree <- prune(mytree, cp = 0.21)
rpart.plot(mytree, extra = 4)
mytree
printcp(mytree)
# Predicting class labels and class probabilities on the hold-out set
TestingSet$PassClass <- predict(mytree, newdata = TestingSet, type = "class")
TestingSet$Prob <- predict(mytree, newdata = TestingSet, type = "prob")
TestingSet
|
# Exploratory look at the Scott network data set: load node and edge lists,
# inspect them, and draw a basic igraph plot.
library(visNetwork)
library(RColorBrewer)
library(igraph)
library(dplyr)
# Read the node and edge tables (as.is = T keeps strings as characters).
nodes <- read.csv("scott-nodes.csv", header = T, as.is = T)
links <- read.csv("scott-edges-no-id.csv", header = T, as.is = T)
# Quick inspection from several angles.
head(nodes)
head(links)
str(nodes)
str(links)
glimpse(nodes)
glimpse(links)
# Sanity checks: count distinct node ids, list attribute levels, and see
# whether any source/target pair appears more than once.
length(unique(nodes$id))
unique(nodes$Attribute)
nrow(links)
nrow(unique(links[,c("source", "target")]))
# Build the graph from the edge list plus node metadata, then plot it.
net1 <- graph_from_data_frame(d = links, vertices = nodes)
plot(net1)
|
/scott_data_igraph.R
|
no_license
|
lexicondevil12/networkGraphPractice
|
R
| false
| false
| 457
|
r
|
# Load the Scott node/edge tables, inspect them, and draw a quick
# force-directed igraph plot.
library(visNetwork)
library(RColorBrewer)
library(igraph)
library(dplyr)

node_tbl <- read.csv("scott-nodes.csv", header = TRUE, as.is = TRUE)
edge_tbl <- read.csv("scott-edges-no-id.csv", header = TRUE, as.is = TRUE)

# Inspect both tables from several angles.
head(node_tbl)
head(edge_tbl)
str(node_tbl)
str(edge_tbl)
glimpse(node_tbl)
glimpse(edge_tbl)

# Sanity checks: distinct node ids, attribute levels, duplicate edges.
length(unique(node_tbl$id))
unique(node_tbl$Attribute)
nrow(edge_tbl)
nrow(unique(edge_tbl[, c("source", "target")]))

# Build and plot the network.
net1 <- graph_from_data_frame(d = edge_tbl, vertices = node_tbl)
plot(net1)
|
# Query aggregated AWS (automatic weather station) observations for one
# station and return ALL available variables as a wide data frame: one Date
# column plus one column per variable/height/statistic combination.
#
# tstep   : aggregation step; 'hourly' reads the hourly table, everything
#           else ('daily', 'pentad', 'dekadal', 'monthly') reads the daily
#           table, with sub-monthly/monthly steps aggregated here from daily.
# net_aws : station key encoded as "<network_code>_<aws_id>".
# start, end : requested date range (converted via getAggrDateRange).
# aws_dir : root directory containing the AWS_DATA configuration tree.
#
# On failure or no data, returns a 1-row data.frame with a 'status' column.
getAggrAWSData_allVars <- function(tstep, net_aws, start, end, aws_dir){
tz <- Sys.getenv("TZ")
origin <- "1970-01-01"
net_aws <- strsplit(net_aws, "_")[[1]]
out <- data.frame(Date = NA, status = "no.data")
######
# Database credentials live in a serialized connection object on disk.
adt_args <- readRDS(file.path(aws_dir, "AWS_DATA", "AUTH", "adt.con"))
conn <- try(connect.database(adt_args$connection,
RMySQL::MySQL()), silent = TRUE)
if(inherits(conn, "try-error")){
out$status <- 'unable to connect to database'
return(out)
}
######
# Convert the requested range to numeric epoch bounds for the SQL query.
datyRg <- getAggrDateRange(tstep, start, end, tz)
start <- as.numeric(datyRg[1])
end <- as.numeric(datyRg[2])
# Pick the source table, the variable-metadata JSON, and the QC flag column.
if(tstep == 'hourly'){
data_table <- 'aws_hourly'
pars_file <- 'AWS_dataHourVarObj.json'
qc_name <- 'spatial_check'
}else{
data_table <- 'aws_daily'
pars_file <- 'AWS_dataDayVarObj.json'
qc_name <- 'qc_output'
}
######
query <- paste0("SELECT * FROM ", data_table, " WHERE (",
"network=", net_aws[1], " AND id='", net_aws[2], "') AND (",
"obs_time >= ", start, " AND obs_time <= ", end, ")")
qres <- DBI::dbGetQuery(conn, query)
DBI::dbDisconnect(conn)
if(nrow(qres) == 0) return(out)
# Blank out values whose QC column is non-NA -- presumably a non-NA flag
# marks a failed check (TODO confirm flag semantics).
qres[!is.na(qres[, qc_name]), 'value'] <- NA
qres_var_hgt <- paste0(qres$var_code, "_", qres$height)
######
# Keep only variable/height pairs declared in the JSON metadata file.
parsFile <- file.path(aws_dir, "AWS_DATA", "JSON", pars_file)
pars_info <- jsonlite::read_json(parsFile)
pars_info <- pars_info$variables
info_var_hgt <- sapply(pars_info, function(v) paste0(v$var_code, '_', v$height))
qres <- qres[qres_var_hgt %in% info_var_hgt, , drop = FALSE]
if(nrow(qres) == 0) return(out)
######
# Reshape long -> wide: rows = obs_time, columns = var/height/stat triples.
qres$all_vars <- paste0(qres$var_code, "_", qres$height, "_", qres$stat_code)
don <- reshape2::acast(qres, obs_time~all_vars, mean, value.var = 'value')
# acast's mean over an empty cell yields NaN; normalize to NA.
don[is.nan(don)] <- NA
d_row <- as.integer(dimnames(don)[[1]])
d_col <- strsplit(dimnames(don)[[2]], "_")
# stat_code 1..4 maps positionally onto these labels.
stat_name <- c('Ave', 'Min', 'Max', 'Tot')
# Build human-readable column names like "<var-name>_<height>m_<stat>".
col_name <- sapply(d_col, function(x){
var_hgt <- paste0(x[1], '_', x[2])
ix <- which(info_var_hgt == var_hgt)
vvr <- pars_info[[ix]]
vvr <- paste0(vvr$var_name, "_", vvr$height, "m")
vvr <- gsub(" ", "-", vvr)
vst <- stat_name[as.integer(x[3])]
paste0(vvr, "_", vst)
})
######
# Format output timestamps: YYYYMMDDHH for hourly, YYYYMMDD otherwise.
if(tstep == "hourly"){
daty <- as.POSIXct(d_row, origin = origin, tz = tz)
odaty <- format(daty, "%Y%m%d%H")
}else{
daty <- as.Date(d_row, origin = origin)
odaty <- format(daty, "%Y%m%d")
}
###########
# For pentad/dekadal/monthly steps, aggregate the daily values here, keeping
# a period only when enough days are available (per-variable minimum
# fractions read from Min_Frac_<Step>.json).
if(tstep %in% c("pentad", "dekadal", "monthly")){
mfracFile <- paste0("Min_Frac_", tools::toTitleCase(tstep), ".json")
mfracFile <- file.path(aws_dir, "AWS_DATA", "JSON", mfracFile)
minFrac <- jsonlite::read_json(mfracFile)
yymm <- format(daty, "%Y%m")
if(tstep == "pentad"){
# Six 5-day pentads per month (last one absorbs days 26-31).
jour <- as.numeric(format(daty, "%d"))
jour <- cut(jour, c(1, 5, 10, 15, 20, 25, 31),
labels = FALSE, include.lowest = TRUE)
index <- split(seq_along(daty), paste0(yymm, jour))
nbday_fun <- nb_day_of_pentad
}
if(tstep == "dekadal"){
# Three dekads per month (1-10, 11-20, 21-end).
jour <- as.numeric(format(daty, "%d"))
jour <- cut(jour, c(1, 10, 20, 31),
labels = FALSE, include.lowest = TRUE)
index <- split(seq_along(daty), paste0(yymm, jour))
nbday_fun <- nb_day_of_dekad
}
if(tstep == "monthly"){
index <- split(seq_along(daty), yymm)
nbday_fun <- nb_day_of_month
}
odaty <- names(index)
# Fraction of days actually observed in each period vs its nominal length.
pmon <- lapply(index, function(x) as.numeric(unique(format(daty[x], "%m"))))
nbd0 <- sapply(seq_along(pmon), function(j) nbday_fun(names(pmon[j])))
nobs <- sapply(index, length)
avail_frac <- nobs/nbd0
# Aggregate each column with the function implied by its stat_code
# (1=mean, 2=min, 3=max, 4=sum); periods below the minimum fraction
# (keyed by var_code) stay NA.
tmp <- lapply(seq_along(d_col), function(j){
ina <- avail_frac >= minFrac[[d_col[[j]][1]]]
fun_agg <- switch(d_col[[j]][3],
"4" = sum, "1" = mean,
"2" = min, "3" = max)
xout <- rep(NA, length(index))
xout[ina] <- sapply(index[ina], function(ix){
x <- don[ix, j]
if(all(is.na(x))) return(NA)
fun_agg(x, na.rm = TRUE)
})
xout
})
don <- do.call(cbind, tmp)
}
# Drop periods where every variable is missing, then assemble the output.
ina <- rowSums(!is.na(don)) > 0
don <- don[ina, , drop = FALSE]
odaty <- odaty[ina]
out <- data.frame(odaty, don)
names(out) <- c('Date', col_name)
return(out)
}
##########
# Query aggregated AWS observations for ONE station and ONE variable/height
# pair, returning a list(date, data, status) where 'data' has one column per
# statistic (stat_code) present for that variable.
#
# tstep   : 'hourly' reads the hourly table; all other steps read the daily
#           table, with pentad/dekadal/monthly aggregated here from daily.
# net_aws : "<network_code>_<aws_id>" station key.
# var_hgt : "<var_code>_<height>" variable key.
# start, end : requested date range (converted via getAggrDateRange).
# aws_dir : root directory containing the AWS_DATA configuration tree.
getAggrAWSData_oneVar <- function(tstep, net_aws, var_hgt, start, end, aws_dir){
tz <- Sys.getenv("TZ")
origin <- "1970-01-01"
out <- list(date = NULL, data = NULL, status = "no-data")
######
# Database credentials live in a serialized connection object on disk.
adt_args <- readRDS(file.path(aws_dir, "AWS_DATA", "AUTH", "adt.con"))
conn <- try(connect.database(adt_args$connection,
RMySQL::MySQL()), silent = TRUE)
if(inherits(conn, "try-error")){
out$status <- 'failed-connection'
return(out)
}
######
# Convert the requested range to numeric epoch bounds for the SQL query.
datyRg <- getAggrDateRange(tstep, start, end, tz)
start <- as.numeric(datyRg[1])
end <- as.numeric(datyRg[2])
if(tstep == 'hourly'){
data_table <- 'aws_hourly'
qc_name <- 'spatial_check'
}else{
data_table <- 'aws_daily'
qc_name <- 'qc_output'
}
######
net_aws <- strsplit(net_aws, "_")[[1]]
var_hgt <- strsplit(var_hgt, "_")[[1]]
######
# var_code "5" fetches every statistic; all other variables are limited to
# stat codes 1-3 (mean/min/max) -- presumably code 4 (sum) only applies to
# variable 5 (TODO confirm the var_code mapping).
if(var_hgt[1] == "5"){
query <- paste0("SELECT obs_time, stat_code, value, ", qc_name, " FROM ", data_table,
" WHERE (", "network=", net_aws[1], " AND id='", net_aws[2],
"' AND height=", var_hgt[2], " AND var_code=", var_hgt[1],
") AND (obs_time >= ", start, " AND obs_time <= ", end, ")")
}else{
query <- paste0("SELECT obs_time, stat_code, value, ", qc_name, " FROM ", data_table,
" WHERE (", "network=", net_aws[1], " AND id='", net_aws[2],
"' AND height=", var_hgt[2], " AND var_code=", var_hgt[1],
" AND stat_code IN (1, 2, 3)) AND (",
"obs_time >= ", start, " AND obs_time <= ", end, ")")
}
qres <- DBI::dbGetQuery(conn, query)
DBI::dbDisconnect(conn)
if(nrow(qres) == 0) return(out)
# Blank out values whose QC column is non-NA -- presumably a non-NA flag
# marks a failed check (TODO confirm flag semantics).
qres[!is.na(qres[, qc_name]), 'value'] <- NA
# Reshape long -> wide: rows = obs_time, columns = stat_code.
don <- reshape2::acast(qres, obs_time~stat_code, mean, value.var = 'value')
d_row <- as.integer(dimnames(don)[[1]])
d_col <- dimnames(don)[[2]]
# Format output timestamps: YYYYMMDDHH for hourly, YYYYMMDD otherwise.
if(tstep == "hourly"){
daty <- as.POSIXct(d_row, origin = origin, tz = tz)
odaty <- format(daty, "%Y%m%d%H")
}else{
daty <- as.Date(d_row, origin = origin)
odaty <- format(daty, "%Y%m%d")
}
###########
# For pentad/dekadal/monthly steps, aggregate the daily values here; a
# period is kept only when the fraction of observed days meets the
# per-variable minimum from Min_Frac_<Step>.json.
if(tstep %in% c("pentad", "dekadal", "monthly")){
mfracFile <- paste0("Min_Frac_", tools::toTitleCase(tstep), ".json")
mfracFile <- file.path(aws_dir, "AWS_DATA", "JSON", mfracFile)
minFrac <- jsonlite::read_json(mfracFile)
yymm <- format(daty, "%Y%m")
if(tstep == "pentad"){
# Six 5-day pentads per month (last one absorbs days 26-31).
jour <- as.numeric(format(daty, "%d"))
jour <- cut(jour, c(1, 5, 10, 15, 20, 25, 31),
labels = FALSE, include.lowest = TRUE)
index <- split(seq_along(daty), paste0(yymm, jour))
nbday_fun <- nb_day_of_pentad
}
if(tstep == "dekadal"){
# Three dekads per month (1-10, 11-20, 21-end).
jour <- as.numeric(format(daty, "%d"))
jour <- cut(jour, c(1, 10, 20, 31),
labels = FALSE, include.lowest = TRUE)
index <- split(seq_along(daty), paste0(yymm, jour))
nbday_fun <- nb_day_of_dekad
}
if(tstep == "monthly"){
index <- split(seq_along(daty), yymm)
nbday_fun <- nb_day_of_month
}
odaty <- names(index)
# Fraction of days actually observed in each period vs its nominal length.
pmon <- lapply(index, function(x) as.numeric(unique(format(daty[x], "%m"))))
nbd0 <- sapply(seq_along(pmon), function(j) nbday_fun(names(pmon[j])))
nobs <- sapply(index, length)
avail_frac <- nobs/nbd0
ina <- avail_frac >= minFrac[[var_hgt[1]]]
# Pre-fill the output with NA rows, one per period.
xout <- don[1, , drop = FALSE]
xout[] <- NA
xout <- xout[rep(1, length(index)), , drop = FALSE]
# Aggregate each statistic column with the function implied by its
# stat_code (1=mean, 2=min, 3=max, 4=sum).
tmp <- lapply(index[ina], function(ix){
x <- don[ix, , drop = FALSE]
agg <- lapply(d_col, function(n){
fun <- switch(n, "4" = sum, "1" = mean,
"2" = min, "3" = max)
if(all(is.na(x[, n]))) return(NA)
fun(x[, n], na.rm = TRUE)
})
agg <- do.call(cbind, agg)
return(agg)
})
xout[ina, ] <- do.call(rbind, tmp)
don <- xout
}
# Assemble the final list: dates, one column per stat_code, and a status.
don <- data.frame(don)
names(don) <- d_col
rownames(don) <- NULL
out <- list(date = odaty, data = don, status = "ok")
return(out)
}
##########
# Query aggregated observations for a selection of AWS stations, for one
# variable/height and one statistic, over [start, end].
#
# tstep   : 'hourly'/'daily' are read directly from the database;
#           'pentad', 'dekadal' and 'monthly' are aggregated here from daily.
# net_aws : character vector of "<network_code>_<aws_id>" identifiers.
# var_hgt : "<var_code>_<height>" string; pars: 'Ave', 'Min', 'Max' or 'Tot'.
# aws_dir : root directory holding AWS_DATA (JSON config, DB credentials).
#
# Returns a list: variable/statistic metadata, net_aws (stations kept),
# date, data (time x station matrix) and status
# ('ok', 'no-data' or 'failed-connection').
getAggrAWSData_awsSel <- function(tstep, net_aws, var_hgt, pars,
                                   start, end, aws_dir)
{
    tz <- Sys.getenv("TZ")
    origin <- "1970-01-01"
    # Resolve the requested network/id pairs against the station metadata.
    parsFile <- file.path(aws_dir, "AWS_DATA", "JSON", "aws_parameters.json")
    awsPars <- jsonlite::read_json(parsFile)
    net_aws <- strsplit(net_aws, "_")
    var_hgt <- strsplit(var_hgt, "_")[[1]]
    net_code <- sapply(awsPars, "[[", "network_code")
    aws_id <- sapply(awsPars, "[[", "id")
    istn <- lapply(net_aws, function(a) which(net_code == a[1] & aws_id == a[2]))
    nz <- sapply(istn, length) > 0
    awsPars <- awsPars[unlist(istn[nz])]
    sel_net <- sapply(awsPars, '[[', 'network_code')
    sel_id <- sapply(awsPars, '[[', 'id')
    sel_aws <- paste0(sel_net, '_', sel_id)
    # Display name of the variable, taken from the first selected station.
    var_name <- awsPars[[1]]$PARS_Info[[var_hgt[1]]][[1]]$name
    # Map the statistic label to its database stat_code (1..4).
    stat_code <- (1:4)[c('Ave', 'Min', 'Max', 'Tot') %in% pars]
    par_name <- switch(pars, "Ave" = "Average", "Tot" = "Total",
                       "Min" = "Minimum", "Max" = "Maximum")
    out <- list(var_name = var_name, stat_name = par_name,
                var_code = var_hgt[1], height = var_hgt[2],
                stat_code = stat_code, net_aws = NULL,
                date = NULL, data = NULL, status = "no-data")
    ######
    adt_args <- readRDS(file.path(aws_dir, "AWS_DATA", "AUTH", "adt.con"))
    conn <- try(connect.database(adt_args$connection,
                RMySQL::MySQL()), silent = TRUE)
    if(inherits(conn, "try-error")){
        out$status <- 'failed-connection'
        return(out)
    }
    ######
    # Numeric epoch bounds for the SQL range filter.
    datyRg <- getAggrDateRange(tstep, start, end, tz)
    start <- as.numeric(datyRg[1])
    end <- as.numeric(datyRg[2])
    # Hourly has its own table/QC column; other steps start from daily data.
    if(tstep == 'hourly'){
        data_table <- 'aws_hourly'
        qc_name <- 'spatial_check'
    }else{
        data_table <- 'aws_daily'
        qc_name <- 'qc_output'
    }
    ######
    # "(network, 'id')" tuples for the SQL IN clause.
    # NOTE(review): query built by string concatenation; values come from
    # local JSON metadata, but parameterized queries would be safer.
    all_aws <- paste0("(", sel_net, ", ", "'", sel_id, "'", ")")
    all_aws <- paste0(all_aws, collapse = ", ")
    query <- paste0("SELECT * FROM ", data_table, " WHERE (",
                    "(network, id) IN (", all_aws, ") AND height=", var_hgt[2],
                    " AND var_code=", var_hgt[1], " AND stat_code=", stat_code,
                    ") AND (", "obs_time >= ", start, " AND obs_time <= ", end, ")")
    qres <- DBI::dbGetQuery(conn, query)
    DBI::dbDisconnect(conn)
    if(nrow(qres) == 0){
        out$status <- "no-data"
        return(out)
    }
    # Discard values flagged by QC.
    qres[!is.na(qres[, qc_name]), 'value'] <- NA
    qres$aws <- paste0(qres$network, "_", qres$id)
    # Pivot to a time x station matrix, then reorder columns to the
    # requested station order (missing stations become NA columns).
    don <- reshape2::acast(qres, obs_time~aws, mean, value.var = 'value')
    don[is.nan(don)] <- NA
    isel <- match(sel_aws, dimnames(don)[[2]])
    don <- don[, isel, drop = FALSE]
    dimnames(don)[[2]] <- sel_aws
    d_row <- as.integer(dimnames(don)[[1]])
    if(tstep == "hourly"){
        daty <- as.POSIXct(d_row, origin = origin, tz = tz)
        odaty <- format(daty, "%Y%m%d%H")
    }else{
        daty <- as.Date(d_row, origin = origin)
        odaty <- format(daty, "%Y%m%d")
    }
    ###########
    # Aggregate daily rows into pentad/dekad/month periods, requiring a
    # minimum availability fraction per period (from the JSON config).
    if(tstep %in% c("pentad", "dekadal", "monthly")){
        mfracFile <- paste0("Min_Frac_", tools::toTitleCase(tstep), ".json")
        mfracFile <- file.path(aws_dir, "AWS_DATA", "JSON", mfracFile)
        minFrac <- jsonlite::read_json(mfracFile)
        yymm <- format(daty, "%Y%m")
        if(tstep == "pentad"){
            jour <- as.numeric(format(daty, "%d"))
            # Day of month -> pentad number 1..6.
            jour <- cut(jour, c(1, 5, 10, 15, 20, 25, 31),
                        labels = FALSE, include.lowest = TRUE)
            index <- split(seq_along(daty), paste0(yymm, jour))
            nbday_fun <- nb_day_of_pentad
        }
        if(tstep == "dekadal"){
            jour <- as.numeric(format(daty, "%d"))
            # Day of month -> dekad number 1..3.
            jour <- cut(jour, c(1, 10, 20, 31),
                        labels = FALSE, include.lowest = TRUE)
            index <- split(seq_along(daty), paste0(yymm, jour))
            nbday_fun <- nb_day_of_dekad
        }
        if(tstep == "monthly"){
            index <- split(seq_along(daty), yymm)
            nbday_fun <- nb_day_of_month
        }
        odaty <- names(index)
        # Expected day count per period, for the availability fraction.
        pmon <- lapply(index, function(x) as.numeric(unique(format(daty[x], "%m"))))
        nbd0 <- sapply(seq_along(pmon), function(j) nbday_fun(names(pmon[j])))
        nobs <- sapply(index, length)
        avail_frac <- nobs/nbd0
        # Periods meeting the minimum availability for this variable.
        ina <- avail_frac >= minFrac[[var_hgt[1]]]
        fun_agg <- switch(pars,
                          "Tot" = colSums,
                          "Ave" = colMeans,
                          "Min" = matrixStats::colMins,
                          "Max" = matrixStats::colMaxs)
        # Template output: one all-NA row per period.
        xout <- don[1, , drop = FALSE]
        xout[] <- NA
        xout <- xout[rep(1, length(index)), , drop = FALSE]
        # Bug fix: iterate over the *filtered* periods and keep their
        # expected day counts aligned. Previously nbd0[j] was indexed with
        # j running over index[ina], so whenever a period was excluded the
        # per-station availability check used the wrong period's day count.
        idx_ok <- index[ina]
        nbd_ok <- nbd0[ina]
        tmp <- lapply(seq_along(idx_ok), function(j){
            ix <- idx_ok[[j]]
            x <- don[ix, , drop = FALSE]
            # Per-station availability check within the period.
            nna <- colSums(!is.na(x))/nbd_ok[j] >= minFrac[[var_hgt[1]]]
            x <- fun_agg(x, na.rm = TRUE)
            x[!nna] <- NA
            x
        })
        xout[ina, ] <- do.call(rbind, tmp)
        don <- xout
    }
    # Drop stations with no data at all.
    ina <- colSums(!is.na(don)) == 0
    don <- don[, !ina, drop = FALSE]
    out$net_aws <- dimnames(don)[[2]]
    dimnames(don) <- NULL
    out$date <- odaty
    out$data <- don
    out$status <- "ok"
    return(out)
}
##########
wind2hourly <- function(dates, ws, wd){
    # Vector-average sub-hourly wind observations into hourly values.
    # dates: timestamps as "%Y%m%d%H%M" strings; ws: wind speed;
    # wd: wind direction in degrees (meteorological convention assumed).
    # Returns list(date, ws, wd), one entry per hour present in 'dates'.
    deg2rad <- pi / 180
    # Decompose each observation into u/v components.
    u_comp <- -ws * sin(deg2rad * wd)
    v_comp <- -ws * cos(deg2rad * wd)
    # Group observation indices by hour (first 10 chars = YYYYMMDDHH).
    by_hour <- split(seq_along(dates), substr(dates, 1, 10))
    # Mean of one component over one hour; all-NA hours give NA, not NaN.
    hour_mean <- function(comp, i){
        m <- mean(comp[i], na.rm = TRUE)
        if(is.nan(m)) NA_real_ else m
    }
    u_hr <- vapply(by_hour, function(i) hour_mean(u_comp, i), numeric(1))
    v_hr <- vapply(by_hour, function(i) hour_mean(v_comp, i), numeric(1))
    # Recompose speed and direction from the averaged components; a
    # near-zero speed keeps the raw atan2 angle (no 180-degree shift).
    speed <- sqrt(u_hr^2 + v_hr^2)
    direction <- (atan2(u_hr, v_hr) * 180/pi) + ifelse(speed < 1e-14, 0, 180)
    list(date = names(by_hour),
         ws = as.numeric(round(speed, 2)),
         wd = as.numeric(round(direction, 2)))
}
# Retrieve raw wind speed/direction observations for one AWS and return a
# gap-padded time series plus a data-availability percentage.
#
# net_aws : "<network_code>_<aws_id>" station identifier.
# height  : sensor height (database 'height' column).
# tstep   : 'hourly' (sub-hourly data vector-averaged via wind2hourly)
#           or anything else for raw 15-minute data.
# start/end : bounds as "%Y-%m-%d-%H" (hourly) or "%Y-%m-%d-%H-%M" strings.
# aws_dir : root directory holding AWS_DATA (JSON metadata, DB credentials).
#
# Returns station metadata plus date/ws/wd vectors, 'avail' (percent of
# expected observations present) and a status flag
# ('ok', 'no-data' or 'failed-connection').
getWindData <- function(net_aws, height, tstep, start, end, aws_dir)
{
    tz <- Sys.getenv("TZ")
    origin <- "1970-01-01"
    # Station metadata, used only for the identification fields returned.
    parsFile <- file.path(aws_dir, "AWS_DATA", "JSON", "aws_parameters.json")
    awsPars <- jsonlite::read_json(parsFile)
    net_aws <- strsplit(net_aws, "_")[[1]]
    net_code <- sapply(awsPars, "[[", "network_code")
    aws_id <- sapply(awsPars, "[[", "id")
    istn <- which(net_code == net_aws[1] & aws_id == net_aws[2])
    awsPars <- awsPars[[istn]][c('network_code', 'network', 'id', 'name')]
    frmt <- if(tstep == "hourly") "%Y-%m-%d-%H" else "%Y-%m-%d-%H-%M"
    start <- strptime(start, frmt, tz = tz)
    end <- strptime(end, frmt, tz = tz)
    # Epoch seconds for the SQL range filter.
    start <- as.numeric(start)
    end <- as.numeric(end)
    ######
    adt_args <- readRDS(file.path(aws_dir, "AWS_DATA", "AUTH", "adt.con"))
    conn <- try(connect.database(adt_args$connection,
                RMySQL::MySQL()), silent = TRUE)
    if(inherits(conn, "try-error"))
        return(list(status = 'failed-connection'))
    # var_code 9 = wind direction, 10 = wind speed (per column usage below);
    # stat_code 1 selects the average statistic.
    query <- paste0("SELECT obs_time, var_code, value, limit_check FROM aws_data WHERE (",
                    "network=", net_aws[1], " AND id='", net_aws[2], "' AND height=", height,
                    " AND var_code IN (9, 10) AND stat_code=1) AND (",
                    "obs_time >= ", start, " AND obs_time <= ", end, ")")
    qres <- DBI::dbGetQuery(conn, query)
    DBI::dbDisconnect(conn)
    if(nrow(qres) == 0) return(list(status = 'no-data'))
    # Discard values flagged by the limit check.
    qres[!is.na(qres$limit_check), 'value'] <- NA
    # Pivot to a time x var_code matrix.
    qres <- reshape2::acast(qres, obs_time~var_code, mean, value.var = 'value')
    daty <- as.integer(dimnames(qres)[[1]])
    daty <- as.POSIXct(daty, origin = origin, tz = tz)
    ws <- as.numeric(qres[, '10'])
    wd <- as.numeric(qres[, '9'])
    if(tstep == "hourly"){
        # Vector-average sub-hourly observations into hourly speed/direction.
        wind <- wind2hourly(format(daty, '%Y%m%d%H%M'), ws, wd)
        ws <- wind$ws
        wd <- wind$wd
        dts <- strptime(wind$date, "%Y%m%d%H", tz = tz)
        tstep.seq <- 'hour'
        tstep.out <- 1
    }else{
        dts <- sort(daty)
        tstep.seq <- '15 min'
        tstep.out <- 15
    }
    # Expected number of observations over the observed span.
    daty <- seq(min(dts), max(dts), tstep.seq)
    nb_obs <- length(daty)
    # Detect gaps larger than one time step.
    # NOTE(review): diff() on date-times chooses its own units; comparing to
    # the bare numbers 1/15 assumes hours/minutes respectively -- confirm.
    # NOTE(review): only ONE missing timestamp is inserted per gap, even when
    # the gap spans several steps -- confirm this is intended.
    ddif <- diff(dts)
    idf <- ddif > tstep.out
    if(any(idf)){
        idt <- which(idf)
        # Seconds to add to the last point before each gap.
        addmul <- if(tstep == "hourly") 3600 else tstep.out * 60
        miss.daty <- dts[idt] + addmul
        miss.daty <- format(miss.daty, "%Y%m%d%H%M%S", tz = tz)
        daty1 <- rep(NA, length(dts) + length(miss.daty))
        ws1 <- rep(NA, length(daty1))
        wd1 <- rep(NA, length(daty1))
        # Place the inserted timestamps, then fill the remaining slots with
        # the observed series in order; ws/wd stay NA at inserted positions.
        daty1[idt + seq(length(miss.daty))] <- miss.daty
        ix <- is.na(daty1)
        daty1[ix] <- format(dts, "%Y%m%d%H%M%S", tz = tz)
        ws1[ix] <- ws
        wd1[ix] <- wd
        ws <- ws1
        wd <- wd1
        dts <- strptime(daty1, "%Y%m%d%H%M%S", tz = tz)
    }
    # Percentage of expected observations actually present.
    avail <- round(100 * sum(!is.na(ws)) / nb_obs, 1)
    wind <- list(date = dts, ws = ws, wd = wd)
    out <- list(avail = avail, status = 'ok')
    return(c(awsPars, wind, out))
}
|
/R/app_get_awsdata.R
|
no_license
|
rijaf-iri/mtoadtNMA
|
R
| false
| false
| 18,448
|
r
|
# Retrieve every configured variable for one AWS over [start, end], at the
# requested time step, and return a wide data.frame: Date plus one column
# per variable/height/statistic (labels like "<var-name>_<height>m_<stat>").
#
# tstep   : 'hourly'/'daily' read directly; 'pentad'/'dekadal'/'monthly'
#           are aggregated here from the daily table.
# net_aws : "<network_code>_<aws_id>" station identifier.
# aws_dir : root directory holding AWS_DATA (JSON config, DB credentials).
#
# On failure returns a 1-row data.frame whose 'status' column explains why.
getAggrAWSData_allVars <- function(tstep, net_aws, start, end, aws_dir){
    tz <- Sys.getenv("TZ")
    origin <- "1970-01-01"
    net_aws <- strsplit(net_aws, "_")[[1]]
    out <- data.frame(Date = NA, status = "no.data")
    ######
    adt_args <- readRDS(file.path(aws_dir, "AWS_DATA", "AUTH", "adt.con"))
    conn <- try(connect.database(adt_args$connection,
                RMySQL::MySQL()), silent = TRUE)
    if(inherits(conn, "try-error")){
        out$status <- 'unable to connect to database'
        return(out)
    }
    ######
    # Numeric epoch bounds for the SQL range filter.
    datyRg <- getAggrDateRange(tstep, start, end, tz)
    start <- as.numeric(datyRg[1])
    end <- as.numeric(datyRg[2])
    # Hourly has its own table/config/QC column; other steps use daily data.
    if(tstep == 'hourly'){
        data_table <- 'aws_hourly'
        pars_file <- 'AWS_dataHourVarObj.json'
        qc_name <- 'spatial_check'
    }else{
        data_table <- 'aws_daily'
        pars_file <- 'AWS_dataDayVarObj.json'
        qc_name <- 'qc_output'
    }
    ######
    query <- paste0("SELECT * FROM ", data_table, " WHERE (",
                    "network=", net_aws[1], " AND id='", net_aws[2], "') AND (",
                    "obs_time >= ", start, " AND obs_time <= ", end, ")")
    qres <- DBI::dbGetQuery(conn, query)
    DBI::dbDisconnect(conn)
    if(nrow(qres) == 0) return(out)
    # Discard values flagged by QC.
    qres[!is.na(qres[, qc_name]), 'value'] <- NA
    qres_var_hgt <- paste0(qres$var_code, "_", qres$height)
    ######
    # Keep only variable/height pairs declared in the display configuration.
    parsFile <- file.path(aws_dir, "AWS_DATA", "JSON", pars_file)
    pars_info <- jsonlite::read_json(parsFile)
    pars_info <- pars_info$variables
    info_var_hgt <- sapply(pars_info, function(v) paste0(v$var_code, '_', v$height))
    qres <- qres[qres_var_hgt %in% info_var_hgt, , drop = FALSE]
    if(nrow(qres) == 0) return(out)
    ######
    # Pivot to a time x "<var>_<height>_<stat>" matrix.
    qres$all_vars <- paste0(qres$var_code, "_", qres$height, "_", qres$stat_code)
    don <- reshape2::acast(qres, obs_time~all_vars, mean, value.var = 'value')
    don[is.nan(don)] <- NA
    d_row <- as.integer(dimnames(don)[[1]])
    d_col <- strsplit(dimnames(don)[[2]], "_")
    # Build human-readable column labels from the configuration.
    stat_name <- c('Ave', 'Min', 'Max', 'Tot')
    col_name <- sapply(d_col, function(x){
        var_hgt <- paste0(x[1], '_', x[2])
        ix <- which(info_var_hgt == var_hgt)
        vvr <- pars_info[[ix]]
        vvr <- paste0(vvr$var_name, "_", vvr$height, "m")
        vvr <- gsub(" ", "-", vvr)
        vst <- stat_name[as.integer(x[3])]
        paste0(vvr, "_", vst)
    })
    ######
    if(tstep == "hourly"){
        daty <- as.POSIXct(d_row, origin = origin, tz = tz)
        odaty <- format(daty, "%Y%m%d%H")
    }else{
        daty <- as.Date(d_row, origin = origin)
        odaty <- format(daty, "%Y%m%d")
    }
    ###########
    # Aggregate daily rows into pentad/dekad/month periods, requiring a
    # minimum availability fraction per period (from the JSON config).
    if(tstep %in% c("pentad", "dekadal", "monthly")){
        mfracFile <- paste0("Min_Frac_", tools::toTitleCase(tstep), ".json")
        mfracFile <- file.path(aws_dir, "AWS_DATA", "JSON", mfracFile)
        minFrac <- jsonlite::read_json(mfracFile)
        yymm <- format(daty, "%Y%m")
        if(tstep == "pentad"){
            jour <- as.numeric(format(daty, "%d"))
            # Day of month -> pentad number 1..6.
            jour <- cut(jour, c(1, 5, 10, 15, 20, 25, 31),
                        labels = FALSE, include.lowest = TRUE)
            index <- split(seq_along(daty), paste0(yymm, jour))
            nbday_fun <- nb_day_of_pentad
        }
        if(tstep == "dekadal"){
            jour <- as.numeric(format(daty, "%d"))
            # Day of month -> dekad number 1..3.
            jour <- cut(jour, c(1, 10, 20, 31),
                        labels = FALSE, include.lowest = TRUE)
            index <- split(seq_along(daty), paste0(yymm, jour))
            nbday_fun <- nb_day_of_dekad
        }
        if(tstep == "monthly"){
            index <- split(seq_along(daty), yymm)
            nbday_fun <- nb_day_of_month
        }
        odaty <- names(index)
        # Expected day count per period, for the availability fraction.
        pmon <- lapply(index, function(x) as.numeric(unique(format(daty[x], "%m"))))
        nbd0 <- sapply(seq_along(pmon), function(j) nbday_fun(names(pmon[j])))
        nobs <- sapply(index, length)
        avail_frac <- nobs/nbd0
        # Aggregate each column with the function matching its stat code
        # (1=mean, 2=min, 3=max, 4=sum); below-threshold periods stay NA.
        tmp <- lapply(seq_along(d_col), function(j){
            ina <- avail_frac >= minFrac[[d_col[[j]][1]]]
            fun_agg <- switch(d_col[[j]][3],
                              "4" = sum, "1" = mean,
                              "2" = min, "3" = max)
            xout <- rep(NA, length(index))
            xout[ina] <- sapply(index[ina], function(ix){
                x <- don[ix, j]
                if(all(is.na(x))) return(NA)
                fun_agg(x, na.rm = TRUE)
            })
            xout
        })
        don <- do.call(cbind, tmp)
    }
    # Drop time steps with no data at all.
    ina <- rowSums(!is.na(don)) > 0
    don <- don[ina, , drop = FALSE]
    odaty <- odaty[ina]
    out <- data.frame(odaty, don)
    names(out) <- c('Date', col_name)
    return(out)
}
##########
# Retrieve one variable (all its statistics) for one AWS over [start, end]
# at the requested time step.
#
# tstep   : 'hourly'/'daily' read directly; 'pentad'/'dekadal'/'monthly'
#           are aggregated here from the daily table.
# net_aws : "<network_code>_<aws_id>"; var_hgt: "<var_code>_<height>".
# aws_dir : root directory holding AWS_DATA (JSON config, DB credentials).
#
# Returns list(date, data, status) where data has one column per stat code
# and status is 'ok', 'no-data' or 'failed-connection'.
getAggrAWSData_oneVar <- function(tstep, net_aws, var_hgt, start, end, aws_dir){
    tz <- Sys.getenv("TZ")
    origin <- "1970-01-01"
    out <- list(date = NULL, data = NULL, status = "no-data")
    ######
    adt_args <- readRDS(file.path(aws_dir, "AWS_DATA", "AUTH", "adt.con"))
    conn <- try(connect.database(adt_args$connection,
                RMySQL::MySQL()), silent = TRUE)
    if(inherits(conn, "try-error")){
        out$status <- 'failed-connection'
        return(out)
    }
    ######
    # Numeric epoch bounds for the SQL range filter.
    datyRg <- getAggrDateRange(tstep, start, end, tz)
    start <- as.numeric(datyRg[1])
    end <- as.numeric(datyRg[2])
    # Hourly has its own table/QC column; other steps use the daily table.
    if(tstep == 'hourly'){
        data_table <- 'aws_hourly'
        qc_name <- 'spatial_check'
    }else{
        data_table <- 'aws_daily'
        qc_name <- 'qc_output'
    }
    ######
    net_aws <- strsplit(net_aws, "_")[[1]]
    var_hgt <- strsplit(var_hgt, "_")[[1]]
    ######
    # var_code 5 keeps every stat code; all other variables are restricted
    # to average/min/max (stat codes 1, 2, 3).
    # NOTE(review): the meaning of var_code 5 is not visible here -- confirm.
    if(var_hgt[1] == "5"){
        query <- paste0("SELECT obs_time, stat_code, value, ", qc_name, " FROM ", data_table,
                        " WHERE (", "network=", net_aws[1], " AND id='", net_aws[2],
                        "' AND height=", var_hgt[2], " AND var_code=", var_hgt[1],
                        ") AND (obs_time >= ", start, " AND obs_time <= ", end, ")")
    }else{
        query <- paste0("SELECT obs_time, stat_code, value, ", qc_name, " FROM ", data_table,
                        " WHERE (", "network=", net_aws[1], " AND id='", net_aws[2],
                        "' AND height=", var_hgt[2], " AND var_code=", var_hgt[1],
                        " AND stat_code IN (1, 2, 3)) AND (",
                        "obs_time >= ", start, " AND obs_time <= ", end, ")")
    }
    qres <- DBI::dbGetQuery(conn, query)
    DBI::dbDisconnect(conn)
    if(nrow(qres) == 0) return(out)
    # Discard values flagged by QC.
    qres[!is.na(qres[, qc_name]), 'value'] <- NA
    # Pivot to a time x stat_code matrix.
    don <- reshape2::acast(qres, obs_time~stat_code, mean, value.var = 'value')
    d_row <- as.integer(dimnames(don)[[1]])
    d_col <- dimnames(don)[[2]]
    if(tstep == "hourly"){
        daty <- as.POSIXct(d_row, origin = origin, tz = tz)
        odaty <- format(daty, "%Y%m%d%H")
    }else{
        daty <- as.Date(d_row, origin = origin)
        odaty <- format(daty, "%Y%m%d")
    }
    ###########
    # Aggregate daily rows into pentad/dekad/month periods, requiring a
    # minimum availability fraction per period (from the JSON config).
    if(tstep %in% c("pentad", "dekadal", "monthly")){
        mfracFile <- paste0("Min_Frac_", tools::toTitleCase(tstep), ".json")
        mfracFile <- file.path(aws_dir, "AWS_DATA", "JSON", mfracFile)
        minFrac <- jsonlite::read_json(mfracFile)
        yymm <- format(daty, "%Y%m")
        if(tstep == "pentad"){
            jour <- as.numeric(format(daty, "%d"))
            # Day of month -> pentad number 1..6.
            jour <- cut(jour, c(1, 5, 10, 15, 20, 25, 31),
                        labels = FALSE, include.lowest = TRUE)
            index <- split(seq_along(daty), paste0(yymm, jour))
            nbday_fun <- nb_day_of_pentad
        }
        if(tstep == "dekadal"){
            jour <- as.numeric(format(daty, "%d"))
            # Day of month -> dekad number 1..3.
            jour <- cut(jour, c(1, 10, 20, 31),
                        labels = FALSE, include.lowest = TRUE)
            index <- split(seq_along(daty), paste0(yymm, jour))
            nbday_fun <- nb_day_of_dekad
        }
        if(tstep == "monthly"){
            index <- split(seq_along(daty), yymm)
            nbday_fun <- nb_day_of_month
        }
        odaty <- names(index)
        # Expected day count per period, for the availability fraction.
        pmon <- lapply(index, function(x) as.numeric(unique(format(daty[x], "%m"))))
        nbd0 <- sapply(seq_along(pmon), function(j) nbday_fun(names(pmon[j])))
        nobs <- sapply(index, length)
        avail_frac <- nobs/nbd0
        # Periods meeting the minimum availability for this variable.
        ina <- avail_frac >= minFrac[[var_hgt[1]]]
        # Template output: one all-NA row per period.
        xout <- don[1, , drop = FALSE]
        xout[] <- NA
        xout <- xout[rep(1, length(index)), , drop = FALSE]
        # Aggregate each stat column with the function matching its code
        # (1=mean, 2=min, 3=max, 4=sum).
        tmp <- lapply(index[ina], function(ix){
            x <- don[ix, , drop = FALSE]
            agg <- lapply(d_col, function(n){
                fun <- switch(n, "4" = sum, "1" = mean,
                              "2" = min, "3" = max)
                if(all(is.na(x[, n]))) return(NA)
                fun(x[, n], na.rm = TRUE)
            })
            agg <- do.call(cbind, agg)
            return(agg)
        })
        xout[ina, ] <- do.call(rbind, tmp)
        don <- xout
    }
    don <- data.frame(don)
    names(don) <- d_col
    rownames(don) <- NULL
    out <- list(date = odaty, data = don, status = "ok")
    return(out)
}
##########
# Query aggregated observations for a selection of AWS stations, for one
# variable/height and one statistic, over [start, end].
#
# tstep   : 'hourly'/'daily' are read directly from the database;
#           'pentad', 'dekadal' and 'monthly' are aggregated here from daily.
# net_aws : character vector of "<network_code>_<aws_id>" identifiers.
# var_hgt : "<var_code>_<height>" string; pars: 'Ave', 'Min', 'Max' or 'Tot'.
# aws_dir : root directory holding AWS_DATA (JSON config, DB credentials).
#
# Returns a list: variable/statistic metadata, net_aws (stations kept),
# date, data (time x station matrix) and status
# ('ok', 'no-data' or 'failed-connection').
getAggrAWSData_awsSel <- function(tstep, net_aws, var_hgt, pars,
                                   start, end, aws_dir)
{
    tz <- Sys.getenv("TZ")
    origin <- "1970-01-01"
    # Resolve the requested network/id pairs against the station metadata.
    parsFile <- file.path(aws_dir, "AWS_DATA", "JSON", "aws_parameters.json")
    awsPars <- jsonlite::read_json(parsFile)
    net_aws <- strsplit(net_aws, "_")
    var_hgt <- strsplit(var_hgt, "_")[[1]]
    net_code <- sapply(awsPars, "[[", "network_code")
    aws_id <- sapply(awsPars, "[[", "id")
    istn <- lapply(net_aws, function(a) which(net_code == a[1] & aws_id == a[2]))
    nz <- sapply(istn, length) > 0
    awsPars <- awsPars[unlist(istn[nz])]
    sel_net <- sapply(awsPars, '[[', 'network_code')
    sel_id <- sapply(awsPars, '[[', 'id')
    sel_aws <- paste0(sel_net, '_', sel_id)
    # Display name of the variable, taken from the first selected station.
    var_name <- awsPars[[1]]$PARS_Info[[var_hgt[1]]][[1]]$name
    # Map the statistic label to its database stat_code (1..4).
    stat_code <- (1:4)[c('Ave', 'Min', 'Max', 'Tot') %in% pars]
    par_name <- switch(pars, "Ave" = "Average", "Tot" = "Total",
                       "Min" = "Minimum", "Max" = "Maximum")
    out <- list(var_name = var_name, stat_name = par_name,
                var_code = var_hgt[1], height = var_hgt[2],
                stat_code = stat_code, net_aws = NULL,
                date = NULL, data = NULL, status = "no-data")
    ######
    adt_args <- readRDS(file.path(aws_dir, "AWS_DATA", "AUTH", "adt.con"))
    conn <- try(connect.database(adt_args$connection,
                RMySQL::MySQL()), silent = TRUE)
    if(inherits(conn, "try-error")){
        out$status <- 'failed-connection'
        return(out)
    }
    ######
    # Numeric epoch bounds for the SQL range filter.
    datyRg <- getAggrDateRange(tstep, start, end, tz)
    start <- as.numeric(datyRg[1])
    end <- as.numeric(datyRg[2])
    # Hourly has its own table/QC column; other steps start from daily data.
    if(tstep == 'hourly'){
        data_table <- 'aws_hourly'
        qc_name <- 'spatial_check'
    }else{
        data_table <- 'aws_daily'
        qc_name <- 'qc_output'
    }
    ######
    # "(network, 'id')" tuples for the SQL IN clause.
    # NOTE(review): query built by string concatenation; values come from
    # local JSON metadata, but parameterized queries would be safer.
    all_aws <- paste0("(", sel_net, ", ", "'", sel_id, "'", ")")
    all_aws <- paste0(all_aws, collapse = ", ")
    query <- paste0("SELECT * FROM ", data_table, " WHERE (",
                    "(network, id) IN (", all_aws, ") AND height=", var_hgt[2],
                    " AND var_code=", var_hgt[1], " AND stat_code=", stat_code,
                    ") AND (", "obs_time >= ", start, " AND obs_time <= ", end, ")")
    qres <- DBI::dbGetQuery(conn, query)
    DBI::dbDisconnect(conn)
    if(nrow(qres) == 0){
        out$status <- "no-data"
        return(out)
    }
    # Discard values flagged by QC.
    qres[!is.na(qres[, qc_name]), 'value'] <- NA
    qres$aws <- paste0(qres$network, "_", qres$id)
    # Pivot to a time x station matrix, then reorder columns to the
    # requested station order (missing stations become NA columns).
    don <- reshape2::acast(qres, obs_time~aws, mean, value.var = 'value')
    don[is.nan(don)] <- NA
    isel <- match(sel_aws, dimnames(don)[[2]])
    don <- don[, isel, drop = FALSE]
    dimnames(don)[[2]] <- sel_aws
    d_row <- as.integer(dimnames(don)[[1]])
    if(tstep == "hourly"){
        daty <- as.POSIXct(d_row, origin = origin, tz = tz)
        odaty <- format(daty, "%Y%m%d%H")
    }else{
        daty <- as.Date(d_row, origin = origin)
        odaty <- format(daty, "%Y%m%d")
    }
    ###########
    # Aggregate daily rows into pentad/dekad/month periods, requiring a
    # minimum availability fraction per period (from the JSON config).
    if(tstep %in% c("pentad", "dekadal", "monthly")){
        mfracFile <- paste0("Min_Frac_", tools::toTitleCase(tstep), ".json")
        mfracFile <- file.path(aws_dir, "AWS_DATA", "JSON", mfracFile)
        minFrac <- jsonlite::read_json(mfracFile)
        yymm <- format(daty, "%Y%m")
        if(tstep == "pentad"){
            jour <- as.numeric(format(daty, "%d"))
            # Day of month -> pentad number 1..6.
            jour <- cut(jour, c(1, 5, 10, 15, 20, 25, 31),
                        labels = FALSE, include.lowest = TRUE)
            index <- split(seq_along(daty), paste0(yymm, jour))
            nbday_fun <- nb_day_of_pentad
        }
        if(tstep == "dekadal"){
            jour <- as.numeric(format(daty, "%d"))
            # Day of month -> dekad number 1..3.
            jour <- cut(jour, c(1, 10, 20, 31),
                        labels = FALSE, include.lowest = TRUE)
            index <- split(seq_along(daty), paste0(yymm, jour))
            nbday_fun <- nb_day_of_dekad
        }
        if(tstep == "monthly"){
            index <- split(seq_along(daty), yymm)
            nbday_fun <- nb_day_of_month
        }
        odaty <- names(index)
        # Expected day count per period, for the availability fraction.
        pmon <- lapply(index, function(x) as.numeric(unique(format(daty[x], "%m"))))
        nbd0 <- sapply(seq_along(pmon), function(j) nbday_fun(names(pmon[j])))
        nobs <- sapply(index, length)
        avail_frac <- nobs/nbd0
        # Periods meeting the minimum availability for this variable.
        ina <- avail_frac >= minFrac[[var_hgt[1]]]
        fun_agg <- switch(pars,
                          "Tot" = colSums,
                          "Ave" = colMeans,
                          "Min" = matrixStats::colMins,
                          "Max" = matrixStats::colMaxs)
        # Template output: one all-NA row per period.
        xout <- don[1, , drop = FALSE]
        xout[] <- NA
        xout <- xout[rep(1, length(index)), , drop = FALSE]
        # Bug fix: iterate over the *filtered* periods and keep their
        # expected day counts aligned. Previously nbd0[j] was indexed with
        # j running over index[ina], so whenever a period was excluded the
        # per-station availability check used the wrong period's day count.
        idx_ok <- index[ina]
        nbd_ok <- nbd0[ina]
        tmp <- lapply(seq_along(idx_ok), function(j){
            ix <- idx_ok[[j]]
            x <- don[ix, , drop = FALSE]
            # Per-station availability check within the period.
            nna <- colSums(!is.na(x))/nbd_ok[j] >= minFrac[[var_hgt[1]]]
            x <- fun_agg(x, na.rm = TRUE)
            x[!nna] <- NA
            x
        })
        xout[ina, ] <- do.call(rbind, tmp)
        don <- xout
    }
    # Drop stations with no data at all.
    ina <- colSums(!is.na(don)) == 0
    don <- don[, !ina, drop = FALSE]
    out$net_aws <- dimnames(don)[[2]]
    dimnames(don) <- NULL
    out$date <- odaty
    out$data <- don
    out$status <- "ok"
    return(out)
}
##########
wind2hourly <- function(dates, ws, wd){
    # Vector-average sub-hourly wind observations into hourly values.
    # dates: timestamps as "%Y%m%d%H%M" strings; ws: wind speed;
    # wd: wind direction in degrees (meteorological convention assumed).
    # Returns list(date, ws, wd), one entry per hour present in 'dates'.
    deg2rad <- pi / 180
    # Decompose each observation into u/v components.
    u_comp <- -ws * sin(deg2rad * wd)
    v_comp <- -ws * cos(deg2rad * wd)
    # Group observation indices by hour (first 10 chars = YYYYMMDDHH).
    by_hour <- split(seq_along(dates), substr(dates, 1, 10))
    # Mean of one component over one hour; all-NA hours give NA, not NaN.
    hour_mean <- function(comp, i){
        m <- mean(comp[i], na.rm = TRUE)
        if(is.nan(m)) NA_real_ else m
    }
    u_hr <- vapply(by_hour, function(i) hour_mean(u_comp, i), numeric(1))
    v_hr <- vapply(by_hour, function(i) hour_mean(v_comp, i), numeric(1))
    # Recompose speed and direction from the averaged components; a
    # near-zero speed keeps the raw atan2 angle (no 180-degree shift).
    speed <- sqrt(u_hr^2 + v_hr^2)
    direction <- (atan2(u_hr, v_hr) * 180/pi) + ifelse(speed < 1e-14, 0, 180)
    list(date = names(by_hour),
         ws = as.numeric(round(speed, 2)),
         wd = as.numeric(round(direction, 2)))
}
# Retrieve raw wind speed/direction observations for one AWS and return a
# gap-padded time series plus a data-availability percentage.
#
# net_aws : "<network_code>_<aws_id>" station identifier.
# height  : sensor height (database 'height' column).
# tstep   : 'hourly' (sub-hourly data vector-averaged via wind2hourly)
#           or anything else for raw 15-minute data.
# start/end : bounds as "%Y-%m-%d-%H" (hourly) or "%Y-%m-%d-%H-%M" strings.
# aws_dir : root directory holding AWS_DATA (JSON metadata, DB credentials).
#
# Returns station metadata plus date/ws/wd vectors, 'avail' (percent of
# expected observations present) and a status flag
# ('ok', 'no-data' or 'failed-connection').
getWindData <- function(net_aws, height, tstep, start, end, aws_dir)
{
    tz <- Sys.getenv("TZ")
    origin <- "1970-01-01"
    # Station metadata, used only for the identification fields returned.
    parsFile <- file.path(aws_dir, "AWS_DATA", "JSON", "aws_parameters.json")
    awsPars <- jsonlite::read_json(parsFile)
    net_aws <- strsplit(net_aws, "_")[[1]]
    net_code <- sapply(awsPars, "[[", "network_code")
    aws_id <- sapply(awsPars, "[[", "id")
    istn <- which(net_code == net_aws[1] & aws_id == net_aws[2])
    awsPars <- awsPars[[istn]][c('network_code', 'network', 'id', 'name')]
    frmt <- if(tstep == "hourly") "%Y-%m-%d-%H" else "%Y-%m-%d-%H-%M"
    start <- strptime(start, frmt, tz = tz)
    end <- strptime(end, frmt, tz = tz)
    # Epoch seconds for the SQL range filter.
    start <- as.numeric(start)
    end <- as.numeric(end)
    ######
    adt_args <- readRDS(file.path(aws_dir, "AWS_DATA", "AUTH", "adt.con"))
    conn <- try(connect.database(adt_args$connection,
                RMySQL::MySQL()), silent = TRUE)
    if(inherits(conn, "try-error"))
        return(list(status = 'failed-connection'))
    # var_code 9 = wind direction, 10 = wind speed (per column usage below);
    # stat_code 1 selects the average statistic.
    query <- paste0("SELECT obs_time, var_code, value, limit_check FROM aws_data WHERE (",
                    "network=", net_aws[1], " AND id='", net_aws[2], "' AND height=", height,
                    " AND var_code IN (9, 10) AND stat_code=1) AND (",
                    "obs_time >= ", start, " AND obs_time <= ", end, ")")
    qres <- DBI::dbGetQuery(conn, query)
    DBI::dbDisconnect(conn)
    if(nrow(qres) == 0) return(list(status = 'no-data'))
    # Discard values flagged by the limit check.
    qres[!is.na(qres$limit_check), 'value'] <- NA
    # Pivot to a time x var_code matrix.
    qres <- reshape2::acast(qres, obs_time~var_code, mean, value.var = 'value')
    daty <- as.integer(dimnames(qres)[[1]])
    daty <- as.POSIXct(daty, origin = origin, tz = tz)
    ws <- as.numeric(qres[, '10'])
    wd <- as.numeric(qres[, '9'])
    if(tstep == "hourly"){
        # Vector-average sub-hourly observations into hourly speed/direction.
        wind <- wind2hourly(format(daty, '%Y%m%d%H%M'), ws, wd)
        ws <- wind$ws
        wd <- wind$wd
        dts <- strptime(wind$date, "%Y%m%d%H", tz = tz)
        tstep.seq <- 'hour'
        tstep.out <- 1
    }else{
        dts <- sort(daty)
        tstep.seq <- '15 min'
        tstep.out <- 15
    }
    # Expected number of observations over the observed span.
    daty <- seq(min(dts), max(dts), tstep.seq)
    nb_obs <- length(daty)
    # Detect gaps larger than one time step.
    # NOTE(review): diff() on date-times chooses its own units; comparing to
    # the bare numbers 1/15 assumes hours/minutes respectively -- confirm.
    # NOTE(review): only ONE missing timestamp is inserted per gap, even when
    # the gap spans several steps -- confirm this is intended.
    ddif <- diff(dts)
    idf <- ddif > tstep.out
    if(any(idf)){
        idt <- which(idf)
        # Seconds to add to the last point before each gap.
        addmul <- if(tstep == "hourly") 3600 else tstep.out * 60
        miss.daty <- dts[idt] + addmul
        miss.daty <- format(miss.daty, "%Y%m%d%H%M%S", tz = tz)
        daty1 <- rep(NA, length(dts) + length(miss.daty))
        ws1 <- rep(NA, length(daty1))
        wd1 <- rep(NA, length(daty1))
        # Place the inserted timestamps, then fill the remaining slots with
        # the observed series in order; ws/wd stay NA at inserted positions.
        daty1[idt + seq(length(miss.daty))] <- miss.daty
        ix <- is.na(daty1)
        daty1[ix] <- format(dts, "%Y%m%d%H%M%S", tz = tz)
        ws1[ix] <- ws
        wd1[ix] <- wd
        ws <- ws1
        wd <- wd1
        dts <- strptime(daty1, "%Y%m%d%H%M%S", tz = tz)
    }
    # Percentage of expected observations actually present.
    avail <- round(100 * sum(!is.na(ws)) / nb_obs, 1)
    wind <- list(date = dts, ws = ws, wd = wd)
    out <- list(avail = avail, status = 'ok')
    return(c(awsPars, wind, out))
}
|
# Plot 2: Global active power vs. time for 2007-02-01 and 2007-02-02,
# saved to plot2.png (480x480).
# Read the full household power consumption dataset; "?" marks missing values.
# NOTE(review): absolute, machine-specific path -- consider a relative path.
elcons <- read.csv("C:/Users/srinivas/Documents/R/exdata_data_household_power_consumption/household_power_consumption.txt",header = TRUE, sep = ";",na.strings = "?",stringsAsFactors = FALSE)
# Subset the rows within the 2 days 1/2/2007 or 2/2/2007 (d/m/Y format)
elconsub <- elcons[elcons$Date %in% c('1/2/2007', '2/2/2007'),]
# Paste Date and Time into a new combined Date_Time column
elconsdf <- transform(elconsub, Date_Time = paste(elconsub$Date,elconsub$Time, sep =" "))
# Parse the combined string into POSIXct date-times
elconsdf1 <- transform(elconsdf, Date_Time = as.POSIXct(strptime(elconsdf$Date_Time,format = "%d/%m/%Y %H:%M:%S", tz= "")))
# PLOT2: open the PNG device, draw the line plot, close the device.
# (Fix: png() and plot() return nothing useful, so their results are no
# longer assigned to a misleading 'plot2' variable.)
png("plot2.png", width = 480, height = 480)
plot(elconsdf1$Date_Time, elconsdf1$Global_active_power,
     ylab = "Global Active Power (kilowatts)", type = "l", xlab = "")
dev.off()
|
/plot2.R
|
no_license
|
srini2014/ExData_Plotting1
|
R
| false
| false
| 946
|
r
|
# Plot 2: Global active power vs. time for 2007-02-01 and 2007-02-02,
# saved to plot2.png (480x480).
# Read the full household power consumption dataset; "?" marks missing values.
# NOTE(review): absolute, machine-specific path -- consider a relative path.
elcons <- read.csv("C:/Users/srinivas/Documents/R/exdata_data_household_power_consumption/household_power_consumption.txt",header = TRUE, sep = ";",na.strings = "?",stringsAsFactors = FALSE)
# Subset the rows within the 2 days 1/2/2007 or 2/2/2007 (d/m/Y format)
elconsub <- elcons[elcons$Date %in% c('1/2/2007', '2/2/2007'),]
# Paste Date and Time into a new combined Date_Time column
elconsdf <- transform(elconsub, Date_Time = paste(elconsub$Date,elconsub$Time, sep =" "))
# Parse the combined string into POSIXct date-times
elconsdf1 <- transform(elconsdf, Date_Time = as.POSIXct(strptime(elconsdf$Date_Time,format = "%d/%m/%Y %H:%M:%S", tz= "")))
# PLOT2: open the PNG device, draw the line plot, close the device.
# (Fix: png() and plot() return nothing useful, so their results are no
# longer assigned to a misleading 'plot2' variable.)
png("plot2.png", width = 480, height = 480)
plot(elconsdf1$Date_Time, elconsdf1$Global_active_power,
     ylab = "Global Active Power (kilowatts)", type = "l", xlab = "")
dev.off()
|
\name{adworld}
\docType{data}
\alias{adworld}
\encoding{latin1}
\title{Geographical coordinates}
\description{Latitude and longitude of all administrative areas.}
\usage{data(adworld)}
\format{A matrix of many rows and 3 columns (Latitude, Longitude and name of the administrative area)}
\source{Latitude and longitude coordinates of the administrative areas were obtained from the web page \url{https://www.openstreetmap.org}.}
\keyword{adworld}
|
/man/adworld.Rd
|
no_license
|
cran/KnowBR
|
R
| false
| false
| 457
|
rd
|
\name{adworld}
\docType{data}
\alias{adworld}
\encoding{latin1}
\title{Geographical coordinates}
\description{Latitude and longitude of all administrative areas.}
\usage{data(adworld)}
\format{A matrix of many rows and 3 columns (Latitude, Longitude and name of the administrative area)}
\source{Latitude and longitude coordinates of the administrative areas were obtained from the web page \url{https://www.openstreetmap.org}.}
\keyword{adworld}
|
Drawdowns <-
function (R, geometric = TRUE, ...)
{ # @author Peter Carl
    # Compute the drawdown series of each column of a return series: the
    # fraction by which cumulative wealth sits below its running peak
    # (0 at a new peak, negative while under water).
    #
    # R         : asset-return data (anything checkData() accepts).
    # geometric : TRUE compounds returns, FALSE sums them arithmetically.
    x <- checkData(R)
    n_cols <- ncol(x)
    col_labels <- colnames(x)
    # Drawdown of a single column of returns.
    one_column_drawdown <- function(x, geometric) {
        wealth <- if (geometric) cumprod(1 + x) else 1 + cumsum(x)
        # Running maximum, seeded with 1 so drawdowns start at 0.
        peak <- cummax(c(1, wealth))[-1]
        wealth / peak - 1
    }
    # na.skip() applies the computation while preserving NA positions.
    per_col <- lapply(seq_len(n_cols), function(j) {
        na.skip(x[, j], FUN = one_column_drawdown, geometric = geometric)
    })
    drawdown <- Reduce(merge, per_col)
    colnames(drawdown) <- col_labels
    # Restore the class/attributes of the input object.
    reclass(drawdown, x)
}
###############################################################################
# R (http://r-project.org/) Econometrics for Performance and Risk Analysis
#
# Copyright (c) 2004-2014 Peter Carl and Brian G. Peterson
#
# This R package is distributed under the terms of the GNU Public License (GPL)
# for full details see the file COPYING
#
# $Id: Drawdowns.R 3301 2014-01-18 15:26:12Z braverock $
#
###############################################################################
|
/PerformanceAnalytics/R/Drawdowns.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 1,402
|
r
|
Drawdowns <-
function (R, geometric = TRUE, ...)
{ # @author Peter Carl
    # Compute the drawdown series of each column of a return series: the
    # fraction by which cumulative wealth sits below its running peak
    # (0 at a new peak, negative while under water).
    #
    # R         : asset-return data (anything checkData() accepts).
    # geometric : TRUE compounds returns, FALSE sums them arithmetically.
    x <- checkData(R)
    n_cols <- ncol(x)
    col_labels <- colnames(x)
    # Drawdown of a single column of returns.
    one_column_drawdown <- function(x, geometric) {
        wealth <- if (geometric) cumprod(1 + x) else 1 + cumsum(x)
        # Running maximum, seeded with 1 so drawdowns start at 0.
        peak <- cummax(c(1, wealth))[-1]
        wealth / peak - 1
    }
    # na.skip() applies the computation while preserving NA positions.
    per_col <- lapply(seq_len(n_cols), function(j) {
        na.skip(x[, j], FUN = one_column_drawdown, geometric = geometric)
    })
    drawdown <- Reduce(merge, per_col)
    colnames(drawdown) <- col_labels
    # Restore the class/attributes of the input object.
    reclass(drawdown, x)
}
###############################################################################
# R (http://r-project.org/) Econometrics for Performance and Risk Analysis
#
# Copyright (c) 2004-2014 Peter Carl and Brian G. Peterson
#
# This R package is distributed under the terms of the GNU Public License (GPL)
# for full details see the file COPYING
#
# $Id: Drawdowns.R 3301 2014-01-18 15:26:12Z braverock $
#
###############################################################################
|
# Environment preparation
library(caret)
library(parallel)
library(doParallel)
library(randomForest)
library(rpart)
library(rpart.plot)
library(corrplot)
# Obtain the data
urltrain <- "http://d396qusza40orc.cloudfront.net/predmachlearn/pml-training.csv"
urltest <- "http://d396qusza40orc.cloudfront.net/predmachlearn/pml-testing.csv"
pmltraining <- read.csv(url(urltrain))
pmltesting <- read.csv(url(urltest))
# Data cleaning: use only numeric columns with no empty/NA values
pmltraining <- pmltraining[,-(1:6)]
validcols <- which(colSums(is.na(pmltraining) | pmltraining == "")==0)
pmltraining <- pmltraining[,validcols]
pmltesting <- pmltesting[,validcols]
# Create data partition for training and testing
inTrain <- createDataPartition(pmltraining$classe, p=0.7, list=FALSE)
training <- pmltraining[inTrain, ]
testing <- pmltraining[-inTrain, ]
# Random forest model
fitrf <- randomForest(classe ~ ., data=training, keep.forest=TRUE,proximity=TRUE,ntree=200)
# randomForest(formula = classe ~ ., data = training, method = "rf", keep.forest = TRUE, proximity = TRUE, ntree = 200)
# Type of random forest: classification
# Number of trees: 200
# No. of variables tried at each split: 7
#
# OOB estimate of error rate: 0.31%
# Confusion matrix:
# A B C D E class.error
# A 3905 0 0 0 1 0.0002560164
# B 8 2647 3 0 0 0.0041384500
# C 0 9 2386 1 0 0.0041736227
# D 0 0 17 2235 0 0.0075488455
# E 0 0 0 3 2522 0.0011881188
# Predicitons with random forest
predrf <- predict(fitrf,newdata=testing)
cmrf <- confusionMatrix(predrf, testing$classe)
cmrf$overall[1]
# Accuracy 0.9972812
# Decission Tree model
fitrp <- rpart(classe ~ ., data=training, method="class", maxsurrogate=0)
rpart.plot(fitrp,varlen = -1,cex=0.5)
# Predictions with decission tree
predrp <- predict(fitrp, newdata=testing, type="class")
cmrp <- confusionMatrix(predrp, testing$classe)
cmrp$table
# Reference
# Prediction A B C D E
# A 1522 276 45 115 99
# B 36 596 34 22 87
# C 10 60 829 139 79
# D 86 144 57 637 118
# E 20 63 61 51 699
cmrp$overall[1]
# Accuracy 0.7277825
# Generalized Boosted Model
controlGBM <- trainControl(method = "repeatedcv", number = 5, repeats = 1)
fitgbm <- train(classe ~ ., data=training, method = "gbm",trControl = controlGBM, verbose = FALSE)
# Predicition with GBM
predgbm <- predict(fitgbm, newdata=testing)
cmgbm <- confusionMatrix(predgbm, testing$classe)
cmgbm$table
# Reference
# Prediction A B C D E
# A 1672 14 0 1 0
# B 2 1110 4 2 4
# C 0 13 1018 12 2
# D 0 2 2 949 9
# E 0 0 2 0 1067
cmgbm$overall[1]
# Accuracy 0.9882753
# Testing the best model (RF) with the testing dataset
predict(fitrf,newdata = pmltesting)
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
# B A B A A E D B A A B C B A E E A B B B
|
/PMLearningAssignment.r
|
no_license
|
anan4/PMLearning
|
R
| false
| false
| 2,980
|
r
|
# Practical Machine Learning course project: predict the exercise class
# ("classe", levels A-E) from sensor data, compare a random forest, a
# decision tree and a GBM on a 70/30 split, then apply the best model to
# the 20-row quiz set.
# NOTE(review): there is no set.seed() before createDataPartition() or the
# model fits, so the partition and the accuracies recorded in the comments
# below will not reproduce exactly on re-run -- confirm that is acceptable.
# Environment preparation
library(caret)
library(parallel)
library(doParallel)
library(randomForest)
library(rpart)
library(rpart.plot)
library(corrplot)
# Obtain the data
urltrain <- "http://d396qusza40orc.cloudfront.net/predmachlearn/pml-training.csv"
urltest <- "http://d396qusza40orc.cloudfront.net/predmachlearn/pml-testing.csv"
pmltraining <- read.csv(url(urltrain))
pmltesting <- read.csv(url(urltest))
# Data cleaning: use only numeric columns with no empty/NA values
# drop the first 6 columns (presumably row ids/user/timestamps -- TODO confirm)
pmltraining <- pmltraining[,-(1:6)]
validcols <- which(colSums(is.na(pmltraining) | pmltraining == "")==0)
pmltraining <- pmltraining[,validcols]
pmltesting <- pmltesting[,validcols]
# Create data partition for training and testing
inTrain <- createDataPartition(pmltraining$classe, p=0.7, list=FALSE)
training <- pmltraining[inTrain, ]
testing <- pmltraining[-inTrain, ]
# Random forest model
fitrf <- randomForest(classe ~ ., data=training, keep.forest=TRUE,proximity=TRUE,ntree=200)
# randomForest(formula = classe ~ ., data = training, method = "rf", keep.forest = TRUE, proximity = TRUE, ntree = 200)
# Type of random forest: classification
# Number of trees: 200
# No. of variables tried at each split: 7
#
# OOB estimate of error rate: 0.31%
# Confusion matrix:
# A B C D E class.error
# A 3905 0 0 0 1 0.0002560164
# B 8 2647 3 0 0 0.0041384500
# C 0 9 2386 1 0 0.0041736227
# D 0 0 17 2235 0 0.0075488455
# E 0 0 0 3 2522 0.0011881188
# Predicitons with random forest
predrf <- predict(fitrf,newdata=testing)
cmrf <- confusionMatrix(predrf, testing$classe)
cmrf$overall[1]
# Accuracy 0.9972812
# Decission Tree model
fitrp <- rpart(classe ~ ., data=training, method="class", maxsurrogate=0)
rpart.plot(fitrp,varlen = -1,cex=0.5)
# Predictions with decission tree
predrp <- predict(fitrp, newdata=testing, type="class")
cmrp <- confusionMatrix(predrp, testing$classe)
cmrp$table
# Reference
# Prediction A B C D E
# A 1522 276 45 115 99
# B 36 596 34 22 87
# C 10 60 829 139 79
# D 86 144 57 637 118
# E 20 63 61 51 699
cmrp$overall[1]
# Accuracy 0.7277825
# Generalized Boosted Model
controlGBM <- trainControl(method = "repeatedcv", number = 5, repeats = 1)
fitgbm <- train(classe ~ ., data=training, method = "gbm",trControl = controlGBM, verbose = FALSE)
# Predicition with GBM
predgbm <- predict(fitgbm, newdata=testing)
cmgbm <- confusionMatrix(predgbm, testing$classe)
cmgbm$table
# Reference
# Prediction A B C D E
# A 1672 14 0 1 0
# B 2 1110 4 2 4
# C 0 13 1018 12 2
# D 0 2 2 949 9
# E 0 0 2 0 1067
cmgbm$overall[1]
# Accuracy 0.9882753
# Testing the best model (RF) with the testing dataset
predict(fitrf,newdata = pmltesting)
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
# B A B A A E D B A A B C B A E E A B B B
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clustering_quality.R
\name{k_homogeneity}
\alias{k_homogeneity}
\title{Measures a degree of mutual dissimilarity between all objects in a cluster}
\usage{
k_homogeneity(my_k, df, sample_size)
}
\arguments{
\item{my_k}{\itemize{
\item a cluster number (from the k column)
}}
\item{df}{\itemize{
\item a tibble with the k column and the signature column
}}
\item{sample_size}{\itemize{
\item size of the sample (~maxhist)
}}
}
\description{
Measures a degree of mutual dissimilarity between all objects in a cluster
}
|
/man/k_homogeneity.Rd
|
permissive
|
Nowosad/motifplus
|
R
| false
| true
| 596
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clustering_quality.R
\name{k_homogeneity}
\alias{k_homogeneity}
\title{Measures a degree of mutual dissimilarity between all objects in a cluster}
\usage{
k_homogeneity(my_k, df, sample_size)
}
\arguments{
\item{my_k}{\itemize{
\item a cluster number (from the k column)
}}
\item{df}{\itemize{
\item a tibble with the k column and the signature column
}}
\item{sample_size}{\itemize{
\item size of the sample (~maxhist)
}}
}
\description{
Measures a degree of mutual dissimilarity between all objects in a cluster
}
|
# Auto-extracted example code for zoom::zm() (from the package's zm.Rd).
# All lines prefixed "##D" sit inside a "Not run" guard and are plain
# comments here -- only library(zoom) executes when this file is sourced.
library(zoom)
### Name: zm
### Title: Launch interaction on a plot
### Aliases: zm
### Keywords: navigate navigation plot zm zoom
### ** Examples
## Not run:
##D # basic example
##D plot(rnorm(1000),rnorm(1000)) # could be any plot
##D zm() # navigate the plot
##D
##D # use the same xlim/ylim as ended up in the zoom session
##D xylim<-par("usr") # xmin,xmax,ymin,ymax of the final version of the plot
##D dev.off()
##D plot(rnorm(1000),rnorm(1000),xlim=xylim[1:2],ylim=xylim[3:4])
##D
##D # navigate two layers of data at the same time
##D par(mfrow=c(1,2))
##D plot(1,type="n",xlim=c(-3,3),ylim=c(-3,3),main="First Track")
##D polygon(c(-1,1,1,-1)*2,c(-1,-1,1,1)*2,col="blue")
##D lines(rnorm(100),rnorm(100))
##D plot(1,type="n",xlim=c(-3,3),ylim=c(-3,3),main="Second Track")
##D polygon(c(-1,1,1,-1)*2,c(-1,-1,1,1)*2,col="green")
##D lines(rnorm(100),rnorm(100))
##D zm() # it flickers quite a bit as it needs to replot everything every time...
##D
##D # one might want to use the older interface
##D # if attached to cairo under linux or MacOS
##D # it is also sometimes helpful to just define a square you want to zoom on
##D zm(type="s")
## End(Not run)
|
/data/genthat_extracted_code/zoom/examples/zm.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 1,173
|
r
|
# Auto-extracted example code for zoom::zm() (from the package's zm.Rd).
# All lines prefixed "##D" sit inside a "Not run" guard and are plain
# comments here -- only library(zoom) executes when this file is sourced.
library(zoom)
### Name: zm
### Title: Launch interaction on a plot
### Aliases: zm
### Keywords: navigate navigation plot zm zoom
### ** Examples
## Not run:
##D # basic example
##D plot(rnorm(1000),rnorm(1000)) # could be any plot
##D zm() # navigate the plot
##D
##D # use the same xlim/ylim as ended up in the zoom session
##D xylim<-par("usr") # xmin,xmax,ymin,ymax of the final version of the plot
##D dev.off()
##D plot(rnorm(1000),rnorm(1000),xlim=xylim[1:2],ylim=xylim[3:4])
##D
##D # navigate two layers of data at the same time
##D par(mfrow=c(1,2))
##D plot(1,type="n",xlim=c(-3,3),ylim=c(-3,3),main="First Track")
##D polygon(c(-1,1,1,-1)*2,c(-1,-1,1,1)*2,col="blue")
##D lines(rnorm(100),rnorm(100))
##D plot(1,type="n",xlim=c(-3,3),ylim=c(-3,3),main="Second Track")
##D polygon(c(-1,1,1,-1)*2,c(-1,-1,1,1)*2,col="green")
##D lines(rnorm(100),rnorm(100))
##D zm() # it flickers quite a bit as it needs to replot everything every time...
##D
##D # one might want to use the older interface
##D # if attached to cairo under linux or MacOS
##D # it is also sometimes helpful to just define a square you want to zoom on
##D zm(type="s")
## End(Not run)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psupplementary.R
\name{scale_dim}
\alias{scale_dim}
\title{Scales dimensions for nice plotting}
\usage{
scale_dim(v)
}
\description{
Scales dimensions for nice plotting
}
|
/man/scale_dim.Rd
|
no_license
|
wmacnair/psupplementary
|
R
| false
| true
| 249
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psupplementary.R
\name{scale_dim}
\alias{scale_dim}
\title{Scales dimensions for nice plotting}
\usage{
scale_dim(v)
}
\description{
Scales dimensions for nice plotting
}
|
library(httr)
library(base64enc)
# Obtain an application-only OAuth2 bearer token from Twitter using the
# app's consumer key/secret (client-credentials grant).
#
# Returns: a string of the form "Bearer <token>", ready for use as an
#   HTTP Authorization header value.
#
# SECURITY(review): the consumer key and secret are hard-coded below and
# committed to source control; they should be rotated and loaded from
# environment variables (e.g. Sys.getenv("TWITTER_KEY")) instead.
create_btoken <- function() {
  appname <- "Network_Viz_App"
  key <- "gHvYsYgHeB9HylcWZ7LNDO91n"
  secret <- "iklKrD5R37eawVBrWmQB5OC8uz3LYQBgv8VzCiJDjxXSMxMH1V"
  # base64 encoding
  # Basic-auth credential per RFC 7617: base64("key:secret")
  kands <- paste(key, secret, sep=":")
  base64kands <- base64encode(charToRaw(kands))
  base64kandsb <- paste("Basic", base64kands, sep=" ")
  # request bearer token
  resToken <- POST(url = "https://api.twitter.com/oauth2/token",
                   add_headers("Authorization" = base64kandsb, "Content-Type" = "application/x-www-form-urlencoded;charset=UTF-8"),
                   body = "grant_type=client_credentials")
  # get bearer token
  # content() parses the response body; access_token is extracted from it
  bearer <- content(resToken)
  bearerToken <- bearer[["access_token"]]
  bearerTokenb <- paste("Bearer", bearerToken, sep=" ")
  return(bearerTokenb)
}
|
/twitter_30day_setup.R
|
no_license
|
fjsackfield/nba-twitter-network-viz
|
R
| false
| false
| 865
|
r
|
library(httr)
library(base64enc)
# Request an application-only OAuth2 bearer token from Twitter and return
# it formatted as an Authorization header value ("Bearer <token>").
create_btoken <- function() {
  appname <- "Network_Viz_App"
  key <- "gHvYsYgHeB9HylcWZ7LNDO91n"
  secret <- "iklKrD5R37eawVBrWmQB5OC8uz3LYQBgv8VzCiJDjxXSMxMH1V"
  # Basic-auth credential: base64 of "key:secret"
  encoded_credentials <- base64encode(charToRaw(paste(key, secret, sep = ":")))
  basic_auth <- paste("Basic", encoded_credentials, sep = " ")
  # Exchange the consumer credentials for a bearer token
  token_response <- POST(
    url = "https://api.twitter.com/oauth2/token",
    add_headers(
      "Authorization" = basic_auth,
      "Content-Type" = "application/x-www-form-urlencoded;charset=UTF-8"
    ),
    body = "grant_type=client_credentials"
  )
  # Pull the access token out of the parsed response body
  token_value <- content(token_response)[["access_token"]]
  paste("Bearer", token_value, sep = " ")
}
|
# Declare the Spark package dependency for this sparklyr extension: the
# graphframes artifact matching the connection's Spark and Scala versions,
# fetched from the spark-packages repository.
spark_dependencies <- function(spark_version, scala_version, ...) {
  # Newest graphframes release known to work with this Spark line.
  # NOTE(review): ">=" on strings is lexicographic; correct for the 2.x
  # versions handled here, but would misorder e.g. "2.10.0" -- confirm.
  if (spark_version >= "2.4.0") {
    gf_version <- "0.8.1"
  } else if (spark_version >= "2.2.0") {
    gf_version <- "0.6.0"
  } else {
    gf_version <- "0.5.0"
  }
  # Maven coordinate: graphframes:graphframes:<ver>-spark<spark>-s_<scala>
  package_coord <- sprintf(
    "graphframes:graphframes:%s-spark%s-s_%s",
    gf_version, spark_version, scala_version
  )
  spark_dependency(
    jars = NULL,
    packages = c(package_coord),
    repositories = "https://repos.spark-packages.org"
  )
}
# Package load hook: registers this package as a sparklyr extension so
# that its spark_dependencies() is consulted when a connection is opened.
#
# Args:
#   libname : path to the library the package was loaded from (unused)
#   pkgname : this package's name, forwarded to sparklyr
.onLoad <- function(libname, pkgname) {
  sparklyr::register_extension(pkgname)
}
|
/R/dependencies.R
|
permissive
|
lgeistlinger/graphframes
|
R
| false
| false
| 571
|
r
|
# Declare the Spark package dependencies for this sparklyr extension.
#
# Args:
#   spark_version : Spark version string of the connection
#   scala_version : Scala binary version string (e.g. "2.11")
#   ...           : ignored; required by the sparklyr extension interface
#
# Returns: a spark_dependency() describing the Maven coordinate
#   graphframes:graphframes:<version>-spark<spark>-s_<scala>, resolved
#   from the spark-packages repository.
spark_dependencies <- function(spark_version, scala_version, ...) {
  # Newest graphframes release known to work with this Spark line.
  # NOTE(review): ">=" on strings compares lexicographically; fine for the
  # 2.x versions handled here but would misorder e.g. "2.10.0".
  graphframes_version <- if (spark_version >= "2.4.0") {
    "0.8.1"
  } else if (spark_version >= "2.2.0") {
    "0.6.0"
  } else {
    "0.5.0"
  }
  spark_dependency(
    jars = NULL,
    packages = c(
      sprintf(
        "graphframes:graphframes:%s-spark%s-s_%s",
        graphframes_version,
        spark_version,
        scala_version
      )
    ),
    repositories = "https://repos.spark-packages.org"
  )
}
# Package load hook: registers this package as a sparklyr extension so
# that its spark_dependencies() is consulted when a connection is opened.
#
# Args:
#   libname : path to the library the package was loaded from (unused)
#   pkgname : this package's name, forwarded to sparklyr
.onLoad <- function(libname, pkgname) {
  sparklyr::register_extension(pkgname)
}
|
# Analysis of 2014 MLB batting data: team- and league-level summaries of
# batting average and home runs, rendered as bar charts with error bars,
# box plots and dot plots, plus per-team player charts.
# NOTE(review): stat_summary(fun.y=...) is deprecated in ggplot2 >= 3.3
# (use `fun`); it still works with a warning -- left as-is here.
# Load libraries
library(readxl)
library(tidyr)
library(dplyr)
library(ggplot2)
library(plotrix)
library(plotly)
# Read and extract the data
df <- read_excel("mlb2014.xls", sheet=1, col_names = TRUE)
# Arrange by team and then average
grouped <- arrange(df, teamID, avg)
# Find mean and standard deviation of batting averages by team
avg <- setNames(aggregate(grouped$avg, by=list(grouped$teamID, grouped$lg), FUN=mean), c("Team", "League", "Avg"))
stdev <- setNames(aggregate(grouped$avg, by=list(grouped$teamID, grouped$lg), FUN=sd), c("Team", "League", "Stdev"))
totalDF <- bind_cols(avg, stdev)
totalDF <- totalDF[, !duplicated(colnames(totalDF))]
totalDF <- na.omit(totalDF)
# Plot means with sd
ggplot(totalDF, aes(x=Team, y=Avg, fill=League)) + geom_bar(position = "dodge", stat = "identity") +
  geom_errorbar(aes(ymax = Avg + Stdev, ymin=Avg - Stdev)) +
  labs(title = "Mean Batting Average in 2014", x = "Team",
       y = "Batting Average") +
  theme(text=element_text(size=20), axis.text.x = element_text(angle = 90, hjust = 1))
# Find mean average by league
leagueDF <- setNames(aggregate(totalDF$Avg, by=list(totalDF$League), FUN=mean), c("League", "Avg"))
# Plot league avg comparison
ggplot(leagueDF, aes(x=League, y=Avg, fill=League)) + geom_bar(position = "dodge", stat = "identity") +
  labs(title = "Mean Batting Average in 2014", x = "League",
       y = "Batting Average") +
  theme(text=element_text(size=20), axis.text.x = element_text(angle = 90, hjust = 1))
# Boxplot of avg by team
ggplot(df, aes(x=teamID, y=avg)) + geom_boxplot(outlier.colour="red", outlier.shape=8, outlier.size=2) +
  labs(title = "Mean Batting Average in 2014", x = "Team", y = "Batting Average") +
  theme(text=element_text(size=20), axis.text.x = element_text(angle = 90, hjust = 1))
# Boxplot of avg by league
ggplot(totalDF, aes(x=League, y=Avg)) + geom_boxplot() +
  stat_summary(fun.y=mean, geom="point", shape=23, size=4) +
  labs(title = "Mean Batting Average in 2014", x = "League", y = "Batting Average") +
  theme(text=element_text(size=20), axis.text.x = element_text(angle = 90, hjust = 1))
# Boxplot of avg by league with dots
ggplot(totalDF, aes(x=League, y=Avg, color=League)) + geom_boxplot() +
  stat_summary(fun.y=mean, geom="point", shape=23, size=4) +
  geom_dotplot(binaxis='y', stackdir='center', dotsize=.1, binwidth=.01) +
  labs(title = "Mean Batting Average in 2014", x = "League", y = "Batting Average") +
  theme(text=element_text(size=20), axis.text.x = element_text(angle = 90, hjust = 1))
# Find mean and standard deviation of home runs by team
avgHR <- setNames(aggregate(grouped$HR, by=list(grouped$teamID, grouped$lg), FUN=mean), c("Team", "League", "Avg"))
stdevHR <- setNames(aggregate(grouped$HR, by=list(grouped$teamID, grouped$lg), FUN=sd), c("Team", "League", "Stdev"))
totalHR <- bind_cols(avgHR, stdevHR)
totalHR <- totalHR[, !duplicated(colnames(totalHR))]
totalHR <- na.omit(totalHR)
# Boxplot of home runs by team
ggplot(df, aes(x=teamID, y=HR)) + geom_boxplot(outlier.colour="red", outlier.shape=8, outlier.size=2) +
  labs(title = "Home Runs in 2014", x = "Team", y = "Home Runs") +
  theme(text=element_text(size=20), axis.text.x = element_text(angle = 90, hjust = 1))
# Boxplot of home runs by league
ggplot(totalHR, aes(x=League, y=Avg, color=League)) + geom_boxplot() +
  stat_summary(fun.y=mean, geom="point", shape=23, size=4) +
  geom_dotplot(binaxis='y', stackdir='center', dotsize=.5, binwidth=.1) +
  labs(title = "Mean Home Runs in 2014 by League", x = "League", y = "Home Runs") +
  theme(text=element_text(size=20), axis.text.x = element_text(angle = 90, hjust = 1))
# Subset ATL
totalATL <- filter(grouped, teamID=="ATL")
# Plot of avg for each player on ATL
ggplot(totalATL, aes(x=nameLast, y=avg)) + geom_bar(stat="identity") +
  labs(title = "Batting Average of Atlanta Braves Players in 2014", x = "Player", y = "Avg") +
  theme(text=element_text(size=20), axis.text.x = element_text(angle = 90, hjust = 1))
# Plot of home runs for each player on ATL
ggplot(totalATL, aes(x=nameLast, y=HR)) + geom_bar(stat="identity") +
  labs(title = "Number of Home Runs by Atlanta Braves Players in 2014", x = "Player", y = "Home Runs") +
  theme(text=element_text(size=20), axis.text.x = element_text(angle = 90, hjust = 1))
# Plot batting avg and home runs for each team
teamsList <- na.omit(unique(grouped$teamID))
for (i in teamsList) {
  plot <- ggplot(grouped[grep(i, grouped$teamID),], aes(x=nameLast, y=avg)) + geom_bar(stat="identity") +
    labs(title = "Batting Averages in 2014", x = "Player", y = "Batting Average") +
    theme(text=element_text(size=20), axis.text.x = element_text(angle = 90, hjust = 1))
  print (plot)
}
for (i in teamsList) {
  plot <- ggplot(grouped[grep(i, grouped$teamID),], aes(x=nameLast, y=HR)) + geom_bar(stat="identity") +
    labs(title = "Home Runs in 2014", x = "Player", y = "Home Runs") +
    theme(text=element_text(size=20), axis.text.x = element_text(angle = 90, hjust = 1))
  print (plot)
}
# NOTE(review): knit() is being pointed at this script's own source file;
# confirm the intended target is not an .Rmd document.
library(knitr)
knit('mlb2014.R')
|
/mlbDataAnalysis.R
|
no_license
|
staceynlee/MLB2014analysis
|
R
| false
| false
| 5,078
|
r
|
# Analysis of 2014 MLB batting data: team- and league-level summaries of
# batting average and home runs, rendered as bar charts with error bars,
# box plots and dot plots, plus per-team player charts.
# NOTE(review): stat_summary(fun.y=...) is deprecated in ggplot2 >= 3.3
# (use `fun`); it still works with a warning -- left as-is here.
# Load libraries
library(readxl)
library(tidyr)
library(dplyr)
library(ggplot2)
library(plotrix)
library(plotly)
# Read and extract the data
df <- read_excel("mlb2014.xls", sheet=1, col_names = TRUE)
# Arrange by team and then average
grouped <- arrange(df, teamID, avg)
# Find mean and standard deviation of batting averages by team
avg <- setNames(aggregate(grouped$avg, by=list(grouped$teamID, grouped$lg), FUN=mean), c("Team", "League", "Avg"))
stdev <- setNames(aggregate(grouped$avg, by=list(grouped$teamID, grouped$lg), FUN=sd), c("Team", "League", "Stdev"))
totalDF <- bind_cols(avg, stdev)
totalDF <- totalDF[, !duplicated(colnames(totalDF))]
totalDF <- na.omit(totalDF)
# Plot means with sd
ggplot(totalDF, aes(x=Team, y=Avg, fill=League)) + geom_bar(position = "dodge", stat = "identity") +
  geom_errorbar(aes(ymax = Avg + Stdev, ymin=Avg - Stdev)) +
  labs(title = "Mean Batting Average in 2014", x = "Team",
       y = "Batting Average") +
  theme(text=element_text(size=20), axis.text.x = element_text(angle = 90, hjust = 1))
# Find mean average by league
leagueDF <- setNames(aggregate(totalDF$Avg, by=list(totalDF$League), FUN=mean), c("League", "Avg"))
# Plot league avg comparison
ggplot(leagueDF, aes(x=League, y=Avg, fill=League)) + geom_bar(position = "dodge", stat = "identity") +
  labs(title = "Mean Batting Average in 2014", x = "League",
       y = "Batting Average") +
  theme(text=element_text(size=20), axis.text.x = element_text(angle = 90, hjust = 1))
# Boxplot of avg by team
ggplot(df, aes(x=teamID, y=avg)) + geom_boxplot(outlier.colour="red", outlier.shape=8, outlier.size=2) +
  labs(title = "Mean Batting Average in 2014", x = "Team", y = "Batting Average") +
  theme(text=element_text(size=20), axis.text.x = element_text(angle = 90, hjust = 1))
# Boxplot of avg by league
ggplot(totalDF, aes(x=League, y=Avg)) + geom_boxplot() +
  stat_summary(fun.y=mean, geom="point", shape=23, size=4) +
  labs(title = "Mean Batting Average in 2014", x = "League", y = "Batting Average") +
  theme(text=element_text(size=20), axis.text.x = element_text(angle = 90, hjust = 1))
# Boxplot of avg by league with dots
ggplot(totalDF, aes(x=League, y=Avg, color=League)) + geom_boxplot() +
  stat_summary(fun.y=mean, geom="point", shape=23, size=4) +
  geom_dotplot(binaxis='y', stackdir='center', dotsize=.1, binwidth=.01) +
  labs(title = "Mean Batting Average in 2014", x = "League", y = "Batting Average") +
  theme(text=element_text(size=20), axis.text.x = element_text(angle = 90, hjust = 1))
# Find mean and standard deviation of home runs by team
avgHR <- setNames(aggregate(grouped$HR, by=list(grouped$teamID, grouped$lg), FUN=mean), c("Team", "League", "Avg"))
stdevHR <- setNames(aggregate(grouped$HR, by=list(grouped$teamID, grouped$lg), FUN=sd), c("Team", "League", "Stdev"))
totalHR <- bind_cols(avgHR, stdevHR)
totalHR <- totalHR[, !duplicated(colnames(totalHR))]
totalHR <- na.omit(totalHR)
# Boxplot of home runs by team
ggplot(df, aes(x=teamID, y=HR)) + geom_boxplot(outlier.colour="red", outlier.shape=8, outlier.size=2) +
  labs(title = "Home Runs in 2014", x = "Team", y = "Home Runs") +
  theme(text=element_text(size=20), axis.text.x = element_text(angle = 90, hjust = 1))
# Boxplot of home runs by league
ggplot(totalHR, aes(x=League, y=Avg, color=League)) + geom_boxplot() +
  stat_summary(fun.y=mean, geom="point", shape=23, size=4) +
  geom_dotplot(binaxis='y', stackdir='center', dotsize=.5, binwidth=.1) +
  labs(title = "Mean Home Runs in 2014 by League", x = "League", y = "Home Runs") +
  theme(text=element_text(size=20), axis.text.x = element_text(angle = 90, hjust = 1))
# Subset ATL
totalATL <- filter(grouped, teamID=="ATL")
# Plot of avg for each player on ATL
ggplot(totalATL, aes(x=nameLast, y=avg)) + geom_bar(stat="identity") +
  labs(title = "Batting Average of Atlanta Braves Players in 2014", x = "Player", y = "Avg") +
  theme(text=element_text(size=20), axis.text.x = element_text(angle = 90, hjust = 1))
# Plot of home runs for each player on ATL
ggplot(totalATL, aes(x=nameLast, y=HR)) + geom_bar(stat="identity") +
  labs(title = "Number of Home Runs by Atlanta Braves Players in 2014", x = "Player", y = "Home Runs") +
  theme(text=element_text(size=20), axis.text.x = element_text(angle = 90, hjust = 1))
# Plot batting avg and home runs for each team
teamsList <- na.omit(unique(grouped$teamID))
for (i in teamsList) {
  plot <- ggplot(grouped[grep(i, grouped$teamID),], aes(x=nameLast, y=avg)) + geom_bar(stat="identity") +
    labs(title = "Batting Averages in 2014", x = "Player", y = "Batting Average") +
    theme(text=element_text(size=20), axis.text.x = element_text(angle = 90, hjust = 1))
  print (plot)
}
for (i in teamsList) {
  plot <- ggplot(grouped[grep(i, grouped$teamID),], aes(x=nameLast, y=HR)) + geom_bar(stat="identity") +
    labs(title = "Home Runs in 2014", x = "Player", y = "Home Runs") +
    theme(text=element_text(size=20), axis.text.x = element_text(angle = 90, hjust = 1))
  print (plot)
}
# NOTE(review): knit() is being pointed at this script's own source file;
# confirm the intended target is not an .Rmd document.
library(knitr)
knit('mlb2014.R')
|
## create plot 3 for week 1 of exploratory data analysis
## dplyr must be attached for filter() used below (the original relied on
## it already being loaded, which fails in a fresh session)
library(dplyr)
## read the data in from your current working directory
data <- read.table("household_power_consumption.txt", sep = ";", header = TRUE)
## put the dates into the correct format and filter for just the days that we want (dplyr)
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
data_small <- filter(data, Date == "2007-02-01" | Date == "2007-02-02")
## put the data into numeric format
data_small$Sub_metering_1 <- as.numeric(as.character(data_small$Sub_metering_1))
data_small$Sub_metering_2 <- as.numeric(as.character(data_small$Sub_metering_2))
data_small$Sub_metering_3 <- as.numeric(as.character(data_small$Sub_metering_3))
## combine the dates and times into a single POSIXct timestamp
## BUG FIX: the second positional argument of as.POSIXct() is tz, not
## format -- the old call passed "%d/%m/%Y %H:%M:%S" as a timezone and
## relied on default parsing.  After as.Date(), dates render as
## "%Y-%m-%d", so that is the correct format for the pasted string.
data_small$date_time <- as.POSIXct(paste(data_small$Date, data_small$Time),
                                   format = "%Y-%m-%d %H:%M:%S")
## plot the three sub-metering channels overlaid on one chart
plot(data_small$date_time, data_small$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
lines(data_small$date_time, data_small$Sub_metering_2, col = "red")
lines(data_small$date_time, data_small$Sub_metering_3, col = "blue")
legend("topright", col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1)
## save the file as a png
dev.copy(png, file = "plot3.png", width = 480, height = 480)
dev.off()
|
/plot3.R
|
no_license
|
sevenBananas/ExData_Plotting1
|
R
| false
| false
| 1,306
|
r
|
## create plot 3 for week 1 of exploratory data analysis
## NOTE(review): filter() below comes from dplyr, which is never attached
## in this script -- it fails unless dplyr is already loaded.
## read the data in from your current working directory
data <- read.table("household_power_consumption.txt", sep = ";", header = TRUE)
## put the dates into the correct format and filter for just the days that we want (dplyr)
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
data_small <- filter(data, Date == "2007-02-01" | Date == "2007-02-02")
## put the data into numeric format
data_small$Sub_metering_1 <- as.numeric(as.character(data_small$Sub_metering_1))
data_small$Sub_metering_2 <- as.numeric(as.character(data_small$Sub_metering_2))
data_small$Sub_metering_3 <- as.numeric(as.character(data_small$Sub_metering_3))
## combine the dates and times
## NOTE(review): the second positional argument of as.POSIXct() is tz, not
## format, so "%d/%m/%Y %H:%M:%S" is being passed as a timezone; parsing
## only works via the default formats, and the Date column actually
## prints as "%Y-%m-%d" at this point -- confirm and fix upstream.
data_small$date_time <- as.POSIXct(paste(data_small$Date, data_small$Time), "%d/%m/%Y %H:%M:%S")
## plot the data
plot(data_small$date_time, data_small$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
lines(data_small$date_time, data_small$Sub_metering_2, col = "red")
lines(data_small$date_time, data_small$Sub_metering_3, col = "blue")
legend("topright", col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1)
## save the file as a png
dev.copy(png, file = "plot3.png", width = 480, height = 480)
dev.off()
|
# Combine Chilean lobby-audience records from an Excel workbook (one sheet
# per month) with CSV exports (pasivos / activos / datosAudiencia),
# normalise the name columns, fill in missing first/last-name splits via
# split_names() (from splitting_names.R), and save the merged table.
library("tidyverse")
library("readxl")
source('./splitting_names.R')
file <- "../lobby/BB.DD. LOBBY 2_ (1).xlsx"
sheets <- excel_sheets(file)
# Read every monthly sheet, keep the first 13 columns, tag rows with the
# sheet (month) name.
sheets_data <- map(sheets, function(X) {
  result <- read_excel(file, sheet = X)
  result <- result %>% select(1:13)
  result$month <- X
  result
})
# Stack the sheets, rename to the canonical column names used by the CSV
# data, split multi-person "Sujetos Activos" cells on commas, and give
# each Excel row a synthetic registry code ("xl-<row>").
lobby_data_xl <- do.call("rbind", sheets_data) %>%
  select(
    nombreCompletoActivo = `Sujetos Activos`,
    nombreInstitucion = `Organismo Público`,
    topic = `Materia de Audiencia`,
    fecha = Fecha,
    nombreCompletoPasivo = Autoridad,
    `Cargo autoridad`, `Categoria Cargo`, `Categorización FM`,
    `Categorización portal`, `Materia Ley`
  ) %>%
  mutate(nombreCompletoActivo = str_split(nombreCompletoActivo, ","),
         nombreActivo = NA,
         apellidosActivo = NA,
         nombrePasivo = NA,
         apellidosPasivo = NA,
         codigoInstitucion = NA,
         # NOTE(review): rep() with a single argument is a no-op here --
         # presumably paste0("xl-", 1:nrow(.)) was intended; same result.
         codigoActualDatasetRegistro = paste0("xl-", rep(1:nrow(.)))) %>%
  # NOTE(review): unnest() without a column argument relies on legacy
  # tidyr (pre-1.0) behaviour; confirm it still expands the list column
  # nombreCompletoActivo as intended on the installed tidyr version.
  unnest()
pasivos <- read_delim("../lobby/pasivos.csv", delim = ";")
activos <- read_csv("../lobby/activos_parsed.csv") %>%
  select(codigoActualDatasetRegistro, nombreActivo, apellidosActivo)
datos_audiencias <- read_csv("../lobby/datosAudiencia.csv")
# Full outer join of the CSV sources on the registry code, then pad with
# the columns only present in the Excel data so rbind() below lines up.
names_institution_topics <- merge(pasivos, datos_audiencias, by = "codigoActualDatasetRegistro", all = TRUE) %>%
  select(
    nombrePasivo, apellidosPasivo,
    nombreInstitucion, codigoInstitucion,
    topic = observacionesMateriaAudiencia,
    fecha = fechaInicio,
    codigoActualDatasetRegistro
  ) %>%
  merge(activos, by = "codigoActualDatasetRegistro", all = TRUE) %>%
  mutate(
    nombreCompletoPasivo = ifelse(
      is.na(nombrePasivo) | is.na(apellidosPasivo),
      NA,
      paste(nombrePasivo, apellidosPasivo)
    ),
    nombreCompletoActivo = ifelse(
      is.na(nombreActivo) | is.na(apellidosActivo),
      NA,
      paste(nombreActivo, apellidosActivo)
    ),
    `Cargo autoridad` = NA,
    `Categoria Cargo` = NA,
    `Categorización FM` = NA,
    `Categorización portal` = NA,
    `Materia Ley` = NA
  )
# Lower-case and trim a name column for consistent matching.
parse_names <- function(names) {
  names %>%
    tolower() %>%
    str_trim()
}
lobby_data <- rbind(names_institution_topics, lobby_data_xl) %>%
  mutate(
    nombreCompletoActivo = parse_names(nombreCompletoActivo),
    nombreCompletoPasivo = parse_names(nombreCompletoPasivo),
    nombreActivo = parse_names(nombreActivo),
    nombrePasivo = parse_names(nombrePasivo),
    apellidosActivo = parse_names(apellidosActivo),
    apellidosPasivo = parse_names(apellidosPasivo)
  )
# For rows still missing a name split, derive first/last names from the
# full name via split_names(), then backfill only the missing values.
names_table <- lobby_data %>%
  filter(is.na(apellidosActivo)) %>%
  select(nombreCompleto = nombreCompletoActivo) %>%
  split_names()
lobby_data <- lobby_data %>%
  merge(names_table, by.x = "nombreCompletoActivo", by.y = "nombreCompleto", all.x = TRUE) %>%
  mutate(
    nombreActivo = ifelse(!is.na(nombreActivo), nombreActivo, nombre),
    apellidosActivo = ifelse(!is.na(apellidosActivo), apellidosActivo, apellidos)
  ) %>%
  select(-nombre, -apellidos)
saveRDS(lobby_data, "./combined_lobby_data.Rda")
|
/read_lobby_data.R
|
no_license
|
wfjvdham/padronElectoralChile
|
R
| false
| false
| 3,034
|
r
|
# Combine Chilean lobby-audience records from an Excel workbook (one sheet
# per month) with CSV exports (pasivos / activos / datosAudiencia),
# normalise the name columns, fill in missing first/last-name splits via
# split_names() (from splitting_names.R), and save the merged table.
library("tidyverse")
library("readxl")
source('./splitting_names.R')
file <- "../lobby/BB.DD. LOBBY 2_ (1).xlsx"
sheets <- excel_sheets(file)
# Read every monthly sheet, keep the first 13 columns, tag rows with the
# sheet (month) name.
sheets_data <- map(sheets, function(X) {
  result <- read_excel(file, sheet = X)
  result <- result %>% select(1:13)
  result$month <- X
  result
})
# Stack the sheets, rename to the canonical column names used by the CSV
# data, split multi-person "Sujetos Activos" cells on commas, and give
# each Excel row a synthetic registry code ("xl-<row>").
lobby_data_xl <- do.call("rbind", sheets_data) %>%
  select(
    nombreCompletoActivo = `Sujetos Activos`,
    nombreInstitucion = `Organismo Público`,
    topic = `Materia de Audiencia`,
    fecha = Fecha,
    nombreCompletoPasivo = Autoridad,
    `Cargo autoridad`, `Categoria Cargo`, `Categorización FM`,
    `Categorización portal`, `Materia Ley`
  ) %>%
  mutate(nombreCompletoActivo = str_split(nombreCompletoActivo, ","),
         nombreActivo = NA,
         apellidosActivo = NA,
         nombrePasivo = NA,
         apellidosPasivo = NA,
         codigoInstitucion = NA,
         # NOTE(review): rep() with a single argument is a no-op here --
         # presumably paste0("xl-", 1:nrow(.)) was intended; same result.
         codigoActualDatasetRegistro = paste0("xl-", rep(1:nrow(.)))) %>%
  # NOTE(review): unnest() without a column argument relies on legacy
  # tidyr (pre-1.0) behaviour; confirm it still expands the list column
  # nombreCompletoActivo as intended on the installed tidyr version.
  unnest()
pasivos <- read_delim("../lobby/pasivos.csv", delim = ";")
activos <- read_csv("../lobby/activos_parsed.csv") %>%
  select(codigoActualDatasetRegistro, nombreActivo, apellidosActivo)
datos_audiencias <- read_csv("../lobby/datosAudiencia.csv")
# Full outer join of the CSV sources on the registry code, then pad with
# the columns only present in the Excel data so rbind() below lines up.
names_institution_topics <- merge(pasivos, datos_audiencias, by = "codigoActualDatasetRegistro", all = TRUE) %>%
  select(
    nombrePasivo, apellidosPasivo,
    nombreInstitucion, codigoInstitucion,
    topic = observacionesMateriaAudiencia,
    fecha = fechaInicio,
    codigoActualDatasetRegistro
  ) %>%
  merge(activos, by = "codigoActualDatasetRegistro", all = TRUE) %>%
  mutate(
    nombreCompletoPasivo = ifelse(
      is.na(nombrePasivo) | is.na(apellidosPasivo),
      NA,
      paste(nombrePasivo, apellidosPasivo)
    ),
    nombreCompletoActivo = ifelse(
      is.na(nombreActivo) | is.na(apellidosActivo),
      NA,
      paste(nombreActivo, apellidosActivo)
    ),
    `Cargo autoridad` = NA,
    `Categoria Cargo` = NA,
    `Categorización FM` = NA,
    `Categorización portal` = NA,
    `Materia Ley` = NA
  )
# Lower-case and trim a name column for consistent matching.
parse_names <- function(names) {
  names %>%
    tolower() %>%
    str_trim()
}
lobby_data <- rbind(names_institution_topics, lobby_data_xl) %>%
  mutate(
    nombreCompletoActivo = parse_names(nombreCompletoActivo),
    nombreCompletoPasivo = parse_names(nombreCompletoPasivo),
    nombreActivo = parse_names(nombreActivo),
    nombrePasivo = parse_names(nombrePasivo),
    apellidosActivo = parse_names(apellidosActivo),
    apellidosPasivo = parse_names(apellidosPasivo)
  )
# For rows still missing a name split, derive first/last names from the
# full name via split_names(), then backfill only the missing values.
names_table <- lobby_data %>%
  filter(is.na(apellidosActivo)) %>%
  select(nombreCompleto = nombreCompletoActivo) %>%
  split_names()
lobby_data <- lobby_data %>%
  merge(names_table, by.x = "nombreCompletoActivo", by.y = "nombreCompleto", all.x = TRUE) %>%
  mutate(
    nombreActivo = ifelse(!is.na(nombreActivo), nombreActivo, nombre),
    apellidosActivo = ifelse(!is.na(apellidosActivo), apellidosActivo, apellidos)
  ) %>%
  select(-nombre, -apellidos)
saveRDS(lobby_data, "./combined_lobby_data.Rda")
|
# normalisierte Gleitpunktzahlen
suppressPackageStartupMessages(library(mosaic))
# Enumerate every value representable in a simple floating point system.
#
# Args:
#   Z : character vector of mantissa bit strings, e.g. "101", read as the
#       binary fraction 0.z1z2...zk (so "101" -> 0.5 + 0.125)
#   E : character vector of exponents as strings, e.g. c("-1", "0", "1")
#   B : base raised to the exponent (default 2)
#   V : sign characters to apply; any of "+", "-"
#
# Returns: numeric vector of sign * mantissa * B^e for every combination,
#   iterated exponent-major, then mantissa, then sign, with 0 prepended.
all_values <- function(Z, E, B = 2, V = c("+", "-")) {
  # Value of a mantissa bit string: sum of 2^-i over positions i that
  # hold a "1" digit.
  mantissa_value <- function(z) {
    bits <- strsplit(z, NULL)[[1]] == "1"
    sum(bits * 2^-seq_along(bits))
  }
  ret <- c()
  for (e in E) {
    # The exponent arrives as a string such as "-1"; convert it directly
    # instead of the original eval(parse(text = ...)) on pasted code.
    scale <- B^as.numeric(e)
    for (z in Z) {
      m <- mantissa_value(z)
      for (v in V) {
        sign <- if (v == "-") -1 else 1
        ret <- c(ret, sign * m * scale)
      }
    }
  }
  # 0 is representable in every system but produced by none of the
  # combinations above, so prepend it explicitly.
  c(0, ret)
}
# Draw the distribution of floating point values as a centred dot plot,
# filled by whether each value comes from a normalised mantissa.
#
# Args:
#   verteilung : data.frame with a numeric column x and a grouping column
#                normalized ("yes"/"no"); see the script below
#   limits, labels : forwarded to scale_x_discrete() to control the axis
#
# Returns: a ggplot object (printed by the caller or via autoprinting).
plotVerteilung <- function(verteilung, limits=NULL, labels=NULL) {
  ggplot(verteilung, aes(x, fill=factor(normalized))) +
    geom_dotplot(
      binwidth = 0.03,
      dotsize=1.2,
      stackgroups = TRUE,
      stackratio = 1.1,
      binpositions = "all",
      method = "histodot",
      show.legend = FALSE,
      stackdir="center"
    ) +
    theme_minimal() +
    scale_y_continuous(NULL, breaks = NULL) +
    scale_x_discrete(
      limits=limits,
      labels=labels
    ) +
    ylab("")
}
# Example system: sign, 3 mantissa bits (normalised => leading bit 1),
# exponents -1..1, base 2.
B <- 2
E <- c("-1","0","1")
Z <- c("100", "101", "110", "111")
V <- c("+", "-")
#B <- 2
#E <- c("-1","0","1","2")
#Z <- c("100", "101", "110", "111")
#V <- c("+", "-")
normValues <- all_values(Z, E, B, V)
normValues.len <- length(normValues)
# Non-normalised mantissas (leading bit 0) for comparison.
NZ <- c("001", "010", "011", "000")
nonNormValues <- all_values(NZ, E, B, V)
nonNormValues.len <- length(nonNormValues)
allValues <- c(normValues, nonNormValues)
allValues.uniquiLength <- length(unique(allValues))
x_min <- round(min(allValues))
x_max <- round(max(allValues))
# Long-format frame for plotting: one row per value, flagged by origin.
df <- data.frame(
  x = allValues,
  normalized = c(rep("yes", normValues.len), rep("no", nonNormValues.len))
)
lim_lab <- seq(x_min, x_max, 0.5)
# Dot plots of all values / only normalised / only non-normalised.
plotVerteilung(df, limits=lim_lab, labels=lim_lab)
ggsave("VerteilungGleitpunktzahlen_all.pdf", device="pdf", width = 9, height = 1.5, units="in")
plotVerteilung(df[df$normalized == "yes",], limits=lim_lab, labels=lim_lab)
ggsave("VerteilungGleitpunktzahlen_norm.pdf", device="pdf", width = 9, height = 1.5, units="in")
plotVerteilung(df[df$normalized == "no",], limits=lim_lab, labels=lim_lab)
ggsave("VerteilungGleitpunktzahlen_nonnorm.pdf", device="pdf", width = 9, height = 1.5, units="in")
# Alternative renderings with the ggformula (mosaic) interface.
gf_point(rep(1, nrow(df)) ~ x, color = ~ normalized, data=df) %>%
  gf_lims(x = c(-4,4), y=c(0.99,1.01)) %>%
  gf_theme(theme_minimal())
bins <- length(unique(c(normValues,nonNormValues)))
gf_dotplot(~ x, fill = ~ normalized, binwidth=0.04, binpositions="all", show.legend=FALSE,
           method= "histodot", dotsize = 1, data=df) %>%
  gf_lims(x = c(-3.5, 3.5)) %>%
  gf_refine(scale_y_discrete(limits=c(0))) %>%
  gf_theme(theme_minimal())
gf_counts(~ x, fill = ~ normalized, size=2, show.legend=FALSE, geom="dotplot", data=df) %>%
  gf_lims(x = c(-3.5, 3.5)) %>%
  gf_theme(theme_minimal())
######
# NOTE(review): the ggsave() below overwrites the _norm.pdf written
# earlier in this script -- confirm which version is meant to survive.
ggplot(df[df$normalized=="yes",], aes(x, 1, color=normalized)) +
  geom_hline(yintercept = 1, size= 0.1) +
  geom_point(show.legend = FALSE) +
  theme_minimal() +
  scale_y_continuous(NULL, breaks = NULL)
ggsave("VerteilungGleitpunktzahlen_norm.pdf", device="pdf", width = 9, height = 1.5, units="in")
ggplot(df[df$normalized=="no",], aes(x, 1, color=normalized)) +
geom_hline(yintercept = 1, size= 0.1) +
geom_point(show.legend = FALSE) +
theme_minimal() +
scale_y_continuous(NULL, breaks = NULL)
ggsave("VerteilungGleitpunktzahlen_nonnorm.pdf", device="pdf", width = 9, height = 1.5, units="in")
#####
# Dot-plot of the normalised values only, with a fixed symmetric x-axis.
# FIX: the original geom_dotplot() call ended its argument list with a
# trailing comma (after stackdir="center"), which is an error in R
# ("argument ... is empty").
ggplot(df[df$normalized=="yes",], aes(x, fill=factor(normalized))) +
geom_dotplot(
binwidth = 0.03,
dotsize=1.2,
stackgroups = TRUE,
stackratio = 1.1,
binpositions = "all",
method = "histodot",
show.legend = FALSE,
stackdir="center"
) +
theme_minimal() +
scale_y_continuous(NULL, breaks = NULL) +
scale_x_discrete(
# limits=c(-3.5,-3,-2.5,-2,-1.5,-1,-0.5,0,0.5,1,1.5,2,2.5,3,3.5),
# labels=c("","-3", "", "-2","", "-1","", "0", "", "1" ,"", "2","", "3", "")
#nice.breaks=F,
breaks=c(-2,-1.5,-1,-0.5,0,0.5,1,1.5,2),
labels=c("-2", "", "-1", "", "0", "", "1" ,"", "2"),
limits=c(-2.5,2.5)
) +
ylab("")
ggsave("VerteilungGleitpunktzahlen1.pdf", device="pdf", width = 9, height = 1.5, units="in")
|
/R-Skripte/VerteilungGleitpunktzahlen.R
|
permissive
|
NMarkgraf/Quantitative-Methoden-der-W-Informatik
|
R
| false
| false
| 4,489
|
r
|
# normalisierte Gleitpunktzahlen
suppressPackageStartupMessages(library(mosaic))
all_values <- function(Z, E, B = 2, V=c("+", "-")) {
ret <- c()
for (e in E) {
for (z in Z) {
for (v in V) {
val <-0
b <- 0.5
for (c in strsplit(z, NULL)[[1]]) {
if (c == "1") val <- val + b
b <- b * 0.5
}
val <- val*eval(parse(text=paste0(v,"1")))*eval(parse(text=paste(B,"**(",e,")")))
ret <- c(ret, val)
}
}
}
c(0, ret)
}
plotVerteilung <- function(verteilung, limits=NULL, labels=NULL) {
ggplot(verteilung, aes(x, fill=factor(normalized))) +
geom_dotplot(
binwidth = 0.03,
dotsize=1.2,
stackgroups = TRUE,
stackratio = 1.1,
binpositions = "all",
method = "histodot",
show.legend = FALSE,
stackdir="center"
) +
theme_minimal() +
scale_y_continuous(NULL, breaks = NULL) +
scale_x_discrete(
limits=limits,
labels=labels
) +
ylab("")
}
B <- 2
E <- c("-1","0","1")
Z <- c("100", "101", "110", "111")
V <- c("+", "-")
#B <- 2
#E <- c("-1","0","1","2")
#Z <- c("100", "101", "110", "111")
#V <- c("+", "-")
normValues <- all_values(Z, E, B, V)
normValues.len <- length(normValues)
NZ <- c("001", "010", "011", "000")
nonNormValues <- all_values(NZ, E, B, V)
nonNormValues.len <- length(nonNormValues)
allValues <- c(normValues, nonNormValues)
allValues.uniquiLength <- length(unique(allValues))
x_min <- round(min(allValues))
x_max <- round(max(allValues))
df <- data.frame(
x = allValues,
normalized = c(rep("yes", normValues.len), rep("no", nonNormValues.len))
)
lim_lab <- seq(x_min, x_max, 0.5)
plotVerteilung(df, limits=lim_lab, labels=lim_lab)
ggsave("VerteilungGleitpunktzahlen_all.pdf", device="pdf", width = 9, height = 1.5, units="in")
plotVerteilung(df[df$normalized == "yes",], limits=lim_lab, labels=lim_lab)
ggsave("VerteilungGleitpunktzahlen_norm.pdf", device="pdf", width = 9, height = 1.5, units="in")
plotVerteilung(df[df$normalized == "no",], limits=lim_lab, labels=lim_lab)
ggsave("VerteilungGleitpunktzahlen_nonnorm.pdf", device="pdf", width = 9, height = 1.5, units="in")
gf_point(rep(1, nrow(df)) ~ x, color = ~ normalized, data=df) %>%
gf_lims(x = c(-4,4), y=c(0.99,1.01)) %>%
gf_theme(theme_minimal())
bins <- length(unique(c(normValues,nonNormValues)))
gf_dotplot(~ x, fill = ~ normalized, binwidth=0.04, binpositions="all", show.legend=FALSE,
method= "histodot", dotsize = 1, data=df) %>%
gf_lims(x = c(-3.5, 3.5)) %>%
gf_refine(scale_y_discrete(limits=c(0))) %>%
gf_theme(theme_minimal())
gf_counts(~ x, fill = ~ normalized, size=2, show.legend=FALSE, geom="dotplot", data=df) %>%
gf_lims(x = c(-3.5, 3.5)) %>%
gf_theme(theme_minimal())
######
ggplot(df[df$normalized=="yes",], aes(x, 1, color=normalized)) +
geom_hline(yintercept = 1, size= 0.1) +
geom_point(show.legend = FALSE) +
theme_minimal() +
scale_y_continuous(NULL, breaks = NULL)
ggsave("VerteilungGleitpunktzahlen_norm.pdf", device="pdf", width = 9, height = 1.5, units="in")
ggplot(df[df$normalized=="no",], aes(x, 1, color=normalized)) +
geom_hline(yintercept = 1, size= 0.1) +
geom_point(show.legend = FALSE) +
theme_minimal() +
scale_y_continuous(NULL, breaks = NULL)
ggsave("VerteilungGleitpunktzahlen_nonnorm.pdf", device="pdf", width = 9, height = 1.5, units="in")
#####
ggplot(df[df$normalized=="yes",], aes(x, fill=factor(normalized))) +
geom_dotplot(
binwidth = 0.03,
dotsize=1.2,
stackgroups = TRUE,
stackratio = 1.1,
binpositions = "all",
method = "histodot",
show.legend = FALSE,
stackdir="center",
) +
theme_minimal() +
scale_y_continuous(NULL, breaks = NULL) +
scale_x_discrete(
# limits=c(-3.5,-3,-2.5,-2,-1.5,-1,-0.5,0,0.5,1,1.5,2,2.5,3,3.5),
# labels=c("","-3", "", "-2","", "-1","", "0", "", "1" ,"", "2","", "3", "")
#nice.breaks=F,
breaks=c(-2,-1.5,-1,-0.5,0,0.5,1,1.5,2),
labels=c("-2", "", "-1", "", "0", "", "1" ,"", "2"),
limits=c(-2.5,2.5)
) +
ylab("")
ggsave("VerteilungGleitpunktzahlen1.pdf", device="pdf", width = 9, height = 1.5, units="in")
|
stg_bin = read.csv("strange_binary.csv")
library(rpart)
# Fit a depth-limited classification tree predicting column `c` from all
# other columns of the CSV.
dt = rpart(c~.,stg_bin,method="class",maxdepth=3)
plot(dt)
text(dt)
printcp(dt)
# Class-probability predictions on the training data (resubstitution).
prd = predict(dt, stg_bin)
# Recode observed labels: 'good' -> 0, everything else -> 1.
# NOTE(review): this assignment also shadows base::c for the rest of the
# session.
c = ifelse(stg_bin$c == 'good' , 0 , 1)
# First probability column; presumably the alphabetically-first class
# level -- TODO confirm it lines up with the 0/1 coding above.
prd = as.data.frame(prd[,1])
# Number of agreements at a 0.5 probability threshold.
sum(round(prd, digits = 0) == c)
|
/3a_decision_tree.R
|
no_license
|
anurag2301/data_mining
|
R
| false
| false
| 267
|
r
|
stg_bin = read.csv("strange_binary.csv")
library(rpart)
dt = rpart(c~.,stg_bin,method="class",maxdepth=3)
plot(dt)
text(dt)
printcp(dt)
prd = predict(dt, stg_bin)
c = ifelse(stg_bin$c == 'good' , 0 , 1)
prd = as.data.frame(prd[,1])
sum(round(prd, digits = 0) == c)
|
##These functions cache the inverse of a matrix.
##makeCacheMatrix creates a special "matrix" object: a list containing
##functions to set the value of the matrix, get the value of the matrix,
##set the cached inverse, and get the cached inverse.
## Constructor for a caching-matrix wrapper. Returns a list of four
## closures sharing the matrix `x` and its cached inverse `inv`:
##   set(y)        replace the matrix and invalidate the cached inverse
##   get()         return the stored matrix
##   setInv(i)     store a computed inverse
##   getInv()      return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # cache is stale once the matrix changes
  }
  get <- function() {
    x
  }
  setInv <- function(inverse) {
    inv <<- inverse
  }
  getInv <- function() {
    inv
  }
  list(set = set, get = get, setInv = setInv, getInv = getInv)
}
## Return the inverse of the special "cache matrix" created by
## makeCacheMatrix. Reuses the cached inverse when available; otherwise
## computes it with solve(), stores it via x$setInv(), and returns it.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  z <- x$getInv()
  if (!is.null(z)) {
    message("getting cached data")
    # FIX: the original returned `m`, an undefined object, so every
    # cache hit raised an error. Return the cached inverse.
    return(z)
  }
  data <- x$get()
  z <- solve(data, ...)
  x$setInv(z)
  ## Return a matrix that is the inverse of 'x'
  z
}
|
/cachematrix.R
|
no_license
|
abecca5/ProgrammingAssignment2
|
R
| false
| false
| 832
|
r
|
##These function cachethe inverse of a matrix
##This function creates a matrix which is just a list containing
##functions to set the value of a matrix, get the value of the matrix,
##set the value of the matrix, and get the value of the matrix
makeCacheMatrix <- function(x = matrix()) {
inv<-NULL
set<-function (y){
x<<-y
inv<<-NULL
}
get<-function()x
setInv<-function(inverse) inv<<-inverse
getInv<-function()inv
list(set=set, get=get, setInv=setInv, getInv=getInv)
}
## This function takes the matrix from the first function and calculates
##its inverse
cacheSolve <- function(x, ...) {
z<-x$getInv()
if(!is.null(z)){
message("getting cached data")
return(m)
}
data<-x$get()
z<-solve(data)
x$setInv(z)
z
## Return a matrix that is the inverse of 'x'
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotPerf_multi.R
\name{plotPerf_multi}
\alias{plotPerf_multi}
\title{Plots a set of ROC/PR curves with average.}
\usage{
plotPerf_multi(inList, plotTitle = "performance", plotType = "ROC",
xlab = "TPR", ylab = "FPR", meanCol = "darkblue", xlim = c(0, 1),
ylim = c(0, 1))
}
\arguments{
\item{inList}{(list) ROCR::performance objects, one per iteration}
\item{plotTitle}{(char) plot title}
\item{plotType}{(char) one of ROC | PR | custom. Affects x/y labels}
\item{xlab}{(char) x-axis label}
\item{ylab}{(char) y-axis label}
\item{meanCol}{(char) colour for mean trendline}
\item{xlim}{(numeric) min/max extent for x-axis}
\item{ylim}{(numeric) min/max extent for y-axis}
}
\value{
No value. Side effect of plotting ROC and PR curves
}
\description{
Plots a set of ROC/PR curves with average.
}
\details{
Plots average curves with individual curves imposed.
}
\examples{
inDir <- system.file("extdata","example_output",package="netDx")
all_rng <- list.files(path = inDir, pattern = 'rng.')
fList <- paste(inDir,all_rng,'predictionResults.txt',sep=getFileSep())
rocList <- list()
for (k in seq_len(length(fList))) {
dat <- read.delim(fList[1],sep='\\t',header=TRUE,as.is=TRUE)
predClasses <- c('LumA', 'notLumA')
pred_col1 <- sprintf('\%s_SCORE',predClasses[1])
pred_col2 <- sprintf('\%s_SCORE',predClasses[2])
idx1 <- which(colnames(dat) == pred_col1)
idx2 <- which(colnames(dat) == pred_col2)
pred <- ROCR::prediction(dat[,idx1]-dat[,idx2],
dat$STATUS==predClasses[1])
rocList[[k]] <- ROCR::performance(pred,'tpr','fpr')
}
plotPerf_multi(rocList,'ROC')
}
|
/man/plotPerf_multi.Rd
|
permissive
|
Yaqiongxiao/netDx
|
R
| false
| true
| 1,663
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotPerf_multi.R
\name{plotPerf_multi}
\alias{plotPerf_multi}
\title{Plots a set of ROC/PR curves with average.}
\usage{
plotPerf_multi(inList, plotTitle = "performance", plotType = "ROC",
xlab = "TPR", ylab = "FPR", meanCol = "darkblue", xlim = c(0, 1),
ylim = c(0, 1))
}
\arguments{
\item{inList}{(list) ROCR::performance objects, one per iteration}
\item{plotTitle}{(numeric) plot title}
\item{plotType}{(char) one of ROC | PR | custom. Affects x/y labels}
\item{xlab}{(char) x-axis label}
\item{ylab}{(char) y-axis label}
\item{meanCol}{(char) colour for mean trendline}
\item{xlim}{(numeric) min/max extent for x-axis}
\item{ylim}{(numeric) min/max extent for y-axis}
}
\value{
No value. Side effect of plotting ROC and PR curves
}
\description{
Plots a set of ROC/PR curves with average.
}
\details{
Plots average curves with individual curves imposed.
}
\examples{
inDir <- system.file("extdata","example_output",package="netDx")
all_rng <- list.files(path = inDir, pattern = 'rng.')
fList <- paste(inDir,all_rng,'predictionResults.txt',sep=getFileSep())
rocList <- list()
for (k in seq_len(length(fList))) {
dat <- read.delim(fList[1],sep='\\t',header=TRUE,as.is=TRUE)
predClasses <- c('LumA', 'notLumA')
pred_col1 <- sprintf('\%s_SCORE',predClasses[1])
pred_col2 <- sprintf('\%s_SCORE',predClasses[2])
idx1 <- which(colnames(dat) == pred_col1)
idx2 <- which(colnames(dat) == pred_col2)
pred <- ROCR::prediction(dat[,idx1]-dat[,idx2],
dat$STATUS==predClasses[1])
rocList[[k]] <- ROCR::performance(pred,'tpr','fpr')
}
plotPerf_multi(rocList,'ROC')
}
|
#!/usr/bin/env Rscript
library(randomForest)
library(reshape)
library(ggplot2)
library(caret)
library(MLmetrics)
library(philentropy)
library(vcd)
library(PRROC)
OG_Dir = getwd() # Original directory from which script was called
dirInd = strsplit(OG_Dir, split = '/')[[1]]
randInd = dirInd[length(dirInd)]
dirInd = dirInd[length(dirInd) -1]
homeDir = '/dartfs-hpc/rc/home/y/f002tsy/cbklab/Mattox/lec_gly_binding/'
# homeDir = '/Users/dmattox/cbk/lec_gly_binding/'
setwd(homeDir)
#######################
# Functions
#######################
pullDiverseCases <- function(scaledData, startInds, thresh, prevSampled = NA){
  # Greedily select a "diverse" subset of the binding-site row indices in
  # startInds: a site is kept only if its distance (philentropy::distance,
  # Euclidean by default) to every previously kept site is >= thresh.
  #
  # scaledData  - matrix/data.frame of scaled features, one row per site
  # startInds   - candidate row indices to sample from
  # thresh      - minimum pairwise distance for two sites to both be kept
  # prevSampled - row indices already selected (e.g. positive-class sites)
  #               that new selections must also be far from; NA if none
  #
  # Returns the selected row indices (prevSampled first, if supplied).
  if (any(is.na(prevSampled))) {
    # No previously sampled indices were passed in
    if (length(startInds) == 1) {
      out = startInds
    } else if (length(startInds) == 2) {
      # FIX: measure the distance on the scaledData argument; the original
      # referenced the global `scaledFeats` here, ignoring scaledData.
      if (suppressMessages(distance(scaledData[startInds,])) >= thresh) {
        out = startInds # farther apart than the threshold: keep both
      } else{
        out = sample(startInds, size = 1) # too close: pick one at random
      }
    } else {
      cnter = 1
      out = rep(0, length(startInds)) # holds the set of diverse indices
    }
  } else{
    # Indices were passed in from the positive class
    out = c(prevSampled, rep(0, length(startInds)))
    cnter = length(prevSampled) + 1
    if (length(startInds) == 1) {
      # One new negative site to compare to previously sampled positive sites
      distMat = suppressMessages(distance(x = rbind(scaledData[out[out != 0], ], scaledData[startInds, ]))) # all pairwise distances b/w sites
      if (is.matrix(distMat)){ # more than one previously sampled binding site
        distMat = distMat[1:sum(out != 0), -1 * (1:sum(out != 0))] # top n rows minus first n cols, n = number already sampled
      }
      if (!any(distMat < thresh)) {
        out[cnter] = startInds
      }
    }
  }
  if (any(out == 0)){
    # Fill the remaining slots: draw a candidate at random, then drop all
    # remaining candidates closer than thresh to any selected site.
    while( length(startInds) >= 2 ){
      out[cnter] = sample(startInds, size = 1) # sample an index randomly
      cnter = cnter + 1 # increase the sample count
      startInds = startInds[! startInds %in% out] # drop sampled indices from remaining candidates
      distMat = suppressMessages(distance(x = rbind(scaledData[out[out != 0],], scaledData[startInds,]))) # pairwise distances
      distMat = distMat[1:sum(out != 0),-1*(1:sum(out != 0))] # rows = sampled sites, cols = remaining candidates
      if (is.matrix(distMat)){
        distMat = apply(X = distMat, MARGIN = 2, FUN = min) # min distance from each remaining site to any sampled site
      }
      dropInds = startInds[distMat < thresh ]
      startInds = startInds[! startInds %in% dropInds]
      if(length(startInds) == 1){
        out[cnter] = startInds
      }
    }
  }
  out = out[out != 0]
  return(out)
}
sampleDiverseSitesByLig <- function(clusterIDs, testClust, featureSet, ligandTag, distThresh, scaledFeatureSet = featureSet){
  # Sample diverse binding sites with and without ligand [ligandTag] for each
  # cluster in clusterIDs, except the cluster held out for LO(C)O validation
  # indicated by testClust.
  # Sites are taken from featureSet; Euclidean distances are computed on
  # scaledFeatureSet. A site is kept only if its distance to every previously
  # sampled site exceeds distThresh (median pairwise distance).
  # Returns a data.frame of sampled feature rows plus a `bound` factor and a
  # `clus` cluster-ID column.
  # Drop the excluded cluster
  uniClusts = unique(clusterIDs)
  uniClusts = uniClusts[ ! uniClusts %in% testClust]
  dat = as.data.frame(matrix(0, nrow = nrow(featureSet), ncol = ncol(featureSet)))
  # FIX: use the featureSet argument throughout; the original referenced the
  # global `predFeats`, so the function only worked for that one table.
  colnames(dat) = colnames(featureSet)
  dat$bound = FALSE
  dat$clus = 0
  j = 1 # write index into the returned data frame (dat)
  for (i in 1:length(uniClusts)) {
    inds = (1:nrow(featureSet))[clusterIDs == uniClusts[i]] # row indices of this cluster
    negInds = inds[! ligandTag[inds]] # binding sites w/o ligand
    posInds = inds[ligandTag[inds]] # binding sites w/ ligand
    if (length(posInds) > 0){
      outInds = pullDiverseCases(scaledData = scaledFeatureSet, startInds = posInds, thresh = distThresh)
    } else {
      outInds = NA
    }
    # FIX: the original wrote length(negInds > 0), which is just
    # length(negInds); make the intended emptiness check explicit.
    if (length(negInds) > 0){
      outInds = pullDiverseCases(scaledData = scaledFeatureSet, startInds = negInds, thresh = distThresh, prevSampled = outInds)
    }
    dat[(j:(j+length(outInds) - 1)), (1:ncol(featureSet))] = featureSet[outInds, ] # feature values for representative sites
    dat$bound[(j:(j+length(outInds) - 1))] = ligandTag[outInds] # bound label
    dat$clus[(j:(j+length(outInds) - 1))] = uniClusts[i] # cluster ID
    j = j + length(outInds)
  }
  # Trim unused preallocated rows; guard the (theoretical) fully-filled case
  # where j would exceed nrow(dat) and the original indexing misbehaved.
  if (j <= nrow(dat)) {
    dat = dat[-(j:nrow(dat)), ]
  }
  dat$bound = as.factor(dat$bound)
  return(dat)
}
f2 <- function (data, lev = NULL, model = NULL, beta = 2) {
  # caret summaryFunction computing the F-beta score (F2 by default) with
  # "TRUE" as the positive class.
  precision <- posPredValue(data$pred, data$obs, positive = "TRUE")
  # FIX: the argument name was misspelled `postive`, so it fell into `...`
  # and sensitivity() silently used its default positive class (the first
  # factor level) instead of "TRUE".
  recall <- sensitivity(data$pred, data$obs, positive = "TRUE")
  f2_val <- ((1 + beta^2) * precision * recall) / (beta^2 * precision + recall)
  names(f2_val) <- c("F2")
  f2_val
}
# f3 <- function (data, lev = NULL, model = NULL, beta = 3) {
# precision <- posPredValue(data$pred, data$obs, positive = "TRUE")
# recall <- sensitivity(data$pred, data$obs, postive = "TRUE")
# f3_val <- ((1 + beta^2) * precision * recall) / (beta^2 * precision + recall)
# names(f3_val) <- c("F3")
# f3_val
# }
# Convert a numeric vector of counts into proportions of its total.
pCnt <- function(x){
  x / sum(x)
}
getKPRFb <- function(conMatDF){
  # Pool per-fold confusion-matrix rows (columns ordered TP, TN, FP, FN)
  # by summing each column, then compute aggregate performance metrics.
  # Returns a named list: kappa, recall, precision, F2, F3, F4.
  totals <- apply(X = conMatDF, MARGIN = 2, FUN = sum)
  TP <- totals[[1]]
  TN <- totals[[2]]
  FP <- totals[[3]]
  FN <- totals[[4]]
  recall <- TP / (TP + FN)
  precision <- TP / (TP + FP)
  # F-beta score as a local helper instead of three copies of the formula
  fbeta <- function(beta) {
    ((1 + beta^2) * precision * recall) / (beta^2 * precision + recall)
  }
  # Cohen's kappa from observed vs chance accuracy
  total <- TP + TN + FP + FN
  chanceAcc <- ((TN + FP) * (TN + FN) + (FN + TP) * (FP + TP)) / total^2
  obsAcc <- (TP + TN) / total
  kappa <- (obsAcc - chanceAcc) / (1 - chanceAcc)
  list(kappa = kappa,
       recall = recall,
       precision = precision,
       F2 = fbeta(2),
       F3 = fbeta(3),
       F4 = fbeta(4))
}
# Ensure `string` (e.g. a path to a directory) ends with a trailing
# forward slash, appending one only when it is missing.
addSlash <- function(string) {
  if (endsWith(string, "/")) {
    return(string)
  }
  paste0(string, "/")
}
# Ligand-class colour ramp, indexed by ligand column later on
colfunc = colorRampPalette(c("red","goldenrod","forestgreen","royalblue","darkviolet"))
#######################
# Read in data and set up models
#######################
# randInd comes from the working-directory name (see prolog above)
set.seed(as.integer(randInd))
# Read in data
ligTags = read.delim(file = './analysis/training/data_in/ligTags.tsv', sep = '\t', stringsAsFactors = F)
predFeats = read.delim(file = './analysis/training/data_in/predFeats.csv', sep = ',', stringsAsFactors = F)
bsResiDat = read.delim(file = './analysis/training/data_in/bsResiDat.tsv', sep = '\t', stringsAsFactors = F)
scaledFeats = predFeats # Scale features between 0 & 1
# Min-max scale each feature column in place
for(i in 1:ncol(scaledFeats)){
scaledFeats[,i] = (scaledFeats[,i] - min(scaledFeats[,i])) / (max(scaledFeats[,i]) - min(scaledFeats[,i]))
}
# Pairwise distance matrix between binding sites; its median is the
# diversity threshold used when sampling sites
bwBSiteDists = distance(scaledFeats)
medPairwiseDist = median(bwBSiteDists[upper.tri(bwBSiteDists)])
clusLst = unique(bsResiDat$seqClust50)
# all(row.names(bsResiDat) == row.names(predFeats))
# Cross-validation / random-forest hyperparameters
folds = 5
reps = 1
testReps = 10
default_mtry = round(sqrt(ncol(predFeats)), 0)
default_ntree = 2000
# tune.grid = expand.grid(.mtry=default_mtry)
# mtry grid spans [default_mtry - 50%, default_mtry + 50%]
half_mtry = round(0.5*default_mtry,0)
tune.grid <- expand.grid(.mtry= c(-half_mtry:half_mtry) + default_mtry)
#########################
# Train and validate based on current ligand class
# 5x CV training from sampling + LO(C)O validation
# Revamped sampling to limit similar negative examples
# Train with F2
# mtry within [sqrt(feature count) +/- 50%]
#########################
dirInd = as.integer(dirInd) # Subdirectory from which script was called to indicate which ligTag column (& which ligand) to train classifier for
lig = ligTags[,dirInd]
lig = lig[sample(x = 1:nrow(ligTags), size = nrow(ligTags), replace = F)] # Shuffle the labels of the training data
# Define dataframes to hold model results
trainOut = as.data.frame(matrix(0, nrow = length(clusLst), ncol = 7))
row.names(trainOut) = clusLst
colnames(trainOut) = c('mtry', 'kappa', 'recall', 'TP', 'TN', 'FP', 'FN')
testOut = as.data.frame(matrix(0, nrow = testReps * length(clusLst), ncol = 4))
for (j in 1:testReps){
row.names(testOut)[(1:length(clusLst)) + (length(clusLst) * (j-1))] = paste(clusLst, j, sep = '_')
}
colnames(testOut) = c('TP', 'TN', 'FP', 'FN')
clusBinding = rep(F, length(clusLst)) # Whether a cluster has any positive examples of binding with the current ligand/ligand class
for (j in (1:length(clusLst))){
clusBinding[j] = any(lig[bsResiDat$seqClust50 == clusLst[j]])
}
# sum(clusBinding)
testCases = clusLst[clusBinding] # Clusters with any binding occurrences to iteratively withhold for validation in LO(C)O validation
predictions = as.data.frame(matrix(nrow = length(row.names(bsResiDat)[bsResiDat$seqClust50 %in% testCases]), ncol = testReps))
row.names(predictions) = row.names(bsResiDat)[bsResiDat$seqClust50 %in% testCases]
featImp = as.data.frame(matrix(0, nrow = ncol(predFeats), ncol = length(testCases)))
row.names(featImp) = colnames(predFeats)
colnames(featImp) = testCases
for (j in (1:length(testCases))){
outClust = testCases[j]
cat("testing on clust #", outClust, '[',as.character(round(100*j/length(testCases), 2)),'% done ]\n')
trainingClusts = clusLst[! clusLst == outClust]
trainingClustBinding = clusBinding[! clusLst == outClust]
foldClusIDs = createFolds(y = trainingClustBinding, k = folds)
trainDat = sampleDiverseSitesByLig(clusterIDs = bsResiDat$seqClust50,
testClust = outClust,
featureSet = predFeats,
ligandTag = lig,
distThresh = medPairwiseDist,
scaledFeatureSet = scaledFeats)
for(n in 1:folds){
foldClusIDs[[n]] = (1:nrow(trainDat))[trainDat$clus %in% trainingClusts[foldClusIDs[[n]]]]
}
trainDat$clus <- NULL
# trainDat$bound = trainDat$bound[sample(x = 1:nrow(trainDat), size = nrow(trainDat), replace = F)] # Shuffle the labels of the training data
train.control = trainControl(index = foldClusIDs,
method = 'cv',
number = folds,
sampling = 'down',
summaryFunction = f2)
rfFit <- train(bound ~ .,
data = trainDat,
method = "rf",
trControl = train.control,
tuneGrid = tune.grid,
maximize = TRUE,
verbose = TRUE,
importance = TRUE,
ntree = default_ntree,
metric = "F2")
trainKappa = Kappa(rfFit$finalModel$confusion[1:2,1:2])$Unweighted[1]
trainRecall = 1 - rfFit$finalModel$confusion[2,3]
trainAcc = (rfFit$finalModel$confusion[1,1] + rfFit$finalModel$confusion[2,2]) / sum(rfFit$finalModel$confusion)
trainTag = row.names(trainOut) == outClust
trainOut$mtry[trainTag] = unname(rfFit$bestTune)[,1]
trainOut$kappa[trainTag] = trainKappa
trainOut$recall[trainTag] = trainRecall
trainOut$TP[trainTag] = rfFit$finalModel$confusion[2,2]
trainOut$TN[trainTag] = rfFit$finalModel$confusion[1,1]
trainOut$FP[trainTag] = rfFit$finalModel$confusion[1,2]
trainOut$FN[trainTag] = rfFit$finalModel$confusion[2,1]
featImp[, j] = rfFit$finalModel$importance[,4]
cat("train:\n\tRecall = ", trainRecall, "\n\tKappa = ", trainKappa,"\n\tAccuracy = ", trainAcc, '\n\tMtry = ', trainOut$mtry[trainTag], '\n\n')
for(m in 1:testReps){
inds = (1:nrow(predFeats))[bsResiDat$seqClust50 == outClust] # all the row indices matching the validation cluster
negInds = inds[! lig[inds]] # Indices of binding sites w/o ligand
posInds = inds[lig[inds]] # Indices of binding sites w/ ligand
outInds = pullDiverseCases(scaledData = scaledFeats, startInds = posInds, thresh = medPairwiseDist) # Diverse sample from examples of positive interactions
if (length(negInds > 0)){
outInds = pullDiverseCases(scaledData = scaledFeats, startInds = negInds, thresh = medPairwiseDist, prevSampled = outInds) # Diverse sample from examples of negative interactions
}
testDat = predFeats[outInds,]
testObs = factor(lig[outInds], levels = levels(trainDat$bound))
validate = predict(rfFit$finalModel, newdata = testDat, type = 'prob')
for(n in 1:nrow(validate)) {
bsName = row.names(validate)[n]
predictions[bsName, m] = validate[bsName,2]
}
TP = sum(validate[,2] >= 0.5 & testObs == "TRUE")
TN = sum(validate[,2] < 0.5 & testObs == "FALSE")
FN = sum(validate[,2] < 0.5 & testObs == "TRUE")
FP = sum(validate[,2] >= 0.5 & testObs == "FALSE")
randAcc = ((TN+FP)*(TN+FN) + (FN+TP)*(FP+TP)) / nrow(validate)^2
testAcc = (TP+TN)/(TP+TN+FP+FN)
testKappa = (testAcc - randAcc) / (1 - randAcc)
testRecall = TP / (TP + FN)
testTag = row.names(testOut) == paste(outClust, m, sep = '_')
testOut$TP[testTag] = TP
testOut$TN[testTag] = TN
testOut$FN[testTag] = FN
testOut$FP[testTag] = FP
cat("test:\n\tRecall = ", testRecall, "\n\tKappa = ", testKappa,"\n\tAccuracy = ", testAcc, '\n\n')
}
cat('__________________\n\n')
}
#########################
# output prelim stats and save training/validation performance files
#########################
ligColors = colfunc(ncol(ligTags))
OG_Dir = addSlash(OG_Dir)
cat("Ligand: ", colnames(ligTags)[dirInd], '\n\n')
trainOut = trainOut[as.character(testCases),]
testOut = testOut[gsub('_\\d*$', '', row.names(testOut)) %in% as.character(testCases),]
# Save out files
write.table(x = predictions, file = paste(OG_Dir,colnames(ligTags)[dirInd], '_outcomes.csv', sep = ''), quote = F, sep = ',')
# read.delim(file = paste(OG_Dir,colnames(ligTags)[dirInd], '_outcomes.csv', sep = ''), sep = ',', stringsAsFactors = F)
write.table(x = trainOut, file = paste(OG_Dir,colnames(ligTags)[dirInd], '_training.csv', sep = ''), quote = F, sep = ',')
write.table(x = testOut, file = paste(OG_Dir,colnames(ligTags)[dirInd], '_testing.csv', sep = ''), quote = F, sep = ',')
write.table(x = featImp, file = paste(OG_Dir,colnames(ligTags)[dirInd], '_features.csv', sep = ''), quote = F, sep = ',')
# Training
cat('Training performance\n')
print(apply(trainOut[,4:7], 2, mean))
print(apply(apply(trainOut[,4:7], 1, pCnt), 1, mean))
trainOut$f2 = 0
trainOut$prec = 0
for(i in 1:nrow(trainOut)){
r = trainOut$recall[i]
p = trainOut$TP[i] / (trainOut$TP[i] + trainOut$FP[i])
trainOut$prec[i] = p
trainOut$f2[i] = ((1+(2^2)) * p * r) / (2^2 * p + r)
# trainOut$f3[i] = ((1+(3^2)) * p * r) / (3^2 * p + r)
# trainOut$f4[i] = ((1+(4^2)) * p * r) / (4^2 * p + r)
}
pdf(file = paste(OG_Dir,colnames(ligTags)[dirInd], '_training_metrics.pdf', sep = ''),
width = 4, # The width of the plot in inches
height = 6) # The height of the plot in inches
boxplot(trainOut$kappa, trainOut$recall, trainOut$prec, trainOut$f2,
ylim = c(0, 1),
names = c('Kappa', 'Recall', 'Prec.', 'F2'),
main = paste(colnames(ligTags)[dirInd], 'Training', sep = ''),
col = ligColors[dirInd])
dev.off()
# Validation
cat('Validation performance\n')
outcomes = as.data.frame(matrix('', nrow = nrow(predictions), ncol = (1+testReps)))
row.names(outcomes) = row.names(predictions)
colnames(outcomes) = c("Obs", paste('rep',1:testReps, sep =''))
outcomes$Obs = ligTags[row.names(outcomes), dirInd]
for( j in 1:testReps){
outcomes[,j+1] = as.numeric(predictions[row.names(outcomes), j])
}
for (j in 1:testReps){
print(apply(testOut[(1:length(testCases)) + (length(testCases) * (j-1)),], 2, sum))
validationMetrics = getKPRFb(testOut[(1:length(testCases)) + (length(testCases) * (j-1)),])
cat(names(validationMetrics), '\n')
cat(unlist(validationMetrics), '\n')
}
testedOutcomes = outcomes[! is.na(outcomes[,(1 + testReps)]), c(1, (1+ testReps))]
pr = pr.curve(testedOutcomes[testedOutcomes$Obs == T, 2], testedOutcomes[testedOutcomes$Obs == F, 2], curve= T, rand.compute = T)
R = validationMetrics[['recall']]
P = validationMetrics[['precision']]
pdf(file = paste(OG_Dir,colnames(ligTags)[dirInd], '_examplePRcurve.pdf', sep = ''),
width = 6, # The width of the plot in inches
height = 6.25) # The height of the plot in inches
plot(pr$curve[,1:2], type = 'l', lwd = 4, col = ligColors[dirInd],
xlim = c(0,1), ylim = c(0,1),
xlab = 'Recall', ylab = 'Precision', main = paste('PR Curve - ', colnames(ligTags)[dirInd], '\nAUC = ', as.character(round(pr$auc.integral, digits = 5)), sep = ''))
abline(h = pr$rand$auc.integral, lty = 1, lwd = 2)
lines(x = c(R,R), y = c(-1,P), lty = 2)
lines(x = c(-1,R), y = c(P,P), lty = 2)
points(R,P, pch = 19)
dev.off()
|
/scripts/prediction/train_validate_random.R
|
permissive
|
demattox/lec_gly_binding
|
R
| false
| false
| 18,099
|
r
|
#!/usr/bin/env Rscript
library(randomForest)
library(reshape)
library(ggplot2)
library(caret)
library(MLmetrics)
library(philentropy)
library(vcd)
library(PRROC)
OG_Dir = getwd() # Original directory from which script was called
dirInd = strsplit(OG_Dir, split = '/')[[1]]
randInd = dirInd[length(dirInd)]
dirInd = dirInd[length(dirInd) -1]
homeDir = '/dartfs-hpc/rc/home/y/f002tsy/cbklab/Mattox/lec_gly_binding/'
# homeDir = '/Users/dmattox/cbk/lec_gly_binding/'
setwd(homeDir)
#######################
# Functions
#######################
pullDiverseCases <- function(scaledData, startInds, thresh, prevSampled = NA){
if (any(is.na(prevSampled))) {
# if no inds being passed in that were sampled from the positive class
if (length(startInds) == 1) {
out = startInds
} else if (length(startInds) == 2) {
if (suppressMessages(distance(scaledFeats[startInds,])) >= thresh) {
out = startInds # Two binding sites are greater than the threshold distance from each other, keep both
} else{
out = sample(startInds, size = 1) # Two binding sites are within the threshold distance from each other, pick one at random
}
} else {
cnter = 1
out = rep(0, length(startInds)) # Hold the set of diverse indices
}
} else{
# if inds are passed in from the positive class
out = c(prevSampled, rep(0, length(startInds)))
cnter = length(prevSampled) + 1
if (length(startInds) == 1) {
# One new neg site to compare to previously smapled positive sites
distMat = suppressMessages(distance(x = rbind(scaledData[out[out != 0], ], scaledData[startInds, ]))) # Find all pairwise distances b/w binding sites in cluster
if (is.matrix(distMat)){ # if more than one previously sampled binding site
distMat = distMat[1:sum(out != 0), -1 * (1:sum(out != 0))] # Get the top n rows of the distance matrix dropping the first n columns, where n = the number of indices already sampled
}
if (!any(distMat < thresh)) {
out[cnter] = startInds
}
}
}
if (any(out == 0)){
while( length(startInds) >= 2 ){
out[cnter] = sample(startInds, size = 1) # Sample an index randomly
cnter = cnter + 1 # increase the sample count
startInds = startInds[! startInds %in% out] # Drop sampled indices from vector of remaining indices
distMat = suppressMessages(distance(x = rbind(scaledData[out[out != 0],], scaledData[startInds,]))) # Find all pairwise distances b/w binding sites in cluster
distMat = distMat[1:sum(out != 0),-1*(1:sum(out != 0))] # Get the top n rows of the distance matrix dropping the first n columns, where n = the number of indices already sampled
if (is.matrix(distMat)){
distMat = apply(X = distMat, MARGIN = 2, FUN = min) # For each of the remaining binding sites, take the minimum pairwise distance to any sampled binding site
}
dropInds = startInds[distMat < thresh ]
startInds = startInds[! startInds %in% dropInds]
if(length(startInds) == 1){
out[cnter] = startInds
}
}
}
out = out[out != 0]
return(out)
}
sampleDiverseSitesByLig <- function(clusterIDs, testClust, featureSet, ligandTag, distThresh, scaledFeatureSet = featureSet){
  # Sample diverse binding sites with and without ligand [ligandTag] for each cluster
  # in clusterIDs, except the cluster held out for LO(C)O validation (testClust).
  # Binding sites are drawn from featureSet; Euclidean distances are computed on
  # scaledFeatureSet, and a site is kept only if it is more than distThresh from
  # every previously sampled site in its cluster (see pullDiverseCases).
  #
  # Args:
  #   clusterIDs: vector of cluster IDs, one per row of featureSet
  #   testClust: cluster ID(s) to withhold from sampling
  #   featureSet: data frame of predictor features (one row per binding site)
  #   ligandTag: logical vector, TRUE where the site binds the current ligand
  #   distThresh: minimum pairwise distance between sampled sites
  #   scaledFeatureSet: features scaled to [0, 1] for distance computation
  # Returns: data frame of sampled featureSet rows plus 'bound' (factor) and 'clus' columns
  uniClusts = unique(clusterIDs)
  uniClusts = uniClusts[ ! uniClusts %in% testClust] # Drop the excluded cluster
  # Preallocate at maximal size; unused rows are trimmed at the end
  dat = as.data.frame(matrix(0, nrow = nrow(featureSet), ncol = ncol(featureSet)))
  colnames(dat) = colnames(featureSet) # Fix: was colnames(predFeats) -- use the argument, not the global
  dat$bound = FALSE
  dat$clus = 0
  j = 1 # index for writing to returned dataframe (dat)
  for (i in seq_along(uniClusts)) {
    inds = (1:nrow(featureSet))[clusterIDs == uniClusts[i]] # all the row indices matching a specific cluster
    negInds = inds[! ligandTag[inds]] # Indices of binding sites w/o ligand
    posInds = inds[ligandTag[inds]] # Indices of binding sites w/ ligand
    if (length(posInds) > 0){
      outInds = pullDiverseCases(scaledData = scaledFeatureSet, startInds = posInds, thresh = distThresh)
    } else {
      outInds = NA
    }
    if (length(negInds) > 0){ # Fix: was length(negInds > 0) -- same effect in an if(), but this is what was meant
      outInds = pullDiverseCases(scaledData = scaledFeatureSet, startInds = negInds, thresh = distThresh, prevSampled = outInds)
    }
    rows = j:(j + length(outInds) - 1)
    dat[rows, 1:ncol(featureSet)] = featureSet[outInds, ] # Fix: was predFeats[outInds, ] -- use the argument
    dat$bound[rows] = ligandTag[outInds] # Set bound variable
    dat$clus[rows] = uniClusts[i] # Set cluster ID
    j = j + length(outInds)
  }
  # Trim the unused preallocated rows. Fix: the original unconditionally ran
  # dat[-1*(j:nrow(dat)), ], which dropped a *valid* row when j == nrow(dat) + 1
  # (j:nrow(dat) counts down in that case).
  if (j <= nrow(dat)) {
    dat = dat[-(j:nrow(dat)), ]
  }
  dat$bound = as.factor(dat$bound)
  return(dat)
}
f2 <- function (data, lev = NULL, model = NULL, beta = 2) {
  # caret summaryFunction computing the F-beta score (beta = 2 weights recall
  # twice as heavily as precision) on one resample's held-out predictions.
  # Args follow caret's summaryFunction contract: data has $pred and $obs
  # (factors with levels "FALSE"/"TRUE"); lev and model are unused here.
  precision <- posPredValue(data$pred, data$obs, positive = "TRUE")
  # Fix: original passed 'postive = "TRUE"' (typo); the misspelled argument was
  # silently swallowed by '...', so sensitivity() used its default positive class.
  recall <- sensitivity(data$pred, data$obs, positive = "TRUE")
  f2_val <- ((1 + beta^2) * precision * recall) / (beta^2 * precision + recall)
  names(f2_val) <- c("F2")
  f2_val
}
# f3 <- function (data, lev = NULL, model = NULL, beta = 3) {
# precision <- posPredValue(data$pred, data$obs, positive = "TRUE")
# recall <- sensitivity(data$pred, data$obs, postive = "TRUE")
# f3_val <- ((1 + beta^2) * precision * recall) / (beta^2 * precision + recall)
# names(f3_val) <- c("F3")
# f3_val
# }
pCnt <- function(x) {
  # Convert a numeric vector of counts into proportions of the total.
  total <- sum(x)
  x / total
}
getKPRFb <- function(conMatDF){
  # Pool confusion-matrix counts across folds/repetitions and compute summary metrics.
  #
  # Args:
  #   conMatDF: one row per confusion matrix, columns ordered TP, TN, FP, FN
  # Returns: named list with Cohen's kappa, recall, precision, and F2/F3/F4 scores.
  counts = apply(X = conMatDF, MARGIN = 2, FUN = sum)
  TP = counts[[1]]
  TN = counts[[2]]
  FP = counts[[3]]
  FN = counts[[4]]
  recall = TP / (TP + FN)
  precision = TP / (TP + FP)
  # F-beta score; one helper replaces three copy-pasted formulas for beta = 2, 3, 4
  fbeta = function(beta) {
    ((1 + beta^2) * precision * recall) / (beta^2 * precision + recall)
  }
  # Chance agreement rate for Cohen's kappa (redundant sum(c(...)) removed)
  randAcc = ((TN + FP) * (TN + FN) + (FN + TP) * (FP + TP)) / (TP + TN + FP + FN)^2
  testAcc = (TP + TN) / (TP + TN + FP + FN)
  kappa = (testAcc - randAcc) / (1 - randAcc)
  return(list(kappa = kappa,
              recall = recall,
              precision = precision,
              F2 = fbeta(2),
              F3 = fbeta(3),
              F4 = fbeta(4)))
}
addSlash = function(string) {
  # Ensure 'string' (e.g. the path to a directory) ends with a trailing
  # forward slash, appending one only when it is missing.
  if (!endsWith(string, "/")) {
    string = paste0(string, "/")
  }
  return(string)
}
# Color palette generator used for per-ligand plot colors below
colfunc = colorRampPalette(c("red","goldenrod","forestgreen","royalblue","darkviolet"))
#######################
# Read in data and set up models
#######################
# NOTE(review): randInd (and dirInd, used later) are not defined in this file --
# presumably supplied by a wrapper/calling environment; confirm before sourcing.
set.seed(as.integer(randInd))
# Read in data
ligTags = read.delim(file = './analysis/training/data_in/ligTags.tsv', sep = '\t', stringsAsFactors = F)
predFeats = read.delim(file = './analysis/training/data_in/predFeats.csv', sep = ',', stringsAsFactors = F)
bsResiDat = read.delim(file = './analysis/training/data_in/bsResiDat.tsv', sep = '\t', stringsAsFactors = F)
scaledFeats = predFeats # Scale features between 0 & 1
# Min-max scale each feature column independently
for(i in 1:ncol(scaledFeats)){
  scaledFeats[,i] = (scaledFeats[,i] - min(scaledFeats[,i])) / (max(scaledFeats[,i]) - min(scaledFeats[,i]))
}
# Pairwise distance matrix between all binding sites on the scaled features.
# NOTE(review): distance() is not defined here -- presumably from an attached
# package (e.g. philentropy); confirm which method it computes.
bwBSiteDists = distance(scaledFeats)
# Median pairwise distance serves as the diversity threshold for sampling
medPairwiseDist = median(bwBSiteDists[upper.tri(bwBSiteDists)])
clusLst = unique(bsResiDat$seqClust50)
# all(row.names(bsResiDat) == row.names(predFeats))
# Cross-validation and validation-repetition settings
folds = 5
reps = 1
testReps = 10
default_mtry = round(sqrt(ncol(predFeats)), 0) # random forest convention: mtry = sqrt(p)
default_ntree = 2000
# tune.grid = expand.grid(.mtry=default_mtry)
half_mtry = round(0.5*default_mtry,0)
# Tune mtry over [default - 50%, default + 50%]
tune.grid <- expand.grid(.mtry= c(-half_mtry:half_mtry) + default_mtry)
#########################
# Train and validate based on current ligand class
# 5x CV training from sampling + LO(C)O validation
# Revamped sampling to limit similar negative examples
# Train with F2
# mtry within [sqrt(feature count) +/- 50%]
#########################
dirInd = as.integer(dirInd) # Subdirectory from which script was called to indicate which ligTag column (& which ligand) to train classifier for
lig = ligTags[,dirInd]
# NOTE(review): the labels are permuted here, so this run measures null
# (label-shuffled) performance -- confirm this is the intended variant of the script.
lig = lig[sample(x = 1:nrow(ligTags), size = nrow(ligTags), replace = F)] # Shuffle the labels of the training data
# Define dataframes to hold model results
trainOut = as.data.frame(matrix(0, nrow = length(clusLst), ncol = 7))
row.names(trainOut) = clusLst
colnames(trainOut) = c('mtry', 'kappa', 'recall', 'TP', 'TN', 'FP', 'FN')
# testOut holds one confusion matrix per (cluster, validation repetition) pair;
# row names are "<cluster>_<rep>"
testOut = as.data.frame(matrix(0, nrow = testReps * length(clusLst), ncol = 4))
for (j in 1:testReps){
  row.names(testOut)[(1:length(clusLst)) + (length(clusLst) * (j-1))] = paste(clusLst, j, sep = '_')
}
colnames(testOut) = c('TP', 'TN', 'FP', 'FN')
clusBinding = rep(F, length(clusLst)) # Whether a cluster has any positive examples of binding with the current ligand/ligand class
for (j in (1:length(clusLst))){
  clusBinding[j] = any(lig[bsResiDat$seqClust50 == clusLst[j]])
}
# sum(clusBinding)
testCases = clusLst[clusBinding] # Clusters with any binding occurrences to iteratively withhold for validation in LO(C)O validation
# Per-site predicted probabilities, one column per validation repetition
predictions = as.data.frame(matrix(nrow = length(row.names(bsResiDat)[bsResiDat$seqClust50 %in% testCases]), ncol = testReps))
row.names(predictions) = row.names(bsResiDat)[bsResiDat$seqClust50 %in% testCases]
# Random-forest feature importance, one column per held-out cluster
featImp = as.data.frame(matrix(0, nrow = ncol(predFeats), ncol = length(testCases)))
row.names(featImp) = colnames(predFeats)
colnames(featImp) = testCases
# Leave-one-(cluster)-out loop: withhold each binding cluster in turn, train a
# downsampled random forest (caret) on diverse samples from the remaining
# clusters, then validate testReps times on diverse samples of the held-out cluster.
for (j in (1:length(testCases))){
  outClust = testCases[j]
  cat("testing on clust #", outClust, '[',as.character(round(100*j/length(testCases), 2)),'% done ]\n')
  trainingClusts = clusLst[! clusLst == outClust]
  trainingClustBinding = clusBinding[! clusLst == outClust]
  # Fold assignment is stratified over clusters (by whether each binds), so all
  # sites of a cluster land in the same CV fold (no within-cluster leakage)
  foldClusIDs = createFolds(y = trainingClustBinding, k = folds)
  trainDat = sampleDiverseSitesByLig(clusterIDs = bsResiDat$seqClust50,
                                     testClust = outClust,
                                     featureSet = predFeats,
                                     ligandTag = lig,
                                     distThresh = medPairwiseDist,
                                     scaledFeatureSet = scaledFeats)
  # Map cluster-level folds onto row indices of the sampled training data
  for(n in 1:folds){
    foldClusIDs[[n]] = (1:nrow(trainDat))[trainDat$clus %in% trainingClusts[foldClusIDs[[n]]]]
  }
  trainDat$clus <- NULL
  # trainDat$bound = trainDat$bound[sample(x = 1:nrow(trainDat), size = nrow(trainDat), replace = F)] # Shuffle the labels of the training data
  train.control = trainControl(index = foldClusIDs,
                               method = 'cv',
                               number = folds,
                               sampling = 'down',
                               summaryFunction = f2)
  rfFit <- train(bound ~ .,
                 data = trainDat,
                 method = "rf",
                 trControl = train.control,
                 tuneGrid = tune.grid,
                 maximize = TRUE,
                 verbose = TRUE,
                 importance = TRUE,
                 ntree = default_ntree,
                 metric = "F2")
  # OOB confusion matrix of the final forest: row/col 1 = FALSE, 2 = TRUE,
  # column 3 = class error rate (hence recall = 1 - error of the TRUE row)
  trainKappa = Kappa(rfFit$finalModel$confusion[1:2,1:2])$Unweighted[1]
  trainRecall = 1 - rfFit$finalModel$confusion[2,3]
  trainAcc = (rfFit$finalModel$confusion[1,1] + rfFit$finalModel$confusion[2,2]) / sum(rfFit$finalModel$confusion)
  trainTag = row.names(trainOut) == outClust
  trainOut$mtry[trainTag] = unname(rfFit$bestTune)[,1]
  trainOut$kappa[trainTag] = trainKappa
  trainOut$recall[trainTag] = trainRecall
  trainOut$TP[trainTag] = rfFit$finalModel$confusion[2,2]
  trainOut$TN[trainTag] = rfFit$finalModel$confusion[1,1]
  trainOut$FP[trainTag] = rfFit$finalModel$confusion[1,2]
  trainOut$FN[trainTag] = rfFit$finalModel$confusion[2,1]
  featImp[, j] = rfFit$finalModel$importance[,4] # column 4 = MeanDecreaseGini (randomForest convention)
  cat("train:\n\tRecall = ", trainRecall, "\n\tKappa = ", trainKappa,"\n\tAccuracy = ", trainAcc, '\n\tMtry = ', trainOut$mtry[trainTag], '\n\n')
  # Repeated validation on the held-out cluster: resample a diverse test set each rep
  for(m in 1:testReps){
    inds = (1:nrow(predFeats))[bsResiDat$seqClust50 == outClust] # all the row indices matching the validation cluster
    negInds = inds[! lig[inds]] # Indices of binding sites w/o ligand
    posInds = inds[lig[inds]] # Indices of binding sites w/ ligand
    outInds = pullDiverseCases(scaledData = scaledFeats, startInds = posInds, thresh = medPairwiseDist) # Diverse sample from examples of positive interactions
    if (length(negInds > 0)){ # NOTE(review): likely meant length(negInds) > 0; behaves the same inside if()
      outInds = pullDiverseCases(scaledData = scaledFeats, startInds = negInds, thresh = medPairwiseDist, prevSampled = outInds) # Diverse sample from examples of negative interactions
    }
    testDat = predFeats[outInds,]
    testObs = factor(lig[outInds], levels = levels(trainDat$bound))
    validate = predict(rfFit$finalModel, newdata = testDat, type = 'prob') # column 2 = P(bound == TRUE)
    for(n in 1:nrow(validate)) {
      bsName = row.names(validate)[n]
      predictions[bsName, m] = validate[bsName,2]
    }
    # Confusion counts at the 0.5 probability cutoff
    TP = sum(validate[,2] >= 0.5 & testObs == "TRUE")
    TN = sum(validate[,2] < 0.5 & testObs == "FALSE")
    FN = sum(validate[,2] < 0.5 & testObs == "TRUE")
    FP = sum(validate[,2] >= 0.5 & testObs == "FALSE")
    # Cohen's kappa from accuracy vs. chance agreement
    randAcc = ((TN+FP)*(TN+FN) + (FN+TP)*(FP+TP)) / nrow(validate)^2
    testAcc = (TP+TN)/(TP+TN+FP+FN)
    testKappa = (testAcc - randAcc) / (1 - randAcc)
    testRecall = TP / (TP + FN)
    testTag = row.names(testOut) == paste(outClust, m, sep = '_')
    testOut$TP[testTag] = TP
    testOut$TN[testTag] = TN
    testOut$FN[testTag] = FN
    testOut$FP[testTag] = FP
    cat("test:\n\tRecall = ", testRecall, "\n\tKappa = ", testKappa,"\n\tAccuracy = ", testAcc, '\n\n')
  }
  cat('__________________\n\n')
}
#########################
# output prelim stats and save training/validation performance files
#########################
ligColors = colfunc(ncol(ligTags)) # one color per ligand column
# NOTE(review): OG_Dir is not defined in this file -- presumably the output
# directory supplied by the calling environment; confirm before sourcing.
OG_Dir = addSlash(OG_Dir)
cat("Ligand: ", colnames(ligTags)[dirInd], '\n\n')
# Keep only rows for the clusters actually validated (those with any binders)
trainOut = trainOut[as.character(testCases),]
testOut = testOut[gsub('_\\d*$', '', row.names(testOut)) %in% as.character(testCases),]
# Save out files
write.table(x = predictions, file = paste(OG_Dir,colnames(ligTags)[dirInd], '_outcomes.csv', sep = ''), quote = F, sep = ',')
# read.delim(file = paste(OG_Dir,colnames(ligTags)[dirInd], '_outcomes.csv', sep = ''), sep = ',', stringsAsFactors = F)
write.table(x = trainOut, file = paste(OG_Dir,colnames(ligTags)[dirInd], '_training.csv', sep = ''), quote = F, sep = ',')
write.table(x = testOut, file = paste(OG_Dir,colnames(ligTags)[dirInd], '_testing.csv', sep = ''), quote = F, sep = ',')
write.table(x = featImp, file = paste(OG_Dir,colnames(ligTags)[dirInd], '_features.csv', sep = ''), quote = F, sep = ',')
# Training
cat('Training performance\n')
print(apply(trainOut[,4:7], 2, mean)) # mean TP/TN/FP/FN counts per cluster
print(apply(apply(trainOut[,4:7], 1, pCnt), 1, mean)) # mean TP/TN/FP/FN *proportions* per cluster
# Derive per-cluster precision and F2 from the stored confusion counts
trainOut$f2 = 0
trainOut$prec = 0
for(i in 1:nrow(trainOut)){
  r = trainOut$recall[i]
  p = trainOut$TP[i] / (trainOut$TP[i] + trainOut$FP[i])
  trainOut$prec[i] = p
  trainOut$f2[i] = ((1+(2^2)) * p * r) / (2^2 * p + r)
  # trainOut$f3[i] = ((1+(3^2)) * p * r) / (3^2 * p + r)
  # trainOut$f4[i] = ((1+(4^2)) * p * r) / (4^2 * p + r)
}
pdf(file = paste(OG_Dir,colnames(ligTags)[dirInd], '_training_metrics.pdf', sep = ''),
    width = 4, # The width of the plot in inches
    height = 6) # The height of the plot in inches
boxplot(trainOut$kappa, trainOut$recall, trainOut$prec, trainOut$f2,
        ylim = c(0, 1),
        names = c('Kappa', 'Recall', 'Prec.', 'F2'),
        main = paste(colnames(ligTags)[dirInd], 'Training', sep = ''),
        col = ligColors[dirInd])
dev.off()
# Validation
cat('Validation performance\n')
# Combine observed labels and the per-rep predicted probabilities in one frame
outcomes = as.data.frame(matrix('', nrow = nrow(predictions), ncol = (1+testReps)))
row.names(outcomes) = row.names(predictions)
colnames(outcomes) = c("Obs", paste('rep',1:testReps, sep =''))
outcomes$Obs = ligTags[row.names(outcomes), dirInd]
for( j in 1:testReps){
  outcomes[,j+1] = as.numeric(predictions[row.names(outcomes), j])
}
# Pooled confusion counts and summary metrics per validation repetition
for (j in 1:testReps){
  print(apply(testOut[(1:length(testCases)) + (length(testCases) * (j-1)),], 2, sum))
  validationMetrics = getKPRFb(testOut[(1:length(testCases)) + (length(testCases) * (j-1)),])
  cat(names(validationMetrics), '\n')
  cat(unlist(validationMetrics), '\n')
}
# PR curve for the last repetition only (validationMetrics also holds the last rep).
# NOTE(review): pr.curve() presumably from the PRROC package -- confirm it is attached.
testedOutcomes = outcomes[! is.na(outcomes[,(1 + testReps)]), c(1, (1+ testReps))]
pr = pr.curve(testedOutcomes[testedOutcomes$Obs == T, 2], testedOutcomes[testedOutcomes$Obs == F, 2], curve= T, rand.compute = T)
R = validationMetrics[['recall']]
P = validationMetrics[['precision']]
pdf(file = paste(OG_Dir,colnames(ligTags)[dirInd], '_examplePRcurve.pdf', sep = ''),
    width = 6, # The width of the plot in inches
    height = 6.25) # The height of the plot in inches
plot(pr$curve[,1:2], type = 'l', lwd = 4, col = ligColors[dirInd],
     xlim = c(0,1), ylim = c(0,1),
     xlab = 'Recall', ylab = 'Precision', main = paste('PR Curve - ', colnames(ligTags)[dirInd], '\nAUC = ', as.character(round(pr$auc.integral, digits = 5)), sep = ''))
abline(h = pr$rand$auc.integral, lty = 1, lwd = 2) # random-classifier baseline
lines(x = c(R,R), y = c(-1,P), lty = 2) # mark the 0.5-cutoff operating point
lines(x = c(-1,R), y = c(P,P), lty = 2)
points(R,P, pch = 19)
dev.off()
|
#' Set credentials for an ODBC data source
#'
#' @details The credentials for the DSN are saved as environment variables,
#'   using the R function \code{Sys.setenv()}. The user name for the DSN is
#'   stored as \code{<DSN>_USR} and the password as \code{<DSN>_PWD}, where
#'   \code{<DSN>} is the upper-cased data source name.
#'
#' @param dsn The name of the data source name
#' @param usr The user name for the connection
#' @param pwd The password for the connection
#'
#' @examples
#' set_odbc_credential("whiskey", "foo", "bar")
#' Sys.getenv(c("WHISKEY_USR", "WHISKEY_PWD"))
#' unset_odbc_credential("whiskey")
#' Sys.getenv(c("WHISKEY_USR", "WHISKEY_PWD"), unset = NA)
#' @export
set_odbc_credential <- function(dsn, usr, pwd) {
  env_usr <- paste0(toupper(dsn), "_USR")
  env_pwd <- paste0(toupper(dsn), "_PWD")
  # Build the named arguments programmatically instead of eval(parse(text = ...)):
  # the original string-built call broke whenever usr/pwd contained a quote and
  # allowed arbitrary code execution via the credential values.
  invisible(do.call(Sys.setenv, stats::setNames(list(usr, pwd), c(env_usr, env_pwd))))
}
#' @describeIn set_odbc_credential Unset the ODBC credentials
#' @export
unset_odbc_credential <- function(dsn) {
  # Remove the <DSN>_USR and <DSN>_PWD environment variables for this DSN.
  prefix <- toupper(dsn)
  Sys.unsetenv(paste0(prefix, c("_USR", "_PWD")))
}
|
/R/set_odbc_credential.R
|
no_license
|
NateByers/etljobs
|
R
| false
| false
| 1,112
|
r
|
#' Set credentials for an ODBC data source
#'
#' @details The credentials for the DSN are saved as environment variables,
#'   using the R function \code{Sys.setenv()}. The user name for the DSN is
#'   stored as \code{<DSN>_USR} and the password as \code{<DSN>_PWD}, where
#'   \code{<DSN>} is the upper-cased data source name.
#'
#' @param dsn The name of the data source name
#' @param usr The user name for the connection
#' @param pwd The password for the connection
#'
#' @examples
#' set_odbc_credential("whiskey", "foo", "bar")
#' Sys.getenv(c("WHISKEY_USR", "WHISKEY_PWD"))
#' unset_odbc_credential("whiskey")
#' Sys.getenv(c("WHISKEY_USR", "WHISKEY_PWD"), unset = NA)
#' @export
set_odbc_credential <- function(dsn, usr, pwd) {
  env_usr <- paste0(toupper(dsn), "_USR")
  env_pwd <- paste0(toupper(dsn), "_PWD")
  # Build the named arguments programmatically instead of eval(parse(text = ...)):
  # the original string-built call broke whenever usr/pwd contained a quote and
  # allowed arbitrary code execution via the credential values.
  invisible(do.call(Sys.setenv, stats::setNames(list(usr, pwd), c(env_usr, env_pwd))))
}
#' @describeIn set_odbc_credential Unset the ODBC credentials
#' @export
unset_odbc_credential <- function(dsn) {
  # Remove the <DSN>_USR and <DSN>_PWD environment variables for this DSN.
  prefix <- toupper(dsn)
  Sys.unsetenv(paste0(prefix, c("_USR", "_PWD")))
}
|
# Purled (knitr-extracted) code from the discreteQ package vignette; each
# "## ----exampleN..." marker below is an extracted vignette chunk header.
## ----setup, include = FALSE----------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>",
  out.width = "100%"
)
## ----example1------------------------------------------------------------
# Quantile bands for a single discrete (Poisson) outcome
library(discreteQ)
set.seed(1234)
outcome <- rpois(100, 3)
results1 <- discreteQ(outcome)
## ----example2------------------------------------------------------------
results1
## ----example3, fig.width=10, fig.height=10-------------------------------
plot(results1)
## ----example4------------------------------------------------------------
summary(results1)
## ----example5------------------------------------------------------------
# Simulated two-group data: treatment shifts the regressor, which shifts the outcome
set.seed(1234)
treatment <- c(rep(0,1000), rep(1,1000))
reg <- rbinom(2000, 1, 0.4+treatment*0.2)
outcome <- rpois(2000, lambda = 2+4*reg)
## ----example6, fig.width=10, fig.height=10-------------------------------
results2 <- discreteQ(outcome, treatment)
plot(results2, main="Difference between the unconditional quantile functions")
## ----example7------------------------------------------------------------
# Covariate-adjusted comparison (intercept + regressor)
results3 <- discreteQ(outcome, treatment, cbind(1, reg))
plot(results3)
## ----example8, fig.width=10, fig.height=10-------------------------------
plot(results3, which="Q0")
plot(results3, which="Q1", add=TRUE, shift=0.2, col.l="dark green", col.b="light green")
## ----example9, fig.width=10, fig.height=10-------------------------------
results4 <- discreteQ(outcome, treatment, cbind(1, reg), decomposition=TRUE)
plot(results4)
## ----example10, fig.width=10, fig.height=10------------------------------
# Continuous outcome: ys = Inf disables the discrete-support grid
set.seed(1234)
outcome <- rnorm(500, 3)
results5 <- discreteQ(outcome, ys = Inf)
plot(results5, support = "continuous")
## ----example11-----------------------------------------------------------
set.seed(1234)
treatment <- c(rep(0,1000), rep(1,1000))
reg <- rbinom(2000, 1, 0.4+treatment*0.2)
outcome <- rpois(2000, lambda = 2+4*reg)
#Without parallel computing
set.seed(42)
system.time(results6 <- discreteQ(outcome, treatment, cbind(1,reg)))
my_cl <- parallel::makePSOCKcluster(2)
#With parallel computing
set.seed(42)
system.time(results7 <- discreteQ(outcome, treatment, cbind(1,reg), cl = my_cl ))
## ----example12-----------------------------------------------------------
#Results with and without parallel computing are equal
all.equal(results6, results7)
## ----example13-----------------------------------------------------------
#95% confidence bands (default value)
results8 <- discreteQ(outcome, treatment, cbind(1,reg), return.boot = TRUE)
#90% confidence bands
# old.res reuses results8's bootstrap draws instead of resampling
results9 <- discreteQ(outcome, treatment, cbind(1,reg), old.res = results8, alpha = 0.1)
/inst/doc/discreteQ.R
|
permissive
|
bmelly/discreteQ
|
R
| false
| false
| 2,672
|
r
|
# Purled (knitr-extracted) code from the discreteQ package vignette; each
# "## ----exampleN..." marker below is an extracted vignette chunk header.
## ----setup, include = FALSE----------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>",
  out.width = "100%"
)
## ----example1------------------------------------------------------------
# Quantile bands for a single discrete (Poisson) outcome
library(discreteQ)
set.seed(1234)
outcome <- rpois(100, 3)
results1 <- discreteQ(outcome)
## ----example2------------------------------------------------------------
results1
## ----example3, fig.width=10, fig.height=10-------------------------------
plot(results1)
## ----example4------------------------------------------------------------
summary(results1)
## ----example5------------------------------------------------------------
# Simulated two-group data: treatment shifts the regressor, which shifts the outcome
set.seed(1234)
treatment <- c(rep(0,1000), rep(1,1000))
reg <- rbinom(2000, 1, 0.4+treatment*0.2)
outcome <- rpois(2000, lambda = 2+4*reg)
## ----example6, fig.width=10, fig.height=10-------------------------------
results2 <- discreteQ(outcome, treatment)
plot(results2, main="Difference between the unconditional quantile functions")
## ----example7------------------------------------------------------------
# Covariate-adjusted comparison (intercept + regressor)
results3 <- discreteQ(outcome, treatment, cbind(1, reg))
plot(results3)
## ----example8, fig.width=10, fig.height=10-------------------------------
plot(results3, which="Q0")
plot(results3, which="Q1", add=TRUE, shift=0.2, col.l="dark green", col.b="light green")
## ----example9, fig.width=10, fig.height=10-------------------------------
results4 <- discreteQ(outcome, treatment, cbind(1, reg), decomposition=TRUE)
plot(results4)
## ----example10, fig.width=10, fig.height=10------------------------------
# Continuous outcome: ys = Inf disables the discrete-support grid
set.seed(1234)
outcome <- rnorm(500, 3)
results5 <- discreteQ(outcome, ys = Inf)
plot(results5, support = "continuous")
## ----example11-----------------------------------------------------------
set.seed(1234)
treatment <- c(rep(0,1000), rep(1,1000))
reg <- rbinom(2000, 1, 0.4+treatment*0.2)
outcome <- rpois(2000, lambda = 2+4*reg)
#Without parallel computing
set.seed(42)
system.time(results6 <- discreteQ(outcome, treatment, cbind(1,reg)))
my_cl <- parallel::makePSOCKcluster(2)
#With parallel computing
set.seed(42)
system.time(results7 <- discreteQ(outcome, treatment, cbind(1,reg), cl = my_cl ))
## ----example12-----------------------------------------------------------
#Results with and without parallel computing are equal
all.equal(results6, results7)
## ----example13-----------------------------------------------------------
#95% confidence bands (default value)
results8 <- discreteQ(outcome, treatment, cbind(1,reg), return.boot = TRUE)
#90% confidence bands
# old.res reuses results8's bootstrap draws instead of resampling
results9 <- discreteQ(outcome, treatment, cbind(1,reg), old.res = results8, alpha = 0.1)
Instructions --
Add both the year (1980) and director (Stanley Kubrick) to shining_list again. Try to do it in a one-liner this time, and save the result
in a new variable, shining_list_ext.
Again, have a look at the structure of shining_list_ext.
---------------
# shining_list is already defined in the workspace
shining_list
# Add both the year and director to shining_list: shining_list_ext
# c() concatenates the two new named elements onto the existing list in one call
shining_list_ext <- c(shining_list,
                      year = 1980,
                      director = 'Stanley Kubrick')
# Have a look at the structure of shining_list_ext
str(shining_list_ext)
|
/LIST/LAB2/L5.r
|
no_license
|
jabhij/DAT204x_R_DataScience
|
R
| false
| false
| 596
|
r
|
Instructions --
Add both the year (1980) and director (Stanley Kubrick) to shining_list again. Try to do it in a one-liner this time, and save the result
in a new variable, shining_list_ext.
Again, have a look at the structure of shining_list_ext.
---------------
# shining_list is already defined in the workspace
shining_list
# Add both the year and director to shining_list: shining_list_ext
# c() concatenates the two new named elements onto the existing list in one call
shining_list_ext <- c(shining_list,
                      year = 1980,
                      director = 'Stanley Kubrick')
# Have a look at the structure of shining_list_ext
str(shining_list_ext)
|
# 4.6.5 K-Nearest Neighbors (ISLR Smarket lab)
library(class)
attach(Smarket)
# Split data for train and test
train = (Year < 2005)
train.X = cbind(Lag1, Lag2)[train,] # Train predictors
test.X = cbind(Lag1, Lag2)[!train,] # Test predictors
train.Direction = Direction[train] # Direction labels for train data
# NOTE(review): Direction.2005 is used below but never defined in this snippet --
# in the ISLR lab it is Direction[!train]; confirm it exists in the session.
# Predict movement with one neighbor (seed fixed because knn breaks ties randomly)
set.seed(1)
knn.pred = knn(train.X, test.X, train.Direction, k=1)
table(knn.pred, Direction.2005) # Observe 50% correct observations
# Predict with 3 neighbors
knn.pred = knn(train.X, test.X, train.Direction, k=3)
table(knn.pred, Direction.2005)
mean(knn.pred == Direction.2005) # 53.6% though still not better than QDA
/ch4-classification/4.R-classification-in-R/k-nearest-neighbors/4.6.5-k-nearest-neighbors.R
|
no_license
|
AntonioPelayo/stanford-statistical-learning
|
R
| false
| false
| 671
|
r
|
# 4.6.5 K-Nearest Neighbors (ISLR Smarket lab)
library(class)
attach(Smarket)
# Split data for train and test
train = (Year < 2005)
train.X = cbind(Lag1, Lag2)[train,] # Train predictors
test.X = cbind(Lag1, Lag2)[!train,] # Test predictors
train.Direction = Direction[train] # Direction labels for train data
# NOTE(review): Direction.2005 is used below but never defined in this snippet --
# in the ISLR lab it is Direction[!train]; confirm it exists in the session.
# Predict movement with one neighbor (seed fixed because knn breaks ties randomly)
set.seed(1)
knn.pred = knn(train.X, test.X, train.Direction, k=1)
table(knn.pred, Direction.2005) # Observe 50% correct observations
# Predict with 3 neighbors
knn.pred = knn(train.X, test.X, train.Direction, k=3)
table(knn.pred, Direction.2005)
mean(knn.pred == Direction.2005) # 53.6% though still not better than QDA
# Machine-extracted example from the ritis package Rd file; the "##D" lines are
# example code guarded by \dontrun (requires network access to the ITIS service).
library(ritis)
### Name: vernacular_languages
### Title: Provides a list of the unique languages used in the vernacular
###   table.
### Aliases: vernacular_languages
### ** Examples
## Not run:
##D vernacular_languages()
## End(Not run)
|
/data/genthat_extracted_code/ritis/examples/vernacular_languages.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 246
|
r
|
# Machine-extracted example from the ritis package Rd file; the "##D" lines are
# example code guarded by \dontrun (requires network access to the ITIS service).
library(ritis)
### Name: vernacular_languages
### Title: Provides a list of the unique languages used in the vernacular
###   table.
### Aliases: vernacular_languages
### ** Examples
## Not run:
##D vernacular_languages()
## End(Not run)
|
\name{summary.FPDclustering}
\alias{summary.FPDclustering}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Summary for FPDclustering Objects
}
\description{
Number of elements per cluster.
}
\usage{
\method{summary}{FPDclustering}(object, ... )
}
\arguments{
\item{object}{
an object of class FPDclustering
}
\item{...}{Additional parameters for the function pairs}
}
\author{
Cristina Tortora
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
|
/man/summary.FPDclustering.Rd
|
no_license
|
cran/FPDclustering
|
R
| false
| false
| 498
|
rd
|
\name{summary.FPDclustering}
\alias{summary.FPDclustering}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Summary for FPDclustering Objects
}
\description{
Number of elements per cluster.
}
\usage{
\method{summary}{FPDclustering}(object, ... )
}
\arguments{
\item{object}{
an object of class FPDclustering
}
\item{...}{Additional parameters for the function pairs}
}
\author{
Cristina Tortora
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
|
stiti ce este o " cotitura radicala " ?
e o miscare hotarita in urma careia elanul si dorinta de schimbare duc un om , o echipa , un partid sau o tara exact in pozitia in care erau si inainte .
nici nea Nicu , atunci cind zicea : " Tovarasi , sa facem o cotitura radicala in munca noastra " , nu - si imagina ca noi o vom face atit de virtos incit sa ajungem la situatia de la care am plecat .
asa si taranistii !
se tot aduna , se tot reunesc si tot fac planuri de relansare a partidului pina cind se trezesc la aceeasi antipatie de care se " bucura " de la o vreme incoace .
sedinta de alaltaieri a reprezentat si ea o " cotitura radicala " in stilul celor de pe vremuri .
partidul lui Maniu si Mihalache pare blestemat .
nu reuseste sa se desprinda din virtejul care il trage vertiginos sub pragul electoral .
taranistii nu inteleg ce se intimpla , nu sint capabili sa se detaseze de situatia critica pe care o traverseaza .
chiar n - au umor .
isi imagineaza ca ei ar putea sustine un prezidentiabil care sa si cistige .
fara sa se gindeasca macar o secunda , ei s - au grabit , prin Ion Diaconescu si la sedinta de alaltaieri , sa sprijine candidatura lui Mugur Isarescu .
din acest gest se poate vedea cit de confuza e gindirea oamenilor de aici .
inca nu si - au dat seama ca populatia percepe PNTCD ca principal responsabil al neimplinirilor acestei guvernari .
pur si simplu nu inteleg ca sustinerea lor are valoarea unui handicap pe care ei il ofera ca pe o floare , ca pe un cadou de pret .
taranistii sint in situatia de a nu pricepe ca si daca l - ar sustine pe Iuliu Maniu sau pe Nicolae Titulescu , in conditiile Romaniei de azi n - ar reusi decit sa le asigure acestora infringerea .
daca ar fi sprijiniti de taranisti , si Tony Blair sau Bill Clinton ar pierde din start orice fel de alegeri in Romania .
n - as vrea sa se inteleaga ca aceste pareri au la baza o furie oarba sau o anume antipatie fata de PNTCD .
dar prestatia din ultimii ani , in loc sa duca la o perceptie pozitiva , a transferat toata raspunderea guvernarii pe umerii sai .
acest fapt e deja evident .
e dureros , e neplacut , dar nu e fatal .
oameni si institutii s - au aflat in situatii mult mai nenorocite . Dar au iesit la liman .
prima conditie pentru a putea sa te salvezi este sa descoperi unde ai gresit si sa dai semne ca vrei sa te schimbi .
or , taranistii se poarta de parca ar fi pe primul loc in sondajele de opinie , de parca populatia ar muri de dragul lor .
ei nu pricep ca sint totusi responsabili ca au dat Romaniei doi premieri de paie ( Victor Ciorbea si Radu Vasile ) si ca sint condusi de un om depasit de situatie ( Ion Diaconescu ) .
venerabilul presedinte taranist nu - si da seama ca a devenit subiect umoristic , ca si el este tot un fel de Ion Iliescu pe partea cealalta . Ca atunci cind il acuza pe liderul PDSR de vrute si nevrute ar trebui sa se priveasca mai intii in oglinda si apoi sa mearga sa se ia de mina .
biografiile lor sint net diferite , dar conditia de acum ii apropie izbitor .
din ratiuni care tin de birocratia statutului si de dificultatea convocarii unui congres , Ion Diaconescu isi amina retragerea ( ca si Ion Iliescu ) .
fara sa - si dea seama , octogenarul politician pune plumb in picioarele propriului partid .
orice incercare de iesire din criza nu face decit sa o adinceasca .
probabil ca multi oameni din preajma sa ii picura in ureche ideea amagitoare ca fara el urmeaza sfirsitul lumii , ca taranistii mai tineri se vor sfisia intre ei , si partidul va avea de suferit .
or , lucrurile stau exact pe dos .
cu un presedinte sifonat de prestatia politica din ultimii trei ani , PNTCD nu poate sustine convingator pe absolut nimeni .
sprijinul sau , in loc sa faca bine , ingroapa sigur .
fara un semn de trezire , de schimbare , de curatare , PNTCD se conserva , dar facindu - se din ce in ce mai mic , tot mai mic , ca pupaza care se pierde in zare .
retragerea lui Ion Diaconescu ar fi un prim - semn ca acest partid istoric mai are rezerve de luciditate si bun simt .
alaltaieri , Ion Diaconescu a evitat aceasta miscare binefacatoare pentru partidul sau .
fiindca foarte putini au curajul sa i - o spuna , nu ne ramine decit sa scriem in acest colt de pagina o propozitie dureroasa .
domnule Diaconescu , ar cam fi vremea !
|
/data/Newspapers/2000.07.28.editorial.28595.0398.r
|
no_license
|
narcis96/decrypting-alpha
|
R
| false
| false
| 4,316
|
r
|
stiti ce este o " cotitura radicala " ?
e o miscare hotarita in urma careia elanul si dorinta de schimbare duc un om , o echipa , un partid sau o tara exact in pozitia in care erau si inainte .
nici nea Nicu , atunci cind zicea : " Tovarasi , sa facem o cotitura radicala in munca noastra " , nu - si imagina ca noi o vom face atit de virtos incit sa ajungem la situatia de la care am plecat .
asa si taranistii !
se tot aduna , se tot reunesc si tot fac planuri de relansare a partidului pina cind se trezesc la aceeasi antipatie de care se " bucura " de la o vreme incoace .
sedinta de alaltaieri a reprezentat si ea o " cotitura radicala " in stilul celor de pe vremuri .
partidul lui Maniu si Mihalache pare blestemat .
nu reuseste sa se desprinda din virtejul care il trage vertiginos sub pragul electoral .
taranistii nu inteleg ce se intimpla , nu sint capabili sa se detaseze de situatia critica pe care o traverseaza .
chiar n - au umor .
isi imagineaza ca ei ar putea sustine un prezidentiabil care sa si cistige .
fara sa se gindeasca macar o secunda , ei s - au grabit , prin Ion Diaconescu si la sedinta de alaltaieri , sa sprijine candidatura lui Mugur Isarescu .
din acest gest se poate vedea cit de confuza e gindirea oamenilor de aici .
inca nu si - au dat seama ca populatia percepe PNTCD ca principal responsabil al neimplinirilor acestei guvernari .
pur si simplu nu inteleg ca sustinerea lor are valoarea unui handicap pe care ei il ofera ca pe o floare , ca pe un cadou de pret .
taranistii sint in situatia de a nu pricepe ca si daca l - ar sustine pe Iuliu Maniu sau pe Nicolae Titulescu , in conditiile Romaniei de azi n - ar reusi decit sa le asigure acestora infringerea .
daca ar fi sprijiniti de taranisti , si Tony Blair sau Bill Clinton ar pierde din start orice fel de alegeri in Romania .
n - as vrea sa se inteleaga ca aceste pareri au la baza o furie oarba sau o anume antipatie fata de PNTCD .
dar prestatia din ultimii ani , in loc sa duca la o perceptie pozitiva , a transferat toata raspunderea guvernarii pe umerii sai .
acest fapt e deja evident .
e dureros , e neplacut , dar nu e fatal .
oameni si institutii s - au aflat in situatii mult mai nenorocite . Dar au iesit la liman .
prima conditie pentru a putea sa te salvezi este sa descoperi unde ai gresit si sa dai semne ca vrei sa te schimbi .
or , taranistii se poarta de parca ar fi pe primul loc in sondajele de opinie , de parca populatia ar muri de dragul lor .
ei nu pricep ca sint totusi responsabili ca au dat Romaniei doi premieri de paie ( Victor Ciorbea si Radu Vasile ) si ca sint condusi de un om depasit de situatie ( Ion Diaconescu ) .
venerabilul presedinte taranist nu - si da seama ca a devenit subiect umoristic , ca si el este tot un fel de Ion Iliescu pe partea cealalta . Ca atunci cind il acuza pe liderul PDSR de vrute si nevrute ar trebui sa se priveasca mai intii in oglinda si apoi sa mearga sa se ia de mina .
biografiile lor sint net diferite , dar conditia de acum ii apropie izbitor .
din ratiuni care tin de birocratia statutului si de dificultatea convocarii unui congres , Ion Diaconescu isi amina retragerea ( ca si Ion Iliescu ) .
fara sa - si dea seama , octogenarul politician pune plumb in picioarele propriului partid .
orice incercare de iesire din criza nu face decit sa o adinceasca .
probabil ca multi oameni din preajma sa ii picura in ureche ideea amagitoare ca fara el urmeaza sfirsitul lumii , ca taranistii mai tineri se vor sfisia intre ei , si partidul va avea de suferit .
or , lucrurile stau exact pe dos .
cu un presedinte sifonat de prestatia politica din ultimii trei ani , PNTCD nu poate sustine convingator pe absolut nimeni .
sprijinul sau , in loc sa faca bine , ingroapa sigur .
fara un semn de trezire , de schimbare , de curatare , PNTCD se conserva , dar facindu - se din ce in ce mai mic , tot mai mic , ca pupaza care se pierde in zare .
retragerea lui Ion Diaconescu ar fi un prim - semn ca acest partid istoric mai are rezerve de luciditate si bun simt .
alaltaieri , Ion Diaconescu a evitat aceasta miscare binefacatoare pentru partidul sau .
fiindca foarte putini au curajul sa i - o spuna , nu ne ramine decit sa scriem in acest colt de pagina o propozitie dureroasa .
domnule Diaconescu , ar cam fi vremea !
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ATEbounds.R
\name{ATEbounds}
\alias{ATEbounds}
\title{Bounding the Average Treatment Effect when some of the Outcome Data are
Missing}
\usage{
ATEbounds(
formula,
data = parent.frame(),
maxY = NULL,
minY = NULL,
alpha = 0.05,
n.reps = 0,
strata = NULL,
ratio = NULL,
survey = NULL,
...
)
}
\arguments{
\item{formula}{A formula of the form \code{Y ~ X} where \code{Y} is the name
of the outcome variable and \code{X} is the name of the (randomized)
treatment variable. \code{X} should be a factor variable but its value can
take more than two levels. The missing values for \code{Y} should be coded
as \code{NA}.}
\item{data}{A data frame containing the relevant variables.}
\item{maxY}{A scalar. The maximum value of the outcome variable. The default
is the maximum sample value.}
\item{minY}{A scalar. The minimum value of the outcome variable. The default
is the minimum sample value.}
\item{alpha}{A positive scalar that is less than or equal to 0.5. This will
determine the (1-\code{alpha}) level of confidence intervals. The default is
\code{0.05}.}
\item{n.reps}{A positive integer. The number of bootstrap replicates used
for the construction of confidence intervals via B-method of Beran (1988).
If it equals zero, the confidence intervals will not be constructed.}
\item{strata}{The variable name indicating strata. If this is specified, the
quantities of interest will be first calculated within each strata and then
aggregated. The default is \code{NULL}.}
\item{ratio}{A \eqn{J \times M} matrix of probabilities where \eqn{J} is the
number of strata and \eqn{M} is the number of treatment and control groups.
Each element of the matrix specifies the probability of a unit falling into
that category. The default is \code{NULL} in which case the sample estimates
of these probabilities are used for computation.}
\item{survey}{The variable name for survey weights. The default is
\code{NULL}.}
\item{...}{The arguments passed to other functions.}
}
\value{
A list of class \code{ATEbounds} which contains the following items:
\item{call}{ The matched call. } \item{Y}{ The outcome variable. }
\item{D}{ The treatment variable. } \item{bounds}{ The point estimates of
the sharp bounds on the average treatment effect. } \item{bounds.Y}{ The
point estimates of the sharp bounds on the outcome variable within each
treatment/control group. } \item{bmethod.ci}{ The B-method confidence
interval of the bounds on the average treatment effect. } \item{bonf.ci}{
The Bonferroni confidence interval of the bounds on the average treatment
effect. } \item{bonf.ci.Y}{ The Bonferroni confidence interval of the
bounds on the outcome variable within each treatment/control group. }
\item{bmethod.ci.Y}{ The B-method confidence interval of the bounds on the
outcome variable within each treatment/control group. } \item{maxY}{ The
maximum value of the outcome variable used in the computation. }
\item{minY}{ The minimum value of the outcome variable used in the
computation. } \item{nobs}{ The number of observations. } \item{nobs.Y}{
The number of observations within each treatment/control group. }
\item{ratio}{ The probability of treatment assignment (within each strata if
\code{strata} is specified) used in the computation. }
}
\description{
This function computes the sharp bounds on the average treatment effect when
some of the outcome data are missing. The confidence intervals for the
bounds are also computed.
}
\details{
For the details of the method implemented by this function, see the
references.
}
\references{
Horowitz, Joel L. and Charles F. Manski. (1998).
\dQuote{Censoring of Outcomes and Regressors due to Survey Nonresponse:
Identification and Estimation Using Weights and Imputations.} \emph{Journal
of Econometrics}, Vol. 84, pp.37-58.
Horowitz, Joel L. and Charles F. Manski. (2000). \dQuote{Nonparametric
Analysis of Randomized Experiments With Missing Covariate and Outcome Data.}
\emph{Journal of the American Statistical Association}, Vol. 95, No. 449,
pp.77-84.
Harris-Lacewell, Melissa, Kosuke Imai, and Teppei Yamamoto. (2007).
\dQuote{Racial Gaps in the Responses to Hurricane Katrina: An Experimental
Study}, \emph{Technical Report}. Department of Politics, Princeton
University.
}
\author{
Kosuke Imai, Department of Government and Department of Statistics, Harvard University
\email{imai@Harvard.Edu}, \url{https://imai.fas.harvard.edu};
}
\keyword{design}
|
/man/ATEbounds.Rd
|
no_license
|
kosukeimai/experiment
|
R
| false
| true
| 4,521
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ATEbounds.R
\name{ATEbounds}
\alias{ATEbounds}
\title{Bounding the Average Treatment Effect when some of the Outcome Data are
Missing}
\usage{
ATEbounds(
formula,
data = parent.frame(),
maxY = NULL,
minY = NULL,
alpha = 0.05,
n.reps = 0,
strata = NULL,
ratio = NULL,
survey = NULL,
...
)
}
\arguments{
\item{formula}{A formula of the form \code{Y ~ X} where \code{Y} is the name
of the outcome variable and \code{X} is the name of the (randomized)
treatment variable. \code{X} should be a factor variable but its value can
take more than two levels. The missing values for \code{Y} should be coded
as \code{NA}.}
\item{data}{A data frame containing the relevant variables.}
\item{maxY}{A scalar. The maximum value of the outcome variable. The default
is the maximum sample value.}
\item{minY}{A scalar. The minimum value of the outcome variable. The default
is the minimum sample value.}
\item{alpha}{A positive scalar that is less than or equal to 0.5. This will
determine the (1-\code{alpha}) level of confidence intervals. The default is
\code{0.05}.}
\item{n.reps}{A positive integer. The number of bootstrap replicates used
for the construction of confidence intervals via B-method of Beran (1988).
If it equals zero, the confidence intervals will not be constructed.}
\item{strata}{The variable name indicating strata. If this is specified, the
quantities of interest will be first calculated within each strata and then
aggregated. The default is \code{NULL}.}
\item{ratio}{A \eqn{J \times M} matrix of probabilities where \eqn{J} is the
number of strata and \eqn{M} is the number of treatment and control groups.
Each element of the matrix specifies the probability of a unit falling into
that category. The default is \code{NULL} in which case the sample estimates
of these probabilities are used for computation.}
\item{survey}{The variable name for survey weights. The default is
\code{NULL}.}
\item{...}{The arguments passed to other functions.}
}
\value{
A list of class \code{ATEbounds} which contains the following items:
\item{call}{ The matched call. } \item{Y}{ The outcome variable. }
\item{D}{ The treatment variable. } \item{bounds}{ The point estimates of
the sharp bounds on the average treatment effect. } \item{bounds.Y}{ The
point estimates of the sharp bounds on the outcome variable within each
treatment/control group. } \item{bmethod.ci}{ The B-method confidence
interval of the bounds on the average treatment effect. } \item{bonf.ci}{
The Bonferroni confidence interval of the bounds on the average treatment
effect. } \item{bonf.ci.Y}{ The Bonferroni confidence interval of the
bounds on the outcome variable within each treatment/control group. }
\item{bmethod.ci.Y}{ The B-method confidence interval of the bounds on the
outcome variable within each treatment/control group. } \item{maxY}{ The
maximum value of the outcome variable used in the computation. }
\item{minY}{ The minimum value of the outcome variable used in the
computation. } \item{nobs}{ The number of observations. } \item{nobs.Y}{
The number of observations within each treatment/control group. }
\item{ratio}{ The probability of treatment assignment (within each strata if
\code{strata} is specified) used in the computation. }
}
\description{
This function computes the sharp bounds on the average treatment effect when
some of the outcome data are missing. The confidence intervals for the
bounds are also computed.
}
\details{
For the details of the method implemented by this function, see the
references.
}
\references{
Horowitz, Joel L. and Charles F. Manski. (1998).
\dQuote{Censoring of Outcomes and Regressors due to Survey Nonresponse:
Identification and Estimation Using Weights and Imputations.} \emph{Journal
of Econometrics}, Vol. 84, pp.37-58.
Horowitz, Joel L. and Charles F. Manski. (2000). \dQuote{Nonparametric
Analysis of Randomized Experiments With Missing Covariate and Outcome Data.}
\emph{Journal of the American Statistical Association}, Vol. 95, No. 449,
pp.77-84.
Harris-Lacewell, Melissa, Kosuke Imai, and Teppei Yamamoto. (2007).
\dQuote{Racial Gaps in the Responses to Hurricane Katrina: An Experimental
Study}, \emph{Technical Report}. Department of Politics, Princeton
University.
}
\author{
Kosuke Imai, Department of Government and Department of Statistics, Harvard University
\email{imai@Harvard.Edu}, \url{https://imai.fas.harvard.edu};
}
\keyword{design}
|
read_dir <- function (path = getwd(), file.type = c("csv"), read.as = c("csv"))
{
  # Read every file in `path` whose extension appears in `file.type` and
  # return a named list of tables, one entry per file (names = file name
  # with its extension stripped). Each extension is parsed with the reader
  # named in the corresponding position of `read.as`: "csv", "tsv", or "xlsx".
  #
  # Args:
  #   path:      directory to read from (default: current working directory).
  #   file.type: character vector of file extensions to search for.
  #   read.as:   character vector, same length as file.type, giving how to
  #              parse each extension.
  #
  # Lookup table: column 1 = supported read.as types, column 2 = the field
  # separator handed to read.table ("xlsx" is dispatched to readxl instead).
  map <- matrix(c("csv", "tsv", "xlsx",
                  ",", "\t", "xlsx"), ncol = 2)
  if(length(file.type) != length(read.as)) stop("file.type and read.as must be of same length")
  if(!(all(read.as %in% map[, 1]))) stop("A read.as type specified is not supported")
  fileNames <- list.files(path)
  allTbls <- vector('list', length(fileNames))
  # Recursive worker: consumes one (file.type, read.as) pair per call, filling
  # `li` starting at slot `ind`, then recurses on the remaining pairs.
  read_type <- function(li, file.type, read.as, ind) {
    if (length(file.type) == 0)
      # seq_len() makes the intent explicit; the original `1:ind-1` parsed as
      # (1:ind)-1 and only worked because R drops a 0 index. seq_len() is also
      # safe when ind == 1 (no files read at all).
      return(li[seq_len(ind - 1)])
    # Anchor the extension at the end of the name so e.g. "a.csv.bak" is not
    # picked up while scanning for "csv".
    typeFiles <- fileNames[grepl(paste0("(.)+\\.", file.type[1], "$"), fileNames)]
    incr <- length(typeFiles)
    tblList <- lapply(typeFiles, function(f) {
      if (read.as[1] == "xlsx") {
        readxl::read_xlsx(file.path(path, f), 1)
      }
      else {
        # Index the separator by (row, column) instead of flattened-vector
        # subscripting, which silently relied on matrix recycling.
        utils::read.table(file.path(path, f), header = TRUE, sep = map[map[, 1] == read.as[1], 2])
      }
    })
    # Only assign when at least one file matched; with incr == 0 the range
    # ind:(ind+incr-1) would count DOWN and clobber the previous slot.
    if (incr > 0) {
      li[ind:(ind+incr-1)] <- tblList
      # Name each table after its file, stripping from the first dot onward.
      names(li)[ind:(ind+incr-1)] <- gsub("\\.(.)+", "", typeFiles)
    }
    read_type(li, file.type[-1], read.as[-1], ind+incr)
  }
  return(read_type(allTbls, file.type, read.as, 1))
}
# Demo usage. NOTE(review): setwd() changes global session state and assumes a
# "Test" subdirectory exists under the current working directory.
setwd("./Test")
# Read all csv, txt (parsed as tab-separated), and xlsx files from ./Test
# into a single named list of tables.
a <- read_dir(file.type = c("csv", "txt", "xlsx"), read.as = c("csv", "tsv", "xlsx"))
|
/other/read_dir.R
|
no_license
|
benpremireiller/my-personal-repo
|
R
| false
| false
| 1,315
|
r
|
read_dir <- function (path = getwd(), file.type = c("csv"), read.as = c("csv"))
{
  # Read every file in `path` whose extension appears in `file.type` and
  # return a named list of tables, one entry per file (names = file name
  # with its extension stripped). Each extension is parsed with the reader
  # named in the corresponding position of `read.as`: "csv", "tsv", or "xlsx".
  #
  # Args:
  #   path:      directory to read from (default: current working directory).
  #   file.type: character vector of file extensions to search for.
  #   read.as:   character vector, same length as file.type, giving how to
  #              parse each extension.
  #
  # Lookup table: column 1 = supported read.as types, column 2 = the field
  # separator handed to read.table ("xlsx" is dispatched to readxl instead).
  map <- matrix(c("csv", "tsv", "xlsx",
                  ",", "\t", "xlsx"), ncol = 2)
  if(length(file.type) != length(read.as)) stop("file.type and read.as must be of same length")
  if(!(all(read.as %in% map[, 1]))) stop("A read.as type specified is not supported")
  fileNames <- list.files(path)
  allTbls <- vector('list', length(fileNames))
  # Recursive worker: consumes one (file.type, read.as) pair per call, filling
  # `li` starting at slot `ind`, then recurses on the remaining pairs.
  read_type <- function(li, file.type, read.as, ind) {
    if (length(file.type) == 0)
      # seq_len() makes the intent explicit; the original `1:ind-1` parsed as
      # (1:ind)-1 and only worked because R drops a 0 index. seq_len() is also
      # safe when ind == 1 (no files read at all).
      return(li[seq_len(ind - 1)])
    # Anchor the extension at the end of the name so e.g. "a.csv.bak" is not
    # picked up while scanning for "csv".
    typeFiles <- fileNames[grepl(paste0("(.)+\\.", file.type[1], "$"), fileNames)]
    incr <- length(typeFiles)
    tblList <- lapply(typeFiles, function(f) {
      if (read.as[1] == "xlsx") {
        readxl::read_xlsx(file.path(path, f), 1)
      }
      else {
        # Index the separator by (row, column) instead of flattened-vector
        # subscripting, which silently relied on matrix recycling.
        utils::read.table(file.path(path, f), header = TRUE, sep = map[map[, 1] == read.as[1], 2])
      }
    })
    # Only assign when at least one file matched; with incr == 0 the range
    # ind:(ind+incr-1) would count DOWN and clobber the previous slot.
    if (incr > 0) {
      li[ind:(ind+incr-1)] <- tblList
      # Name each table after its file, stripping from the first dot onward.
      names(li)[ind:(ind+incr-1)] <- gsub("\\.(.)+", "", typeFiles)
    }
    read_type(li, file.type[-1], read.as[-1], ind+incr)
  }
  return(read_type(allTbls, file.type, read.as, 1))
}
# Demo usage. NOTE(review): setwd() changes global session state and assumes a
# "Test" subdirectory exists under the current working directory.
setwd("./Test")
# Read all csv, txt (parsed as tab-separated), and xlsx files from ./Test
# into a single named list of tables.
a <- read_dir(file.type = c("csv", "txt", "xlsx"), read.as = c("csv", "tsv", "xlsx"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/uhc_plots.R
\name{calc_w}
\alias{calc_w}
\title{Calculate \code{w(x)}}
\usage{
calc_w(f, b, newdata)
}
\arguments{
\item{f}{\verb{[formula]} \cr A model formula.}
\item{b}{\verb{[numeric]} A named vector of coefficients.}
\item{newdata}{\verb{[data.frame]} \cr A \code{data.frame} to predict eHSF values.}
}
\description{
Calculates the value of the exponential habitat selection function
}
\details{
This is actually likely to be w(x) * \phi(x) for an iSSF.
}
|
/man/calc_w.Rd
|
no_license
|
jmsigner/amt
|
R
| false
| true
| 539
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/uhc_plots.R
\name{calc_w}
\alias{calc_w}
\title{Calculate \code{w(x)}}
\usage{
calc_w(f, b, newdata)
}
\arguments{
\item{f}{\verb{[formula]} \cr A model formula.}
\item{b}{\verb{[numeric]} A named vector of coefficients.}
\item{newdata}{\verb{[data.frame]} \cr A \code{data.frame} to predict eHSF values.}
}
\description{
Calculates the value of the exponential habitat selection function
}
\details{
This is actually likely to be w(x) * \phi(x) for an iSSF.
}
|
# ---- Section 1: Build the lakeStations lookup ----------------------------
# Joins lake assessment units (AUs) to monitoring stations and to the
# 9VAC25-260-187 nutrient standards, producing data/lakeStationsFinal.RDS.
# This script will organize data in concise manner to attribute lat/lng to AU and appropriate lake name
# and additional information
library(tidyverse)
library(rgdal)
# data from 2016 IR, there are 255 AU's in the 2016 IR, one additional from 2014
# table was exported from VA_ADB_2016_final.mdb, tblAllLakes_2016 table
allLakes <- readxl::read_excel('data/tblAllLakes_2016.xlsx',sheet='tblAllLakes_2016')
# Interactive sanity checks on lake names and assessment-unit IDs.
unique(allLakes$WATER_NAME)
unique(allLakes$ID305B)
# NOTE(review): lakeStationIDs is not defined until further below, so this
# line errors when the script is run top-to-bottom from a clean session.
filter(lakeStationIDs,ID305B_1 != ID305B_2)
# 2014 WQMS GIS data brought in from U:/305b2014/GIS_Layers_2014/Monitoring_Stations/Stations_2014
#wqms_2014 <- readOGR('C:/GIS/EmmaGIS/Assessment/MonitoringStations','Stations_2014')@data
# Can't bring in 2014 bc the U drive version is only WCRO
# 2016 WQMS GIS data brought in from U:\305b2016\GIS\2016_wqms
# Local copies stored in C:\GIS\EmmaGIS\Assessment\MonitoringStations
#wqms_2016 <- readOGR('C:/GIS/EmmaGIS/Assessment/MonitoringStations/2016IR','2016_wqms')@data # nonFinal version
# Final version from X:\2016_Assessment\GIS_2016\MonitoringStations copied to a local location
wqms_2016 <- readOGR('C:/GIS/EmmaGIS/Assessment/MonitoringStations/2016IR','va_16ir_wqm_stations')@data #final version
# Connect allLakes ID305b to wqms_2016 to get StationIDs sampled in each Lake
lakeStationIDs <- filter(wqms_2016,ID305B_1 %in% unique(allLakes$ID305B))
# Broaden the match: a station can carry up to three AU IDs (ID305B_1..3).
lakeStationIDs2 <- filter(wqms_2016,ID305B_1 %in% unique(allLakes$ID305B) |
                            ID305B_2 %in% unique(allLakes$ID305B) |
                            ID305B_3 %in% unique(allLakes$ID305B) )
subset(data.frame(x=lakeStationIDs2$ID305B_1 %in% lakeStationIDs$ID305B_1),x==FALSE)
# There are 3 stations that ID305B_1 doesn't capture, row 150,182,199
# Make a column to join on ID305B, making the three needed corrections
lakeStationIDs2 <- dplyr::select(lakeStationIDs2,STATION_ID:VAHU6) %>%
  dplyr::mutate(ID305B=ID305B_1)
# NOTE(review): hard-coded row indices are fragile; they depend on the exact
# row order of the 2016 shapefile export.
lakeStationIDs2[150,]$ID305B <- lakeStationIDs2$ID305B_3[150] #VAC-L73L_DAN07A04
lakeStationIDs2[182,]$ID305B <- lakeStationIDs2$ID305B_3[182] #VAW-L10L_BWR03A10
lakeStationIDs2[199,]$ID305B <- lakeStationIDs2$ID305B_2[199] #VAC-L79L_ROA07A98
lakeStations <- plyr::join(lakeStationIDs2,allLakes,by="ID305B")
lakeStations <- lakeStations[,-15] # get rid of duplicate REGION column, will mess up dplyr functions
# Bring in Roger's conventionals
conventionals <- read_csv('data/CONVENTIONALS_20171010.csv')
conventionals$FDT_DATE_TIME2 <- as.POSIXct(conventionals$FDT_DATE_TIME, format="%m/%d/%y %H:%M")
# Keep only conventionals records collected at identified lake stations.
lakeStationIDdata <- filter(conventionals,FDT_STA_ID %in% unique(lakeStations$STATION_ID))
length(unique(lakeStationIDdata$FDT_STA_ID))
# Transform 9VAC25-260-187 Individual lake standards for nutrients into a table
# from https://leg1.state.va.us/cgi-bin/legp504.exe?000+reg+9VAC25-260-187
# I copy/pasted the table into excel and saved as .csv
lakeNutStandards <- read_csv('data/9VAC25-260-187lakeNutrientStandards.csv')%>%
  mutate(SIGLAKENAME=`Man-made Lake or Reservoir Name`)
colnames(lakeNutStandards)[3:4] <- c('Chlorophyll_A_limit','TPhosphorus_limit')
lakeStations <- left_join(lakeStations,lakeNutStandards,by='SIGLAKENAME')
# Some Lake names don't match between two dataframes so fix them manually after next step
# Find StationID's associated with Lacustrine zone
# this is only for WCRO at present. I need a full list of stations statewide with this designation
# Data from U:\305b2016\305bSTAMASTER\305b.mdb, MONITOR table, exported to /data folder as excel table
# This version was last updated 11/27/2017, but lake stations do not change nor are there recent
# lake station additions, so this should be comprehensive for WCRO
monitor <- readxl::read_excel('data/MONITOR.xlsx', sheet='MONITOR')%>%
  dplyr::select(STATION,Assess_TYPE)%>%
  filter(Assess_TYPE=='LZ') %>%# just get lacustrine stations
  mutate(STATION_ID=STATION) %>%dplyr::select(-c(STATION))
lakeStations <- left_join(lakeStations,monitor,by='STATION_ID')
lakeStations$STATION_ID <- as.factor(lakeStations$STATION_ID)
#saveRDS(lakeStations,'data/lakeStations.RDS')
# Afterwards, I went in manually and populated Amanda's LZ stations by looking at her
# assessment determinations (if there was a nutrient assessment I called it LZ)
lakeStations <- readRDS('data/lakeStations.RDS')#%>%
# mutate(FDT_STA_ID=STATION_ID)
# Lakes flagged as lacustrine but missing a phosphorus limit need a manual
# name fix in excel (lake-name mismatch between the two sources).
fixMe <- unique(filter(lakeStations,Assess_TYPE =='LZ' & is.na(TPhosphorus_limit))$WATER_NAME)
# go in one at a time and fix in excel
write.csv(lakeStations,'data/lakeStations.csv',row.names=F)
# bring in fixed version
lakeStations <- read.csv('data/lakeStations.csv')
saveRDS(lakeStations,'data/lakeStationsFinal.RDS')
# ---- Section 2: Assemble secchi depth data (one file per DEQ region) -----
# Organize secchi data
# have to do this step because Roger's conventionals.xlsx doesn't include secchi info which
# is necessary for TSI calculations
BRRO <- readxl::read_excel('data/secchi/BlueRidge_Sechi.xlsx')
NRO <- readxl::read_excel('data/secchi/Northern_Sechi.xlsx')
PRO <- readxl::read_excel('data/secchi/Piedmont_Sechi.xlsx')
SWRO <- readxl::read_excel('data/secchi/Southwest_Sechi.xlsx')
TRO <- readxl::read_excel('data/secchi/Tidewater_Sechi.xlsx')
VRO <- readxl::read_excel('data/secchi/Valley_Sechi.xlsx')
# Stack all regions and rename the key columns to match conventionals.
secchi <- rbind(BRRO,NRO,PRO,SWRO,TRO,VRO)%>%
  dplyr::rename(FDT_STA_ID=`Station ID`,FDT_DATE_TIME=`Date Time`) # to make joining with conventionals easier
write.csv(secchi,'data/secchi_2018IR.csv',row.names = F)
# ---- Section 3: Splice 2016 Roaring Fork Reservoir records into 2018 data --
##### ADD ROARING FORK RESERVOIR DATA TO CONVENTIONALS
# it was filtered out in 2018 data pull bc it had level3 code MUN
# I grabbed 2016 conventionals pull and added it to 2018 bc no additional data taken after 2012 (that fell in 2018 window)
library(lubridate)
# Bring in Roger's conventionals
conventionals <- read_csv('data/CONVENTIONALS_20171010_updated.csv')
conventionals$FDT_DATE_TIME2 <- as.POSIXct(conventionals$FDT_DATE_TIME, format="%m/%d/%y %H:%M")
# read in local copy of X:/2016_Assessment/Monitoring Data/2016_cedswqm_data/CONVENTIONALS.xlsx
c2016 <- readxl::read_excel('C:/HardDriveBackup/IR/IR2016/CONVENTIONALS.xlsx',sheet='CONVENTIONALS')
# Pull the one Roaring Fork station and reshape its columns to exactly match
# the 2018 conventionals layout: add/rename missing columns, drop obsolete
# ones, then reorder to the 2018 column order.
rfk <- filter(c2016, FDT_STA_ID =='4ARFK000.20') %>%
  mutate(STA_LV1_CODE='RESERV',STA_LV3_CODE='MUN',Deq_Region = 'SCRO',STA_DESC ='ROARING FK RESERV AT DAM',
         FDT_COMMENT=NA,DO=FDT_DO_PROBE,DO_RMK = FDT_DO_PROBE_RMK,FDT_SPECIFIC_CONDUCTANCE=NA, FDT_SPECIFIC_CONDUCTANCE_RMK=NA,
         AMMONIA = NA,RMK_AMMONIA=NA,NH3_DISS=NA, RMK_00608=NA, NH3_TOTAL=NA, RMK_00610=NA,SULFATE_TOTAL =NA, RMK_00945=NA,
         SULFATE_DISS=NA, RMK_00946=NA, STA_LV2_CODE ='AMBNT',Latitude=LATITUDE, Longitude=LONGITUDE,FDT_SALINITY_RMK =NA,
         Majorbasincode=NA, Majorbasinname=NA, Basin=NA, Subbasin=NA, Huc6_Huc_8=NA,
         Huc6_Huc_8_Name=NA, Huc6_Name=NA, Huc6_Huc_12=NA, Huc6_Huc_12_Name=NA, Huc6_Vahu5=NA, Huc6_Vahu6=NA, STA_CBP_NAME=NA) %>%
  dplyr::select(-c(FDT_CAG_CODE,FDT_DO_PROBE, FDT_DO_PROBE_RMK,LATITUDE,LONGITUDE)) %>%
  dplyr::select(FDT_STA_ID, STA_LV3_CODE, STA_LV1_CODE, STA_REC_CODE, Deq_Region, STA_DESC,
                FDT_SSC_CODE, FDT_SPG_CODE, FDT_DATE_TIME, FDT_DEPTH, FDT_DEPTH_DESC, FDT_PERCENT_FRB,
                FDT_COMMENT, FDT_TEMP_CELCIUS, FDT_TEMP_CELCIUS_RMK, FDT_FIELD_PH, FDT_FIELD_PH_RMK, DO,
                DO_RMK, FDT_SPECIFIC_CONDUCTANCE, FDT_SPECIFIC_CONDUCTANCE_RMK, FDT_SALINITY,FDT_SALINITY_RMK, NITROGEN,RMK_00600,AMMONIA,
                RMK_AMMONIA,NH3_DISS, RMK_00608, NH3_TOTAL, RMK_00610, PHOSPHORUS, RMK_00665, FECAL_COLI,
                RMK_31616, E.COLI, RMK_ECOLI, ENTEROCOCCI, RMK_31649, CHLOROPHYLL, RMK_32211, SSC, SSC_RMK,
                NITRATE, RMK_00620, CHLORIDE, RMK_00940, SULFATE_TOTAL, RMK_00945, SULFATE_DISS, RMK_00946,
                STA_LV2_CODE, Latitude, Longitude, Majorbasincode, Majorbasinname, Basin, Subbasin, Huc6_Huc_8,
                Huc6_Huc_8_Name, Huc6_Name, Huc6_Huc_12, Huc6_Huc_12_Name, Huc6_Vahu5, Huc6_Vahu6, STA_CBP_NAME) %>%
  # Reformat the POSIX datetime back into the "m/d/yy HH:MM" string format
  # the 2018 conventionals file uses, so rbind() keeps one consistent format.
  mutate(month=month(FDT_DATE_TIME),day=day(FDT_DATE_TIME),year=year(FDT_DATE_TIME),
         time=substring(gsub('.* ([0-9]+)', '\\1' , as.character(FDT_DATE_TIME)),1,5),
         FDT_DATE_TIME3 = paste(month,'/',day,'/',substring(year,3,4),' ',time,sep='')) %>%
  mutate(FDT_DATE_TIME=FDT_DATE_TIME3)%>%
  dplyr::select(-c(FDT_DATE_TIME3,month,day,year,time))
rfk$FDT_DATE_TIME2 <- as.POSIXct(as.character(rfk$FDT_DATE_TIME), format="%m/%d/%y %H:%M",tz='UTC')
conventionals2 <- rbind(conventionals,rfk)
conventionals2$FDT_PERCENT_FRB <- as.integer(as.character(conventionals2$FDT_PERCENT_FRB))
# Compare column types between the original and combined frames.
str(conventionals[60:67])
str(conventionals2[60:67])
write.csv(conventionals2,'data/conventionals08152018EVJ.csv',row.names = F)
# ---- Section 4: Bring in citizen-monitoring data and attribute lake AUs ----
# Add in Citizen data
# Bring in Roger's conventionals
conventionals <- read_csv('data/CONVENTIONALS_20171010.csv')
conventionals$FDT_DATE_TIME2 <- as.POSIXct(conventionals$FDT_DATE_TIME, format="%m/%d/%y %H:%M")
lakeStations <- readRDS('data/lakeStations.RDS')%>%
  mutate(FDT_STA_ID=STATION_ID)
# Bring in Citizen data, Paula organized it to match up with Roger's data format
cit1 <- read_csv('data/Citmon_New_Reorg_1.csv') %>%
  dplyr::select(-c(X1))
cit2 <- read_csv('data/Citmon_New_Reorg_2.csv') %>%
  dplyr::select(-c(X1))
cit <- rbind(cit1,cit2)
cit$FDT_DATE_TIME2 <- as.POSIXct(cit$FDT_DATE_TIME, format="%m/%d/%Y")
# For lake app, get rid of non level 3 citizen data
# unfortunately, this is best done by hand.
write.csv(cit,'data/citdata.csv')
# Bring back in the clean dataset (no level 1 or 2 data included)
cit3 <- read.csv('data/citdata_EVJclean.csv')
# Ferrum College doesnt report time with measures, fix so can have same datetime format for all data
cit3$FDT_DATE_TIME <- as.character(cit3$FDT_DATE_TIME)
cit3$FDT_DATE_TIME2 <- as.POSIXct(as.character(cit3$FDT_DATE_TIME), format="%m/%d/%y %H:%M")
# NOTE(review): positional column renames depend on the exact column order of
# citdata_EVJclean.csv — verify if that file is regenerated.
names(cit3)[c(66,63,61,60,56)] <- c("STA_CBP_NAME","Huc6_Huc_12_Name","Huc6_Name","Huc6_Huc_8_Name","Majorbasinname" ) # fix where arcGIS cut off full column names
# Figure out which AU citizen stations fall into to add to lakeStations
# 2016 Final AU's from X:\2016_Assessment\GIS_2016\StatewideAUs I copied locally, imported into GIS, and exported just the va_2016_aus_reservoir file
library(rgdal)
lakeAU <- readOGR('data','va_2016_aus_reservoir_prjWGS84')
lakeAU@proj4string
cit_shp <- cit# just use citizen sites, not full conventionals2 dataset
coordinates(cit_shp) <- ~Longitude+Latitude
proj4string(cit_shp) <- CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0 ") # WGS84 to start
# double check everything will work
identicalCRS(lakeAU,cit_shp)
# Point-in-polygon overlay: for each citizen station, record the single lake
# AU polygon it falls inside; NA when it falls in zero or multiple polygons.
cit_shp@data$ID305B_1 <- NA
for(i in 1:nrow(cit_shp@data)){
  z <- lakeAU[cit_shp[i,],]
  if(nrow(z@data) < 1 | nrow(z@data) > 1 ){
    cit_shp@data$ID305B_1[i] <- NA
  }else{
    cit_shp@data$ID305B_1[i] <- as.character(z@data$ID305B)
  }
}
# Check the broken ones in GIS
writeOGR(obj=cit_shp, dsn="data", layer="kindafixedcitdata", driver="ESRI Shapefile")
# see if paula's QA helped?
me <- readOGR('data','kindafixedcitdata')
paula <- readOGR('data/cit','kindafixedcitdata')
me@data %>%
  mutate_all(as.character)==paula@data%>%
  mutate_all(as.character)
as.character(me@data$ID305) == as.character(paula@data$ID305)
# nope. same result as what I came up with
rm(paula)
rm(me)
# ---- Section 5: Re-run AU overlay on the cleaned citizen data and append
# the resulting citizen stations to the lakeStations lookup ----
# Figure out which AU citizen stations fall into to add to lakeStations
# 2016 Final AU's from X:\2016_Assessment\GIS_2016\StatewideAUs I copied locally, imported into GIS, and exported just the va_2016_aus_reservoir file
library(rgdal)
lakeAU <- readOGR('data','va_2016_aus_reservoir_prjWGS84')
lakeAU@proj4string
# don't lose lat/long data
cit3 <- mutate(cit3,lat=Latitude,long=Longitude)
cit_shp <- cit3# just use citizen sites, not full conventionals2 dataset
coordinates(cit_shp) <- ~long+lat
proj4string(cit_shp) <- CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0 ") # WGS84 to start
# double check everything will work
identicalCRS(lakeAU,cit_shp)
# Same point-in-polygon overlay as above, now on the cleaned dataset.
cit_shp@data$ID305B_1 <- NA
for(i in 1:nrow(cit_shp@data)){
  z <- lakeAU[cit_shp[i,],]
  if(nrow(z@data) < 1 | nrow(z@data) > 1 ){
    cit_shp@data$ID305B_1[i] <- NA
  }else{
    cit_shp@data$ID305B_1[i] <- as.character(z@data$ID305B)
  }
}
# Filter out stations that don't match up with a Lake AU bc that means they aren't in fact in a lake
citCorrect <- cit_shp[!is.na(cit_shp@data$ID305B_1),]
cit4 <- filter(cit3,FDT_STA_ID %in% citCorrect@data$FDT_STA_ID)
# Add citizen lake stations to lakeStations.RDS
citFinal <- dplyr::select(citCorrect@data,FDT_STA_ID,ID305B_1,Latitude,Longitude) %>%
  dplyr::rename(STATION_ID=FDT_STA_ID,DD_LAT=Latitude,DD_LONG=Longitude)
# Map column names: pad citFinal with NA columns so it matches lakeStations.
n1 <- unique(names(lakeStations))
n2 <- unique(names(citFinal))
needToAdd <- n1[!(n1 %in% n2)]
citFinal[needToAdd] <- NA
# Double check everything in both
names(citFinal) %in% names(lakeStations)
# Put the columns in lakeStations order so rbind() lines up.
citFinal <- dplyr::select(citFinal,STATION_ID, ID305B_1, ID305B_2, ID305B_3, DEPTH, REGION, STATYPE1, STATYPE2,
                          STATYPE3, DD_LAT, DD_LONG, WATERSHED, VAHU6, ID305B, WATER_NAME, dswc, CATEGORY_ID, TROPHIC_STATUS,
                          PUBLIC_LAKE, SEC187, SIG_LAKE, USE, OWNER, SIGLAKENAME, CYCLE, `Man-made Lake or Reservoir Name`, Location,
                          Chlorophyll_A_limit, TPhosphorus_limit, Assess_TYPE, FDT_STA_ID)
names(citFinal) == names(lakeStations)
# Just one record per station
citFinal1 <- citFinal[!duplicated(citFinal[,c('STATION_ID')]),] %>%
  mutate(FDT_STA_ID = STATION_ID, ID305B = ID305B_1)
# Fix lake name
# NOTE(review): positional fixes assume the deduplicated row order is stable.
citFinal1$SIGLAKENAME[1] <- 'Smith Mountain Lake'
citFinal1$SIGLAKENAME[2] <- 'Smith Mountain Lake'
citFinal1$SIGLAKENAME[3] <- 'Smith Mountain Lake'
citFinal1$SIGLAKENAME[4] <- 'Smith Mountain Lake'
citFinal1$SIGLAKENAME[5] <- 'Leesville Reservoir'
citFinal1$SIGLAKENAME[6] <- 'Leesville Reservoir'
# finally combine and save
lakeStationsWithCit <- rbind(lakeStations,citFinal1)
saveRDS(lakeStationsWithCit,'data/lakeStationsWithCit.RDS')
# ---- Section 6: Append citizen records to conventionals and normalize column
# types before writing the combined file ----
# smash them together
cit5 <- dplyr::select(cit4,-c(lat,long))
cit5$FDT_DATE_TIME <- as.character(cit5$FDT_DATE_TIME)
conventionals2 <- rbind(conventionals,cit5)
# Compare column types before/after the bind.
str(conventionals[1:10])
str(conventionals2[1:10])
# fix data formats for specific columns
#conventionals2$FDT_DATE_TIME2 <- as.POSIXct(conventionals2$FDT_DATE_TIME, format="%m/%d/%y %H:%M")
conventionals2$FDT_DEPTH <- as.numeric(conventionals2$FDT_DEPTH)
conventionals2$FDT_PERCENT_FRB <- as.integer(conventionals2$FDT_PERCENT_FRB)
conventionals2$FDT_SPECIFIC_CONDUCTANCE <- as.numeric(conventionals2$FDT_SPECIFIC_CONDUCTANCE)
conventionals2$FDT_SALINITY <- as.numeric(conventionals2$FDT_SALINITY)
conventionals2$NITROGEN <- as.numeric(conventionals2$NITROGEN)
conventionals2$AMMONIA <- as.numeric(conventionals2$AMMONIA)
conventionals2$NH3_DISS <- as.numeric(conventionals2$NH3_DISS)
conventionals2$NH3_TOTAL <- as.numeric(conventionals2$NH3_TOTAL)
# NOTE(review): as.integer() truncates fractional values — phosphorus
# concentrations are typically < 1, so confirm these two coercions should not
# be as.numeric() instead.
conventionals2$PHOSPHORUS <- as.integer(conventionals2$PHOSPHORUS)
conventionals2$FECAL_COLI <- as.integer(conventionals2$FECAL_COLI)
conventionals2$E.COLI <- as.numeric(conventionals2$E.COLI)
conventionals2$ENTEROCOCCI <- as.numeric(conventionals2$ENTEROCOCCI)
conventionals2$CHLOROPHYLL <- as.numeric(conventionals2$CHLOROPHYLL)
conventionals2$SSC <- as.numeric(conventionals2$SSC)
conventionals2$NITRATE <- as.numeric(conventionals2$NITRATE)
conventionals2$CHLORIDE <- as.numeric(conventionals2$CHLORIDE)
conventionals2$SULFATE_TOTAL <- as.numeric(conventionals2$SULFATE_TOTAL)
conventionals2$SULFATE_DISS <- as.numeric(conventionals2$SULFATE_DISS)
write.csv(conventionals2,'data/conventionalsWITHCITIZEN.csv',row.names = F)
|
/app2020/LakeAssessmentApp_v2/dataManagement_2018IR_AUattribution.R
|
no_license
|
EmmaVJones/LakesAssessment2020
|
R
| false
| false
| 15,569
|
r
|
# This script will organize data in concise manner to attribute lat/lng to AU and appropriate lake name
# and additional information
library(tidyverse)
library(rgdal)
# data from 2016 IR, there are 255 AU's in the 2016 IR, one additional from 2014
# table was exported from VA_ADB_2016_final.mdb, tblAllLakes_2016 table
allLakes <- readxl::read_excel('data/tblAllLakes_2016.xlsx',sheet='tblAllLakes_2016')
# Interactive sanity checks on lake names and assessment-unit (AU) IDs
unique(allLakes$WATER_NAME)
unique(allLakes$ID305B)
# NOTE(review): lakeStationIDs is not created until further down this script,
# so this line errors if the file is sourced top-to-bottom; it looks like a
# leftover interactive check -- move it below the filter() calls or drop it.
filter(lakeStationIDs,ID305B_1 != ID305B_2)
# 2014 WQMS GIS data brought in from U:/305b2014/GIS_Layers_2014/Monitoring_Stations/Stations_2014
#wqms_2014 <- readOGR('C:/GIS/EmmaGIS/Assessment/MonitoringStations','Stations_2014')@data
# Can't bring in 2014 bc the U drive version is only WCRO
# 2016 WQMS GIS data brought in from U:\305b2016\GIS\2016_wqms
# Local copies stored in C:\GIS\EmmaGIS\Assessment\MonitoringStations
#wqms_2016 <- readOGR('C:/GIS/EmmaGIS/Assessment/MonitoringStations/2016IR','2016_wqms')@data # nonFinal version
# Final version from X:\2016_Assessment\GIS_2016\MonitoringStations copied to a local location
wqms_2016 <- readOGR('C:/GIS/EmmaGIS/Assessment/MonitoringStations/2016IR','va_16ir_wqm_stations')@data #final version
# Connect allLakes ID305b to wqms_2016 to get StationIDs sampled in each Lake
lakeStationIDs <- filter(wqms_2016,ID305B_1 %in% unique(allLakes$ID305B))
# Broaden the match: a station can carry the lake AU in any of its three ID305B slots
lakeStationIDs2 <- filter(wqms_2016,ID305B_1 %in% unique(allLakes$ID305B) |
ID305B_2 %in% unique(allLakes$ID305B) |
ID305B_3 %in% unique(allLakes$ID305B) )
# Which stations were only found via slots 2/3?
subset(data.frame(x=lakeStationIDs2$ID305B_1 %in% lakeStationIDs$ID305B_1),x==FALSE)
# There are 3 stations that ID305B_1 doesn't capture, row 150,182,199
# Make a column to join on ID305B, making the three needed corrections
lakeStationIDs2 <- dplyr::select(lakeStationIDs2,STATION_ID:VAHU6) %>%
dplyr::mutate(ID305B=ID305B_1)
# Hard-coded row fixes: these three stations carry the lake AU in slot 2 or 3
lakeStationIDs2[150,]$ID305B <- lakeStationIDs2$ID305B_3[150] #VAC-L73L_DAN07A04
lakeStationIDs2[182,]$ID305B <- lakeStationIDs2$ID305B_3[182] #VAW-L10L_BWR03A10
lakeStationIDs2[199,]$ID305B <- lakeStationIDs2$ID305B_2[199] #VAC-L79L_ROA07A98
# Attach lake attributes (name, trophic status, etc.) to each station by AU
lakeStations <- plyr::join(lakeStationIDs2,allLakes,by="ID305B")
lakeStations <- lakeStations[,-15] # get rid of duplicate REGION column, will mess up dplyr functions
# Bring in Roger's conventionals
conventionals <- read_csv('data/CONVENTIONALS_20171010.csv')
conventionals$FDT_DATE_TIME2 <- as.POSIXct(conventionals$FDT_DATE_TIME, format="%m/%d/%y %H:%M")
# Keep only conventionals rows collected at a lake station
lakeStationIDdata <- filter(conventionals,FDT_STA_ID %in% unique(lakeStations$STATION_ID))
# How many lake stations actually have conventionals data
length(unique(lakeStationIDdata$FDT_STA_ID))
# Transform 9VAC25-260-187 Individual lake standards for nutrients into a table
# from https://leg1.state.va.us/cgi-bin/legp504.exe?000+reg+9VAC25-260-187
# I copy/pasted the table into excel and saved as .csv
lakeNutStandards <- read_csv('data/9VAC25-260-187lakeNutrientStandards.csv')%>%
mutate(SIGLAKENAME=`Man-made Lake or Reservoir Name`)
# Give the two numeric standard columns usable names
colnames(lakeNutStandards)[3:4] <- c('Chlorophyll_A_limit','TPhosphorus_limit')
# Attach nutrient standards to stations by significant-lake name
lakeStations <- left_join(lakeStations,lakeNutStandards,by='SIGLAKENAME')
# Some Lake names don't match between two dataframes so fix them manually after next step
# Find StationID's associated with Lacustrine zone
# this is only for WCRO at present. I need a full list of stations statewide with this designation
# Data from U:\305b2016\305bSTAMASTER\305b.mdb, MONITOR table, exported to /data folder as excel table
# This version was last updated 11/27/2017, but lake stations do not change nor are there recent
# lake station additions, so this should be comprehensive for WCRO
monitor <- readxl::read_excel('data/MONITOR.xlsx', sheet='MONITOR')%>%
dplyr::select(STATION,Assess_TYPE)%>%
filter(Assess_TYPE=='LZ') %>%# just get lacustrine stations
mutate(STATION_ID=STATION) %>%dplyr::select(-c(STATION))
lakeStations <- left_join(lakeStations,monitor,by='STATION_ID')
lakeStations$STATION_ID <- as.factor(lakeStations$STATION_ID)
#saveRDS(lakeStations,'data/lakeStations.RDS')
# Afterwards, I went in manually and populated Amanda's LZ stations by looking at her
# assessment determinations (if there was a nutrient assessment I called it LZ)
lakeStations <- readRDS('data/lakeStations.RDS')#%>%
# mutate(FDT_STA_ID=STATION_ID)
# Lakes flagged lacustrine but still lacking a phosphorus standard need manual fixes
fixMe <- unique(filter(lakeStations,Assess_TYPE =='LZ' & is.na(TPhosphorus_limit))$WATER_NAME)
# go in one at a time and fix in excel
write.csv(lakeStations,'data/lakeStations.csv',row.names=F)
# bring in fixed version
lakeStations <- read.csv('data/lakeStations.csv')
saveRDS(lakeStations,'data/lakeStationsFinal.RDS')
# Organize secchi data
# have to do this step because Roger's conventionals.xlsx doesn't include secchi info which
# is necessary for TSI calculations
BRRO <- readxl::read_excel('data/secchi/BlueRidge_Sechi.xlsx')
NRO <- readxl::read_excel('data/secchi/Northern_Sechi.xlsx')
PRO <- readxl::read_excel('data/secchi/Piedmont_Sechi.xlsx')
SWRO <- readxl::read_excel('data/secchi/Southwest_Sechi.xlsx')
TRO <- readxl::read_excel('data/secchi/Tidewater_Sechi.xlsx')
VRO <- readxl::read_excel('data/secchi/Valley_Sechi.xlsx')
# Stack all six regional offices into one statewide secchi table
secchi <- rbind(BRRO,NRO,PRO,SWRO,TRO,VRO)%>%
dplyr::rename(FDT_STA_ID=`Station ID`,FDT_DATE_TIME=`Date Time`) # to make joining with conventionals easier
write.csv(secchi,'data/secchi_2018IR.csv',row.names = F)
##### ADD ROARING FORK RESERVOIR DATA TO CONVENTIONALS
# it was filtered out in 2018 data pull bc it had level3 code MUN
# I grabbed 2016 conventionals pull and added it to 2018 bc no additional data taken after 2012 (that fell in 2018 window)
library(lubridate)
# Bring in Roger's conventionals
conventionals <- read_csv('data/CONVENTIONALS_20171010_updated.csv')
conventionals$FDT_DATE_TIME2 <- as.POSIXct(conventionals$FDT_DATE_TIME, format="%m/%d/%y %H:%M")
# read in local copy of X:/2016_Assessment/Monitoring Data/2016_cedswqm_data/CONVENTIONALS.xlsx
c2016 <- readxl::read_excel('C:/HardDriveBackup/IR/IR2016/CONVENTIONALS.xlsx',sheet='CONVENTIONALS')
# Rebuild the single Roaring Fork station in the 2018 column layout: add the
# columns the 2016 pull lacks (as NA), rename/derive the rest, then reorder
# columns to exactly match the 2018 conventionals before rbind().
rfk <- filter(c2016, FDT_STA_ID =='4ARFK000.20') %>%
mutate(STA_LV1_CODE='RESERV',STA_LV3_CODE='MUN',Deq_Region = 'SCRO',STA_DESC ='ROARING FK RESERV AT DAM',
FDT_COMMENT=NA,DO=FDT_DO_PROBE,DO_RMK = FDT_DO_PROBE_RMK,FDT_SPECIFIC_CONDUCTANCE=NA, FDT_SPECIFIC_CONDUCTANCE_RMK=NA,
AMMONIA = NA,RMK_AMMONIA=NA,NH3_DISS=NA, RMK_00608=NA, NH3_TOTAL=NA, RMK_00610=NA,SULFATE_TOTAL =NA, RMK_00945=NA,
SULFATE_DISS=NA, RMK_00946=NA, STA_LV2_CODE ='AMBNT',Latitude=LATITUDE, Longitude=LONGITUDE,FDT_SALINITY_RMK =NA,
Majorbasincode=NA, Majorbasinname=NA, Basin=NA, Subbasin=NA, Huc6_Huc_8=NA,
Huc6_Huc_8_Name=NA, Huc6_Name=NA, Huc6_Huc_12=NA, Huc6_Huc_12_Name=NA, Huc6_Vahu5=NA, Huc6_Vahu6=NA, STA_CBP_NAME=NA) %>%
dplyr::select(-c(FDT_CAG_CODE,FDT_DO_PROBE, FDT_DO_PROBE_RMK,LATITUDE,LONGITUDE)) %>%
dplyr::select(FDT_STA_ID, STA_LV3_CODE, STA_LV1_CODE, STA_REC_CODE, Deq_Region, STA_DESC,
FDT_SSC_CODE, FDT_SPG_CODE, FDT_DATE_TIME, FDT_DEPTH, FDT_DEPTH_DESC, FDT_PERCENT_FRB,
FDT_COMMENT, FDT_TEMP_CELCIUS, FDT_TEMP_CELCIUS_RMK, FDT_FIELD_PH, FDT_FIELD_PH_RMK, DO,
DO_RMK, FDT_SPECIFIC_CONDUCTANCE, FDT_SPECIFIC_CONDUCTANCE_RMK, FDT_SALINITY,FDT_SALINITY_RMK, NITROGEN,RMK_00600,AMMONIA,
RMK_AMMONIA,NH3_DISS, RMK_00608, NH3_TOTAL, RMK_00610, PHOSPHORUS, RMK_00665, FECAL_COLI,
RMK_31616, E.COLI, RMK_ECOLI, ENTEROCOCCI, RMK_31649, CHLOROPHYLL, RMK_32211, SSC, SSC_RMK,
NITRATE, RMK_00620, CHLORIDE, RMK_00940, SULFATE_TOTAL, RMK_00945, SULFATE_DISS, RMK_00946,
STA_LV2_CODE, Latitude, Longitude, Majorbasincode, Majorbasinname, Basin, Subbasin, Huc6_Huc_8,
Huc6_Huc_8_Name, Huc6_Name, Huc6_Huc_12, Huc6_Huc_12_Name, Huc6_Vahu5, Huc6_Vahu6, STA_CBP_NAME) %>%
# Rebuild FDT_DATE_TIME as the m/d/yy HH:MM string format the 2018 pull uses
mutate(month=month(FDT_DATE_TIME),day=day(FDT_DATE_TIME),year=year(FDT_DATE_TIME),
time=substring(gsub('.* ([0-9]+)', '\\1' , as.character(FDT_DATE_TIME)),1,5),
FDT_DATE_TIME3 = paste(month,'/',day,'/',substring(year,3,4),' ',time,sep='')) %>%
mutate(FDT_DATE_TIME=FDT_DATE_TIME3)%>%
dplyr::select(-c(FDT_DATE_TIME3,month,day,year,time))
rfk$FDT_DATE_TIME2 <- as.POSIXct(as.character(rfk$FDT_DATE_TIME), format="%m/%d/%y %H:%M",tz='UTC')
# Append the rebuilt station to the 2018 conventionals
conventionals2 <- rbind(conventionals,rfk)
conventionals2$FDT_PERCENT_FRB <- as.integer(as.character(conventionals2$FDT_PERCENT_FRB))
# Eyeball that column classes agree before/after the bind
str(conventionals[60:67])
str(conventionals2[60:67])
write.csv(conventionals2,'data/conventionals08152018EVJ.csv',row.names = F)
# Add in Citizen data
# Bring in Roger's conventionals
conventionals <- read_csv('data/CONVENTIONALS_20171010.csv')
conventionals$FDT_DATE_TIME2 <- as.POSIXct(conventionals$FDT_DATE_TIME, format="%m/%d/%y %H:%M")
lakeStations <- readRDS('data/lakeStations.RDS')%>%
mutate(FDT_STA_ID=STATION_ID)
# Bring in Citizen data, Paula organized it to match up with Roger's data format
cit1 <- read_csv('data/Citmon_New_Reorg_1.csv') %>%
dplyr::select(-c(X1))
cit2 <- read_csv('data/Citmon_New_Reorg_2.csv') %>%
dplyr::select(-c(X1))
cit <- rbind(cit1,cit2)
# Citizen records are date-only (no time component)
cit$FDT_DATE_TIME2 <- as.POSIXct(cit$FDT_DATE_TIME, format="%m/%d/%Y")
# For lake app, get rid of non level 3 citizen data
# unfortunately, this is best done by hand.
write.csv(cit,'data/citdata.csv')
# Bring back in the clean dataset (no level 1 or 2 data included)
cit3 <- read.csv('data/citdata_EVJclean.csv')
# Ferrum College doesnt report time with measures, fix so can have same datetime format for all data
cit3$FDT_DATE_TIME <- as.character(cit3$FDT_DATE_TIME)
cit3$FDT_DATE_TIME2 <- as.POSIXct(as.character(cit3$FDT_DATE_TIME), format="%m/%d/%y %H:%M")
names(cit3)[c(66,63,61,60,56)] <- c("STA_CBP_NAME","Huc6_Huc_12_Name","Huc6_Name","Huc6_Huc_8_Name","Majorbasinname" ) # fix where arcGIS cut off full column names
# Figure out which AU citizen stations fall into to add to lakeStations
# 2016 Final AU's from X:\2016_Assessment\GIS_2016\StatewideAUs I copied locally, imported into GIS, and exported just the va_2016_aus_reservoir file
library(rgdal)
lakeAU <- readOGR('data','va_2016_aus_reservoir_prjWGS84')
lakeAU@proj4string
cit_shp <- cit# just use citizen sites, not full conventionals2 dataset
coordinates(cit_shp) <- ~Longitude+Latitude
proj4string(cit_shp) <- CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0 ") # WGS84 to start
# double check everything will work
identicalCRS(lakeAU,cit_shp)
# Point-in-polygon overlay: give each citizen site the AU polygon it falls
# inside; NA when the point hits zero or multiple polygons
cit_shp@data$ID305B_1 <- NA
for(i in 1:nrow(cit_shp@data)){
z <- lakeAU[cit_shp[i,],]
if(nrow(z@data) < 1 | nrow(z@data) > 1 ){
cit_shp@data$ID305B_1[i] <- NA
}else{
cit_shp@data$ID305B_1[i] <- as.character(z@data$ID305B)
}
}
# Check the broken ones in GIS
writeOGR(obj=cit_shp, dsn="data", layer="kindafixedcitdata", driver="ESRI Shapefile")
# see if paula's QA helped?
me <- readOGR('data','kindafixedcitdata')
paula <- readOGR('data/cit','kindafixedcitdata')
me@data %>%
mutate_all(as.character)==paula@data%>%
mutate_all(as.character)
as.character(me@data$ID305) == as.character(paula@data$ID305)
# No difference -- same result as what I came up with earlier
rm(paula)
rm(me)
# Figure out which AU citizen stations fall into to add to lakeStations
# 2016 Final AU's from X:\2016_Assessment\GIS_2016\StatewideAUs I copied locally, imported into GIS, and exported just the va_2016_aus_reservoir file
library(rgdal)
lakeAU <- readOGR('data','va_2016_aus_reservoir_prjWGS84')
lakeAU@proj4string
# don't lose lat/long data
cit3 <- mutate(cit3,lat=Latitude,long=Longitude)
cit_shp <- cit3# just use citizen sites, not full conventionals2 dataset
coordinates(cit_shp) <- ~long+lat
proj4string(cit_shp) <- CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0 ") # WGS84 to start
# double check everything will work
identicalCRS(lakeAU,cit_shp)
# Point-in-polygon overlay, as above: one AU per station or NA
cit_shp@data$ID305B_1 <- NA
for(i in 1:nrow(cit_shp@data)){
z <- lakeAU[cit_shp[i,],]
if(nrow(z@data) < 1 | nrow(z@data) > 1 ){
cit_shp@data$ID305B_1[i] <- NA
}else{
cit_shp@data$ID305B_1[i] <- as.character(z@data$ID305B)
}
}
# Filter out stations that don't match up with a Lake AU bc that means they aren't in fact in a lake
citCorrect <- cit_shp[!is.na(cit_shp@data$ID305B_1),]
cit4 <- filter(cit3,FDT_STA_ID %in% citCorrect@data$FDT_STA_ID)
# Add citizen lake stations to lakeStations.RDS
citFinal <- dplyr::select(citCorrect@data,FDT_STA_ID,ID305B_1,Latitude,Longitude) %>%
dplyr::rename(STATION_ID=FDT_STA_ID,DD_LAT=Latitude,DD_LONG=Longitude)
# Map column names
n1 <- unique(names(lakeStations))
n2 <- unique(names(citFinal))
# Columns lakeStations has but the citizen data lacks get added as NA
needToAdd <- n1[!(n1 %in% n2)]
citFinal[needToAdd] <- NA
# Double check everything in both
names(citFinal) %in% names(lakeStations)
# Reorder citizen columns to match lakeStations so rbind() lines up
citFinal <- dplyr::select(citFinal,STATION_ID, ID305B_1, ID305B_2, ID305B_3, DEPTH, REGION, STATYPE1, STATYPE2,
STATYPE3, DD_LAT, DD_LONG, WATERSHED, VAHU6, ID305B, WATER_NAME, dswc, CATEGORY_ID, TROPHIC_STATUS,
PUBLIC_LAKE, SEC187, SIG_LAKE, USE, OWNER, SIGLAKENAME, CYCLE, `Man-made Lake or Reservoir Name`, Location,
Chlorophyll_A_limit, TPhosphorus_limit, Assess_TYPE, FDT_STA_ID)
names(citFinal) == names(lakeStations)
# Just one record per station
citFinal1 <- citFinal[!duplicated(citFinal[,c('STATION_ID')]),] %>%
mutate(FDT_STA_ID = STATION_ID, ID305B = ID305B_1)
# Fix lake name
citFinal1$SIGLAKENAME[1] <- 'Smith Mountain Lake'
citFinal1$SIGLAKENAME[2] <- 'Smith Mountain Lake'
citFinal1$SIGLAKENAME[3] <- 'Smith Mountain Lake'
citFinal1$SIGLAKENAME[4] <- 'Smith Mountain Lake'
citFinal1$SIGLAKENAME[5] <- 'Leesville Reservoir'
citFinal1$SIGLAKENAME[6] <- 'Leesville Reservoir'
# finally combine and save
lakeStationsWithCit <- rbind(lakeStations,citFinal1)
saveRDS(lakeStationsWithCit,'data/lakeStationsWithCit.RDS')
# smash them together
cit5 <- dplyr::select(cit4,-c(lat,long))
cit5$FDT_DATE_TIME <- as.character(cit5$FDT_DATE_TIME)
conventionals2 <- rbind(conventionals,cit5)
str(conventionals[1:10])
str(conventionals2[1:10])
# fix data formats for specific columns
#conventionals2$FDT_DATE_TIME2 <- as.POSIXct(conventionals2$FDT_DATE_TIME, format="%m/%d/%y %H:%M")
conventionals2$FDT_DEPTH <- as.numeric(conventionals2$FDT_DEPTH)
conventionals2$FDT_PERCENT_FRB <- as.integer(conventionals2$FDT_PERCENT_FRB)
conventionals2$FDT_SPECIFIC_CONDUCTANCE <- as.numeric(conventionals2$FDT_SPECIFIC_CONDUCTANCE)
conventionals2$FDT_SALINITY <- as.numeric(conventionals2$FDT_SALINITY)
conventionals2$NITROGEN <- as.numeric(conventionals2$NITROGEN)
conventionals2$AMMONIA <- as.numeric(conventionals2$AMMONIA)
conventionals2$NH3_DISS <- as.numeric(conventionals2$NH3_DISS)
conventionals2$NH3_TOTAL <- as.numeric(conventionals2$NH3_TOTAL)
# NOTE(review): as.integer() truncates fractional values toward zero --
# confirm PHOSPHORUS and FECAL_COLI are integer-valued in the source,
# otherwise these two should be as.numeric().
conventionals2$PHOSPHORUS <- as.integer(conventionals2$PHOSPHORUS)
conventionals2$FECAL_COLI <- as.integer(conventionals2$FECAL_COLI)
conventionals2$E.COLI <- as.numeric(conventionals2$E.COLI)
conventionals2$ENTEROCOCCI <- as.numeric(conventionals2$ENTEROCOCCI)
conventionals2$CHLOROPHYLL <- as.numeric(conventionals2$CHLOROPHYLL)
conventionals2$SSC <- as.numeric(conventionals2$SSC)
conventionals2$NITRATE <- as.numeric(conventionals2$NITRATE)
conventionals2$CHLORIDE <- as.numeric(conventionals2$CHLORIDE)
conventionals2$SULFATE_TOTAL <- as.numeric(conventionals2$SULFATE_TOTAL)
conventionals2$SULFATE_DISS <- as.numeric(conventionals2$SULFATE_DISS)
write.csv(conventionals2,'data/conventionalsWITHCITIZEN.csv',row.names = F)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_line.R
\name{add_line}
\alias{add_line}
\title{Add a line to an \code{xyplot}}
\usage{
add_line(
intercept,
slope,
vline = NULL,
hline = NULL,
units = "native",
color = "red"
)
}
\arguments{
\item{intercept}{Numerical (optional). The intercept term for a line to plot.}
\item{slope}{Numerical (optional). The slope term for a line to plot.}

\item{color}{Character (optional). Sets the color of the line to be drawn.}
}
\description{
Include a line to an \code{xyplot} by either including a slope and intercept
term or by clicking twice on the \emph{Plots} pane in RStudio. After creating an
\code{xyplot}, either run \code{add_line()} with no arguments and then click
on the RStudio plot plane twice to draw a line OR include arguments for
\code{slope} and \code{intercept} to draw a specific line.
}
\examples{
data(cdc)
\dontrun{
# Add a line by clicking on the plot pane
xyplot(weight ~ height, data = cdc)
add_line() # Click the Plots pane twice in order to proceed.
}
# Specify a line using the slope and intercept arguments
xyplot(weight ~ height, data = cdc)
add_line(intercept = -180, slope = 165)
# Include a horizontal and/or vertical line
xyplot(weight ~ height, data = cdc)
add_line(vline = 1.6, hline = 100)
}
\seealso{
\code{\link{add_curve}}
}
|
/man/add_line.Rd
|
no_license
|
mobilizingcs/mobilizr
|
R
| false
| true
| 1,277
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_line.R
\name{add_line}
\alias{add_line}
\title{Add a line to an \code{xyplot}}
\usage{
add_line(
intercept,
slope,
vline = NULL,
hline = NULL,
units = "native",
color = "red"
)
}
\arguments{
\item{intercept}{Numerical (optional). The intercept term for a line to plot.}
\item{slope}{Numerical (optional). The slope term for a line to plot.}

\item{color}{Character (optional). Sets the color of the line to be drawn.}
}
\description{
Include a line to an \code{xyplot} by either including a slope and intercept
term or by clicking twice on the \emph{Plots} pane in RStudio. After creating an
\code{xyplot}, either run \code{add_line()} with no arguments and then click
on the RStudio plot plane twice to draw a line OR include arguments for
\code{slope} and \code{intercept} to draw a specific line.
}
\examples{
data(cdc)
\dontrun{
# Add a line by clicking on the plot pane
xyplot(weight ~ height, data = cdc)
add_line() # Click the Plots pane twice in order to proceed.
}
# Specify a line using the slope and intercept arguments
xyplot(weight ~ height, data = cdc)
add_line(intercept = -180, slope = 165)
# Include a horizontal and/or vertical line
xyplot(weight ~ height, data = cdc)
add_line(vline = 1.6, hline = 100)
}
\seealso{
\code{\link{add_curve}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{plot_prior_vs_posterior}
\alias{plot_prior_vs_posterior}
\title{Plot prior vs posterior distribution of parameters}
\usage{
plot_prior_vs_posterior(stanfitobjects)
}
\arguments{
\item{stanfitobjects}{list of stanfit objects}
}
\value{
Missing
}
\description{
Missing.
Also Todo: handling for only one item
}
\examples{
NULL
}
|
/man/plot_prior_vs_posterior.Rd
|
no_license
|
nikosbosse/epipredictr
|
R
| false
| true
| 423
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{plot_prior_vs_posterior}
\alias{plot_prior_vs_posterior}
\title{Plot prior vs posterior distribution of parameters}
\usage{
plot_prior_vs_posterior(stanfitobjects)
}
\arguments{
\item{stanfitobjects}{list of stanfit objects}
}
\value{
Missing
}
\description{
Missing.
Also Todo: handling for only one item
}
\examples{
NULL
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rbind.pages.R
\name{rbind.pages}
\alias{rbind.pages}
\title{Combine pages into a single data frame}
\usage{
rbind.pages(pages)
}
\arguments{
\item{pages}{a list of data frames, each representing a \emph{page} of data}
}
\description{
The \code{rbind.pages} function is used to combine a list of data frames into a single
data frame. This is often needed when working with a JSON API that limits the amount
of data per request. If we need more data than what fits in a single request, we need to
perform multiple requests that each retrieve a fragment of data, not unlike pages in a
book. In practice this is often implemented using a \code{page} parameter in the API. The
\code{rbind.pages} function can be used to combine these pages back into a single dataset.
}
\details{
The \code{\link{rbind.pages}} function generalizes \code{\link[base:rbind]{base::rbind}} and
\code{\link[plyr:rbind.fill]{plyr::rbind.fill}} with added support for nested data frames. Not every column
has to be present in each of the individual data frames; missing columns will be filled
with \code{NA} values.
}
\examples{
# Basic example
x <- data.frame(foo = rnorm(3), bar = c(TRUE, FALSE, TRUE))
y <- data.frame(foo = rnorm(2), col = c("blue", "red"))
rbind.pages(list(x, y))
\dontrun{
baseurl <- "http://projects.propublica.org/nonprofits/api/v1/search.json"
pages <- list()
for(i in 0:20){
mydata <- fromJSON(paste0(baseurl, "?order=revenue&sort_order=desc&page=", i))
message("Retrieving page ", i)
pages[[i+1]] <- mydata$filings
}
filings <- rbind.pages(pages)
nrow(filings)
colnames(filings)
}
}
|
/man/rbind.pages.Rd
|
permissive
|
Smudgerville/jsonlite
|
R
| false
| true
| 1,667
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rbind.pages.R
\name{rbind.pages}
\alias{rbind.pages}
\title{Combine pages into a single data frame}
\usage{
rbind.pages(pages)
}
\arguments{
\item{pages}{a list of data frames, each representing a \emph{page} of data}
}
\description{
The \code{rbind.pages} function is used to combine a list of data frames into a single
data frame. This is often needed when working with a JSON API that limits the amount
of data per request. If we need more data than what fits in a single request, we need to
perform multiple requests that each retrieve a fragment of data, not unlike pages in a
book. In practice this is often implemented using a \code{page} parameter in the API. The
\code{rbind.pages} function can be used to combine these pages back into a single dataset.
}
\details{
The \code{\link{rbind.pages}} function generalizes \code{\link[base:rbind]{base::rbind}} and
\code{\link[plyr:rbind.fill]{plyr::rbind.fill}} with added support for nested data frames. Not every column
has to be present in each of the individual data frames; missing columns will be filled
with \code{NA} values.
}
\examples{
# Basic example
x <- data.frame(foo = rnorm(3), bar = c(TRUE, FALSE, TRUE))
y <- data.frame(foo = rnorm(2), col = c("blue", "red"))
rbind.pages(list(x, y))
\dontrun{
baseurl <- "http://projects.propublica.org/nonprofits/api/v1/search.json"
pages <- list()
for(i in 0:20){
mydata <- fromJSON(paste0(baseurl, "?order=revenue&sort_order=desc&page=", i))
message("Retrieving page ", i)
pages[[i+1]] <- mydata$filings
}
filings <- rbind.pages(pages)
nrow(filings)
colnames(filings)
}
}
|
library(shiny)
library(tidyverse)
library(rjson)
library(ggvis)
library(dplyr)
library(xml2)
library(ggplot2)
library(readxl)
library(scales)
#####################
# SUPPORT FUNCTIONS #
#####################
# create a function to calculate eight indices, m-score, and final probability
# outputs a list
# Compute the Beneish M-score for one ticker.
#
# Reads the cleaned income statement, balance sheet, and cash flow JSON
# files from the working directory, derives the eight Beneish (1999)
# indices, combines them with the published model coefficients, and turns
# the M-score into an implied probability of earnings manipulation via the
# standard normal CDF.
#
# Args:
#   ticker: character scalar; key into each statement list ('t' = most
#           recent year, 't_1' = prior year).
# Returns:
#   A one-row data.frame with columns dsri, gmi, aqi, sgi, depi, sgai,
#   lvgi, tata, mscore, prob.
beneish = function(ticker="MMM") {
  is=rjson::fromJSON(file = "income_statement_clean.json")
  bs=rjson::fromJSON(file = "balance_sheet_clean.json")
  cf=rjson::fromJSON(file = "cash_flow_clean.json")
  # DSRI = (net receivables/sales in t=0) / (net receivables/sales in t=-1)
  dsri=(bs[[ticker]][['t']][['Net Receivables']]/is[[ticker]][['t']][['Total Revenue']])/
    (bs[[ticker]][['t_1']][['Net Receivables']]/is[[ticker]][['t_1']][['Total Revenue']])
  # GMI = ((sales-COGS)/sales) in t=-1 / ((sales-COGS)/sales) in t=0
  # FIX: the prior-year gross margin must be divided by prior-year (t_1)
  # revenue; the previous code divided by current-year (t) revenue.
  gmi= ((is[[ticker]][['t_1']][['Total Revenue']]-is[[ticker]][['t_1']][['Cost Of Revenue']])/is[[ticker]][['t_1']][['Total Revenue']])/
    ((is[[ticker]][['t']][['Total Revenue']]-is[[ticker]][['t']][['Cost Of Revenue']])/is[[ticker]][['t']][['Total Revenue']])
  # AQI = [1-(current assets+PPE)/total assets] in t=0 / same ratio in t=-1
  aqi= (1-(bs[[ticker]][['t']][['Total Current Assets']]+bs[[ticker]][['t']][['Property Plant Equipment']])/bs[[ticker]][['t']][['Total Assets']])/
    (1-(bs[[ticker]][['t_1']][['Total Current Assets']]+bs[[ticker]][['t_1']][['Property Plant Equipment']])/bs[[ticker]][['t_1']][['Total Assets']])
  # SGI = sales in t=0 / sales in t=-1
  sgi= is[[ticker]][['t']][['Total Revenue']]/is[[ticker]][['t_1']][['Total Revenue']]
  # DEPI = (depreciation/(PPE+depreciation)) in t=-1 / same ratio in t=0
  depi= (cf[[ticker]][['t_1']][['Depreciation']]/(bs[[ticker]][['t_1']][['Property Plant Equipment']]+cf[[ticker]][['t_1']][['Depreciation']]))/
    (cf[[ticker]][['t']][['Depreciation']]/(bs[[ticker]][['t']][['Property Plant Equipment']]+cf[[ticker]][['t']][['Depreciation']]))
  # SGAI = (SGA/sales) in t=0 / (SGA/sales) in t=-1
  sgai= (is[[ticker]][['t']][['Selling General Administrative']]/is[[ticker]][['t']][['Total Revenue']])/
    (is[[ticker]][['t_1']][['Selling General Administrative']]/is[[ticker]][['t_1']][['Total Revenue']])
  # LVGI = leverage ratio in t=0 / leverage ratio in t=-1; some tickers
  # report 'Long Term Debt And Capital Lease Obligation' instead of
  # 'Long Term Debt', so fall back to that key when the latter is absent.
  if(is.null(bs[[ticker]][['t_1']][['Long Term Debt']])) {
    lvgi=((bs[[ticker]][['t']][['Total Current Liabilities']]+bs[[ticker]][['t']][['Long Term Debt And Capital Lease Obligation']])/bs[[ticker]][['t']][['Total Assets']])/
      ((bs[[ticker]][['t_1']][['Total Current Liabilities']]+bs[[ticker]][['t_1']][['Long Term Debt And Capital Lease Obligation']])/(bs[[ticker]][['t_1']][['Total Assets']]))
  } else {
    lvgi=((bs[[ticker]][['t']][['Total Current Liabilities']]+bs[[ticker]][['t']][['Long Term Debt']])/(bs[[ticker]][['t']][['Total Assets']]))/
      ((bs[[ticker]][['t_1']][['Total Current Liabilities']]+bs[[ticker]][['t_1']][['Long Term Debt']])/bs[[ticker]][['t_1']][['Total Assets']])
  }
  # TATA = (income from continuing ops - cash from operations) / total assets
  tata= (is[[ticker]][['t']][['Net Income From Continuing Ops']] - cf[[ticker]][['t']][['Total Cash From Operating Activities']]) / bs[[ticker]][['t']][['Total Assets']]
  # Published Beneish 8-factor model coefficients
  intercept=-4.84
  c_dsri=0.92
  c_gmi=0.528
  c_aqi=0.404
  c_sgi=0.892
  c_depi=0.115
  c_sgai=-0.172
  c_lvgi=-0.327
  c_tata=4.679
  coefs=c(c_dsri,c_gmi,c_aqi,c_sgi,c_depi,c_sgai,c_lvgi,c_tata)
  vars=c( dsri, gmi, aqi, sgi, depi, sgai, lvgi, tata)
  mscore=intercept+sum(vars*coefs)
  # Implied probability of manipulation under the standard normal CDF
  prob=pnorm(mscore,mean=0,sd=1,lower.tail = T)
  all=data.frame(dsri, gmi, aqi, sgi, depi, sgai, lvgi, tata ,mscore,prob)
  return(all)
}
# create a reverse search function to match a ticker
# to the full company name
# Reverse lookup: given a full company name, return the matching ticker
# (the first column of the global `px` lookup table).
findtick <- function(company){
  hit <- px[["name"]] == company
  as.character(px[hit, 1])
}
# Load the cleaned financial-statement lists once at app start-up.
# NOTE(review): `is` shadows base::is() for the rest of this file.
is=rjson::fromJSON(file = "income_statement_clean.json")
bs=rjson::fromJSON(file = "balance_sheet_clean.json")
cf=rjson::fromJSON(file = "cash_flow_clean.json")
# Company lookup table used by findtick() and the sector table/plots
px = readxl::read_excel("px.xlsx", col_names = TRUE)
# Express the model probability in percent, rounded for display
px$prob = round(px$prob * 100, 3)
# Historical stock prices; one price column per ticker, plus Date
stocks = read.table("stonks.csv", sep = ',', header = TRUE)
stocks$Date = as.Date(stocks$Date)
# Build a small plotting data frame of Net Income by fiscal year for one
# ticker, read from the global income-statement list `is`.
income_finder = function(ticker) {
  # Fixed fiscal-year labels matching the four statements stored per ticker
  fiscal_years = c('FY20', 'FY19', 'FY18', 'FY17')
  # Pull 'Net Income' from each stored year; unlist() drops NULL entries,
  # mirroring how append() skips them, and unname() keeps default row names
  incomes = unname(unlist(lapply(is[[ticker]], function(stmt) stmt[['Net Income']])))
  # Two parallel vectors -> one data frame with columns `years` and `val`
  data.frame(years = fiscal_years, val = incomes)
}
# Build a small plotting data frame of Gross Profit by fiscal year for one
# ticker, read from the global income-statement list `is`.
GProfit_finder = function(ticker) {
  # Fixed fiscal-year labels matching the four statements stored per ticker
  fiscal_years = c('FY20', 'FY19', 'FY18', 'FY17')
  # Pull 'Gross Profit' from each stored year; unlist() drops NULL entries,
  # mirroring how append() skips them, and unname() keeps default row names
  profits = unname(unlist(lapply(is[[ticker]], function(stmt) stmt[['Gross Profit']])))
  # Columns keep the original names `years1` and `val1`
  data.frame(years1 = fiscal_years, val1 = profits)
}
# Build a small plotting data frame of Total Revenue by fiscal year for one
# ticker, read from the global income-statement list `is`.
TRevenue_finder = function(ticker) {
  # Fixed fiscal-year labels matching the four statements stored per ticker
  fiscal_years = c('FY20', 'FY19', 'FY18', 'FY17')
  # Pull 'Total Revenue' from each stored year; unlist() drops NULL entries,
  # mirroring how append() skips them, and unname() keeps default row names
  revenues = unname(unlist(lapply(is[[ticker]], function(stmt) stmt[['Total Revenue']])))
  # Columns keep the original names `years2` and `val2`
  data.frame(years2 = fiscal_years, val2 = revenues)
}
################
# SERVER LOGIC #
################
# Server logic: recompute the Beneish panel whenever a company is picked,
# and render the sector distribution, price history, financials plots, and
# the sortable sector table.
shinyServer(function(input, output) {
##############################
# On company change: look up its ticker, run the Beneish model, format
# each index to three decimals, and refresh all value boxes.
observeEvent(input$company, {
tick=findtick(input$company)
ben=beneish(tick)
ms= paste0(
as.character(format(
round(
ben$mscore, 3), nsmall = 3)))
prob= paste0(
as.character(format(
round(
ben$prob*100, 3), nsmall = 3)), "%")
dsri = paste0(
as.character(format(
round(
ben$dsri, 3), nsmall = 3)))
gmi = paste0(
as.character(format(
round(
ben$gmi, 3), nsmall = 3)))
aqi = paste0(
as.character(format(
round(
ben$aqi, 3), nsmall = 3)))
depi = paste0(
as.character(format(
round(
ben$depi, 3), nsmall = 3)))
sgai = paste0(
as.character(format(
round(
ben$sgai, 3), nsmall = 3)))
lvgi = paste0(
as.character(format(
round(
ben$lvgi, 3), nsmall = 3)))
sgi = paste0(
as.character(format(
round(
ben$sgi, 3), nsmall = 3)))
# One value box per index, with a short interpretation caption
output$mscore1 <- renderValueBox({valueBox(ms, HTML("<b>M-score</b><br>Model output for m-score."), icon = icon("bar-chart-o"), color = "blue")})
output$prob <- renderValueBox({valueBox(prob, HTML("<b>Probability</b><br>Implied probability of earnings manipulation."), icon = icon("bar-chart-o"), color = "blue")})
output$dsri <- renderValueBox({valueBox(dsri, HTML("<b>DSRI</b><br>A high DSRI could indicate revenue inflation."), icon = icon("bar-chart-o"), color = "blue")})
output$gmi <- renderValueBox({valueBox(gmi, HTML("<b>GMI</b><br>A high GMI indicates deteriorating margins."), icon = icon("bar-chart-o"), color = "blue")})
output$aqi <- renderValueBox({valueBox(aqi, HTML("<b>AQI</b><br>A high AQI could indicate increased cost deferrals."), icon = icon("bar-chart-o"), color = "blue")})
output$sgi <- renderValueBox({valueBox(sgi, HTML("<b>SGI</b><br>A high SGI indicates declining sales."), icon = icon("bar-chart-o"), color = "blue")})
output$depi <- renderValueBox({valueBox(depi, HTML("<b>DEPI</b><br>A high DEPI could indicate firms artifically extended assets useful lives."), icon = icon("bar-chart-o"), color = "blue")})
output$sgai <- renderValueBox({valueBox(sgai, HTML("<b>SGAI</b><br>A high SGAI could indicate lower administrative efficiency, incentivizing earnings inflation."), icon = icon("bar-chart-o"), color = "blue")})
output$lvgi <- renderValueBox({valueBox(lvgi, HTML("<b>LVGI</b><br>A high LVGI could indicate higher likelihood of defaulting on debt."), icon = icon("bar-chart-o"), color = "blue")})
})
# Density of manipulation probabilities across companies in the chosen sector
output$ben_hist <- renderPlot(
px %>%
filter(sector == input$sector) %>%
ggplot(aes(prob)) +
geom_density(aes(y = ..count..), color = "darkblue", fill="lightblue") +
xlab("Probability of earnings manipulation (%)") +
ylab("Number of companies") +
ggtitle("Distribution of probabilities")
)
# Price history for the selected company (column looked up by ticker)
output$stock_price = renderPlot(
ggplot(stocks,aes(y=get(findtick(input$company)),x=Date,group= 1)) +
geom_line() + scale_x_date(labels=date_format("%b-%d-%Y")) + labs(y="Price",title = "STOCK PRICES OVER THE YEARS")
)
# Bar charts of net income / gross profit / total revenue per fiscal year
output$net_income = renderPlot(
ggplot(income_finder(findtick(input$company)), aes(years, val, fill=years)) +
geom_bar(stat = "identity") +labs(y="Net Income",title = "NET INCOME PER YEAR")+
scale_y_continuous(labels = comma)+guides(fill=guide_legend(title="YEAR"))
)
output$gross_profit = renderPlot(
ggplot(GProfit_finder(findtick(input$company)), aes(years1, val1, fill=years1)) +
geom_bar(stat = "identity") +labs(y="Gross Profit",x="Years",title = "GROSS PROFIT PER YEAR")+
scale_y_continuous(labels = comma)+guides(fill=guide_legend(title="YEAR"))
)
output$total_revenue = renderPlot(
ggplot(TRevenue_finder(findtick(input$company)), aes(years2, val2, fill=years2)) +
geom_bar(stat = "identity") +labs(y="Total Revenue",x="Years",title = "TOTAL REVENUE PER YEAR")+
scale_y_continuous(labels = comma)+guides(fill=guide_legend(title="YEAR"))
)
# Sector table sorted by descending manipulation probability
output$mytable = DT::renderDataTable({
px %>%
filter(sector == input$sector) %>%
arrange(desc(prob))
})
})
|
/server.R
|
no_license
|
mjonelis/beneish-calculator-shiny
|
R
| false
| false
| 9,432
|
r
|
library(shiny)
library(tidyverse)
library(rjson)
library(ggvis)
library(dplyr)
library(xml2)
library(ggplot2)
library(readxl)
library(scales)
#####################
# SUPPORT FUNCTIONS #
#####################
# create a function to calculate eight indices, m-score, and final probability
# outputs a list
# Compute the Beneish M-score for one ticker.
#
# Reads the cleaned income statement, balance sheet, and cash flow JSON
# files from the working directory, derives the eight Beneish (1999)
# indices, combines them with the published model coefficients, and turns
# the M-score into an implied probability of earnings manipulation via the
# standard normal CDF.
#
# Args:
#   ticker: character scalar; key into each statement list ('t' = most
#           recent year, 't_1' = prior year).
# Returns:
#   A one-row data.frame with columns dsri, gmi, aqi, sgi, depi, sgai,
#   lvgi, tata, mscore, prob.
beneish = function(ticker="MMM") {
  is=rjson::fromJSON(file = "income_statement_clean.json")
  bs=rjson::fromJSON(file = "balance_sheet_clean.json")
  cf=rjson::fromJSON(file = "cash_flow_clean.json")
  # DSRI = (net receivables/sales in t=0) / (net receivables/sales in t=-1)
  dsri=(bs[[ticker]][['t']][['Net Receivables']]/is[[ticker]][['t']][['Total Revenue']])/
    (bs[[ticker]][['t_1']][['Net Receivables']]/is[[ticker]][['t_1']][['Total Revenue']])
  # GMI = ((sales-COGS)/sales) in t=-1 / ((sales-COGS)/sales) in t=0
  # FIX: the prior-year gross margin must be divided by prior-year (t_1)
  # revenue; the previous code divided by current-year (t) revenue.
  gmi= ((is[[ticker]][['t_1']][['Total Revenue']]-is[[ticker]][['t_1']][['Cost Of Revenue']])/is[[ticker]][['t_1']][['Total Revenue']])/
    ((is[[ticker]][['t']][['Total Revenue']]-is[[ticker]][['t']][['Cost Of Revenue']])/is[[ticker]][['t']][['Total Revenue']])
  # AQI = [1-(current assets+PPE)/total assets] in t=0 / same ratio in t=-1
  aqi= (1-(bs[[ticker]][['t']][['Total Current Assets']]+bs[[ticker]][['t']][['Property Plant Equipment']])/bs[[ticker]][['t']][['Total Assets']])/
    (1-(bs[[ticker]][['t_1']][['Total Current Assets']]+bs[[ticker]][['t_1']][['Property Plant Equipment']])/bs[[ticker]][['t_1']][['Total Assets']])
  # SGI = sales in t=0 / sales in t=-1
  sgi= is[[ticker]][['t']][['Total Revenue']]/is[[ticker]][['t_1']][['Total Revenue']]
  # DEPI = (depreciation/(PPE+depreciation)) in t=-1 / same ratio in t=0
  depi= (cf[[ticker]][['t_1']][['Depreciation']]/(bs[[ticker]][['t_1']][['Property Plant Equipment']]+cf[[ticker]][['t_1']][['Depreciation']]))/
    (cf[[ticker]][['t']][['Depreciation']]/(bs[[ticker]][['t']][['Property Plant Equipment']]+cf[[ticker]][['t']][['Depreciation']]))
  # SGAI = (SGA/sales) in t=0 / (SGA/sales) in t=-1
  sgai= (is[[ticker]][['t']][['Selling General Administrative']]/is[[ticker]][['t']][['Total Revenue']])/
    (is[[ticker]][['t_1']][['Selling General Administrative']]/is[[ticker]][['t_1']][['Total Revenue']])
  # LVGI = leverage ratio in t=0 / leverage ratio in t=-1; some tickers
  # report 'Long Term Debt And Capital Lease Obligation' instead of
  # 'Long Term Debt', so fall back to that key when the latter is absent.
  if(is.null(bs[[ticker]][['t_1']][['Long Term Debt']])) {
    lvgi=((bs[[ticker]][['t']][['Total Current Liabilities']]+bs[[ticker]][['t']][['Long Term Debt And Capital Lease Obligation']])/bs[[ticker]][['t']][['Total Assets']])/
      ((bs[[ticker]][['t_1']][['Total Current Liabilities']]+bs[[ticker]][['t_1']][['Long Term Debt And Capital Lease Obligation']])/(bs[[ticker]][['t_1']][['Total Assets']]))
  } else {
    lvgi=((bs[[ticker]][['t']][['Total Current Liabilities']]+bs[[ticker]][['t']][['Long Term Debt']])/(bs[[ticker]][['t']][['Total Assets']]))/
      ((bs[[ticker]][['t_1']][['Total Current Liabilities']]+bs[[ticker]][['t_1']][['Long Term Debt']])/bs[[ticker]][['t_1']][['Total Assets']])
  }
  # TATA = (income from continuing ops - cash from operations) / total assets
  tata= (is[[ticker]][['t']][['Net Income From Continuing Ops']] - cf[[ticker]][['t']][['Total Cash From Operating Activities']]) / bs[[ticker]][['t']][['Total Assets']]
  # Published Beneish 8-factor model coefficients
  intercept=-4.84
  c_dsri=0.92
  c_gmi=0.528
  c_aqi=0.404
  c_sgi=0.892
  c_depi=0.115
  c_sgai=-0.172
  c_lvgi=-0.327
  c_tata=4.679
  coefs=c(c_dsri,c_gmi,c_aqi,c_sgi,c_depi,c_sgai,c_lvgi,c_tata)
  vars=c( dsri, gmi, aqi, sgi, depi, sgai, lvgi, tata)
  mscore=intercept+sum(vars*coefs)
  # Implied probability of manipulation under the standard normal CDF
  prob=pnorm(mscore,mean=0,sd=1,lower.tail = T)
  all=data.frame(dsri, gmi, aqi, sgi, depi, sgai, lvgi, tata ,mscore,prob)
  return(all)
}
# create a reverse search function to match a ticker
# to the full company name
# Reverse lookup: given a full company name (matched against the "name"
# column of the global `px` table), return the ticker stored in px's first
# column, coerced to character.
findtick <- function(company){
  row_hits <- px['name'] == company
  as.character(px[row_hits, 1])
}
# App start-up: load the cleaned financial statements and pre-computed
# screen data once, so the render functions and helpers below can read
# them from the global environment.
# NOTE: `is` shadows base::is for the lifetime of the session (function
# calls to is() still resolve correctly, but the name is unfortunate).
is=rjson::fromJSON(file = "income_statement_clean.json")
bs=rjson::fromJSON(file = "balance_sheet_clean.json")
cf=rjson::fromJSON(file = "cash_flow_clean.json")
# Pre-computed per-ticker screen (ticker, name, sector, prob, ...)
px = readxl::read_excel("px.xlsx", col_names = TRUE)
# Convert the manipulation probability to a percentage for display
px$prob = round(px$prob * 100, 3)
# Historical price series: one Date column plus one column per ticker
stocks = read.table("stonks.csv", sep = ',', header = TRUE)
stocks$Date = as.Date(stocks$Date)
# Build a small data.frame of net income by fiscal year for one ticker,
# pulled from the global income-statement list `is`. Columns: years, val.
income_finder = function(ticker) {
  fiscal_years = c('FY20', 'FY19', 'FY18', 'FY17')
  # Collect 'Net Income' for every period stored under this ticker;
  # unlist() drops periods where the line item is absent, matching the
  # behavior of the previous append() loop.
  incomes = unlist(lapply(names(is[[ticker]]),
                          function(period) is[[ticker]][[period]][['Net Income']]))
  data.frame(years = fiscal_years, val = incomes)
}
# Build a small data.frame of gross profit by fiscal year for one ticker,
# pulled from the global income-statement list `is`. Columns: years1, val1.
GProfit_finder = function(ticker) {
  fiscal_years = c('FY20', 'FY19', 'FY18', 'FY17')
  # unlist() drops periods missing the line item, same as the old
  # append() loop.
  profits = unlist(lapply(names(is[[ticker]]),
                          function(period) is[[ticker]][[period]][['Gross Profit']]))
  data.frame(years1 = fiscal_years, val1 = profits)
}
# Build a small data.frame of total revenue by fiscal year for one ticker,
# pulled from the global income-statement list `is`. Columns: years2, val2.
TRevenue_finder = function(ticker) {
  fiscal_years = c('FY20', 'FY19', 'FY18', 'FY17')
  # unlist() drops periods missing the line item, same as the old
  # append() loop.
  revenues = unlist(lapply(names(is[[ticker]]),
                           function(period) is[[ticker]][[period]][['Total Revenue']]))
  data.frame(years2 = fiscal_years, val2 = revenues)
}
################
# SERVER LOGIC #
################
shinyServer(function(input, output) {
  ##############################
  # Recompute the Beneish indices and refresh every value box whenever a
  # new company is selected.
  observeEvent(input$company, {
    tick = findtick(input$company)
    ben = beneish(tick)
    # Format a number to exactly three decimal places as a string,
    # optionally appending a suffix. Replaces the nine identical
    # paste0(as.character(format(round(...)))) chains used previously.
    fmt3 = function(x, suffix = "") {
      paste0(format(round(x, 3), nsmall = 3), suffix)
    }
    ms = fmt3(ben$mscore)
    prob = fmt3(ben$prob * 100, "%")
    dsri = fmt3(ben$dsri)
    gmi = fmt3(ben$gmi)
    aqi = fmt3(ben$aqi)
    depi = fmt3(ben$depi)
    sgai = fmt3(ben$sgai)
    lvgi = fmt3(ben$lvgi)
    sgi = fmt3(ben$sgi)
    output$mscore1 <- renderValueBox({valueBox(ms, HTML("<b>M-score</b><br>Model output for m-score."), icon = icon("bar-chart-o"), color = "blue")})
    output$prob <- renderValueBox({valueBox(prob, HTML("<b>Probability</b><br>Implied probability of earnings manipulation."), icon = icon("bar-chart-o"), color = "blue")})
    output$dsri <- renderValueBox({valueBox(dsri, HTML("<b>DSRI</b><br>A high DSRI could indicate revenue inflation."), icon = icon("bar-chart-o"), color = "blue")})
    output$gmi <- renderValueBox({valueBox(gmi, HTML("<b>GMI</b><br>A high GMI indicates deteriorating margins."), icon = icon("bar-chart-o"), color = "blue")})
    output$aqi <- renderValueBox({valueBox(aqi, HTML("<b>AQI</b><br>A high AQI could indicate increased cost deferrals."), icon = icon("bar-chart-o"), color = "blue")})
    # BUG FIX: SGI = sales_t / sales_t-1, so a HIGH SGI means sales growth
    # (a manipulation incentive in the Beneish model), not declining sales
    # as the previous caption stated.
    output$sgi <- renderValueBox({valueBox(sgi, HTML("<b>SGI</b><br>A high SGI indicates strong sales growth."), icon = icon("bar-chart-o"), color = "blue")})
    output$depi <- renderValueBox({valueBox(depi, HTML("<b>DEPI</b><br>A high DEPI could indicate firms artificially extended assets' useful lives."), icon = icon("bar-chart-o"), color = "blue")})
    output$sgai <- renderValueBox({valueBox(sgai, HTML("<b>SGAI</b><br>A high SGAI could indicate lower administrative efficiency, incentivizing earnings inflation."), icon = icon("bar-chart-o"), color = "blue")})
    output$lvgi <- renderValueBox({valueBox(lvgi, HTML("<b>LVGI</b><br>A high LVGI could indicate higher likelihood of defaulting on debt."), icon = icon("bar-chart-o"), color = "blue")})
  })
  # Density of manipulation probabilities across companies in the selected
  # sector (y scaled to counts rather than density).
  output$ben_hist <- renderPlot(
    px %>%
      filter(sector == input$sector) %>%
      ggplot(aes(prob)) +
      geom_density(aes(y = ..count..), color = "darkblue", fill="lightblue") +
      xlab("Probability of earnings manipulation (%)") +
      ylab("Number of companies") +
      ggtitle("Distribution of probabilities")
  )
  # Historical price series: the selected company's ticker names a column
  # of `stocks`, fetched with get().
  output$stock_price = renderPlot(
    ggplot(stocks,aes(y=get(findtick(input$company)),x=Date,group= 1)) +
      geom_line() + scale_x_date(labels=date_format("%b-%d-%Y")) + labs(y="Price",title = "STOCK PRICES OVER THE YEARS")
  )
  # Bar charts of the three income-statement series by fiscal year.
  output$net_income = renderPlot(
    ggplot(income_finder(findtick(input$company)), aes(years, val, fill=years)) +
      geom_bar(stat = "identity") +labs(y="Net Income",title = "NET INCOME PER YEAR")+
      scale_y_continuous(labels = comma)+guides(fill=guide_legend(title="YEAR"))
  )
  output$gross_profit = renderPlot(
    ggplot(GProfit_finder(findtick(input$company)), aes(years1, val1, fill=years1)) +
      geom_bar(stat = "identity") +labs(y="Gross Profit",x="Years",title = "GROSS PROFIT PER YEAR")+
      scale_y_continuous(labels = comma)+guides(fill=guide_legend(title="YEAR"))
  )
  output$total_revenue = renderPlot(
    ggplot(TRevenue_finder(findtick(input$company)), aes(years2, val2, fill=years2)) +
      geom_bar(stat = "identity") +labs(y="Total Revenue",x="Years",title = "TOTAL REVENUE PER YEAR")+
      scale_y_continuous(labels = comma)+guides(fill=guide_legend(title="YEAR"))
  )
  # Sortable table of the selected sector, highest manipulation
  # probability first.
  output$mytable = DT::renderDataTable({
    px %>%
      filter(sector == input$sector) %>%
      arrange(desc(prob))
  })
})
|
library(camiller)
library(testthat)
# Shorthand formats: percent, dollar, and abbreviated-thousands ("dollark")
# labels built from interval-notation break strings.
test_that("brk_labels returns breaks formatted with shorthands", {
  percent_breaks <- c("[0,0.1]", "(0.1,0.25]", "(0.25,0.3333]")
  expect_equal(brk_labels(percent_breaks, format = "percent", mult_by = 100, round_digits = 0), c("0 to 10%", "10 to 25%", "25 to 33%"))
  dollar_breaks <- c("[100,120]", "(120,145]", "(145,200]")
  expect_equal(brk_labels(dollar_breaks, format = "dollar", sep = "-"), c("$100-$120", "$120-$145", "$145-$200"))
  dollark_breaks <- c("[1200,2000]", "(2000,9000]", "(9000,12800]")
  expect_equal(brk_labels(dollark_breaks, format = "dollark", mult_by = 1e-3), c("$1.2k to $2k", "$2k to $9k", "$9k to $12.8k"))
  expect_equal(brk_labels(dollark_breaks, format = "dollark", mult_by = 1e-3, round_digits = 0), c("$1k to $2k", "$2k to $9k", "$9k to $13k"))
})
# Arbitrary formatC-style format strings via custom = TRUE.
test_that("brk_labels returns breaks formatted with custom", {
  scientific_breaks <- c("[0.01,0.5]", "(0.55,125]")
  # Use TRUE rather than the reassignable shorthand T
  expect_equal(brk_labels(scientific_breaks, format = "e", custom = TRUE, digits = 1), c("1.0e-02 to 5.0e-01", "5.5e-01 to 1.2e+02"))
})
# Default labeling when no format shorthand is supplied.
test_that("brk_labels handles null format", {
  int_breaks <- c("[1,3.5]", "(3.5,5.2]")
  expect_equal(brk_labels(int_breaks, round_digits = 0), c("1 to 4", "4 to 5"))
  expect_equal(brk_labels(int_breaks), c("1 to 3.5", "3.5 to 5.2"))
})
|
/tests/testthat/test-brk_labels.R
|
permissive
|
camille-s/camiller
|
R
| false
| false
| 1,315
|
r
|
library(camiller)
library(testthat)
# Shorthand formats: percent, dollar, and abbreviated-thousands ("dollark")
# labels built from interval-notation break strings.
test_that("brk_labels returns breaks formatted with shorthands", {
  percent_breaks <- c("[0,0.1]", "(0.1,0.25]", "(0.25,0.3333]")
  expect_equal(brk_labels(percent_breaks, format = "percent", mult_by = 100, round_digits = 0), c("0 to 10%", "10 to 25%", "25 to 33%"))
  dollar_breaks <- c("[100,120]", "(120,145]", "(145,200]")
  expect_equal(brk_labels(dollar_breaks, format = "dollar", sep = "-"), c("$100-$120", "$120-$145", "$145-$200"))
  dollark_breaks <- c("[1200,2000]", "(2000,9000]", "(9000,12800]")
  expect_equal(brk_labels(dollark_breaks, format = "dollark", mult_by = 1e-3), c("$1.2k to $2k", "$2k to $9k", "$9k to $12.8k"))
  expect_equal(brk_labels(dollark_breaks, format = "dollark", mult_by = 1e-3, round_digits = 0), c("$1k to $2k", "$2k to $9k", "$9k to $13k"))
})
# Arbitrary formatC-style format strings via custom = TRUE.
test_that("brk_labels returns breaks formatted with custom", {
  scientific_breaks <- c("[0.01,0.5]", "(0.55,125]")
  # Use TRUE rather than the reassignable shorthand T
  expect_equal(brk_labels(scientific_breaks, format = "e", custom = TRUE, digits = 1), c("1.0e-02 to 5.0e-01", "5.5e-01 to 1.2e+02"))
})
# Default labeling when no format shorthand is supplied.
test_that("brk_labels handles null format", {
  int_breaks <- c("[1,3.5]", "(3.5,5.2]")
  expect_equal(brk_labels(int_breaks, round_digits = 0), c("1 to 4", "4 to 5"))
  expect_equal(brk_labels(int_breaks), c("1 to 3.5", "3.5 to 5.2"))
})
|
# Split a dataset into train/test partitions and write each partition to CSV.
#
# Args:
#   proportion.test  fraction of rows assigned to the test set
#   qdatafn          a data.frame/matrix, or the CSV filename to read; if
#                    NULL, a file dialog is shown on Windows (error elsewhere)
#   seed             optional RNG seed for a reproducible split
#   folder           directory used to resolve bare basenames (prompted for
#                    on Windows when NULL, else getwd())
#   qdata.trainfn    output filename for the training rows
#   qdata.testfn     output filename for the test rows
#
# Side effects: writes the two CSV files. No useful return value.
#
# BUG FIX (defaults): the old defaults used strsplit(qdatafn, ".csv") where
# "." is an unescaped regex any-char and every fragment was pasted, which
# mangles names containing ".csv"-like substrings. Stripping an anchored,
# escaped "\\.csv$" suffix is equivalent for ordinary names and correct for
# the rest.
get.test<-function( proportion.test,
                    qdatafn=NULL,
                    seed=NULL,
                    folder=NULL,
                    qdata.trainfn=paste(sub("\\.csv$","",qdatafn),"_train.csv",sep=""),
                    qdata.testfn=paste(sub("\\.csv$","",qdatafn),"_test.csv",sep="")){
  ## Select dataset interactively when none was supplied (Windows only)
  if (is.null(qdatafn)){
    if(.Platform$OS.type=="windows"){
      ## Adds to file filters to Cran R Filters table.
      Filters <- rbind(Filters,img=c("Imagine files (*.img)", "*.img"))
      Filters <- rbind(Filters,csv=c("Comma-delimited files (*.csv)", "*.csv"))
      qdatafn <- choose.files(caption="Select data file", filters = Filters["csv",], multi = FALSE)
      if(is.null(qdatafn)){stop("")}
    }else{stop("you must provide qdatafn")}
  }
  ## check that qdata.trainfn and qdata.testfn are filenames, not data objects
  if(is.matrix(qdata.trainfn) || is.data.frame(qdata.trainfn)){
    stop("in the function get.test() 'qdata.trainfn' must be the filename for the new training dataset")}
  ## BUG FIX: this check previously re-tested qdata.trainfn, so a
  ## matrix/data.frame passed as qdata.testfn slipped through unchecked.
  if(is.matrix(qdata.testfn) || is.data.frame(qdata.testfn)){
    stop("in the function get.test() 'qdata.testfn' must be the filename for the new test dataset")}
  ## Resolve `folder`, prompting once on Windows when it is NULL; later
  ## calls reuse the already-resolved value.
  resolve.folder<-function(folder){
    if(is.null(folder)){
      if(.Platform$OS.type=="windows"){
        folder<-choose.dir(default=getwd(), caption="Select directory")
      }else{
        folder<-getwd()}
    }
    folder
  }
  ## Check if file name is full path or basename; prefix bare basenames
  ## with the (possibly prompted-for) folder.
  if(!is.matrix(qdatafn) && !is.data.frame(qdatafn)){
    if(identical(basename(qdatafn),qdatafn)){
      folder<-resolve.folder(folder)
      qdatafn<-file.path(folder,qdatafn)
    }
  }
  if(identical(basename(qdata.trainfn),qdata.trainfn)){
    folder<-resolve.folder(folder)
    qdata.trainfn<-file.path(folder,qdata.trainfn)
  }
  if(identical(basename(qdata.testfn),qdata.testfn)){
    folder<-resolve.folder(folder)
    qdata.testfn<-file.path(folder,qdata.testfn)
  }
  ## Read in data (or use the supplied object directly)
  if(is.matrix(qdatafn) || is.data.frame(qdatafn)){
    qdata<-qdatafn
  }else{
    qdata<-read.table(file=qdatafn,sep=",",header=TRUE,check.names=FALSE)}
  if(!is.null(seed)){
    set.seed(seed)}
  ## Sample row indices for the training partition (seq_len is safe for
  ## zero-row input, unlike 1:nrow)
  train<-sample(seq_len(nrow(qdata)),round(nrow(qdata)*(1-proportion.test)))
  qdata.train<-qdata[train,]
  write.table(qdata.train, file = qdata.trainfn, sep=",",append = FALSE,row.names=FALSE)
  if(nrow(qdata.train)<nrow(qdata)){
    ## BUG FIX: qdata[-train,] returns ZERO rows when train is empty
    ## (e.g. proportion.test=1); setdiff() yields the correct complement.
    qdata.test<-qdata[setdiff(seq_len(nrow(qdata)),train),]
    write.table(qdata.test, file = qdata.testfn, sep=",",append = FALSE,row.names=FALSE)
  }
}
|
/R/get.test.R
|
no_license
|
cran/ModelMap
|
R
| false
| false
| 2,698
|
r
|
# Split a dataset into train/test partitions and write each partition to CSV.
#
# Args:
#   proportion.test  fraction of rows assigned to the test set
#   qdatafn          a data.frame/matrix, or the CSV filename to read; if
#                    NULL, a file dialog is shown on Windows (error elsewhere)
#   seed             optional RNG seed for a reproducible split
#   folder           directory used to resolve bare basenames (prompted for
#                    on Windows when NULL, else getwd())
#   qdata.trainfn    output filename for the training rows
#   qdata.testfn     output filename for the test rows
#
# Side effects: writes the two CSV files. No useful return value.
#
# BUG FIX (defaults): the old defaults used strsplit(qdatafn, ".csv") where
# "." is an unescaped regex any-char and every fragment was pasted, which
# mangles names containing ".csv"-like substrings. Stripping an anchored,
# escaped "\\.csv$" suffix is equivalent for ordinary names and correct for
# the rest.
get.test<-function( proportion.test,
                    qdatafn=NULL,
                    seed=NULL,
                    folder=NULL,
                    qdata.trainfn=paste(sub("\\.csv$","",qdatafn),"_train.csv",sep=""),
                    qdata.testfn=paste(sub("\\.csv$","",qdatafn),"_test.csv",sep="")){
  ## Select dataset interactively when none was supplied (Windows only)
  if (is.null(qdatafn)){
    if(.Platform$OS.type=="windows"){
      ## Adds to file filters to Cran R Filters table.
      Filters <- rbind(Filters,img=c("Imagine files (*.img)", "*.img"))
      Filters <- rbind(Filters,csv=c("Comma-delimited files (*.csv)", "*.csv"))
      qdatafn <- choose.files(caption="Select data file", filters = Filters["csv",], multi = FALSE)
      if(is.null(qdatafn)){stop("")}
    }else{stop("you must provide qdatafn")}
  }
  ## check that qdata.trainfn and qdata.testfn are filenames, not data objects
  if(is.matrix(qdata.trainfn) || is.data.frame(qdata.trainfn)){
    stop("in the function get.test() 'qdata.trainfn' must be the filename for the new training dataset")}
  ## BUG FIX: this check previously re-tested qdata.trainfn, so a
  ## matrix/data.frame passed as qdata.testfn slipped through unchecked.
  if(is.matrix(qdata.testfn) || is.data.frame(qdata.testfn)){
    stop("in the function get.test() 'qdata.testfn' must be the filename for the new test dataset")}
  ## Resolve `folder`, prompting once on Windows when it is NULL; later
  ## calls reuse the already-resolved value.
  resolve.folder<-function(folder){
    if(is.null(folder)){
      if(.Platform$OS.type=="windows"){
        folder<-choose.dir(default=getwd(), caption="Select directory")
      }else{
        folder<-getwd()}
    }
    folder
  }
  ## Check if file name is full path or basename; prefix bare basenames
  ## with the (possibly prompted-for) folder.
  if(!is.matrix(qdatafn) && !is.data.frame(qdatafn)){
    if(identical(basename(qdatafn),qdatafn)){
      folder<-resolve.folder(folder)
      qdatafn<-file.path(folder,qdatafn)
    }
  }
  if(identical(basename(qdata.trainfn),qdata.trainfn)){
    folder<-resolve.folder(folder)
    qdata.trainfn<-file.path(folder,qdata.trainfn)
  }
  if(identical(basename(qdata.testfn),qdata.testfn)){
    folder<-resolve.folder(folder)
    qdata.testfn<-file.path(folder,qdata.testfn)
  }
  ## Read in data (or use the supplied object directly)
  if(is.matrix(qdatafn) || is.data.frame(qdatafn)){
    qdata<-qdatafn
  }else{
    qdata<-read.table(file=qdatafn,sep=",",header=TRUE,check.names=FALSE)}
  if(!is.null(seed)){
    set.seed(seed)}
  ## Sample row indices for the training partition (seq_len is safe for
  ## zero-row input, unlike 1:nrow)
  train<-sample(seq_len(nrow(qdata)),round(nrow(qdata)*(1-proportion.test)))
  qdata.train<-qdata[train,]
  write.table(qdata.train, file = qdata.trainfn, sep=",",append = FALSE,row.names=FALSE)
  if(nrow(qdata.train)<nrow(qdata)){
    ## BUG FIX: qdata[-train,] returns ZERO rows when train is empty
    ## (e.g. proportion.test=1); setdiff() yields the correct complement.
    qdata.test<-qdata[setdiff(seq_len(nrow(qdata)),train),]
    write.table(qdata.test, file = qdata.testfn, sep=",",append = FALSE,row.names=FALSE)
  }
}
|
# Fuzzer-generated regression input (RcppDeepState/libFuzzer): exercises the
# internal C++ routine palm:::euc_distances with zero-length x1/x2/y1 and a
# y2 containing subnormal doubles and NaNs. The purpose is crash/valgrind
# detection, not asserting a specific numeric result.
testlist <- list(x1 = numeric(0), x2 = numeric(0), y1 = numeric(0), y2 = c(7.29112026763647e-304, NaN, 4.77831053068348e-299, NaN))
# do.call spreads the list entries as the routine's arguments
result <- do.call(palm:::euc_distances,testlist)
# Print the structure of whatever comes back for inspection
str(result)
|
/palm/inst/testfiles/euc_distances/libFuzzer_euc_distances/euc_distances_valgrind_files/1612969159-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 193
|
r
|
# Fuzzer-generated regression input (RcppDeepState/libFuzzer): exercises the
# internal C++ routine palm:::euc_distances with zero-length x1/x2/y1 and a
# y2 containing subnormal doubles and NaNs. The purpose is crash/valgrind
# detection, not asserting a specific numeric result.
testlist <- list(x1 = numeric(0), x2 = numeric(0), y1 = numeric(0), y2 = c(7.29112026763647e-304, NaN, 4.77831053068348e-299, NaN))
# do.call spreads the list entries as the routine's arguments
result <- do.call(palm:::euc_distances,testlist)
# Print the structure of whatever comes back for inspection
str(result)
|
\name{rLSreadVarStartPoint}
\alias{rLSreadVarStartPoint}
\title{Provide initial values for variables from a file.}
\description{
R interface function for LINDO API function \code{LSreadVarStartPoint}. For more information,
please refer to LINDO API User Manual.
}
\usage{
rLSreadVarStartPoint(model,pszFname)
}
\arguments{
\item{model}{A LINDO API model object, returned by \code{\link{rLScreateModel}}.}
\item{pszFname}{The name of the file from which to read the starting values.}
}
\value{
An R list object with components:
\item{ErrorCode}{Zero if successful, nonzero otherwise.}
}
\references{
LINDO SYSTEMS home page at www.lindo.com
}
\seealso{
\code{\link{rLSloadVarStartPoint}}
}
|
/man/rLSreadVarStartPoint.Rd
|
no_license
|
cran/rLindo
|
R
| false
| false
| 734
|
rd
|
\name{rLSreadVarStartPoint}
\alias{rLSreadVarStartPoint}
\title{Provide initial values for variables from a file.}
\description{
R interface function for LINDO API function \code{LSreadVarStartPoint}. For more information,
please refer to LINDO API User Manual.
}
\usage{
rLSreadVarStartPoint(model,pszFname)
}
\arguments{
\item{model}{A LINDO API model object, returned by \code{\link{rLScreateModel}}.}
\item{pszFname}{The name of the file from which to read the starting values.}
}
\value{
An R list object with components:
\item{ErrorCode}{Zero if successful, nonzero otherwise.}
}
\references{
LINDO SYSTEMS home page at www.lindo.com
}
\seealso{
\code{\link{rLSloadVarStartPoint}}
}
|
# NOTE(review): hard-coded absolute path makes the script non-portable;
# run from the project directory (or use a relative path) instead.
setwd("/Users/ellen/BMI215/HW2")
library(plyr)
library(ggplot2)
#------------------------------------------------------------------------------------------
# GENERAL IDEA
#------------------------------------------------------------------------------------------
# The point of this assignment is to build a "side-effect" profile for cholesterol drugs
# to predict new drug-drug interactions.
# Broader implication: can identify and study new, potentially dangerous interactions.
# Can also determine which drug combinations could complicate the symptoms of those
# with high cholesterol. Finally, can potentially find drug combinations with a similar beneficial
# effect as a cholesterol drug.
# Methods:
# 1) First figure out which adverse events are enriched for cholesterol drugs
# 2) Use feature selection to identify the 5 most informative adverse events for predicting
#    whether a drug is a cholesterol drug or not
# 3) Use model to predict drug-drug interactions causing cholesterol-related effects
# 4) Inputs are non-cholesterol drugs, so we can be sure that any hits are due to interaction effects
#    rather than side effects of a known cholesterol drugs in the pair
# Dataframe with 3 columns:
#   drug - drug names (string)
#   event - name of adverse event (AE) a drug was reported with (string)
#   freq - fraction of reports for the drug that list the event (float)
aeFreqs <- read.csv("./single_drug_event_frequencies.csv", header=T, as.is=TRUE)
names(aeFreqs) <- c("drug", "event", "freq")
# List/vector of cholesterol drug names (strings)
cholDrugs <- scan("./cholesterol_drugs.txt", what="ch", sep="\n")
# Character vector of the unique adverse events (AEs) reported with at
# least one cholesterol drug (the old comment wrongly described this as a
# two-column dataframe; that description belongs to cholAECounts below)
cholAEs <- unique(aeFreqs$event[aeFreqs$drug %in% cholDrugs])
#------------------------------------------------------------------------------------------
# 1.1 - FILTER ADVERSE EVENTS
#------------------------------------------------------------------------------------------
# Dataframe where 1st column: all AEs reported with a cholesterol drug
# and 2nd column: # of different cholesterol drugs co-occurring with that AE
cholAECounts <- count(aeFreqs$event[aeFreqs$drug %in% cholDrugs])
names(cholAECounts) <- c("event", "numdrugs")
# String of AEs that co-occur with at least 5 different cholesterol drugs
commonCholAEs <- cholAECounts$event[cholAECounts$numdrugs>=5]
commonCholAEs
# Dataframe where 1st column: all AEs reported with a non-cholesterol drug
# and 2nd column: # of different non-cholesterol drugs co-occurring with that AE
nonCholAECounts <- count(aeFreqs$event[!aeFreqs$drug %in% cholDrugs])
names(nonCholAECounts) <- c("event", "numdrugs")
# String of AEs that co-occur with at least 5 different non-cholesterol drugs
commonNonCholAEs <- nonCholAECounts$event[nonCholAECounts$numdrugs>=5]
commonNonCholAEs
# String of AEs that co-occur with at least 5 different cholesterol
# and 5 different non-cholesterol drugs
filteredAEs <- intersect(commonCholAEs, commonNonCholAEs)
filteredAEs
# Build a 2x2 contingency table for one adverse event: drug class
# (cholesterol vs. other, from the globals aeFreqs/cholDrugs) against
# binned report frequency (>0.01 vs <=0.01).
# i indexes into the event-name vector AEs.
contingencyTable <- function(i, AEs) {
  target_event <- AEs[i]
  chol_freqs <- subset(aeFreqs, event %in% target_event & drug %in% cholDrugs)$freq
  other_freqs <- subset(aeFreqs, event %in% target_event & !drug %in% cholDrugs)$freq
  # Bin the raw event frequencies into two labels (<=0.01, >0.01)
  chol_bins <- rep("freq>0.01", length(chol_freqs))
  chol_bins[chol_freqs <= 0.01] <- "freq<=0.01"
  other_bins <- rep("freq>0.01", length(other_freqs))
  other_bins[other_freqs <= 0.01] <- "freq<=0.01"
  # Assemble the 2x2 table with fixed row/column ordering
  drug_class <- c(rep("cholesterol drugs", length(chol_bins)), rep("other drugs", length(other_bins)))
  table(factor(drug_class, levels = c("cholesterol drugs", "other drugs")),
        factor(c(chol_bins, other_bins), levels = c("freq>0.01", "freq<=0.01")))
}
# Accumulators for the three tests, one row per filtered adverse event.
# NOTE(review): growing data.frames with rbind() inside a loop is O(n^2);
# fine at this scale, but preallocating would be cleaner.
tTest<-data.frame()
mannWhitney<-data.frame()
fisher<-data.frame()
for(i in 1:length(filteredAEs)){
# Generate two vectors containing frequencies that the current AE was reported with each drug
cholesterol <- subset(aeFreqs, event %in% filteredAEs[i] & drug %in% cholDrugs)$freq
notCholesterol <- subset(aeFreqs, event %in% filteredAEs[i] & !drug %in% cholDrugs)$freq
# Perform t-test (Welch, unequal variances by default) and store results
result <- t.test(cholesterol, notCholesterol)
tTest <- rbind(tTest, c(result$p.value, result$estimate[1], result$estimate[2]))
# Perform Mann-Whitney test. NOTE(review): correct=T is wilcox.test's
# CONTINUITY correction for the normal approximation (and is already the
# default), not a Bonferroni multiple-testing adjustment as the original
# comment claimed -- no multiplicity correction is applied here.
result <- wilcox.test(cholesterol, notCholesterol, correct=T)
mannWhitney <- rbind(mannWhitney, c(result$p.value, median(cholesterol), median(notCholesterol)))
# Perform Fisher's exact test on the binned 2x2 table
result <- fisher.test(contingencyTable(i, filteredAEs))
fisher <- rbind(fisher, c(result$p.value, result$estimate))
}
names(tTest) = c("pVal","meanOccurrenceInCholDrugs","meanOccurrenceInNonCholDrugs")
names(mannWhitney) = c("pVal","medianOccurrenceInCholDrugs","medianOccurrenceInNonCholDrugs")
names(fisher) = c("pVal","OR")
#------------------------------------------------------------------------------------------
# 1.1 - REPORT EVENTS MOST ENRICHED and DIMINISHED FOR CHOLESTEROL DRUGS
#------------------------------------------------------------------------------------------
# For each of the three tests performed, report the 10 events most enriched for cholesterol drugs
# as well as the 10 events most diminished for cholesterol drugs, based on lowest p-value.
# The strtoi(row.names(...)) trick maps rows back to filteredAEs indices;
# it relies on the default integer row names assigned during rbind().
# for student's t-test (assuming unequal variances)
tTest <- tTest[order(tTest$pVal),]
enriched_tTest <- tTest[tTest$meanOccurrenceInCholDrugs > tTest$meanOccurrenceInNonCholDrugs,][1:10,]
enriched_tTest$event <- filteredAEs[strtoi(row.names(enriched_tTest))]
diminished_tTest <- tTest[tTest$meanOccurrenceInCholDrugs < tTest$meanOccurrenceInNonCholDrugs,][1:10,]
diminished_tTest$event <- filteredAEs[strtoi(row.names(diminished_tTest))]
# for the Mann-Whitney test (see continuity-correction note above)
mannWhitney <- mannWhitney[order(mannWhitney$pVal),]
enriched_mannWhitney <- mannWhitney[mannWhitney$medianOccurrenceInCholDrugs > mannWhitney$medianOccurrenceInNonCholDrugs,][1:10,]
enriched_mannWhitney$event <- filteredAEs[strtoi(row.names(enriched_mannWhitney))]
diminished_mannWhitney <- mannWhitney[mannWhitney$medianOccurrenceInCholDrugs < mannWhitney$medianOccurrenceInNonCholDrugs,][1:10,]
diminished_mannWhitney$event <- filteredAEs[strtoi(row.names(diminished_mannWhitney))]
# for Fisher's exact test (infinite odds ratios are dropped first)
fisher <- fisher[order(fisher$pVal),]
fisher <- fisher[is.finite(fisher$OR),]
enriched_fisher <- fisher[fisher$OR>1,][1:10,]
enriched_fisher$event <- filteredAEs[strtoi(row.names(enriched_fisher))]
diminished_fisher <- fisher[fisher$OR<1,][1:10,]
diminished_fisher$event <- filteredAEs[strtoi(row.names(diminished_fisher))]
#------------------------------------------------------------------------------------------
# 1.2 - PLOT EVENT FREQUENCY DISTRIBUTIONS
#------------------------------------------------------------------------------------------
# For the top 2 events enriched in cholesterol drugs and the top 2 events diminished in cholesterol drugs
# (based on lowest Mann-Whitney p-value), plot event frequency distributions for cholesterol drugs and other drugs.
# Inputs: the index i of the adverse event in aeFreqs that is to be plotted,
# and a boolean indicating whether or not the event is enriched
# Output: frequency distribution for an adverse event and its co-occurrence with cholesterol and other drugs
# Plot the event-frequency density for cholesterol vs. other drugs for the
# i-th most enriched (enriched = TRUE) or most diminished (enriched = FALSE)
# adverse event from the Mann-Whitney ranking (globals enriched_mannWhitney
# / diminished_mannWhitney, aeFreqs, cholDrugs).
plotMannWhitneyDist <- function(i, enriched) {
  # Pick the event name and the direction label for the title
  if (enriched) {
    ae <- enriched_mannWhitney$event[i]
    direction <- "Enriched"
  } else {
    ae <- diminished_mannWhitney$event[i]
    direction <- "Diminished"
  }
  chol <- subset(aeFreqs, event %in% ae & drug %in% cholDrugs)$freq
  other <- subset(aeFreqs, event %in% ae & !drug %in% cholDrugs)$freq
  title <- paste("Event Frequency Distribution for\n", ae, "\n#", i,
                 paste("Event", direction, "in Cholesterol Drugs"))
  # Long-format frame: one row per observed frequency, tagged by drug class
  data <- data.frame(
    drugtype = factor(c(rep("Cholesterol Drugs", length(chol)),
                        rep("Other Drugs", length(other)))),
    frequency = c(chol, other)
  )
  ggplot(data, aes(x = frequency, colour = drugtype)) +
    geom_density() +
    ggtitle(title)
}
# Plot freq distributions
# NOTE(review): these bare expressions (and the bare paste() calls below)
# only display when run interactively; wrap them in print() if the script
# is executed via source()/Rscript. Also, T/F shorthands for TRUE/FALSE
# are reassignable and best avoided.
plotMannWhitneyDist(1, T) # for most enriched
plotMannWhitneyDist(2, T) # for 2nd most enriched
plotMannWhitneyDist(1, F) # for most diminished
plotMannWhitneyDist(2, F) # for second most diminished
#------------------------------------------------------------------------------------------
# 1.3 - 2X2 CONTINGENCY TABLES
#------------------------------------------------------------------------------------------
# Print out contingency tables for top two enriched and top two diminished events
# used in Fisher's test (here the event-name vectors themselves are passed
# as the AEs argument, so index 1/2 selects within them)
paste("Contingency Table for",enriched_fisher$event[1])
print(contingencyTable(1, enriched_fisher$event))
paste("Contingency Table for",enriched_fisher$event[2])
print(contingencyTable(2, enriched_fisher$event))
paste("Contingency Table for",diminished_fisher$event[1])
print(contingencyTable(1, diminished_fisher$event))
paste("Contingency Table for",diminished_fisher$event[2])
print(contingencyTable(2, diminished_fisher$event))
#------------------------------------------------------------------------------------------
# 1.4 - PLOT P-VALUE VS. ODDS RATIO
#------------------------------------------------------------------------------------------
# Plot the Fisher's exact test p-values vs odds ratios for each adverse event
ggplot(data=fisher, aes(x=OR, y=pVal, group=1)) + geom_point() + ggtitle("Fisher's Exact Test:\np-value vs odds ratio")
#------------------------------------------------------------------------------------------
# 1.5 - ANALYZING ENRICHED EVENTS FOR PRESCRIPTION BIAS AND DESIRED SIDE EFFECTS
#------------------------------------------------------------------------------------------
# Print out top 30 most enriched events for cholesterol drugs from Mann-Whitney data
top30enriched_mannWhitney <- mannWhitney[mannWhitney$medianOccurrenceInCholDrugs > mannWhitney$medianOccurrenceInNonCholDrugs,][1:30,]
top30enrichedAEs <- filteredAEs[strtoi(row.names(top30enriched_mannWhitney))]
for(i in 1:length(top30enrichedAEs)){
print(top30enrichedAEs[i])
}
#------------------------------------------------------------------------------------------
# 2.1 - ASSEMBLING SIDE-EFFECT PROFILE FOR CHOLESTEROL DRUGS
#------------------------------------------------------------------------------------------
# We filter features down to the top 5 events most associated with cholesterol drugs
# (according to Fisher's test), and we fit a logistic regression model to the data,
# using these 5 events as predictors for the outcome variable, whether the drug is
# a cholesterol drug or not.
# Vector of top 5 events most associated with cholesterol drugs (in either direction) based
# on Fisher's test (fisher is already sorted by p-value above)
top5AEsByFisher <- filteredAEs[strtoi(row.names(fisher[1:5,]))]
# Input: 1) Dataframe with at least the following 3 columns:
#   "drug": drug names (string)
#   "event": adverse event names (string)
#   "freq": frequency of co-occurence between a drug and adverse event (float)
# 2) Vector containing all adverse events (strings)
# 3) Dataframe column containing all drugs
# 4) For labeled data, a vector containing all sample names (strings) that should be labeled as positive
#    If this is an empty list, does not label the data and instead retains a column of drug names.
# Output: Dataframe with drugs as rows and events as columns,
# with a binary output "Y" column
# Convert long-format (drug, event, freq) data into a samples x features
# matrix: one row per drug in Y, one column per event in `events`, plus
# either a binary label column Y (when `positives` is non-empty) or the
# drug-name column (when it is empty). Missing drug/event pairs become 0.
sampleFeatureFormat <- function(data, events, Y, positives) {
result = data.frame(Y)
# For each feature, merge a column of frequencies into the data frame
# NOTE(review): merge() returns its rows sorted by the key (drug name),
# while `result` keeps Y's original row order; if Y is not already sorted
# by drug, the freq columns could be misaligned with the drug column --
# verify that unique(...$drug) arrives sorted, or sort Y first.
# NOTE(review): this also assumes each (drug, event) pair appears at most
# once in `data`, so the merged column has exactly nrow(Y) rows.
for(i in 1:length(events)){
column <- data[data$event %in% events[i],]
column <- merge(Y, column, all=T)
result[,i+1] <- c(column$freq)
}
# make.names() turns event strings into syntactic column names (spaces
# become dots), matching the hard-coded terms used in the glm formula
names(result)=c("drug", make.names(events))
result[is.na(result)] <- 0 # Add zeros for missing values
# If positive label names are provided, label
# data. Otherwise, retain a column with the drug names
if(length(positives)>0) {
result$Y[result$drug %in% positives] <- 1
result$Y[!result$drug %in% positives] <- 0
result$drug <- NULL
}
result
}
Y <- data.frame(drug=unique(aeFreqs$drug)) # Dataframe column of all possible drugs
trainingData <- sampleFeatureFormat(aeFreqs, top5AEsByFisher, Y, cholDrugs)
# Build logistic regression model.
# NOTE(review): the formula hard-codes the five event column names; it
# silently assumes top5AEsByFisher resolves (via make.names) to exactly
# these columns -- confirm before re-running on regenerated data.
glmModel <- glm(formula = Y ~ RHABDOMYOLYSIS + MUSCULAR.WEAKNESS + BLOOD.CREATINE.PHOSPHOKINASE.INCREASED + BLOOD.TRIGLYCERIDES.INCREASED + MUSCLE.SPASMS, data = trainingData, family=binomial(link="logit"))
summary(glmModel)
#------------------------------------------------------------------------------------------
# 2.2 - VALIDATING SIDE-EFFECT PROFILE FOR CHOLESTEROL DRUGS AND AUC CURVE
#------------------------------------------------------------------------------------------
# Dataframe with 3 columns:
#   drug - drug names (string)
#   event - name of adverse event (AE) a drug was reported with (string)
#   freq - fraction of reports for the drug that list the event (float)
aeFreqsRecent <- read.csv("./single_drug_event_frequencies_validation.csv", header=T, as.is=TRUE)
names(aeFreqsRecent) <- c("drug", "event", "freq")
Y <- data.frame(drug=unique(aeFreqsRecent$drug)) # Dataframe column of all possible drugs
testData <- sampleFeatureFormat(aeFreqsRecent, top5AEsByFisher, Y, cholDrugs)
# Vector of probability thresholds above which to classify a sample as positive
threshold <- c(seq(0,1,by=0.001))
# Predicted manipulation probabilities on the held-out validation data
glmPredictions <- predict(glmModel, newdata = testData, type="response")
# Dataframe where 1st column is TPR and 2nd column is FPR (one row per threshold)
rocData <- data.frame()
for(i in 1:length(threshold)) {
# Convert fitted probabilities to binary depending on the current threshold
predictions <- glmPredictions
predictions[predictions >= threshold[i]] <- 1
predictions[predictions < threshold[i]] <- 0
# Compute true positive rate and false positive rate.
# NOTE(review): `table` shadows base::table here; subsequent table(...)
# calls still work only because R skips non-function bindings when
# resolving a name used in call position -- renaming the variable would
# be safer.
table <- table(factor(predictions, levels=c("0", "1")), factor(testData$Y, levels=c("0", "1")), dnn=c("predicted", "actual"))
tpr <- table["1","1"]/(table["1","1"] + table["0","1"])
fpr <- 1 - table["0","0"]/(table["0","0"] + table["1","0"])
rocData <- rbind(rocData, c(tpr, fpr))
}
names(rocData) <- c("TPR","FPR")
rocData <- cbind(rocData, threshold)
# Plot receiver operating characteristic curve
ggplot(data=rocData, aes(x=FPR, y=TPR, group=1)) + geom_line() + geom_point() + ggtitle("ROC Curve")
#------------------------------------------------------------------------------------------
# 2.3 - AUC
#------------------------------------------------------------------------------------------
# Inputs: vector of x coordinates, vector of y coordinates
# Output: area under curve
auc <- function(x, y) {
result <- 0
for(p in 1:(length(x)-1)) {
y0 <- y[p]
x0 <- x[p]
y1 <- y[p+1]
x1 <- x[p+1]
square <- abs(x0-x1)*min(y0,y1)
triangle <- (abs(x0-x1)*abs(y0-y1))/2
result <- result + square + triangle
}
result
}
# Area under the ROC curve (FPR on x, TPR on y)
aucValue <- auc(rocData$FPR, rocData$TPR)
#------------------------------------------------------------------------------------------
# 3.1 - APPLY MODEL TO NEW DATA: PROCESSING THE NEW DATA
#------------------------------------------------------------------------------------------
# Read in pair drug data and convert to a samples x features matrix
pairDrugAeFreqs <- read.csv("./pair_drug_event_frequencies.csv", header=T, as.is=TRUE)
names(pairDrugAeFreqs) <- c("drug","drug1","drug2","event","freq")
Y <- data.frame(drug=unique(pairDrugAeFreqs$drug)) # Dataframe column of all possible drugs
# Empty positives vector -> data stays unlabeled and keeps its drug-name column
pairDrugData <- sampleFeatureFormat(pairDrugAeFreqs, top5AEsByFisher, Y, c())
# Number of unique drug pairs in the dataset
nrow(pairDrugData)
# Filter out every pair that CONTAINS a cholesterol drug (the original comment
# had this backwards): split the comma-delimited pair name into columns X1/X2,
# one per member drug, then keep only pairs where neither member is cholesterol
pairDrugData <- cbind(ID=rownames(pairDrugData),pairDrugData) # Keep row IDs through the split
pairDrugData <- cbind(pairDrugData, data.frame(do.call('rbind', strsplit(as.character(pairDrugData$drug),',',fixed=TRUE))))
pairDrugData <- pairDrugData[!as.character(pairDrugData$X1) %in% cholDrugs & !as.character(pairDrugData$X2) %in% cholDrugs,] #Filter
# Number of unique non-cholesterol drug pairs
nrow(pairDrugData)
# Clean up dataframe: drop the helper columns added above
pairDrugData$ID <- NULL
pairDrugData$X1 <- NULL
pairDrugData$X2 <- NULL
row.names(pairDrugData) <- NULL
#------------------------------------------------------------------------------------------
# 3.3 - APPLYING MODEL TO NEW DATA:
# FIND DRUG PAIRS THAT MATCH THE SIDE-EFFECT PROFILE OF A CHOLESTEROL DRUG
#------------------------------------------------------------------------------------------
# At or above this threshold, classify drug pair as matching side effect profile for cholesterol drugs
optimalProbThreshold <- 0.009
# Apply logistic model we built in part 2.1 to the new data
glmPredictions <- predict(glmModel, newdata = pairDrugData, type="response")
# Restrict list to pairs scoring at or above the threshold, then rank by score
glmPredictions <- glmPredictions[glmPredictions >= optimalProbThreshold]
# NOTE(review): the subsetting below repeats the filter applied on the previous
# line (harmless); the sort() is what produces the ranking
glmPredictions <- sort(glmPredictions[glmPredictions >= optimalProbThreshold], decreasing=T)
length(glmPredictions) # Sanity check - should be ~1700
# Submit list of drug-pairs as tab-delimited file called "ps2_problem3.tsv";
# rows of pairDrugData are looked up by row name to recover each pair's drug string
drugPairInteractions <- data.frame(pairDrugData[names(glmPredictions),]$drug, glmPredictions)
rownames(drugPairInteractions) <- NULL
names(drugPairInteractions) <- c("drug", "score")
write.table(drugPairInteractions, file="ps2_problem3.tsv", sep="\t")
#------------------------------------------------------------------------------------------
# 3.4 - COMPARE OUR RESULTS TO VA'S LIST
#------------------------------------------------------------------------------------------
# List of 3086 known drug-drug interactions; obtained from Veteran's Association Hospital in Tucson, AZ
knownInteractions <- read.csv("./va_drug_drug_interactions.csv", header=T, as.is=TRUE)
# Merge two drugs into single comma-delimited string for easy comparison
knownInteractions <- within(knownInteractions, drug <- paste(drug1,drug2,sep=','))
# Find drug pairs we predicted as interacting to cause cholesterol-drug-like side effects,
# which also appear in the VA's list of known drug interactions
results <- drugPairInteractions[as.character(drugPairInteractions$drug) %in% knownInteractions$drug,]
indices <- c(which(knownInteractions$drug %in% results$drug))
# List of drug pairs and their severities (the "type" column of the VA list)
results <- knownInteractions[indices,c("drug","type")]
#------------------------------------------------------------------------------------------
# 3.5 - EXAMINE EVENT FREQUENCIES
#------------------------------------------------------------------------------------------
# Examine event frequencies X1, X2, X3, X4, X5 for top 20 hits
eventFreqsForTopHits<-pairDrugData[pairDrugData$drug %in% drugPairInteractions$drug[1:20],]
drugPairInteractions$score[1:20]
|
/HW2/drug_safety_latent_signal_detection.R
|
no_license
|
ellenlt/BMI215
|
R
| false
| false
| 20,223
|
r
|
setwd("/Users/ellen/BMI215/HW2") # NOTE(review): hard-coded setwd() makes this script non-portable
library(plyr)    # for count()
library(ggplot2) # for plotting
#------------------------------------------------------------------------------------------
# GENERAL IDEA
#------------------------------------------------------------------------------------------
# The point of this assignment is to build a "side-effect" profile for cholesterol drugs
# to predict new drug-drug interactions.
# Broader implication: can identify and study new, potentially dangerous interactions.
# Can also determine which drug combinations could complicate the symptoms of those
# with high cholesterol. Finally, can potentially find drug combinations with a similar beneficial
# effect as a cholesterol drug.
# Methods:
# 1) First figure out which adverse events are enriched for cholesterol drugs
# 2) Use feature selection to identify the 5 most informative adverse events for predicting
# whether a drug is a cholesterol drug or not
# 3) Use model to predict drug-drug interactions causing cholesterol-related effects
# 4) Inputs are non-cholesterol drugs, so we can be sure that any hits are due to interaction effects
# rather than side effects of a known cholesterol drugs in the pair
# Dataframe with 3 columns:
# drug - drug names (string)
# event - name of adverse event (AE) a drug was reported with (string)
# freq - fraction of reports for the drug that list the event (float)
aeFreqs <- read.csv("./single_drug_event_frequencies.csv", header=T, as.is=TRUE)
names(aeFreqs) <- c("drug", "event", "freq")
# List/vector of cholesterol drug names (strings), one per line of the file
cholDrugs <- scan("./cholesterol_drugs.txt", what="ch", sep="\n")
# Character vector of the unique adverse events (AEs) ever reported with a
# cholesterol drug. (The original comment described a two-column dataframe,
# but unique() on a column returns a plain vector.)
cholAEs <- unique(aeFreqs$event[aeFreqs$drug %in% cholDrugs])
#------------------------------------------------------------------------------------------
# 1.1 - FILTER ADVERSE EVENTS
#------------------------------------------------------------------------------------------
# Dataframe where 1st column: all AEs reported with a cholesterol drug
# and 2nd column: # of different cholesterol drugs co-occuring with that AE
# (plyr::count tallies occurrences of each unique event value)
cholAECounts <- count(aeFreqs$event[aeFreqs$drug %in% cholDrugs])
names(cholAECounts) <- c("event", "numdrugs")
# Vector of AEs that co-occur with at least 5 different cholesterol drugs
commonCholAEs <- cholAECounts$event[cholAECounts$numdrugs>=5]
commonCholAEs
# Dataframe where 1st column: all AEs reported with a non-cholesterol drug
# and 2nd column: # of different non-cholesterol drugs co-occuring with that AE
nonCholAECounts <- count(aeFreqs$event[!aeFreqs$drug %in% cholDrugs])
names(nonCholAECounts) <- c("event", "numdrugs")
# Vector of AEs that co-occur with at least 5 different non-cholesterol drugs
commonNonCholAEs <- nonCholAECounts$event[nonCholAECounts$numdrugs>=5]
commonNonCholAEs
# Vector of AEs that co-occur with at least 5 different cholesterol
# and 5 different non-cholesterol drugs
filteredAEs <- intersect(commonCholAEs, commonNonCholAEs)
filteredAEs
#------------------------------------------------------------------------------------------
# 1.1 - PERFORM STUDENT'S T-TEST, MANN-WHITNEY TEST, and FISHER'S EXACT TEST
#------------------------------------------------------------------------------------------
# For each adverse event co-occuring with at least 5 cholesterol drugs and 5 other drugs,
# Perform three statistical test to determine which adverse events are most enriched/diminished
# in the response variable (whether or not a cholesterol drug or other drug is known to cause
# the adverse event)
# Build the 2x2 contingency table used by Fisher's exact test.
# Inputs: i   - index of the event of interest within AEs
#         AEs - vector of adverse event names
# Output: 2x2 table of drug class (cholesterol / other) vs binned report
#         frequency (>0.01 / <=0.01) for that event
contingencyTable <- function(i, AEs) {
  # Report frequencies of this event for each drug class
  cholFreqs <- subset(aeFreqs, event %in% AEs[i] & drug %in% cholDrugs)$freq
  otherFreqs <- subset(aeFreqs, event %in% AEs[i] & !drug %in% cholDrugs)$freq
  # Bin each raw frequency into one of the two categories (<=0.01 vs >0.01)
  cholBins <- ifelse(cholFreqs <= 0.01, "freq<=0.01", "freq>0.01")
  otherBins <- ifelse(otherFreqs <= 0.01, "freq<=0.01", "freq>0.01")
  # One class label per observation, in the same order as c(cholBins, otherBins)
  drugClass <- c(rep("cholesterol drugs", length(cholBins)),
                 rep("other drugs", length(otherBins)))
  table(factor(drugClass, levels = c("cholesterol drugs", "other drugs")),
        factor(c(cholBins, otherBins), levels = c("freq>0.01", "freq<=0.01")))
}
# Accumulators for per-event test results (one row appended per event below)
tTest<-data.frame()
mannWhitney<-data.frame()
fisher<-data.frame()
for(i in 1:length(filteredAEs)){
# Generate two vectors containing frequencies that the current AE was reported with each drug
cholesterol <- subset(aeFreqs, event %in% filteredAEs[i] & drug %in% cholDrugs)$freq
notCholesterol <- subset(aeFreqs, event %in% filteredAEs[i] & !drug %in% cholDrugs)$freq
# Perform t-test (Welch's, unequal variances by default) and store p-value and group means
result <- t.test(cholesterol, notCholesterol)
tTest <- rbind(tTest, c(result$p.value, result$estimate[1], result$estimate[2]))
# Perform Mann-Whitney (Wilcoxon rank-sum) test; correct=T enables the
# normal-approximation continuity correction. NOTE(review): this is NOT a
# Bonferroni multiple-testing adjustment, despite the original comment
result <- wilcox.test(cholesterol, notCholesterol, correct=T)
mannWhitney <- rbind(mannWhitney, c(result$p.value, median(cholesterol), median(notCholesterol)))
# Perform Fisher's exact test on the binned 2x2 contingency table
result <- fisher.test(contingencyTable(i, filteredAEs))
fisher <- rbind(fisher, c(result$p.value, result$estimate))
}
names(tTest) = c("pVal","meanOccurrenceInCholDrugs","meanOccurrenceInNonCholDrugs")
names(mannWhitney) = c("pVal","medianOccurrenceInCholDrugs","medianOccurrenceInNonCholDrugs")
names(fisher) = c("pVal","OR")
#------------------------------------------------------------------------------------------
# 1.1 - REPORT EVENTS MOST ENRICHED and DIMINISHED FOR CHOLESTEROL DRUGS
#------------------------------------------------------------------------------------------
# For each of the three tests performed, report the 10 events most enriched for cholesterol drugs
# as well as the 10 events most diminished for cholesterol drugs, based on lowest p-value
# for student's t-test (assuming unequal variances)
tTest <- tTest[order(tTest$pVal),]
enriched_tTest <- tTest[tTest$meanOccurrenceInCholDrugs > tTest$meanOccurrenceInNonCholDrugs,][1:10,]
# strtoi(row.names()) recovers each row's pre-sort index into filteredAEs
enriched_tTest$event <- filteredAEs[strtoi(row.names(enriched_tTest))]
diminished_tTest <- tTest[tTest$meanOccurrenceInCholDrugs < tTest$meanOccurrenceInNonCholDrugs,][1:10,]
diminished_tTest$event <- filteredAEs[strtoi(row.names(diminished_tTest))]
# for Mann-Whitney test (continuity-corrected wilcox.test; no Bonferroni
# adjustment was actually applied when the tests were run)
mannWhitney <- mannWhitney[order(mannWhitney$pVal),]
enriched_mannWhitney <- mannWhitney[mannWhitney$medianOccurrenceInCholDrugs > mannWhitney$medianOccurrenceInNonCholDrugs,][1:10,]
enriched_mannWhitney$event <- filteredAEs[strtoi(row.names(enriched_mannWhitney))]
diminished_mannWhitney <- mannWhitney[mannWhitney$medianOccurrenceInCholDrugs < mannWhitney$medianOccurrenceInNonCholDrugs,][1:10,]
diminished_mannWhitney$event <- filteredAEs[strtoi(row.names(diminished_mannWhitney))]
# for Fisher's exact test (infinite odds ratios dropped before ranking)
fisher <- fisher[order(fisher$pVal),]
fisher <- fisher[is.finite(fisher$OR),]
enriched_fisher <- fisher[fisher$OR>1,][1:10,]
enriched_fisher$event <- filteredAEs[strtoi(row.names(enriched_fisher))]
diminished_fisher <- fisher[fisher$OR<1,][1:10,]
diminished_fisher$event <- filteredAEs[strtoi(row.names(diminished_fisher))]
#------------------------------------------------------------------------------------------
# 1.2 - PLOT EVENT FREQUENCY DISTRIBUTIONS
#------------------------------------------------------------------------------------------
# For the top 2 events enriched in cholesterol drugs and the top 2 events diminished in cholesterol drugs
# (based on lowest Mann-Whitney p-value), plot event frequency distributions for cholesterol drugs and other drugs.
# Inputs: the index i of the adverse event in aeFreqs that is to be plotted,
# and a boolean indicating whether or not the event is enriched
# Output: frequency distribution for an adverse event and its co-occurrence with cholesterol and other drugs
# Plot the event-frequency density for one adverse event, split by drug class.
# Inputs: i        - rank of the event within the Mann-Whitney top-10 table
#         enriched - TRUE for the enriched table, FALSE for the diminished one
# Output: ggplot density plot of event frequency for cholesterol vs other drugs
plotMannWhitneyDist <- function(i, enriched) {
  # Select the ranking table and title wording for the requested direction
  ranking <- if (enriched) enriched_mannWhitney else diminished_mannWhitney
  direction <- if (enriched) "Enriched" else "Diminished"
  ae <- ranking$event[i]
  # Raw report frequencies for this event, one vector per drug class
  chol <- subset(aeFreqs, event %in% ae & drug %in% cholDrugs)$freq
  other <- subset(aeFreqs, event %in% ae & !drug %in% cholDrugs)$freq
  title <- paste("Event Frequency Distribution for\n", ae, "\n#", i,
                 paste("Event", direction, "in Cholesterol Drugs"))
  plotData <- data.frame(
    drugtype = factor(c(rep("Cholesterol Drugs", length(chol)),
                        rep("Other Drugs", length(other)))),
    frequency = c(chol, other)
  )
  ggplot(plotData, aes(x = frequency, colour = drugtype)) +
    geom_density() +
    ggtitle(title)
}
# Plot freq distributions
plotMannWhitneyDist(1, TRUE)   # for most enriched
plotMannWhitneyDist(2, TRUE)   # for 2nd most enriched
plotMannWhitneyDist(1, FALSE)  # for most diminished
plotMannWhitneyDist(2, FALSE)  # for second most diminished
#------------------------------------------------------------------------------------------
# 1.3 - 2X2 CONTINGENCY TABLES
#------------------------------------------------------------------------------------------
# Print out contingency tables for top two enriched and top two diminished events
# used in Fisher's test
paste("Contingency Table for",enriched_fisher$event[1])
print(contingencyTable(1, enriched_fisher$event))
paste("Contingency Table for",enriched_fisher$event[2])
print(contingencyTable(2, enriched_fisher$event))
paste("Contingency Table for",diminished_fisher$event[1])
print(contingencyTable(1, diminished_fisher$event))
paste("Contingency Table for",diminished_fisher$event[2])
print(contingencyTable(2, diminished_fisher$event))
#------------------------------------------------------------------------------------------
# 1.4 - PLOT P-VALUE VS. ODDS RATIO
#------------------------------------------------------------------------------------------
# Plot the Fisher's exact test p-values vs odds ratios for each adverse event
ggplot(data=fisher, aes(x=OR, y=pVal, group=1)) + geom_point() + ggtitle("Fisher's Exact Test:\np-value vs odds ratio")
#------------------------------------------------------------------------------------------
# 1.5 - ANALYZING ENRICHED EVENTS FOR PRESCRIPTION BIAS AND DESIRED SIDE EFFECTS
#------------------------------------------------------------------------------------------
# Print out top 30 most enriched events for cholesterol drugs from Mann-Whitney data
# (mannWhitney was already sorted by ascending p-value in section 1.1)
top30enriched_mannWhitney <- mannWhitney[mannWhitney$medianOccurrenceInCholDrugs > mannWhitney$medianOccurrenceInNonCholDrugs,][1:30,]
top30enrichedAEs <- filteredAEs[strtoi(row.names(top30enriched_mannWhitney))]
for(i in 1:length(top30enrichedAEs)){
print(top30enrichedAEs[i])
}
#------------------------------------------------------------------------------------------
# 2.1 - ASSEMBLING SIDE-EFFECT PROFILE FOR CHOLESTEROL DRUGS
#------------------------------------------------------------------------------------------
# We filter features down to the top 5 events most associated with cholesterol drugs
# (according to Fisher's test), and we fit a logistic regression model to the data,
# using these 5 events as predictors for the outcome variable, whether the drug is
# a cholesterol drug or not.
# Vector of top 5 events most associated with cholesterol drugs (in either direction) based
# on Fisher's test; strtoi(row.names()) maps the sorted rows back to filteredAEs indices
top5AEsByFisher <- filteredAEs[strtoi(row.names(fisher[1:5,]))]
# Convert long-format drug/event/freq data into a samples x features matrix.
# Input: 1) data - dataframe with at least the following 3 columns:
#           "drug": drug names (string)
#           "event": adverse event names (string)
#           "freq": frequency of co-occurence between a drug and adverse event (float)
#        2) events - vector containing the adverse events to use as features (strings)
#        3) Y - dataframe column ("drug") containing all drugs, one row per sample
#        4) positives - for labeled data, a vector containing all sample names (strings)
#           that should be labeled as positive. If this is an empty list, does not
#           label the data and instead retains a column of drug names.
# Output: dataframe with drugs as rows and events as columns; when `positives`
#         is non-empty, a binary outcome column "Y" replaces the "drug" column.
sampleFeatureFormat <- function(data, events, Y, positives) {
  result <- data.frame(Y)
  # For each feature event, pull that event's frequencies aligned to the rows
  # of `result` by drug name. BUG FIX: the original merge()-based version
  # relied on merge()'s key-sorted output matching Y's row order, which
  # silently misaligned frequencies whenever Y$drug was not already sorted;
  # match() aligns explicitly by drug name.
  for (i in seq_along(events)) {
    eventRows <- data[data$event %in% events[i], ]
    result[, i + 1] <- eventRows$freq[match(result$drug, eventRows$drug)]
  }
  names(result) <- c("drug", make.names(events))
  result[is.na(result)] <- 0 # Zero-fill drugs never reported with an event
  # If positive label names are provided, label the data.
  # Otherwise, retain the column with the drug names
  if (length(positives) > 0) {
    result$Y <- as.numeric(result$drug %in% positives)
    result$drug <- NULL
  }
  result
}
Y <- data.frame(drug=unique(aeFreqs$drug)) # Dataframe column of all possible drugs
trainingData <- sampleFeatureFormat(aeFreqs, top5AEsByFisher, Y, cholDrugs)
# Fit logistic regression of cholesterol-drug status (Y) on the top-5 event
# frequencies; predictor names are the make.names()-sanitized event names
glmModel <- glm(formula = Y ~ RHABDOMYOLYSIS + MUSCULAR.WEAKNESS + BLOOD.CREATINE.PHOSPHOKINASE.INCREASED + BLOOD.TRIGLYCERIDES.INCREASED + MUSCLE.SPASMS, data = trainingData, family=binomial(link="logit"))
summary(glmModel)
#------------------------------------------------------------------------------------------
# 2.2 - VALIDATING SIDE-EFFECT PROFILE FOR CHOLESTEROL DRUGS AND AUC CURVE
#------------------------------------------------------------------------------------------
# Validation dataframe with 3 columns:
# drug - drug names (string)
# event - name of adverse event (AE) a drug was reported with (string)
# freq - fraction of reports for the drug that list the event (float)
aeFreqsRecent <- read.csv("./single_drug_event_frequencies_validation.csv", header=T, as.is=TRUE)
names(aeFreqsRecent) <- c("drug", "event", "freq")
Y <- data.frame(drug=unique(aeFreqsRecent$drug)) # Dataframe column of all possible drugs
testData <- sampleFeatureFormat(aeFreqsRecent, top5AEsByFisher, Y, cholDrugs)
# Vector of probability thresholds; a sample is classified positive when its
# fitted probability is AT OR ABOVE the threshold (note the >= below)
threshold <- c(seq(0,1,by=0.001))
# Fitted probabilities from the logistic model built in section 2.1
glmPredictions <- predict(glmModel, newdata = testData, type="response")
# Dataframe where 1st column is TPR and 2nd column is FPR (one row per threshold)
# NOTE(review): growing rocData via rbind() in the loop is O(n^2); tolerable
# for ~1000 thresholds, but preallocation would be cleaner
rocData <- data.frame()
for(i in 1:length(threshold)) {
# Convert fitted probabilities to binary depending on the current threshold
predictions <- glmPredictions
predictions[predictions >= threshold[i]] <- 1
predictions[predictions < threshold[i]] <- 0
# 2x2 confusion matrix (predicted x actual); the local name `table` shadows
# base::table(), but R still resolves the table(...) *call* to the function
table <- table(factor(predictions, levels=c("0", "1")), factor(testData$Y, levels=c("0", "1")), dnn=c("predicted", "actual"))
tpr <- table["1","1"]/(table["1","1"] + table["0","1"]) # TP / (TP + FN)
fpr <- 1 - table["0","0"]/(table["0","0"] + table["1","0"]) # 1 - specificity
rocData <- rbind(rocData, c(tpr, fpr))
}
names(rocData) <- c("TPR","FPR")
rocData <- cbind(rocData, threshold)
# Plot receiver operating characteristic curve
ggplot(data=rocData, aes(x=FPR, y=TPR, group=1)) + geom_line() + geom_point() + ggtitle("ROC Curve")
#------------------------------------------------------------------------------------------
# 2.3 - AUC
#------------------------------------------------------------------------------------------
# Compute the area under a piecewise-linear curve via the trapezoidal rule:
# each segment contributes a rectangle (below the lower endpoint) plus the
# triangle between the two endpoints. abs() makes the result independent of
# whether x is sorted ascending or descending.
# Inputs: x - vector of x coordinates; y - vector of y coordinates (same length)
# Output: area under the curve (0 when fewer than two points are supplied)
auc <- function(x, y) {
  result <- 0
  # seq_len(max(..., 0)) avoids the 1:0 / 1:-1 trap of the original
  # `1:(length(x)-1)`, which indexed past the end and returned NA for
  # curves with fewer than two points
  for (p in seq_len(max(length(x) - 1, 0))) {
    width <- abs(x[p] - x[p + 1])
    square <- width * min(y[p], y[p + 1])
    triangle <- width * abs(y[p] - y[p + 1]) / 2
    result <- result + square + triangle
  }
  result
}
# Area under the ROC curve (FPR on x, TPR on y)
aucValue <- auc(rocData$FPR, rocData$TPR)
#------------------------------------------------------------------------------------------
# 3.1 - APPLY MODEL TO NEW DATA: PROCESSING THE NEW DATA
#------------------------------------------------------------------------------------------
# Read in pair drug data and convert to a samples x features matrix
pairDrugAeFreqs <- read.csv("./pair_drug_event_frequencies.csv", header=T, as.is=TRUE)
names(pairDrugAeFreqs) <- c("drug","drug1","drug2","event","freq")
Y <- data.frame(drug=unique(pairDrugAeFreqs$drug)) # Dataframe column of all possible drugs
# Empty positives vector -> data stays unlabeled and keeps its drug-name column
pairDrugData <- sampleFeatureFormat(pairDrugAeFreqs, top5AEsByFisher, Y, c())
# Number of unique drug pairs in the dataset
nrow(pairDrugData)
# Filter out every pair that CONTAINS a cholesterol drug (the original comment
# had this backwards): split the comma-delimited pair name into columns X1/X2,
# one per member drug, then keep only pairs where neither member is cholesterol
pairDrugData <- cbind(ID=rownames(pairDrugData),pairDrugData) # Keep row IDs through the split
pairDrugData <- cbind(pairDrugData, data.frame(do.call('rbind', strsplit(as.character(pairDrugData$drug),',',fixed=TRUE))))
pairDrugData <- pairDrugData[!as.character(pairDrugData$X1) %in% cholDrugs & !as.character(pairDrugData$X2) %in% cholDrugs,] #Filter
# Number of unique non-cholesterol drug pairs
nrow(pairDrugData)
# Clean up dataframe: drop the helper columns added above
pairDrugData$ID <- NULL
pairDrugData$X1 <- NULL
pairDrugData$X2 <- NULL
row.names(pairDrugData) <- NULL
#------------------------------------------------------------------------------------------
# 3.3 - APPLYING MODEL TO NEW DATA:
# FIND DRUG PAIRS THAT MATCH THE SIDE-EFFECT PROFILE OF A CHOLESTEROL DRUG
#------------------------------------------------------------------------------------------
# At or above this threshold, classify drug pair as matching side effect profile for cholesterol drugs
optimalProbThreshold <- 0.009
# Apply logistic model we built in part 2.1 to the new data
glmPredictions <- predict(glmModel, newdata = pairDrugData, type="response")
# Restrict list to pairs scoring at or above the threshold, then rank by score
glmPredictions <- glmPredictions[glmPredictions >= optimalProbThreshold]
# NOTE(review): the subsetting below repeats the filter applied on the previous
# line (harmless); the sort() is what produces the ranking
glmPredictions <- sort(glmPredictions[glmPredictions >= optimalProbThreshold], decreasing=T)
length(glmPredictions) # Sanity check - should be ~1700
# Submit list of drug-pairs as tab-delimited file called "ps2_problem3.tsv";
# rows of pairDrugData are looked up by row name to recover each pair's drug string
drugPairInteractions <- data.frame(pairDrugData[names(glmPredictions),]$drug, glmPredictions)
rownames(drugPairInteractions) <- NULL
names(drugPairInteractions) <- c("drug", "score")
write.table(drugPairInteractions, file="ps2_problem3.tsv", sep="\t")
#------------------------------------------------------------------------------------------
# 3.4 - COMPARE OUR RESULTS TO VA'S LIST
#------------------------------------------------------------------------------------------
# List of 3086 known drug-drug interactions; obtained from Veteran's Association Hospital in Tucson, AZ
knownInteractions <- read.csv("./va_drug_drug_interactions.csv", header=T, as.is=TRUE)
# Merge two drugs into single comma-delimited string for easy comparison
knownInteractions <- within(knownInteractions, drug <- paste(drug1,drug2,sep=','))
# Find drug pairs we predicted as interacting to cause cholesterol-drug-like side effects,
# which also appear in the VA's list of known drug interactions
results <- drugPairInteractions[as.character(drugPairInteractions$drug) %in% knownInteractions$drug,]
indices <- c(which(knownInteractions$drug %in% results$drug))
# List of drug pairs and their severities (the "type" column of the VA list)
results <- knownInteractions[indices,c("drug","type")]
#------------------------------------------------------------------------------------------
# 3.5 - EXAMINE EVENT FREQUENCIES
#------------------------------------------------------------------------------------------
# Examine event frequencies X1, X2, X3, X4, X5 for top 20 hits
eventFreqsForTopHits<-pairDrugData[pairDrugData$drug %in% drugPairInteractions$drug[1:20],]
drugPairInteractions$score[1:20]
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/update.R
\docType{methods}
\name{update,rfh-method}
\alias{update,fitrfh-method}
\alias{update,rfh-method}
\title{Update a fitted object}
\usage{
\S4method{update}{rfh}(object, formula, ..., where = parent.frame(2))
\S4method{update}{fitrfh}(object, ...)
}
\arguments{
\item{object}{(rfh) an object fitted by \link{rfh}}
\item{formula}{see \link{update.formula}}
\item{...}{arguments passed to \link{update.default}}
\item{where}{(environment) should not be specified by the user}
}
\description{
This is a method which can be used to update a \link{rfh} result object and
refit it. The fitted parameter values from the current object are used as
starting values, then \link{update.default} is called.
}
|
/man/update.Rd
|
no_license
|
CentreForSAE/saeRobust
|
R
| false
| true
| 787
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/update.R
\docType{methods}
\name{update,rfh-method}
\alias{update,fitrfh-method}
\alias{update,rfh-method}
\title{Update a fitted object}
\usage{
\S4method{update}{rfh}(object, formula, ..., where = parent.frame(2))
\S4method{update}{fitrfh}(object, ...)
}
\arguments{
\item{object}{(rfh) an object fitted by \link{rfh}}
\item{formula}{see \link{update.formula}}
\item{...}{arguments passed to \link{update.default}}
\item{where}{(environment) should not be specified by the user}
}
\description{
This is a method which can be used to update a \link{rfh} result object and
refit it. The fitted parameter values from the current object are used as
starting values, then \link{update.default} is called.
}
|
# Sun Mar 24 08:24:59 2019 ------------------------------
# Logistic regression model from greta w/ varying intercepts
library(tensorflow)
# NOTE(review): use_condaenv() is a reticulate function; presumably re-exported
# through the tensorflow/greta stack -- confirm it is in scope when run standalone
use_condaenv("greta")
library(greta)
library(tidyverse)
library(bayesplot)
library(readxl)
# Download data set from Riehl et al. 2019
dataURL <- "https://datadryad.org/stash/downloads/file_stream/82205"
download.file(dataURL, destfile = "data.xlsx")
(allTabs <- excel_sheets("data.xlsx")) # list tabs
# Read female reproductive output and discard records w/ NAs
fro <- read_xlsx("data.xlsx", sheet = allTabs[2])
fro <- fro[complete.cases(fro),]
# Use cross-classified varying intercepts for year, female ID and group ID
# (factor -> integer gives each level a 1-based index into the intercept vectors)
female_id <- as.integer(factor(fro$Female_ID_coded))
year <- as.integer(factor(fro$Year))
group_id <- as.integer(factor(fro$Group_ID_coded))
# Define and standardize model vars (scale() centers and divides by SD)
Age <- as_data(scale(fro$Min_age))
Eggs_laid <- as_data(scale(fro$Eggs_laid))
Mean_eggsize <- as_data(scale(fro$Mean_eggsize))
Group_size <- as_data(scale(fro$Group_size))
Parasite <- as_data(fro$Parasite)
# Define model effects
# Half-Cauchy(0, 1) scale priors (truncated at 0), one per varying-intercept grouping
sigmaML <- cauchy(0, 1, truncation = c(0, Inf), dim = 3)
a_fem <- normal(0, sigmaML[1], dim = max(female_id))
a_year <- normal(0, sigmaML[2], dim = max(year))
a_group <- normal(0, sigmaML[3], dim = max(group_id))
a <- normal(0, 5)
bA <- normal(0, 3)
bEL <- normal(0, 3)
bES <- normal(0, 3)
bGS <- normal(0, 3)
bP <- normal(0, 3)
bPA <- normal(0, 3)
# Model setup: linear predictor including a Parasite x Age interaction (bPA)
mu <- a + a_fem[female_id] + a_year[year] + a_group[group_id] +
Age*bA + Eggs_laid*bEL + Mean_eggsize*bES + Parasite*bP +
Group_size*bGS + Parasite*Age*bPA
p <- ilogit(mu) # inverse-logit maps the linear predictor to a probability
distribution(fro$Successful) <- bernoulli(p)
cuckooModel <- model(a, bA, bEL, bES, bP, bGS, bPA)
# Plot
plot(cuckooModel)
# HMC sampling
# NOTE(review): n_cores = 10 assumes at least 10 cores are available
draws <- mcmc(cuckooModel, n_samples = 4000,
warmup = 1000, chains = 4, n_cores = 10)
# Trace plots
mcmc_trace(draws)
# Parameter posterior
mcmc_intervals(draws, prob = .95)
# Simulation with average eggs laid, egg size and group size, w/ and w/o parasitism
# (standardized covariates held at 0 drop out of the linear predictor)
seqX <- seq(-3, 3, length.out = 100)
probsNoPar <- sapply(seqX, function(x){
scenario <- ilogit(a + x*bA)
probs <- calculate(scenario, draws)
return(unlist(probs))
})
probsPar <- sapply(seqX, function(x){
scenario <- ilogit(a + x*bA + bP + x*bPA)
probs <- calculate(scenario, draws)
return(unlist(probs))
})
# Posterior mean success curve w/o parasitism, with 95% HPDI shading
plot(seqX, apply(probsNoPar, 2, mean), type = "l", ylim = 0:1,
xlab = "Min age (standardized)", ylab = "P(Successful)",
yaxp = c(0, 1, 2))
rethinking::shade(apply(probsNoPar, 2, rethinking::HPDI, prob = .95),
seqX)
# Overlay the parasitism scenario in red
lines(seqX, apply(probsPar, 2, mean), lty = 2, col = "red")
rethinking::shade(apply(probsPar, 2, rethinking::HPDI, prob = .95),
seqX, col = rgb(1,0,0,.25))
# Write sessioninfo
writeLines(capture.output(sessionInfo()), "sessionInfo")
|
/dbinomSuccessful.R
|
no_license
|
johnypark/cuckooParasitism
|
R
| false
| false
| 2,872
|
r
|
# Sun Mar 24 08:24:59 2019 ------------------------------
# Logistic regression model from greta w/ varying intercepts
library(tensorflow)
# NOTE(review): use_condaenv() is a reticulate function; presumably re-exported
# through the tensorflow/greta stack -- confirm it is in scope when run standalone
use_condaenv("greta")
library(greta)
library(tidyverse)
library(bayesplot)
library(readxl)
# Download data set from Riehl et al. 2019
dataURL <- "https://datadryad.org/stash/downloads/file_stream/82205"
download.file(dataURL, destfile = "data.xlsx")
(allTabs <- excel_sheets("data.xlsx")) # list tabs
# Read female reproductive output and discard records w/ NAs
fro <- read_xlsx("data.xlsx", sheet = allTabs[2])
fro <- fro[complete.cases(fro),]
# Use cross-classified varying intercepts for year, female ID and group ID
# (factor -> integer gives each level a 1-based index into the intercept vectors)
female_id <- as.integer(factor(fro$Female_ID_coded))
year <- as.integer(factor(fro$Year))
group_id <- as.integer(factor(fro$Group_ID_coded))
# Define and standardize model vars (scale() centers and divides by SD)
Age <- as_data(scale(fro$Min_age))
Eggs_laid <- as_data(scale(fro$Eggs_laid))
Mean_eggsize <- as_data(scale(fro$Mean_eggsize))
Group_size <- as_data(scale(fro$Group_size))
Parasite <- as_data(fro$Parasite)
# Define model effects
# Half-Cauchy(0, 1) scale priors (truncated at 0), one per varying-intercept grouping
sigmaML <- cauchy(0, 1, truncation = c(0, Inf), dim = 3)
a_fem <- normal(0, sigmaML[1], dim = max(female_id))
a_year <- normal(0, sigmaML[2], dim = max(year))
a_group <- normal(0, sigmaML[3], dim = max(group_id))
a <- normal(0, 5)
bA <- normal(0, 3)
bEL <- normal(0, 3)
bES <- normal(0, 3)
bGS <- normal(0, 3)
bP <- normal(0, 3)
bPA <- normal(0, 3)
# Model setup: linear predictor including a Parasite x Age interaction (bPA)
mu <- a + a_fem[female_id] + a_year[year] + a_group[group_id] +
Age*bA + Eggs_laid*bEL + Mean_eggsize*bES + Parasite*bP +
Group_size*bGS + Parasite*Age*bPA
p <- ilogit(mu) # inverse-logit maps the linear predictor to a probability
distribution(fro$Successful) <- bernoulli(p)
cuckooModel <- model(a, bA, bEL, bES, bP, bGS, bPA)
# Plot
plot(cuckooModel)
# HMC sampling
# NOTE(review): n_cores = 10 assumes at least 10 cores are available
draws <- mcmc(cuckooModel, n_samples = 4000,
warmup = 1000, chains = 4, n_cores = 10)
# Trace plots
mcmc_trace(draws)
# Parameter posterior
mcmc_intervals(draws, prob = .95)
# Simulation with average eggs laid, egg size and group size, w/ and w/o parasitism
# (standardized covariates held at 0 drop out of the linear predictor)
seqX <- seq(-3, 3, length.out = 100)
probsNoPar <- sapply(seqX, function(x){
scenario <- ilogit(a + x*bA)
probs <- calculate(scenario, draws)
return(unlist(probs))
})
probsPar <- sapply(seqX, function(x){
scenario <- ilogit(a + x*bA + bP + x*bPA)
probs <- calculate(scenario, draws)
return(unlist(probs))
})
# Posterior mean success curve w/o parasitism, with 95% HPDI shading
plot(seqX, apply(probsNoPar, 2, mean), type = "l", ylim = 0:1,
xlab = "Min age (standardized)", ylab = "P(Successful)",
yaxp = c(0, 1, 2))
rethinking::shade(apply(probsNoPar, 2, rethinking::HPDI, prob = .95),
seqX)
# Overlay the parasitism scenario in red
lines(seqX, apply(probsPar, 2, mean), lty = 2, col = "red")
rethinking::shade(apply(probsPar, 2, rethinking::HPDI, prob = .95),
seqX, col = rgb(1,0,0,.25))
# Write sessioninfo
writeLines(capture.output(sessionInfo()), "sessionInfo")
|
############################################################################################################
#Script does the following:
#1. Merges the training and the test sets to create one data set.
#2. Extracts only the measurements on the mean and standard deviation for each measurement.
#3. Uses descriptive activity names to name the activities in the data set
#4. Appropriately labels the data set with descriptive variable names.
#5. From the data set in step 4, creates a second, independent tidy data set with the
#   average of each variable for each activity and each subject.
############################################################################################################
#Download and unzip files into the working directory
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
destFile <- file.path(getwd(), "Dataset.zip")
download.file(fileUrl, destFile)
unzip(destFile)
#Get data into R
#file.path() builds portable paths (the original "\\" separators are Windows-only)
##Features and activities
activity_labels <- read.table(file.path("UCI HAR Dataset", "activity_labels.txt"))
features <- read.table(file.path("UCI HAR Dataset", "features.txt"))
##Test files
subject_test <- read.table(file.path("UCI HAR Dataset", "test", "subject_test.txt"))
x_test <- read.table(file.path("UCI HAR Dataset", "test", "X_test.txt"))
y_test <- read.table(file.path("UCI HAR Dataset", "test", "y_test.txt"))
##Train files
subject_train <- read.table(file.path("UCI HAR Dataset", "train", "subject_train.txt"))
x_train <- read.table(file.path("UCI HAR Dataset", "train", "X_train.txt"))
y_train <- read.table(file.path("UCI HAR Dataset", "train", "y_train.txt"))
#Gradually merge data: stack the test rows on top of the train rows
testTrainSets <- rbind(x_test, x_train)               #sensor measurements
activitiesData <- rbind(y_test, y_train)              #activity codes (1-6)
testTrainSubj <- rbind(subject_test, subject_train)   #subject ids
#Change variable names to more descriptive names
names(testTrainSets) <- features[, 2]
names(activitiesData) <- "activities"
names(testTrainSubj) <- "subject"
#Merge all data together: measurements, then activities, then subject
data <- cbind(testTrainSets, activitiesData, testTrainSubj)
#Get mean and std measurements ("mean" also captures meanFreq, as in the original pattern)
colIndx <- grep("mean|std", features[, 2])
required_column_names <- c(as.character(features[colIndx, 2]), "activities", "subject")
allData <- subset(data, select = required_column_names)
#Use descriptive activity names to name the activities in the data set
#(vectorized lookup replaces the original element-wise for loop)
activity_labels <- as.character(activity_labels[, 2])
allData$activities <- activity_labels[as.numeric(allData$activities)]
#Appropriately label the data set with descriptive variable names
names(allData) <- gsub("^t", "time", names(allData))
names(allData) <- gsub("^f", "frequency", names(allData))
names(allData) <- gsub("Acc", "Accelerometer", names(allData))
names(allData) <- gsub("Gyro", "Gyroscope", names(allData))
names(allData) <- gsub("Mag", "Magnitude", names(allData))
names(allData) <- gsub("BodyBody", "Body", names(allData))
#Create independent tidy dataset with the average of each variable per activity and subject.
#Aggregate only the measurement columns so mean() never sees the character
#activities column (the original produced NA/warning columns it then dropped).
measure_cols <- setdiff(names(allData), c("activities", "subject"))
tidyData <- aggregate(allData[measure_cols],
                      by = list(activities = as.factor(allData$activities),
                                subject = as.factor(allData$subject)),
                      FUN = mean)
write.table(tidyData, "tidy_data.txt", row.names = FALSE)
|
/run_analysis.R
|
no_license
|
TK2008/Getting_and_Cleaning_Data_Course_Project
|
R
| false
| false
| 3,333
|
r
|
############################################################################################################
#Script does the following:
#1. Merges the training and the test sets to create one data set.
#2. Extracts only the measurements on the mean and standard deviation for each measurement.
#3. Uses descriptive activity names to name the activities in the data set
#4. Appropriately labels the data set with descriptive variable names.
#5. From the data set in step 4, creates a second, independent tidy data set with the
#   average of each variable for each activity and each subject.
############################################################################################################
#Download and unzip files into the working directory
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
destFile <- file.path(getwd(), "Dataset.zip")
download.file(fileUrl, destFile)
unzip(destFile)
#Get data into R
#file.path() builds portable paths (the original "\\" separators are Windows-only)
##Features and activities
activity_labels <- read.table(file.path("UCI HAR Dataset", "activity_labels.txt"))
features <- read.table(file.path("UCI HAR Dataset", "features.txt"))
##Test files
subject_test <- read.table(file.path("UCI HAR Dataset", "test", "subject_test.txt"))
x_test <- read.table(file.path("UCI HAR Dataset", "test", "X_test.txt"))
y_test <- read.table(file.path("UCI HAR Dataset", "test", "y_test.txt"))
##Train files
subject_train <- read.table(file.path("UCI HAR Dataset", "train", "subject_train.txt"))
x_train <- read.table(file.path("UCI HAR Dataset", "train", "X_train.txt"))
y_train <- read.table(file.path("UCI HAR Dataset", "train", "y_train.txt"))
#Gradually merge data: stack the test rows on top of the train rows
testTrainSets <- rbind(x_test, x_train)               #sensor measurements
activitiesData <- rbind(y_test, y_train)              #activity codes (1-6)
testTrainSubj <- rbind(subject_test, subject_train)   #subject ids
#Change variable names to more descriptive names
names(testTrainSets) <- features[, 2]
names(activitiesData) <- "activities"
names(testTrainSubj) <- "subject"
#Merge all data together: measurements, then activities, then subject
data <- cbind(testTrainSets, activitiesData, testTrainSubj)
#Get mean and std measurements ("mean" also captures meanFreq, as in the original pattern)
colIndx <- grep("mean|std", features[, 2])
required_column_names <- c(as.character(features[colIndx, 2]), "activities", "subject")
allData <- subset(data, select = required_column_names)
#Use descriptive activity names to name the activities in the data set
#(vectorized lookup replaces the original element-wise for loop)
activity_labels <- as.character(activity_labels[, 2])
allData$activities <- activity_labels[as.numeric(allData$activities)]
#Appropriately label the data set with descriptive variable names
names(allData) <- gsub("^t", "time", names(allData))
names(allData) <- gsub("^f", "frequency", names(allData))
names(allData) <- gsub("Acc", "Accelerometer", names(allData))
names(allData) <- gsub("Gyro", "Gyroscope", names(allData))
names(allData) <- gsub("Mag", "Magnitude", names(allData))
names(allData) <- gsub("BodyBody", "Body", names(allData))
#Create independent tidy dataset with the average of each variable per activity and subject.
#Aggregate only the measurement columns so mean() never sees the character
#activities column (the original produced NA/warning columns it then dropped).
measure_cols <- setdiff(names(allData), c("activities", "subject"))
tidyData <- aggregate(allData[measure_cols],
                      by = list(activities = as.factor(allData$activities),
                                subject = as.factor(allData$subject)),
                      FUN = mean)
write.table(tidyData, "tidy_data.txt", row.names = FALSE)
|
# Environment setup for the capstone text-mining analysis.
# NOTE(review): setwd() and unconditional install.packages() in a script are
# side effects that make it non-portable and reinstall packages on every run;
# requireNamespace() guards would be safer. Left unchanged here.
setwd("~/R_Cap_Stone/final/en_US/Final_release/quanteda")
install.packages("quanteda.textmodels")
install.packages("quanteda.corpora")
install.packages("topicmodels")
install.packages("newsmap")
install.packages("remotes")
remotes::install_github("koheiw/Newsmap")
# Attach the packages used below (tidyverse, text mining, topic modeling).
library("tidytext")
library(tidyverse)
library(dplyr)
library(ggplot2)
library(stopwords)
library(tm)
library(xfun)
library(textrecipes)
library(tokenizers)
library(qdap)
library(quanteda)
library(readtext)
library("gsl")
library("topicmodels")
library("caret")
library("newsmap")
library("tidyr")
library("data.table")
# Locate the three corpus files in the working directory
# (expected alphabetical order: blogs, news, twitter).
filenames <- dir(pattern = "[.]txt$")
set.seed(123)
# Read a whole text file as UTF-8 lines. Binary mode plus skipNul avoids
# read errors from embedded NULs in the corpus; the connection is closed
# on exit even if readLines() fails (the original leaked it on error).
read_corpus <- function(path) {
  con <- file(path, "rb")
  on.exit(close(con), add = TRUE)
  readLines(con, -1L, encoding = "UTF-8", skipNul = TRUE)
}
# Draw a random `frac` sample of lines; seq_along() is safe for empty
# input where the original 1:length(x) would yield c(1, 0).
sample_lines <- function(lines, frac = 0.01) {
  lines[sample(seq_along(lines), round(frac * length(lines)))]
}
blogs <- read_corpus(filenames[1])
news <- read_corpus(filenames[2])
twitter <- read_corpus(filenames[3])
# Keep a 1% sample of each source, then free the full corpora.
sampleBlogs <- sample_lines(blogs)
sampleNews <- sample_lines(news)
sampleTwitter <- sample_lines(twitter)
rm(blogs, news, twitter)
# Clean one character vector of sampled text. Factored out of the three
# copy-pasted per-source pipelines so the steps stay in sync.
clean_sample <- function(txt) {
  # Remove free-standing single letters. BUG FIX: the original class
  # "[A-z]" spans the ASCII range Z..a and so also matched "[", "\",
  # "]", "^", "_" and backtick; "[A-Za-z]" matches letters only.
  txt <- gsub(pattern = "\\b[A-Za-z]\\b", replacement = " ", txt)
  # "[<->]" is the character range '<'..'>', i.e. the characters < = >
  # (kept as-is for behavioral compatibility with the original).
  txt <- gsub(pattern = "[<->]", replacement = " ", txt)
  # Strip remaining punctuation / special characters.
  txt <- str_replace_all(txt, "[[:punct:]]", "")
  # qdap: spell out symbols ($ becomes "dollar", % becomes "percent", ...).
  replace_symbol(txt)
}
sampleBlogs <- clean_sample(sampleBlogs)
sampleNews <- clean_sample(sampleNews)
sampleTwitter <- clean_sample(sampleTwitter)
# One-column data.tables, all named "text", stacked into one document set.
blogs <- data.table(sampleBlogs)
news <- data.table(sampleNews)
twitter <- data.table(sampleTwitter)
names(blogs) = names(news) = names(twitter) = "text"
document <- rbind(blogs, news, twitter) # a data.frame of 42696 elements
# corpus
doc.corpus <- corpus(document)
# NOTE(review): the next two bare string literals are console output pasted
# into the script; they evaluate as no-op expressions and should be removed
# or commented out.
"Corpus consisting of 1 document"
"The bruschetta however missed the mark Instead of manageab..."
# Cleaning and creating tokens
# This is the process of cleaning the text with the following actions:
# - replace_symbol ($ becomes dollar)
# - removePonctuation (like , . ? are skipped)
# - bracketX (remove texts within brackets)
# - replace_contraction (shouldn't becomes should not)
# - replace_abreviation (Sr becomes senior)
# - removeNumbers (suppress as it gives valuable insight)
# - stripWhitespace (strip extra white space)
# - tolower (change to lower case)
# tokens
doc.tokens <- tokens(doc.corpus)
doc.tokens <- tokens(doc.tokens, remove_punct = TRUE)
#doc.tokens <- tokens_select(doc.tokens, stopwords('english'),selection='remove')
#doc.tokens <- tokens_wordstem(doc.tokens) # unsure we want this
doc.tokens <- tokens_tolower(doc.tokens)
# n-grams (space-joined; memory footprints noted by the author)
token_4 <- tokens_ngrams(doc.tokens, n=4L, skip = 0L, concatenator = " ") # 53 MB -> 87 MB
token_3 <- tokens_ngrams(doc.tokens, n=3L, skip = 0L, concatenator = " ") # 54 MB -> 77 MB
token_2 <- tokens_ngrams(doc.tokens, n=2L, skip = 0L, concatenator = " ") # 45 MB -> 50 MB
# dfm: document-feature matrices with top-15 frequency barplots
# for the 1-gram
my_dfm <- dfm(doc.tokens)
top <- topfeatures(my_dfm, 15)
barplot(top, main= "Repartition of top words" , ylab = "Number of repetitions", las=2)
# for the 2-grams
my_dfm2 <- dfm(token_2)
top2 <- topfeatures(my_dfm2, 15)
barplot(top2, main= "Repartition of top words" , ylab = "Number of repetitions", las=2)
# for the 3-grams
my_dfm3 <- dfm(token_3)
top3 <- topfeatures(my_dfm3, 15)
barplot(top3, main= "Repartition of top words" , ylab = "Number of repetitions", las=2)
# for the 4-grams
my_dfm4 <- dfm(token_4)
top4 <- topfeatures(my_dfm4, 15)
# wide bottom margin so the long 4-gram labels fit
par(mar = c( 12, 3, 3, 3))
barplot(top4, main= "Repartition of top words" , ylab = "Number of repetitions", las=2)
# Unknown categories: Unsupervised machine learning -
# Latent Dirichlet Allocation (LDA)
# LDA: Latent Dirichlet Allocation
# belong to topic modeling.
# Stemmed, stopword-free dfm used for topic modeling only.
mydfm <- dfm(doc.tokens,
tolower = TRUE,
stem = TRUE,
remove = stopwords("english"))
# Drop features appearing in <1% or >90% of documents (proportional).
mydfm.un.trim <-
dfm_trim(
mydfm,
min_docfreq = 0.01,
#min_docfreq = 0.075,
# min 7.5%
max_docfreq = 0.90,
# max 90%
docfreq_type = "prop"
)
topic.count <- 20 # Assigns the number of topics for the model
# Convert the trimmed DFM to a topicmodels object
dfm2topicmodels <- convert(mydfm.un.trim, to = "topicmodels")
lda.model <- LDA(dfm2topicmodels, topic.count)
lda.model
# A LDA_VEM topic model with 20 topics.
# Top 6 terms per topic.
as.data.frame(terms(lda.model, 6))
# probability of a term in a topic
lda_topics_beta <- tidy(lda.model, matrix = "beta")
head(lda_topics_beta)
# probability of a topic in a document
lda_topics_gamma <- tidy(lda.model, matrix = "gamma")
head(lda_topics_gamma)
# Hierarchical clustering of topics on scaled beta (term) distributions.
lda.similarity <- as.data.frame(lda.model@beta) %>%
scale() %>%
dist(method = "euclidean") %>%
hclust(method = "ward.D2")
# how topics are connected
par(mar = c(0, 4, 4, 2))
plot(lda.similarity,
main = "LDA topic similarity by features",
xlab = "",
sub = "")
# Documents in which topic are particularly strong
head(data.frame(Topic = topics(lda.model)),10)
# find the 10 terms that are most common within each topic
# nice display with ggplot
ap_top_terms <- lda_topics_beta %>%
group_by(topic) %>%
top_n(10, beta) %>%
ungroup() %>%
arrange(topic, -beta)
ap_top_terms %>%
mutate(term = reorder_within(term, beta, topic)) %>%
ggplot(aes(beta, term, fill = factor(topic))) +
geom_col(show.legend = FALSE) +
facet_wrap(~ topic, scales = "free") +
scale_y_reordered()
# A prediction in term of topics (20 topics) of a sentence on which we apply the lda.model
# blogs[201] is row 201 of the blogs data.table (one "text" cell).
text <- blogs[201]
my_text <- dfm(tokens(corpus(text)),tolower = TRUE, stem = TRUE, remove = stopwords("english"))
# response to a query
resp <- posterior(lda.model, my_text)
# The text (transformed into my_text) is meant to be matched to the closest topic, without predicting word n+1!
|
/Report_final_quanteda_1.R
|
no_license
|
cochetstephane/CapStone
|
R
| false
| false
| 6,967
|
r
|
# Environment setup for the capstone text-mining analysis.
# NOTE(review): setwd() and unconditional install.packages() in a script are
# side effects that make it non-portable and reinstall packages on every run;
# requireNamespace() guards would be safer. Left unchanged here.
setwd("~/R_Cap_Stone/final/en_US/Final_release/quanteda")
install.packages("quanteda.textmodels")
install.packages("quanteda.corpora")
install.packages("topicmodels")
install.packages("newsmap")
install.packages("remotes")
remotes::install_github("koheiw/Newsmap")
# Attach the packages used below (tidyverse, text mining, topic modeling).
library("tidytext")
library(tidyverse)
library(dplyr)
library(ggplot2)
library(stopwords)
library(tm)
library(xfun)
library(textrecipes)
library(tokenizers)
library(qdap)
library(quanteda)
library(readtext)
library("gsl")
library("topicmodels")
library("caret")
library("newsmap")
library("tidyr")
library("data.table")
# Locate the three corpus files in the working directory
# (expected alphabetical order: blogs, news, twitter).
filenames <- dir(pattern = "[.]txt$")
set.seed(123)
# Read a whole text file as UTF-8 lines. Binary mode plus skipNul avoids
# read errors from embedded NULs in the corpus; the connection is closed
# on exit even if readLines() fails (the original leaked it on error).
read_corpus <- function(path) {
  con <- file(path, "rb")
  on.exit(close(con), add = TRUE)
  readLines(con, -1L, encoding = "UTF-8", skipNul = TRUE)
}
# Draw a random `frac` sample of lines; seq_along() is safe for empty
# input where the original 1:length(x) would yield c(1, 0).
sample_lines <- function(lines, frac = 0.01) {
  lines[sample(seq_along(lines), round(frac * length(lines)))]
}
blogs <- read_corpus(filenames[1])
news <- read_corpus(filenames[2])
twitter <- read_corpus(filenames[3])
# Keep a 1% sample of each source, then free the full corpora.
sampleBlogs <- sample_lines(blogs)
sampleNews <- sample_lines(news)
sampleTwitter <- sample_lines(twitter)
rm(blogs, news, twitter)
# Clean one character vector of sampled text. Factored out of the three
# copy-pasted per-source pipelines so the steps stay in sync.
clean_sample <- function(txt) {
  # Remove free-standing single letters. BUG FIX: the original class
  # "[A-z]" spans the ASCII range Z..a and so also matched "[", "\",
  # "]", "^", "_" and backtick; "[A-Za-z]" matches letters only.
  txt <- gsub(pattern = "\\b[A-Za-z]\\b", replacement = " ", txt)
  # "[<->]" is the character range '<'..'>', i.e. the characters < = >
  # (kept as-is for behavioral compatibility with the original).
  txt <- gsub(pattern = "[<->]", replacement = " ", txt)
  # Strip remaining punctuation / special characters.
  txt <- str_replace_all(txt, "[[:punct:]]", "")
  # qdap: spell out symbols ($ becomes "dollar", % becomes "percent", ...).
  replace_symbol(txt)
}
sampleBlogs <- clean_sample(sampleBlogs)
sampleNews <- clean_sample(sampleNews)
sampleTwitter <- clean_sample(sampleTwitter)
# One-column data.tables, all named "text", stacked into one document set.
blogs <- data.table(sampleBlogs)
news <- data.table(sampleNews)
twitter <- data.table(sampleTwitter)
names(blogs) = names(news) = names(twitter) = "text"
document <- rbind(blogs, news, twitter) # a data.frame of 42696 elements
# corpus
doc.corpus <- corpus(document)
# NOTE(review): the next two bare string literals are console output pasted
# into the script; they evaluate as no-op expressions and should be removed
# or commented out.
"Corpus consisting of 1 document"
"The bruschetta however missed the mark Instead of manageab..."
# Cleaning and creating tokens
# This is the process of cleaning the text with the following actions:
# - replace_symbol ($ becomes dollar)
# - removePonctuation (like , . ? are skipped)
# - bracketX (remove texts within brackets)
# - replace_contraction (shouldn't becomes should not)
# - replace_abreviation (Sr becomes senior)
# - removeNumbers (suppress as it gives valuable insight)
# - stripWhitespace (strip extra white space)
# - tolower (change to lower case)
# tokens
doc.tokens <- tokens(doc.corpus)
doc.tokens <- tokens(doc.tokens, remove_punct = TRUE)
#doc.tokens <- tokens_select(doc.tokens, stopwords('english'),selection='remove')
#doc.tokens <- tokens_wordstem(doc.tokens) # unsure we want this
doc.tokens <- tokens_tolower(doc.tokens)
# n-grams (space-joined; memory footprints noted by the author)
token_4 <- tokens_ngrams(doc.tokens, n=4L, skip = 0L, concatenator = " ") # 53 MB -> 87 MB
token_3 <- tokens_ngrams(doc.tokens, n=3L, skip = 0L, concatenator = " ") # 54 MB -> 77 MB
token_2 <- tokens_ngrams(doc.tokens, n=2L, skip = 0L, concatenator = " ") # 45 MB -> 50 MB
# dfm: document-feature matrices with top-15 frequency barplots
# for the 1-gram
my_dfm <- dfm(doc.tokens)
top <- topfeatures(my_dfm, 15)
barplot(top, main= "Repartition of top words" , ylab = "Number of repetitions", las=2)
# for the 2-grams
my_dfm2 <- dfm(token_2)
top2 <- topfeatures(my_dfm2, 15)
barplot(top2, main= "Repartition of top words" , ylab = "Number of repetitions", las=2)
# for the 3-grams
my_dfm3 <- dfm(token_3)
top3 <- topfeatures(my_dfm3, 15)
barplot(top3, main= "Repartition of top words" , ylab = "Number of repetitions", las=2)
# for the 4-grams
my_dfm4 <- dfm(token_4)
top4 <- topfeatures(my_dfm4, 15)
# wide bottom margin so the long 4-gram labels fit
par(mar = c( 12, 3, 3, 3))
barplot(top4, main= "Repartition of top words" , ylab = "Number of repetitions", las=2)
# Unknown categories: Unsupervised machine learning -
# Latent Dirichlet Allocation (LDA)
# LDA: Latent Dirichlet Allocation
# belong to topic modeling.
# Stemmed, stopword-free dfm used for topic modeling only.
mydfm <- dfm(doc.tokens,
tolower = TRUE,
stem = TRUE,
remove = stopwords("english"))
# Drop features appearing in <1% or >90% of documents (proportional).
mydfm.un.trim <-
dfm_trim(
mydfm,
min_docfreq = 0.01,
#min_docfreq = 0.075,
# min 7.5%
max_docfreq = 0.90,
# max 90%
docfreq_type = "prop"
)
topic.count <- 20 # Assigns the number of topics for the model
# Convert the trimmed DFM to a topicmodels object
dfm2topicmodels <- convert(mydfm.un.trim, to = "topicmodels")
lda.model <- LDA(dfm2topicmodels, topic.count)
lda.model
# A LDA_VEM topic model with 20 topics.
# Top 6 terms per topic.
as.data.frame(terms(lda.model, 6))
# probability of a term in a topic
lda_topics_beta <- tidy(lda.model, matrix = "beta")
head(lda_topics_beta)
# probability of a topic in a document
lda_topics_gamma <- tidy(lda.model, matrix = "gamma")
head(lda_topics_gamma)
# Hierarchical clustering of topics on scaled beta (term) distributions.
lda.similarity <- as.data.frame(lda.model@beta) %>%
scale() %>%
dist(method = "euclidean") %>%
hclust(method = "ward.D2")
# how topics are connected
par(mar = c(0, 4, 4, 2))
plot(lda.similarity,
main = "LDA topic similarity by features",
xlab = "",
sub = "")
# Documents in which topic are particularly strong
head(data.frame(Topic = topics(lda.model)),10)
# find the 10 terms that are most common within each topic
# nice display with ggplot
ap_top_terms <- lda_topics_beta %>%
group_by(topic) %>%
top_n(10, beta) %>%
ungroup() %>%
arrange(topic, -beta)
ap_top_terms %>%
mutate(term = reorder_within(term, beta, topic)) %>%
ggplot(aes(beta, term, fill = factor(topic))) +
geom_col(show.legend = FALSE) +
facet_wrap(~ topic, scales = "free") +
scale_y_reordered()
# A prediction in term of topics (20 topics) of a sentence on which we apply the lda.model
# blogs[201] is row 201 of the blogs data.table (one "text" cell).
text <- blogs[201]
my_text <- dfm(tokens(corpus(text)),tolower = TRUE, stem = TRUE, remove = stopwords("english"))
# response to a query
resp <- posterior(lda.model, my_text)
# The text (transformed into my_text) is meant to be matched to the closest topic, without predicting word n+1!
|
## Across the United States, how have emissions from coal combustion-related sources changed from 1999-2008?
setwd("F:/JHU/3_EXPLORATORY DATA/ASSIGNMENT 2")
# Load the NEI emissions records and the source-classification table.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
head(SCC)
summary(SCC$EI.Sector)
# Flag every source-classification row whose sector mentions "Coal".
coal <- grepl("Coal", SCC$EI.Sector)
coalsources <- SCC[coal, ]
head(coalsources)
# Keep only NEI records whose SCC code belongs to a coal sector,
# then sum PM2.5 emissions within each year.
e <- NEI[NEI$SCC %in% coalsources$SCC, ]
ey <- aggregate(Emissions ~ year, data = e, FUN = sum)
ey
## year Emissions
##1 1999 572126.5
##2 2002 546789.2
##3 2005 552881.5
##4 2008 343432.2
library(ggplot2)
# Bar chart of total coal-related PM2.5 emissions per year, saved to PNG.
png("plot4.png")
yearly_plot <- ggplot(ey, aes(x = factor(year), y = Emissions)) +
  geom_bar(stat = "identity") +
  xlab("year") +
  ylab("total PM2.5 emissions") +
  ggtitle("Emissions from coal sources")
print(yearly_plot)
dev.off()
|
/plot4.R
|
no_license
|
msi888/ExData_Plotting2
|
R
| false
| false
| 840
|
r
|
## Across the United States, how have emissions from coal combustion-related sources changed from 1999-2008?
# NOTE(review): setwd() with an absolute drive path is Windows-specific and
# non-portable; the .rds inputs must already exist in that directory.
setwd("F:/JHU/3_EXPLORATORY DATA/ASSIGNMENT 2")
# NEI: PM2.5 emissions records; SCC: source-classification lookup table.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
head(SCC)
summary(SCC$EI.Sector)
# Flag every classification row whose EI.Sector mentions "Coal" (case-sensitive).
coal <- grepl("Coal", SCC$EI.Sector)
coalsources <- SCC[coal,]
head(coalsources)
# Locate emissions from coal sources and group by year
e <- NEI[(NEI$SCC %in% coalsources$SCC), ]
ey <- aggregate(Emissions ~ year, data=e, FUN=sum)
ey
## year Emissions
##1 1999 572126.5
##2 2002 546789.2
##3 2005 552881.5
##4 2008 343432.2
library(ggplot2)
# Save a per-year bar chart of total coal-related PM2.5 emissions.
# (The bare ggplot expression auto-prints at top level, drawing to the PNG device.)
png("plot4.png")
ggplot(ey, aes(x=factor(year), y=Emissions)) +
geom_bar(stat="identity") +
xlab("year") +
ylab("total PM2.5 emissions") +
ggtitle("Emissions from coal sources")
dev.off()
|
#' Is the input the empty model?
#'
#' Checks to see if the input is the empty model.
#'
#' @param x Input to check.
#' @param .xname Not intended to be used directly.
#' @param severity How severe should the consequences of the assertion be?
#' Either \code{"stop"}, \code{"warning"}, \code{"message"}, or \code{"none"}.
#' @return \code{is_[non_]empty_model} returns \code{TRUE} if the input is an
#' [non] empty model. (\code{has_terms} is used to determine that a variable
#' is a model object.) The model is considered empty if there are no
#' factors and no intercept. The \code{assert_*} functions return nothing
#' but throw an error if the corresponding \code{is_*} function returns
#' \code{FALSE}.
#' @seealso \code{\link[stats]{is.empty.model}} and \code{is_empty}.
#' @examples
#' # empty models have no intercept and no factors
#' an_empty_model <- lm(uptake ~ 0, CO2)
#' is_empty_model(an_empty_model)
#'
#' a_model_with_an_intercept <- lm(uptake ~ 1, CO2)
#' a_model_with_factors <- lm(uptake ~ conc * Type, CO2)
#' is_non_empty_model(a_model_with_an_intercept)
#' is_non_empty_model(a_model_with_factors)
#'
#' assertive.base::dont_stop(assert_is_empty_model(a_model_with_factors))
#' @importFrom stats terms
#' @export
is_empty_model <- function(x, .xname = get_name_in_parent(x))
{
  # A non-model input (no terms) can never be the empty model.
  if(!has_terms(x))
  {
    return(
      false(gettext("%s has no terms; is probably not a model."), .xname)
    )
  }
  model_terms <- terms(x)
  term_factors <- attr(model_terms, "factors")
  # Any factor in the terms object means the model is not empty.
  if(length(term_factors) != 0L)
  {
    msg <- ngettext(
      length(term_factors),
      "%s has factor %s.",
      "%s has factors %s."
    )
    return(false(msg, .xname, toString(colnames(term_factors))))
  }
  # An intercept alone also disqualifies the model from being empty.
  if(attr(model_terms, "intercept") != 0L)
  {
    return(false(gettext("%s has an intercept."), .xname))
  }
  TRUE
}
#' @rdname is_empty_model
#' @importFrom stats terms
#' @export
is_non_empty_model <- function(x, .xname = get_name_in_parent(x))
{
  # A non-model input (no terms) is neither empty nor non-empty.
  if(!has_terms(x))
  {
    return(
      false(gettext("%s has no terms; is probably not a model."), .xname)
    )
  }
  model_terms <- terms(x)
  lacks_factors <- length(attr(model_terms, "factors")) == 0L
  lacks_intercept <- attr(model_terms, "intercept") == 0L
  # Empty model = no factors AND no intercept; anything else is non-empty.
  if(lacks_factors && lacks_intercept)
  {
    return(false(gettext("%s is an empty model."), .xname))
  }
  TRUE
}
|
/R/is-empty-model.R
|
no_license
|
cran/assertive.models
|
R
| false
| false
| 2,312
|
r
|
#' Is the input the empty model?
#'
#' Checks to see if the input is the empty model.
#'
#' @param x Input to check.
#' @param .xname Not intended to be used directly.
#' @param severity How severe should the consequences of the assertion be?
#' Either \code{"stop"}, \code{"warning"}, \code{"message"}, or \code{"none"}.
#' @return \code{is_[non_]empty_model} returns \code{TRUE} if the input is an
#' [non] empty model. (\code{has_terms} is used to determine that a variable
#' is a model object.) The model is considered empty if there are no
#' factors and no intercept. The \code{assert_*} functions return nothing
#' but throw an error if the corresponding \code{is_*} function returns
#' \code{FALSE}.
#' @seealso \code{\link[stats]{is.empty.model}} and \code{is_empty}.
#' @examples
#' # empty models have no intercept and no factors
#' an_empty_model <- lm(uptake ~ 0, CO2)
#' is_empty_model(an_empty_model)
#'
#' a_model_with_an_intercept <- lm(uptake ~ 1, CO2)
#' a_model_with_factors <- lm(uptake ~ conc * Type, CO2)
#' is_non_empty_model(a_model_with_an_intercept)
#' is_non_empty_model(a_model_with_factors)
#'
#' assertive.base::dont_stop(assert_is_empty_model(a_model_with_factors))
#' @importFrom stats terms
#' @export
is_empty_model <- function(x, .xname = get_name_in_parent(x))
{
  # A non-model input (no terms) can never be the empty model.
  if(!has_terms(x))
  {
    return(
      false(gettext("%s has no terms; is probably not a model."), .xname)
    )
  }
  model_terms <- terms(x)
  term_factors <- attr(model_terms, "factors")
  # Any factor in the terms object means the model is not empty.
  if(length(term_factors) != 0L)
  {
    msg <- ngettext(
      length(term_factors),
      "%s has factor %s.",
      "%s has factors %s."
    )
    return(false(msg, .xname, toString(colnames(term_factors))))
  }
  # An intercept alone also disqualifies the model from being empty.
  if(attr(model_terms, "intercept") != 0L)
  {
    return(false(gettext("%s has an intercept."), .xname))
  }
  TRUE
}
#' @rdname is_empty_model
#' @importFrom stats terms
#' @export
is_non_empty_model <- function(x, .xname = get_name_in_parent(x))
{
  # A non-model input (no terms) is neither empty nor non-empty.
  if(!has_terms(x))
  {
    return(
      false(gettext("%s has no terms; is probably not a model."), .xname)
    )
  }
  model_terms <- terms(x)
  lacks_factors <- length(attr(model_terms, "factors")) == 0L
  lacks_intercept <- attr(model_terms, "intercept") == 0L
  # Empty model = no factors AND no intercept; anything else is non-empty.
  if(lacks_factors && lacks_intercept)
  {
    return(false(gettext("%s is an empty model."), .xname))
  }
  TRUE
}
|
##############################
#HW 3 Christian Gao- Modeling#
##############################
library(h2o)
library(gbm)
library(randomForest)
library(xgboost)
###############
##### GBM #####
###############
###GBM- H2o###
# Start a local H2O cluster using all available cores.
h2o.init(nthreads = -1)
df <- h2o.importFile("data/sentiment_df.csv")
# NOTE(review): ratios c(.1,.05) yields a 10%/5%/85% split; only the first
# two splits are used, so 85% of the data is discarded here (intentional
# for fast iteration, presumably — confirm).
splits <- h2o.splitFrame(
df, ## splitting the H2O frame we read above
ratios = c(.1,.05), ## create splits
seed=1234)
train <- h2o.assign(splits[[1]], "train.hex")
valid <- h2o.assign(splits[[2]], "valid.hex")
#GBM Base Model#
# y-only specification: all other columns are used as predictors.
gbm_base<-h2o.gbm(y = "Sentiment", training_frame = train)
gbm_base
h2o.auc(h2o.performance(gbm_base, newdata = valid))
#
#Increase Trees
gbm_1 <- h2o.gbm(y = "Sentiment", training_frame = train, distribution = "bernoulli",
ntrees = 300, max_depth = 5, learn_rate = 0.1,
nbins = 20, seed = 123)
gbm_1
h2o.auc(h2o.performance(gbm_1, newdata = valid))
#0.7704041
#Increase Learning Rate
gbm_2 <- h2o.gbm(y = "Sentiment", training_frame = train, distribution = "bernoulli",
ntrees = 50, max_depth = 5, learn_rate = 0.2,
nbins = 20, seed = 123)
gbm_2
h2o.auc(h2o.performance(gbm_2, newdata = valid))
#0.7704041
#Increase Depth
gbm_3 <- h2o.gbm(y = "Sentiment", training_frame = train, distribution = "bernoulli",
ntrees = 50, max_depth = 20, learn_rate = 0.1,
nbins = 20, seed = 123)
gbm_3
h2o.auc(h2o.performance(gbm_3, newdata = valid))
#0.752772
#Increase Data with stopping
# New 80/20 split uses most of the data; early stopping on validation AUC.
splits_2 <- h2o.splitFrame(
df, ## splitting the H2O frame we read above
ratios = .8, ## create splits
seed=1234)
train_2 <- h2o.assign(splits_2[[1]], "train.hex")
valid_2 <- h2o.assign(splits_2[[2]], "valid.hex")
system.time({gbm_4 <- h2o.gbm(y = "Sentiment", training_frame = train_2, distribution = "bernoulli",
ntrees = 400, max_depth = 10, learn_rate = 0.02,
nbins = 30, seed = 123,
stopping_rounds=5, stopping_tolerance=0.005,stopping_metric="AUC")})
gbm_4
h2o.auc(h2o.performance(gbm_4, newdata = valid_2))
# ROC-style curve for the best model.
plot(h2o.performance(gbm_4, newdata = valid_2),col = "blue",main = "True Positives vs False Positives GBM")
#0.7833309
###############
#Using XGboost#
###############
library(readr)
library(xgboost)
library(ROCR)
sentiment_df <- read_csv("data/sentiment_df.csv")
set.seed(123)
# 60/40 train/test split by row index.
N <- nrow(sentiment_df)
idx <- sample(seq_len(N), 0.6*N)
d_train <- sentiment_df[idx,]
d_test <- sentiment_df[-idx,]
### Base Case ###
# BUG FIX: the original used `dep_delayed_15min` in the formula and in the
# test labels (copied from an airline-delay example); this data set's
# response column is `Sentiment`, so the original either errored or scored
# against the wrong column.
# One-hot encode all predictors (no intercept column).
X <- Matrix::sparse.model.matrix(Sentiment ~ . - 1, data = sentiment_df)
X_train <- X[idx,]
X_test <- X[-idx,]
# Binary label: 1 for positive sentiment ("Y"), 0 otherwise.
dxgb_train <- xgb.DMatrix(data = X_train, label = ifelse(d_train$Sentiment=='Y',1,0))
system.time({
n_proc <- parallel::detectCores()
md <- xgb.train(data = dxgb_train, nthread = n_proc, objective = "binary:logistic",
nround = 300, max_depth = 20, eta = 0.1)
})
# Score the held-out rows and report test AUC via ROCR.
predictions <- predict(md, newdata = X_test)
sentiment_pred <- prediction(predictions, ifelse(d_test$Sentiment=='Y',1,0))
performance(sentiment_pred, "auc")@y.values[[1]]
|
/Hw3ModelingGBM.R
|
no_license
|
flyingabove/418-hw3
|
R
| false
| false
| 3,116
|
r
|
##############################
#HW 3 Christian Gao- Modeling#
##############################
library(h2o)
library(gbm)
library(randomForest)
library(xgboost)
###############
##### GBM #####
###############
###GBM- H2o###
# Start a local H2O cluster using all available cores.
h2o.init(nthreads = -1)
df <- h2o.importFile("data/sentiment_df.csv")
# NOTE(review): ratios c(.1,.05) yields a 10%/5%/85% split; only the first
# two splits are used, so 85% of the data is discarded here (intentional
# for fast iteration, presumably — confirm).
splits <- h2o.splitFrame(
df, ## splitting the H2O frame we read above
ratios = c(.1,.05), ## create splits
seed=1234)
train <- h2o.assign(splits[[1]], "train.hex")
valid <- h2o.assign(splits[[2]], "valid.hex")
#GBM Base Model#
# y-only specification: all other columns are used as predictors.
gbm_base<-h2o.gbm(y = "Sentiment", training_frame = train)
gbm_base
h2o.auc(h2o.performance(gbm_base, newdata = valid))
#
#Increase Trees
gbm_1 <- h2o.gbm(y = "Sentiment", training_frame = train, distribution = "bernoulli",
ntrees = 300, max_depth = 5, learn_rate = 0.1,
nbins = 20, seed = 123)
gbm_1
h2o.auc(h2o.performance(gbm_1, newdata = valid))
#0.7704041
#Increase Learning Rate
gbm_2 <- h2o.gbm(y = "Sentiment", training_frame = train, distribution = "bernoulli",
ntrees = 50, max_depth = 5, learn_rate = 0.2,
nbins = 20, seed = 123)
gbm_2
h2o.auc(h2o.performance(gbm_2, newdata = valid))
#0.7704041
#Increase Depth
gbm_3 <- h2o.gbm(y = "Sentiment", training_frame = train, distribution = "bernoulli",
ntrees = 50, max_depth = 20, learn_rate = 0.1,
nbins = 20, seed = 123)
gbm_3
h2o.auc(h2o.performance(gbm_3, newdata = valid))
#0.752772
#Increase Data with stopping
# New 80/20 split uses most of the data; early stopping on validation AUC.
splits_2 <- h2o.splitFrame(
df, ## splitting the H2O frame we read above
ratios = .8, ## create splits
seed=1234)
train_2 <- h2o.assign(splits_2[[1]], "train.hex")
valid_2 <- h2o.assign(splits_2[[2]], "valid.hex")
system.time({gbm_4 <- h2o.gbm(y = "Sentiment", training_frame = train_2, distribution = "bernoulli",
ntrees = 400, max_depth = 10, learn_rate = 0.02,
nbins = 30, seed = 123,
stopping_rounds=5, stopping_tolerance=0.005,stopping_metric="AUC")})
gbm_4
h2o.auc(h2o.performance(gbm_4, newdata = valid_2))
# ROC-style curve for the best model.
plot(h2o.performance(gbm_4, newdata = valid_2),col = "blue",main = "True Positives vs False Positives GBM")
#0.7833309
###############
#Using XGboost#
###############
library(readr)
library(xgboost)
library(ROCR)
sentiment_df <- read_csv("data/sentiment_df.csv")
set.seed(123)
# 60/40 train/test split by row index.
N <- nrow(sentiment_df)
idx <- sample(seq_len(N), 0.6*N)
d_train <- sentiment_df[idx,]
d_test <- sentiment_df[-idx,]
### Base Case ###
# BUG FIX: the original used `dep_delayed_15min` in the formula and in the
# test labels (copied from an airline-delay example); this data set's
# response column is `Sentiment`, so the original either errored or scored
# against the wrong column.
# One-hot encode all predictors (no intercept column).
X <- Matrix::sparse.model.matrix(Sentiment ~ . - 1, data = sentiment_df)
X_train <- X[idx,]
X_test <- X[-idx,]
# Binary label: 1 for positive sentiment ("Y"), 0 otherwise.
dxgb_train <- xgb.DMatrix(data = X_train, label = ifelse(d_train$Sentiment=='Y',1,0))
system.time({
n_proc <- parallel::detectCores()
md <- xgb.train(data = dxgb_train, nthread = n_proc, objective = "binary:logistic",
nround = 300, max_depth = 20, eta = 0.1)
})
# Score the held-out rows and report test AUC via ROCR.
predictions <- predict(md, newdata = X_test)
sentiment_pred <- prediction(predictions, ifelse(d_test$Sentiment=='Y',1,0))
performance(sentiment_pred, "auc")@y.values[[1]]
|
#' @author Harald FIedler
#' @title plotFirma
#' @description Wrapper around EventPlot.
#' @details Plots a single firm's cumulative abnormal returns (CAR)
#'   inside that firm's individual confidence bands.
#' @param Firmenanalyse Object produced by analysiereFirmen(); element
#'   [[2]] holds the CAR series and the left/right critical values.
#' @param FirmaISIN ISIN (column name) of the firm to plot.
#' @param DDay Event day; plotting starts at DDay + 1.
plotFirma <- function(Firmenanalyse, FirmaISIN="FI0009010391", DDay){
  # Firmenanalyse is an object created by analysiereFirmen()
  CAR_it <- Firmenanalyse[[2]][[1]]
  critical_values_left_CAR_it <- Firmenanalyse[[2]][[2]]
  critical_values_right_CAR_it <- Firmenanalyse[[2]][[3]]
  # BUG FIX: the CAR series was hard-coded to "FI0009010391", so the
  # FirmaISIN argument only affected the confidence bands, never the
  # plotted series. Use the parameter consistently for all three.
  EventPlot(CAR_it[, FirmaISIN],
            critical_values_left_CAR_it[, FirmaISIN],
            critical_values_right_CAR_it[, FirmaISIN],
            left=DDay+1)
}
|
/MacKinlay/R/plotFirma.R
|
no_license
|
cavorit/MacKinlay
|
R
| false
| false
| 635
|
r
|
#' @author Harald FIedler
#' @title plotFirma
#' @description Wrapper around EventPlot.
#' @details Plots a single firm's cumulative abnormal returns (CAR)
#'   inside that firm's individual confidence bands.
#' @param Firmenanalyse Object produced by analysiereFirmen(); element
#'   [[2]] holds the CAR series and the left/right critical values.
#' @param FirmaISIN ISIN (column name) of the firm to plot.
#' @param DDay Event day; plotting starts at DDay + 1.
plotFirma <- function(Firmenanalyse, FirmaISIN="FI0009010391", DDay){
  # Firmenanalyse is an object created by analysiereFirmen()
  CAR_it <- Firmenanalyse[[2]][[1]]
  critical_values_left_CAR_it <- Firmenanalyse[[2]][[2]]
  critical_values_right_CAR_it <- Firmenanalyse[[2]][[3]]
  # BUG FIX: the CAR series was hard-coded to "FI0009010391", so the
  # FirmaISIN argument only affected the confidence bands, never the
  # plotted series. Use the parameter consistently for all three.
  EventPlot(CAR_it[, FirmaISIN],
            critical_values_left_CAR_it[, FirmaISIN],
            critical_values_right_CAR_it[, FirmaISIN],
            left=DDay+1)
}
|
context("Meta Analysis")
# Build the full option set for the ClassicalMetaAnalysis analysis:
# one continuous moderator (contcor2), two factors, REML estimation,
# and every output table/plot enabled so the snapshot tests below
# cover the whole result container. The exact option values (and the
# seed) pin the reference numbers asserted later.
options <- jaspTools::analysisOptions("ClassicalMetaAnalysis")
options$covariates <- "contcor2"
options$dependent <- "contNormal"
options$factors <- c("facGender", "facExperim")
options$forestPlot <- TRUE
options$funnelPlot <- TRUE
options$funnelPlotAsymmetryTest <- TRUE
options$method <- "Restricted ML"
options$modelTerms <- list(list(components = "contcor2"),
list(components = "facGender"),
list(components = "facExperim"))
options$plotResidualsCovariates <- TRUE
options$plotResidualsDependent <- TRUE
options$plotResidualsPredicted <- TRUE
options$rSquaredChange <- TRUE
options$regressionCoefficientsConfidenceIntervals <- TRUE
options$regressionCoefficientsCovarianceMatrix <- TRUE
options$residualsCasewiseDiagnostics <- TRUE
options$residualsParameters <- TRUE
options$studyLabels <- "contBinom"
options$trimFillPlot <- TRUE
options$wlsWeights <- "debCollin1"
options$regressionCoefficientsEstimates <- TRUE
options$regressionCoefficientsConfidenceIntervalsInterval <- .95
options$test <- "z"
options$modelFit <- TRUE
options$plotResidualsQQ <- TRUE
# Seed before running: the analysis has stochastic components, and the
# pinned tables below assume this exact seed.
set.seed(1)
results <- jaspTools::runAnalysis("ClassicalMetaAnalysis", "debug.csv", options)
test_that("Influence Measures table results match", {
# Casewise influence diagnostics for every study, pinned from a
# reference run (debug.csv, seed 1). The flat numeric vector is
# compared row-wise against the table by expect_equal_tables.
table <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_casewiseTable"]][["data"]]
expect_equal_tables(table,
list(262.732350870067, 0.0400378184752688, 1.07230885151213, 0.199763610175039,
0.0551419254175412, 1.1, 0.826671964662481, 0.714326031980455,
1.02013544871556, 256.178327946014, 0.129653797670661, 0.992489685001572,
-0.363356145031213, 0.0532361232571621, 0.1, -1.53026301584699,
0.693044256062938, 1.17325184481961, 252.104071340083, 0.130107703282504,
0.955799988714369, 0.365882892108392, 0.0448001488250868, 0.2,
1.6848043442563, 0.684994293447751, 1.25741507559644, 259.540993791085,
0.0624768342258867, 1.00380567459012, 0.250944606537475, 0.0345435263374786,
1.2, 1.32453918608363, 0.70174247901759, 1.01909462525274, 263.230540141172,
0.014909247768068, 1.06903418273161, -0.121661900735042, 0.0391217340610604,
1.3, -0.604582056053913, 0.718254191512592, 1.08744711789683,
256.90637912375, 0.0875666622802145, 0.983097211158387, -0.29825995693398,
0.0381522318730287, 1.4, -1.49460948337919, 0.69482469344226,
1.13683800695382, 264.391499077268, 0.00113847072121011, 1.06171002333134,
-0.0336055771280454, 0.0248380518467288, 0.3, -0.209296627571492,
0.720495080400654, 0.906538477607375, 263.185696184134, 0.022832125525009,
1.07131544837111, -0.15070486684049, 0.0466571914708536, 0.4,
-0.681930278940677, 0.716607810291418, 1.00865294840775, 263.503982057869,
0.0150143574590837, 1.0633698594597, 0.122195250475624, 0.0374681406173809,
0.5, 0.62007207281698, 0.717218478915165, 0.957568702046681,
264.303646920765, 0.00491869083920503, 1.08903804851365, -0.0698406163632372,
0.0489589452046736, 0.6, -0.305633358500751, 0.720600532353734,
0.960061694175131, 263.130014054492, 0.0157520342175195, 1.04526955506304,
0.125265510956865, 0.0273714728953213, 0.7, 0.746833568420508,
0.715299483591345, 0.94081355402465, 263.923890851722, 0.00651024016783402,
1.07064000024066, 0.0803106715475914, 0.0329679112274638, 1.5,
0.434799690509734, 0.72050745705593, 1.07800888366839, 263.637416957089,
0.0213816529890195, 1.08568632737589, -0.145790569069402, 0.0556091729001072,
0.8, -0.599783062488506, 0.717713933250214, 0.9598516578749,
264.064019536477, 0.00469439998031469, 1.05768256863823, -0.0682815782236001,
0.0257115224456209, 0.9, -0.420585487425781, 0.719151799042604,
0.913755734694117, 252.112140337978, 0.166554937076435, 0.887092570380186,
-0.415553354189001, 0.0326029174346965, 0.1, -2.25734082389787,
0.668058676936457, 0.956162544177124, 262.019669281644, 0.0492079174138656,
1.08849500599352, -0.221367831739761, 0.0679451840487333, 0.11,
-0.82025156155775, 0.714717406822812, 1.16008969471307, 263.940584701539,
0.00984278052869501, 1.07818925856344, 0.0988032106876967, 0.0422108455857725,
1.6, 0.469565366081506, 0.719767462515528, 1.03066477221473,
262.635998813336, 0.0277884742151193, 1.0540039336839, 0.166447527593902,
0.0394506405195474, 0.12, 0.821753833431341, 0.714111619826483,
0.986166033035772, 259.7462165447, 0.0649675347550098, 1.00973469328686,
-0.255798417913255, 0.0373051108291292, 0.13, -1.29793790855314,
0.702593803717153, 1.01716197271916, 261.892920274869, 0.0276194775159346,
1.03051207870342, 0.166153419014527, 0.0278737446530384, 0.14,
0.981180886648714, 0.711111089152778, 1.02456732152875, 263.410997302875,
0.0163818848063185, 1.04745366368733, -0.127806314244881, 0.0308685802106828,
0.15, -0.716622116361629, 0.71486267251089, 0.780659988310177,
264.486126634818, 9.42765967859871e-05, 1.09693876458671, -0.00966882346617827,
0.0566452486183803, 1.7, -0.0376060978734504, 0.720329555584944,
0.846581708463617, 264.467216113266, 0.000347663681632075, 1.12078961047271,
-0.0185528845994789, 0.0733174843774056, 0.16, -0.0662837042013889,
0.721393604591763, 0.949292948169384, 264.084401539423, 0.00582571672299131,
1.08177342889251, 0.0760064288879191, 0.0434992819082006, 0.17,
0.358511618579171, 0.720327554289507, 0.991719996768945, 249.807617980615,
0.296071798669854, 0.862570603808377, 0.556996539324682, 0.0471343702938604,
0.18, 2.51526893842518, 0.656354535075111, 0.965213760722877,
263.237229906306, 0.0129781716807686, 1.06427327517358, -0.113487732485723,
0.0340885708947719, 0.19, -0.605472911645839, 0.718471396576595,
1.11383590369921, 264.477139925839, 0.000241363832875931, 1.10271858944973,
0.0154535436569728, 0.0579092285640316, 0.2, 0.0620624629963332,
0.721464770583211, 0.955843246357338, 261.592349033842, 0.0383826843075627,
1.03821523152685, -0.195911090440106, 0.0366654104761845, 1.8,
-1.00419180922472, 0.710646361209932, 1.08126598605143, 262.031531803146,
0.0523221803493081, 1.05658703904295, 0.228688920569409, 0.0518091933978843,
1.9, 0.978321396192137, 0.711127420763697, 0.986008111237554,
264.334192024572, 0.00170018192868327, 1.06932394906259, 0.0410260087217109,
0.0279703377923599, 0.21, 0.240971183816203, 0.721627084235482,
1.02915920952368, 260.090898997263, 0.0586951920070977, 1.00372351040421,
-0.243094816746173, 0.0323351912136825, 0.22, -1.32847951049045,
0.702367557275806, 0.941311872170127, 264.473062127682, 3.45601878789042e-05,
1.08736723880078, 0.00582826919393597, 0.0395764013571677, 0.23,
0.0315044745520896, 0.722990864379933, 1.09520218639425, 260.156280370592,
0.0443225530934775, 1.01040059762166, 0.211078164318548, 0.0296249670629047,
1.1, 1.20721091819925, 0.705019002426001, 1.06577540862248,
262.084543331013, 0.03095444230226, 1.03770604219735, 0.175870502580463,
0.0332205524089201, 0.24, 0.948941127884395, 0.711519999060467,
0.970747228146241, 264.462181817776, 0.000306366892144499, 1.07252279981121,
-0.0174049333562665, 0.0297863138786156, 0.25, -0.0986702777455402,
0.721946566469843, 1.00752893980872, 263.199487748789, 0.0178212903684757,
1.06770348297196, 0.133045213981202, 0.0397098808383198, 1.11,
0.653655734739846, 0.71772220224666, 1.09207612142027, 264.414307389591,
0.000710358994776927, 1.0677671844022, 0.0265230345696218, 0.0271103785012906,
1.12, 0.160102482611091, 0.721460471519471, 0.981144883255629,
259.790203136184, 0.0725276301870289, 1.00590558899725, 0.270397742704451,
0.0379613916590061, 1.13, 1.3627774206512, 0.701335856457346,
0.993106748469139, 264.202563630793, 0.00353560503720366, 1.0671530651487,
-0.0592518854324624, 0.0326923949591393, 1.14, -0.324217342937164,
0.71964212751066, 0.892076466536199, 263.301053090431, 0.0117573333163135,
1.05482541084631, -0.10808505531397, 0.0283239550278417, 0.26,
-0.633912819941443, 0.717618997656188, 1.04555961161393, 264.282863064434,
0.00251203339343546, 1.06980942367922, -0.0498848915151949,
0.0299101371547695, 0.27, -0.282739864658242, 0.721184457990021,
1.01000844076643, 259.231442911687, 0.0800389943663721, 0.996362396565014,
-0.284340700289489, 0.0373293369426948, 1.15, -1.4444383827718,
0.698843144833853, 0.977072351937748, 264.211959185435, 0.00357796893460874,
1.07904630904712, -0.0595076833909259, 0.0368126541473998, 0.28,
-0.304298802618047, 0.721607860439429, 1.07014601249758, 262.987216670837,
0.0303266901175037, 1.07177258641339, -0.173788796983328, 0.0511268003882449,
0.29, -0.749147803482661, 0.715388201289679, 0.983653989095089,
263.91501713302, 0.00664985056785539, 1.05194732030687, -0.0813134513981273,
0.0242324715305451, 1.16, -0.51470019483984, 0.718031767923289,
0.881779902966192, 258.216307742723, 0.0850030233094573, 1.00033295743934,
0.293231631247393, 0.0419618315044775, 0.3, 1.39710830452012,
0.698590578221128, 1.08883920788334, 264.446586118049, 0.000620656638867984,
1.09545268540313, -0.024756463092253, 0.0468874546121777, 1.17,
-0.111726660749746, 0.722927124078898, 1.10284462812178, 264.018041777372,
0.00576819964447564, 1.06477300042041, 0.0756412387925055, 0.0297146665445654,
1.18, 0.430969687240779, 0.719889438740922, 0.999570587218623,
249.000285930228, 0.181559824467493, 0.843572355106713, -0.436758829752031,
0.0298260274797383, 1.19, -2.49536437982044, 0.65517932573377,
0.997824926430928, 264.480335012354, 0.000103936465204631, 1.08854696180227,
0.0101171093065668, 0.0422944749736182, 0.31, 0.0481546700990743,
0.722482445252907, 1.04905179991346, 264.408346779589, 0.000951135479999111,
1.05934358954801, -0.0307260165206774, 0.023998454197374, 1.2,
-0.194848700543862, 0.720100769284271, 0.864343492689289, 258.368515845456,
0.0691111885522944, 0.970133273970689, -0.264665895994432, 0.0268100449674393,
1.21, -1.59510609803463, 0.694417443179183, 0.94731200407551,
263.000840449961, 0.0311582839178547, 1.07241011399101, 0.176109573586821,
0.0506342844037787, 0.32, 0.761665531377413, 0.715710235284731,
1.03915875016335, 264.485607271366, 0.000137321650901439, 1.09880850775722,
-0.0116419221733311, 0.0494264825407868, 1.22, -0.0485535247105303,
0.723040168732542, 1.10048112594451, 244.993636623832, 0.435718059993702,
0.793631102358943, 0.68125096457791, 0.0487189327131783, "0.33*",
3.03868188160114, 0.63373502700228, 0.912003267964907, 264.397562424106,
0.00176404366457001, 1.06896398992258, -0.0418402122071319,
0.0319725289485183, 1.23, -0.227584134309606, 0.720339431614264,
0.895359586946968, 263.682169614338, 0.0166991829795047, 1.08327845987416,
0.128810513059293, 0.0519053135497598, 0.34, 0.551303096189933,
0.718199004143768, 0.971586474203684, 264.471276026243, 0.000299985603565483,
1.06995157967555, 0.0172455956482921, 0.0318437760394784, 0.35,
0.0935650194031671, 0.720639681017737, 0.885500312230464, 264.333344669015,
0.00176718059686404, 1.05551047547065, 0.0419102806677818, 0.0238178047538881,
0.36, 0.269151514569511, 0.719114746158142, 0.80504829426005,
264.365978722454, 0.0014877404704249, 1.0635056169653, 0.0384106859130233,
0.0259347060824912, 0.37, 0.233976946388302, 0.720656931083091,
0.931843387372788, 262.950252307175, 0.023293447381001, 1.04413964512889,
0.152427562334511, 0.0314725911045964, 0.38, 0.844636046239098,
0.713788423943177, 0.917615352513552, 262.786058067816, 0.0255309324155376,
1.0452986996255, -0.159585299884791, 0.0332126721762813, 0.39,
-0.860280337238193, 0.713593174038668, 0.954703676749585, 260.848882894022,
0.0747437681320256, 1.03143034511094, -0.273986123720313, 0.0478484920151539,
1.24, -1.22308868517711, 0.705487505287832, 0.968284159650685,
249.37224560888, 0.163009895357697, 0.872694564502329, 0.413323792806685,
0.0333305228438864, 1.25, 2.22332792760769, 0.663345642805233,
1.11138539645608, 264.263285345332, 0.00408183844281455, 1.07300861486857,
0.0636241017047156, 0.0352159607893979, 1.26, 0.330621622453422,
0.720465569605701, 0.966467342682284, 263.547388650994, 0.0162131427195097,
1.06577311255296, -0.126958713925477, 0.0389157120710873, 1.27,
-0.629820389652029, 0.717434383359289, 0.983361653113583, 257.861519568661,
0.100070328526072, 0.964539426610873, 0.31889937208111, 0.0334000114773794,
1.28, 1.72260605012161, 0.690910625674687, 0.93054970941149,
262.885206032293, 0.0226378429791355, 1.0755650462777, 0.14996441923553,
0.0478939862307731, 0.4, 0.669841756057418, 0.717376530507227,
1.11597018939174, 263.9311181682, 0.0118276744209341, 1.12528708660025,
-0.108095096375888, 0.0746214113506653, 0.41, -0.380284802618594,
0.72219617008024, 1.20750228957917, 263.633670771905, 0.00913876368490845,
1.07363859297916, 0.0951248108009196, 0.035898770357793, 0.42,
0.493211057261931, 0.720446513508, 1.15384630290158, 264.152766390337,
0.00528576923429818, 1.07458459499069, -0.0724096968247506,
0.0376987141569674, 1.29, -0.365625887153166, 0.720147675639769,
0.968324830851101, 262.922668106312, 0.0169256038657695, 1.04418733369507,
-0.129855235628153, 0.0274244241123892, 0.43, -0.773723038635124,
0.714989687748589, 0.978363651951187, 264.155309044222, 0.00309215698110062,
1.07354643115722, 0.0553000972711742, 0.0307631485502674, 1.3,
0.31167808646752, 0.721940656629064, 1.11214391584654, 262.277720755017,
0.023475745490142, 1.03261283467631, -0.153121187898894, 0.0264947988940786,
0.44, -0.928234642676591, 0.712091456209771, 0.973600996531128,
253.179693939302, 0.206116599950869, 0.972205811747802, -0.460423506077168,
0.0650966047580989, 0.45, -1.73691425077676, 0.683755107393275,
1.18247846263982, 264.250555889891, 0.00285924531675716, 1.05872482226843,
0.0532835859361663, 0.0250483234782882, 0.46, 0.331467049198659,
0.71962635396489, 0.887331840316315, 264.287096049129, 0.00315253252146909,
1.06675661988464, -0.0559446519575265, 0.0316431285565131, 1.31,
-0.307863825910765, 0.719845627039266, 0.891081194595183, 264.203813246495,
0.00441388307908344, 1.06773329986483, 0.066200617267153, 0.0333447104550212,
0.47, 0.356027469629469, 0.719604325652675, 0.902876328166279,
259.283889896958, 0.0701175534425527, 1.00549028144472, -0.265928515613535,
0.0379914266301231, 0.48, -1.33560926775154, 0.70120531529104,
1.03491409134237, 264.450442631079, 0.000677088098861326, 1.08508617547386,
0.0258967730060505, 0.0428901154205797, 0.49, 0.122363582611671,
0.721380593474931, 0.960432884352045, 262.284905947309, 0.0298763914266522,
1.04418036083254, -0.172704130056014, 0.0357084345169947, 0.5,
-0.897894843186011, 0.712554793380592, 0.977865356709139, 263.326580913546,
0.0459927390561056, 1.13941072614198, 0.213742141023718, 0.0999217667730841,
0.51, 0.64111746871274, 0.717797896650162, 1.06964746833055,
227.862349461133, 0.400100993527829, 0.627364704748626, 0.677764368512727,
0.032970833075749, "0.52*", 3.59886579622404, 0.577355414942708,
1.11025096934081, 264.422983828242, 0.000606774934210689, 1.06532003298717,
-0.0245176416445016, 0.0257088358727732, 1.32, -0.151899096254993,
0.721212844094056, 0.954896418186984, 264.210650343259, 0.00318800194189171,
1.06227140287373, -0.0562364321411354, 0.0264013638569607, 1.33,
-0.34051150352262, 0.720189866667162, 0.951323929225293, 264.146063808404,
0.00792457287408484, 1.0852416102957, 0.0887461379005808, 0.0512159741611121,
1.34, 0.382063155630509, 0.718909377739538, 0.848936406110151,
264.429714720284, 0.00115798749927809, 1.0736205527411, 0.0338743573213564,
0.0330364556565167, 0.53, 0.180835325886534, 0.721274933243793,
0.966456809635037, 264.347049522363, 0.00148782840131322, 1.08788080390753,
-0.0383354650110578, 0.0403447687234004, 1.35, -0.188712314819792,
0.722899509307816, 1.12945313877843, 262.239015324698, 0.0405667829919693,
1.05896686332706, -0.201218634823186, 0.0483763262407339, 1.36,
-0.892662879169897, 0.71279328759091, 1.0057858458384, 264.339042830507,
0.00249547211662034, 1.07669071647814, -0.0497100437361629,
0.0349748727487329, 0.54, -0.258917851997594, 0.721522866081739,
1.02632841888735, 258.98128252755, 0.114434455174354, 1.0305961710837,
-0.339824991804473, 0.0613682115506481, 0.55, -1.32898304047584,
0.70121109120504, 1.10538894706546, 264.289548545314, 0.00299269543653001,
1.09195122599515, -0.0543447470555744, 0.0426950629004374, 1.37,
-0.255182034524808, 0.723280164168046, 1.1976778549948, 263.942262202072,
0.0259131692586968, 1.13258498678982, 0.160490129135178, 0.0925399977507914,
1.38, 0.501882152795277, 0.718380871772597, 0.891464958254635,
264.483134323259, 0.000223067552319245, 1.07748641736248, -0.014882056274052,
0.0407250818906758, 1.39, -0.0699815779908251, 0.720003379348952,
0.821984466864253, 264.477358812021, 0.000102824886037142, 1.06658971138016,
0.0100839170365109, 0.0284390778273268, 1.4, 0.059315383267866,
0.720746474038474, 0.892556075718011, 264.221965135642, 0.00399736923362332,
1.07389749687345, 0.062945400623009, 0.0350651227776714, 0.56,
0.328782282406387, 0.72074827307326, 0.996678038437571, 264.461935329477,
0.000519052043396647, 1.10766878855957, 0.0226463298088812,
0.0584995019584291, 1.41, 0.0907130585447701, 0.722588438082422,
1.06664745485891, 264.485958211608, 1.18149369745156e-05, 1.06470799045691,
-0.00340463293606924, 0.0285334915283407, 0.57, -0.0174794156711009,
0.720209798234173, 0.840484566567, 260.887242493081, 0.0458911671569587,
1.03696075095317, -0.214367307801245, 0.0404687705383334, 1.42,
-1.04339288337071, 0.70918053539942, 1.08865793243946, 262.431804838242,
0.0283445172487908, 1.04522443621758, 0.168194264235212, 0.0354266658559477,
0.58, 0.878119135080372, 0.712921459645946, 0.964495696169567))
})
test_that("Coefficients table results match", {
# Meta-regression coefficient table (estimate, CI bounds, SE, z, p) for
# the intercept, contcor2, facGender (m) and facExperim (experimental)
# terms, pinned from the reference run above.
table <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_coeffTable"]][["data"]]
expect_equal_tables(table,
list(-0.442978799963177, -0.796105934144409, "intercept", 0.0139453388413607,
0.180170215078078, -0.0898516657819454, -2.4586683196843, 0.0627001759624519,
-0.150376029750759, "contcor2", 0.564113469510082, 0.108714347030799,
0.275776381675662, 0.576742423377559, 0.488477502169315, 0.0606088917506714,
"facGender (m)", 0.0252472258519366, 0.218304321878368, 0.916346112587959,
2.23759886183782, 0.011079948618749, -0.417996968163601, "facExperim (experimental)",
0.95963496153271, 0.218920816042528, 0.440156865401099, 0.0506116723801931
))
})
test_that("Parameter Covariances table results match", {
# Variance-covariance matrix of the regression coefficients, pinned
# from the reference run above.
table <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_covMatTable"]][["data"]]
expect_equal_tables(table,
list(-0.00359216662392862, -0.0207831006432576, -0.0204871467233023,
0.0324613064012809, "intercept", 0.011818809250333, 0.00308945286134289,
0.00252843577029779, -0.00359216662392862, "contcor2", 0.00252843577029779,
-0.00715927260823057, 0.0476567769507742, -0.0204871467233023,
"facGender (m)", 0.00308945286134289, 0.0479263236967265, -0.00715927260823057,
-0.0207831006432576, "facExperim (experimental)"))
})
test_that("File Drawer Analysis table results match", {
# Rosenthal fail-safe N (alpha, N, method, p), pinned from the
# reference run above.
table <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_failSafeTable"]][["data"]]
expect_equal_tables(table,
list(0.05, 208, "Rosenthal", 0.00196593842759479))
})
test_that("Fixed and Random Effects table results match", {
# Omnibus coefficient test and residual-heterogeneity Q test
# (df, label, p, statistic), pinned from the reference run above.
table <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_fixRandTable"]][["data"]]
expect_equal_tables(table,
list(3, "Omnibus test of Model Coefficients", 0.154998519713527, 5.24067551299979,
96, "Test of Residual Heterogeneity", 1.11540464463198e-17,
264.486146695969))
})
test_that("Diagnostic Plots matches", {
# Look up the stored figure by name and compare it to the reference
# "diagnostic-plots" snapshot.
figureName <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_plots"]][["collection"]][["modelContainer_plots_diagnosticPlot"]][["data"]]
expect_equal_plots(results[["state"]][["figures"]][[figureName]][["obj"]], "diagnostic-plots")
})
test_that("Forest plot matches", {
# Look up the stored figure by name and compare it to the reference
# "forest-plot" snapshot.
figureName <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_plots"]][["collection"]][["modelContainer_plots_forest"]][["data"]]
expect_equal_plots(results[["state"]][["figures"]][[figureName]][["obj"]], "forest-plot")
})
test_that("Funnel Plot matches", {
# Look up the stored figure by name and compare it to the reference
# "funnel-plot" snapshot.
figureName <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_plots"]][["collection"]][["modelContainer_plots_funnel"]][["data"]]
expect_equal_plots(results[["state"]][["figures"]][[figureName]][["obj"]], "funnel-plot")
})
test_that("Profile plot matches", {
# Look up the stored figure by name and compare it to the reference
# "profile" snapshot.
figureName <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_plots"]][["collection"]][["modelContainer_plots_profile"]][["data"]]
expect_equal_plots(results[["state"]][["figures"]][[figureName]][["obj"]], "profile")
})
test_that("Trim-fill Analysis plot matches", {
# Look up the stored figure by name and compare it to the reference
# "trim-fill-analysis" snapshot.
figureName <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_plots"]][["collection"]][["modelContainer_plots_trimFill"]][["data"]]
expect_equal_plots(results[["state"]][["figures"]][[figureName]][["obj"]], "trim-fill-analysis")
})
test_that("Rank correlation test for Funnel plot asymmetry table results match", {
# Kendall rank test for funnel-plot asymmetry (tau, label, p), pinned
# from the reference run above.
table <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_rankTestTable"]][["data"]]
expect_equal_tables(table,
list(0.00686868686868687, "Rank test", 0.921921630071705))
})
test_that("Regression test for Funnel plot asymmetry (\"Egger's test\") table results match", {
# Egger's regression test (predictor, p, statistic), pinned from the
# reference run above.
table <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_regTestTable"]][["data"]]
expect_equal_tables(table,
list("sei", 0.99470394721492, 0.00663766656825617))
})
test_that("Residual Heterogeneity Estimates table results match", {
# tau/tau^2, I^2 (%), and H^2 estimates with CI bounds; the
# "<unicode>" placeholders are how the snapshot records Greek/superscript
# labels and must be matched verbatim.
table <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_residualTable"]][["data"]]
expect_equal_tables(table,
list(0.71060416156282, 0.429926531732936, "<unicode><unicode><unicode><unicode>",
1.0688514723806, 0.842973405015141, 0.655687831008733, "<unicode><unicode>",
1.03385273244336, 62.9258791193162, 50.663389411549, "I<unicode><unicode> (%)",
71.8546415573073, 2.69729929191932, 2.02689237884956, "H<unicode><unicode>",
3.55298370790381))
})
test_that("Analysis handles errors", {
# Each case mutates the option set in sequence (matching the original
# statement order) and expects a validation error from the analysis.
errorCases <- list(
list(dependent = "debInf", wlsWeights = "contGamma", label = "Inf dependent check"),
list(dependent = "contNormal", wlsWeights = "debInf", label = "Inf covariate check"),
list(dependent = "debSame", wlsWeights = "contGamma", label = "No variance dependent check"),
list(dependent = "contNormal", wlsWeights = "debSame", label = "No variance covariate check"),
list(dependent = "contGamma", wlsWeights = "contcor1", label = "Negative wlsWeights check")
)
options <- jaspTools::analysisOptions("ClassicalMetaAnalysis")
for (errorCase in errorCases) {
options$dependent <- errorCase$dependent
options$wlsWeights <- errorCase$wlsWeights
results <- jaspTools::runAnalysis("ClassicalMetaAnalysis", "test.csv", options)
expect_identical(results[["status"]], "validationError", label = errorCase$label)
}
})
#model interaction tests
# Build an analysis with two covariates, two factors, and every interaction term
# up to the 4-way term, then run it once (seeded) so all following
# "- model interactions" tests can read from the shared `results` object.
options <- jaspTools::analysisOptions("ClassicalMetaAnalysis")
options$covariates <- c("contcor1", "contcor2")
options$dependent <- "contNormal"
options$factors <- c("facGender", "facExperim")
options$forestPlot <- TRUE
options$funnelPlot <- TRUE
options$funnelPlotAsymmetryTest <- TRUE
options$method <- "Restricted ML"
# Full factorial model: all main effects plus every 2-, 3-, and 4-way interaction.
options$modelTerms <- list(list(components = "contcor1"),
list(components = "contcor2"),
list(components = "facGender"),
list(components = "facExperim"),
list(components = c("contcor1", "contcor2")),
list(components = c("contcor1", "facGender")),
list(components = c("contcor1", "facExperim")),
list(components = c("contcor2", "facGender")),
list(components = c("contcor2", "facExperim")),
list(components = c("facGender", "facExperim")),
list(components = c("contcor1", "contcor2", "facGender")),
list(components = c("contcor1", "contcor2", "facExperim")),
list(components = c("contcor1", "facGender", "facExperim")),
list(components = c("contcor2", "facGender", "facExperim")),
list(components = c("contcor1", "contcor2", "facGender", "facExperim")))
options$plotResidualsCovariates <- TRUE
options$plotResidualsDependent <- TRUE
options$plotResidualsPredicted <- TRUE
options$rSquaredChange <- TRUE
options$regressionCoefficientsConfidenceIntervals <- TRUE
options$regressionCoefficientsCovarianceMatrix <- TRUE
options$residualsCasewiseDiagnostics <- TRUE
options$studyLabels <- "contBinom"
options$trimFillPlot <- TRUE
options$wlsWeights <- "contGamma"
options$regressionCoefficientsEstimates <- TRUE
options$regressionCoefficientsConfidenceIntervalsInterval <- .95
options$test <- "z"
options$modelFit <- TRUE
options$plotResidualsQQ <- TRUE
options$residualsParameters <- TRUE
# Seed before running so any randomized internals are reproducible across runs.
set.seed(1)
results <- jaspTools::runAnalysis("ClassicalMetaAnalysis", "debug.csv", options)
# Snapshot test: casewise influence diagnostics for the interaction model.
# The flat list interleaves all diagnostic columns row by row; starred study
# labels (e.g. "0.8*") mark influential cases as flagged by metafor. These
# values were generated from the seeded run above — regenerate via jaspTools
# if the analysis changes; do not hand-edit.
test_that("Influence Measures table results match - model interactions", {
table <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_casewiseTable"]][["data"]]
expect_equal_tables(table,
list(96.3371703556965, 0.456496281316976, 1.27649506279766, -0.679623585618869,
0.341081015672735, 1.1, -0.932891346420732, 0.530801439885553,
1.76415541098568, 98.8869537626651, 0.214460469386254, 1.35263522432983,
-0.462926817652488, 0.23958627443837, 0.1, -0.824663140989147,
0.5472761775865, 0.616549638005037, 98.7836293012193, 0.155565567986494,
1.32229378356002, 0.39322940594554, 0.146463806768648, 0.2,
0.937026335276387, 0.55496517291785, 1.05589972876348, 97.0944573721634,
0.47036090850364, 1.17995854425348, 0.687099824296088, 0.242902270135589,
1.2, 1.21055566596263, 0.535756620957678, 0.879081843682491,
99.1037587864346, 0.0384890582744572, 1.45753761691511, -0.195822541019073,
0.26799274622884, 1.3, -0.328309238028867, 0.550298193961395,
1.04618666564716, 99.0600932289584, 0.0281950438082037, 1.07607572896303,
-0.167898552504149, 0.0602583977501949, 1.4, -0.663789319470744,
0.545868926535264, 0.304649254940967, 99.6340850335343, 0.000332296511019668,
1.13490611446419, -0.0151894837403314, 0.0291409526101749, 0.3,
-0.0795160239759953, 0.552954450984663, 0.706176227824763, 96.0676246521706,
1.17370837636842, 1.59630523429472, -1.0944638594596, 0.466019258615557,
0.4, -1.16564848306904, 0.531383730902773, 3.05807895984599,
93.2286892187441, 0.573328603156085, 1.35012459102312, 0.762254245606347,
0.338135650558741, 0.5, 1.06350403520061, 0.535656951495965,
2.959867508508, 99.5178604370425, 0.0106861413113273, 1.17754308322083,
-0.103314555305367, 0.125694398261395, 0.6, -0.27309444953793,
0.54734600530051, 0.246596041183609, 99.5656476788789, 0.000376930858648799,
1.02157596036324, 0.0193582616299253, 0.00697383464961719, 0.7,
0.234356427630475, 0.546133783378388, 0.124521235886435, 99.2029169040868,
0.0187709703197116, 1.06626147896566, 0.137000209120695, 0.05550260054142,
1.5, 0.565926306351903, 0.545532533930859, 0.267977363041949,
93.1206764059604, 1.12756082326341, 1.56758687351851, -1.07563719041813,
0.494783994065642, "0.8*", -1.07841341580968, 0.525739308899614,
2.75272672808099, 99.4210133508834, 0.0113983912800917, 1.42522705898144,
-0.102908578591365, 0.112785714314819, 0.9, -0.289869345809962,
0.564517470995094, 2.027867800595, 98.5682083618731, 0.0782156209637672,
1.04301541288732, -0.279715326863375, 0.09531022742894, 0.1,
-0.854726675840007, 0.540213601785798, 0.293283297704831, 99.6320747060504,
2.7301314260226e-05, 1.07241375810413, -0.00408545696844653,
0.0378942701641663, 0.11, -0.0217070833702994, 0.547522607176888,
0.177555444554951, 99.5764305970416, 0.000377291643425515, 1.12541320390989,
-0.018408970252163, 0.0519442658855141, 1.6, -0.0903339836460815,
0.55028665216978, 0.490241587976079, 99.2930161352079, 0.0349023072917978,
1.19928024172504, 0.186616240236588, 0.119173039677954, 0.12,
0.507465481758718, 0.549469587486718, 0.558730889179477, 99.0629448765626,
0.0231823435583276, 1.06093884396815, -0.152260918404121, 0.0607645834476172,
0.13, -0.59809650349968, 0.544664429761423, 0.350666598359688,
96.4290438659607, 0.157813441696117, 0.87575501943706, 0.399211433692902,
0.0843934957585663, 0.14, 1.28482463436949, 0.527023370259163,
1.42089217183844, 99.1675666352809, 0.0301524997043968, 1.0787207586954,
-0.173646414487839, 0.074982991687078, 0.15, -0.609723587615874,
0.54477661744951, 0.237620583289403, 99.5054046630201, 0.0575353508633493,
1.87797032610779, -0.239638888106549, 0.435361694233239, 1.7,
-0.273180625888596, 0.549782640953181, 0.51872897763932, 99.3648160106486,
0.00600365797825088, 1.02971149303728, -0.0774817553108188,
0.0350887856127527, 0.16, -0.404857262747201, 0.544426458933545,
0.0773724959165598, 99.6281823087033, 0.00153245562928651, 1.34148720750426,
0.0376002818488803, 0.174592237863466, 0.17, 0.0805879001195639,
0.553391048642001, 0.828910828967494, 90.9854049688813, 1.84607929505174,
0.692576063120245, 1.41016014434955, 0.326118321243338, "0.18*",
2.01549039326494, 0.483800460304176, 2.47627536653868, 99.6064601057906,
8.21473452450623e-05, 1.04376378751542, 0.00896828250344568,
0.0284529816116606, 0.19, 0.0561366069131431, 0.546101364840013,
0.0931525802044637, 99.6232033716969, 8.61723687511721e-05,
1.03307688159099, 0.00921355512581654, 0.0211176630856855, 0.2,
0.0647543248713598, 0.54587422789875, 0.0750449829897274, 99.3336204763962,
0.010441833342158, 1.07378001183462, -0.102109309820828, 0.0380977072784802,
1.8, -0.514139056120247, 0.547611262901876, 0.332331821838211,
99.3650838472059, 0.0150542507763518, 1.0954924088389, 0.122669707561274,
0.0697867134428913, 1.9, 0.449079640234325, 0.546500910494261,
0.236820858009263, 99.1691992071566, 0.0295506382675599, 1.40252880817806,
0.169252029281289, 0.131974424374896, 0.21, 0.438727710096578,
0.561360549393711, 2.07762773309507, 99.4190807387288, 0.00200972309037426,
1.01664402751083, -0.0448268232355744, 0.0115560470788022, 0.22,
-0.416181218913605, 0.545355545571563, 0.127358337416194, 99.628955328974,
3.10961250103544e-05, 1.14179238875679, -0.000174083541188705,
0.0760760994921192, 0.23, 0.000683162611278819, 0.549353910306294,
0.368996044284243, 97.9526231098694, 0.0528303796145737, 1.03042806218711,
0.229940934221611, 0.0491530451406111, 1.1, 1.01029892042986,
0.543280739497788, 1.07682287939815, 97.6069588769302, 1.14165870955828,
2.90390978455085, 1.06054080731773, 0.619264115812464, "0.24*",
0.833523219229026, 0.553677799795685, 3.48929736333834, 99.5382412888392,
0.000751888352613661, 1.44150052269179, 0.00680577412927626,
0.097148981796659, 0.25, 0.0422701215403784, 0.566901721325923,
2.03164512796304, 99.5702842214053, 0.000226186802821931, 1.01324355086194,
0.0150290453702749, 0.00688068781480822, 1.11, 0.184605842242131,
0.54546805548156, 0.080519997727041, 99.6154678514371, 0.000159218669072343,
1.05280125566202, 0.0120854086033989, 0.016460159219516, 1.12,
0.0962278279608115, 0.547820760669214, 0.247143172174384, 97.3624395764905,
0.246769069341781, 1.06990854570643, 0.497399235584502, 0.159716810423891,
1.13, 1.13567541074509, 0.536313820499895, 0.704414958003459,
99.5844606001605, 0.000596158644937112, 1.03053750418902, -0.0243491617798623,
0.0128078199099073, 1.14, -0.214764666977019, 0.546367081796924,
0.125584142308918, 99.6137061390605, 0.00122087735052475, 1.3285440426295,
-0.030424056523754, 0.0950425615740762, 0.26, -0.0656441341463135,
0.560223109068849, 1.33315006499304, 99.5938741790235, 0.00916529487491505,
1.48655149966305, -0.0913572793212016, 0.139438947431113, 0.27,
-0.222938830278564, 0.56542127182227, 1.91000376904465, 98.4679924337193,
0.310243707252932, 1.7824078069519, -0.553165595793847, 0.351239014764223,
1.15, -0.750326686301846, 0.557113577898357, 2.20884421575698,
99.6260316072155, 0.0227333257484807, 1.99881856360405, -0.145624958973444,
0.335092928387113, 0.28, -0.199012432176368, 0.5690934228972,
2.61714045841573, 99.6104176549667, 0.0664069667083704, 1.56299086477304,
-0.257085202756782, 0.266200758245468, 0.29, -0.419255877416523,
0.556098793693554, 0.33343319233723, 99.571307396626, 0.00312879417339476,
1.12560832840723, -0.0553006087573252, 0.0402050354610894, 1.16,
-0.266621211738127, 0.551319894684875, 0.564317534610873, 99.5804050824252,
0.000784011546734139, 1.02949678562434, 0.0279457294729346,
0.0123412214716801, 0.3, 0.247738184840747, 0.546323561060829,
0.105988152586636, 99.5657312750271, 0.000434468448029113, 1.24161618298609,
0.0161849866701029, 0.07860466603684, 1.17, 0.0726056038316265,
0.556071328219657, 1.03214069342237, 99.5157594730634, 0.000938046169224952,
1.03329034804321, 0.0305768954130851, 0.0140216630203405, 1.18,
0.263821452295804, 0.546485541200398, 0.215613316714348, 61.2663424767168,
2.07108967579621, 0.00429837622820491, -1.82359502687641, 0.144266789586428,
"1.19*", -4.25666643238791, 0.200595920203848, 2.85625632021547,
99.3451304550568, 0.127600874599829, 2.46149788893631, 0.350377419125357,
0.479683702359611, 0.31, 0.365208049821577, 0.566453772157831,
3.20117262754815, 99.6332261985828, 0.00332240080553162, 1.53541576191297,
-0.0461572994184263, 0.0974958672139665, 1.2, -0.12037790957634,
0.572299781511991, 2.58036083183749, 98.601263435181, 0.335043058972649,
1.59983025465177, -0.575260740429679, 0.273690112953904, 1.21,
-0.929447783901487, 0.557443050372189, 1.92659360457737, 99.1120521548583,
0.497013352753578, 2.18722634696302, -0.704359414366081, 0.521106846753421,
"0.32*", -0.675415222916651, 0.548773898023907, 0.931821758340883,
99.6124302196893, 0.000991252743101212, 1.09761397512565, -0.0312961827397897,
0.058827339433405, 1.22, -0.126119252928827, 0.547628616038572,
0.246984087994497, 99.6347572447786, 0.0133261292051028, 2.02655578162516,
0.114444209599819, 0.437231266945347, 0.33, 0.128557116644964,
0.555684122393195, 0.627108142745137, 99.450090847031, 0.00406165929652207,
1.08313064810759, -0.0635945892837411, 0.0399341177025338, 1.23,
-0.316959118558204, 0.54816593100777, 0.363737283716105, 99.6227618318863,
0.00022546685059272, 1.03098118516384, 0.0148982055028093, 0.0123378249375784,
0.34, 0.130487562541352, 0.54644168341365, 0.108023507105491,
98.7751707447984, 0.0233634710322405, 1.33110838604415, 0.151793746496233,
0.146222396994535, 0.35, 0.383222346301919, 0.555524737754622,
1.60867398460893, 99.576515646394, 0.00277740549846082, 1.15350559696259,
0.0519371222937254, 0.0581162714457157, 0.36, 0.208109177859316,
0.551785169000919, 0.604194737474366, 99.4467755780838, 0.00217429014735891,
1.0435655944456, 0.0465878930117798, 0.021769297343813, 0.37,
0.319604011503614, 0.546649847287167, 0.286292957964169, 99.501457178244,
0.0196688491391593, 1.38877278238256, 0.139097405992478, 0.169801974960015,
0.38, 0.308358918542158, 0.55661646480645, 0.961021976172292,
99.2675530367414, 0.0550886767629268, 1.23863406309008, -0.234229824919644,
0.125409973169987, 0.39, -0.61713087103065, 0.551559255585927,
0.877392403328742, 98.5688274927965, 0.211969345798226, 1.27324322095087,
-0.460316904339828, 0.205130460495482, 1.24, -0.906166298499748,
0.545934957203253, 0.746182036887624, 98.7184626735135, 0.0186227885976608,
1.01030129180045, 0.136468538313633, 0.0239462116438353, 1.25,
0.868900116498524, 0.543806609165966, 0.264600761540305, 99.6031245851408,
0.184182623199955, 3.75170594046661, -0.422891595202222, 0.666165806714429,
"1.26*", -0.299165269922118, 0.564489288362896, 2.7823767662687,
99.6088353415526, 0.00274556962115147, 1.1117092087688, -0.052241257255033,
0.0660890200324689, 1.27, -0.1953293019361, 0.54804189068826,
0.261396118118749, 96.2738523954663, 1.0702295363524, 1.269017211009,
1.038507991662, 0.302461163622036, 1.28, 1.57986958280957, 0.534897240902579,
1.5801608043901, 99.577628702319, 0.00217589801292, 1.06483016208615,
0.0466156626925793, 0.0459610476766441, 0.4, 0.212759592416038,
0.546250087953023, 0.12315526358838, 99.6227551081177, 0.000118599841272146,
1.03521457150618, -0.0107570877833677, 0.0180991797930619, 0.41,
-0.0805669043929181, 0.546296415869489, 0.0914822540403977,
99.6340978532534, 5.58798661112545e-05, 1.02825406666424, 0.00732448248775069,
0.0129983904459344, 0.42, 0.0602817072921072, 0.54616866824168,
0.0813726569047265, 99.6341187186811, 0.000879859629407126,
1.15217560867823, -0.0284514832789621, 0.0572234014266447, 1.29,
-0.105517057545831, 0.551765966768637, 0.542213394621752, 99.4884554624027,
0.0345765111193272, 1.46051594996944, -0.182638661079046, 0.124512957796426,
0.43, -0.457011040873676, 0.565480738931074, 2.01397393247324,
96.4808338363115, 0.0403747962341611, 1.42409799123437, 0.196827752992612,
0.120502147420149, 1.3, 0.549280568660405, 0.563799107677805,
3.07480319718214, 99.5190477917848, 0.00283444533819313, 1.08533750387021,
-0.0528639822709547, 0.0229031279436326, 0.44, -0.3387394106602,
0.549790562816901, 0.436842429894619, 99.4020671628105, 0.028553521097423,
1.1688035635675, -0.168662860652799, 0.0689105137295757, 0.45,
-0.606964815814015, 0.551902620736052, 0.368926820205178, 99.6224394925908,
0.00169620299111446, 1.14930403794316, 0.040290609322695, 0.0561570021881634,
0.46, 0.158665624420202, 0.551652269916983, 0.537918874608041,
99.5562718444709, 0.00859946632947953, 1.29620680294048, -0.0917201837983387,
0.132492338870959, 1.31, -0.236470803298107, 0.554630086637897,
0.917409636023193, 99.4258231426553, 0.0551837319023138, 1.47153546305942,
0.232752676414493, 0.173672466495623, 0.47, 0.495822161529097,
0.561100605775342, 1.43633130079509, 99.5840714530907, 0.000126631711881769,
1.00454558832528, -0.0112517110705855, 0.00280527149195533,
0.48, -0.213008024940962, 0.545096151972286, 0.0263132970805883,
99.5322969859414, 0.00270699847829301, 1.05036226318412, 0.0519809735824607,
0.028037343198549, 0.49, 0.307299810087722, 0.546654014352046,
0.159977126685082, 99.1430224032566, 0.0243941270159084, 1.06560671522521,
-0.156190478133343, 0.0661916233652922, 0.5, -0.585982382063428,
0.544549221637668, 0.322384477320382, 99.4858259432433, 0.0176309640368555,
1.85820250218614, 0.132058514720384, 0.393304518579293, 0.51,
0.166387811806029, 0.554685770306653, 0.586520403982654, 78.2604881131105,
1.2272330348813, 0.0945186365202983, 1.17223525256392, 0.091318222640909,
0.52, 3.63223016187379, 0.36772976334388, 1.76390402293031,
98.7561644546767, 0.20955122456515, 1.63258819684821, -0.457310210946004,
0.361090324791359, 1.32, -0.6093051346917, 0.548420561438851,
1.12485834713176, 99.3864099059091, 0.0289596469932513, 1.66916443498145,
-0.164394672641393, 0.190009827771149, 1.33, -0.335059578148082,
0.570340948863684, 2.77391452333484, 99.3851483201073, 0.312467997025662,
2.43754376670483, 0.558749564850011, 0.567939047357311, "1.34*",
0.487276731333661, 0.54919514666715, 0.367346184690901, 99.4884399140513,
0.00346540027258657, 1.59246141303086, -0.0520237403161038,
0.188477833943534, 0.53, -0.119807121225751, 0.566382967484281,
2.14476260304108, 99.6260889556802, 4.07387879535877e-06, 1.01509644461308,
0.00187642969731744, 0.00675934204166654, 1.35, 0.0279267254927512,
0.545628315805731, 0.0582084948793067, 96.4827931880599, 0.281259163227303,
0.921411371627934, -0.532239483756629, 0.137807842274185, 1.36,
-1.30848083410423, 0.526339426225538, 0.946292346733219, 99.4909105441175,
0.00955389405271385, 1.18454386801651, -0.0973450939460735,
0.0887315260451843, 0.54, -0.314371854017346, 0.551231971433898,
0.586808053235012, 98.9898760150617, 0.0793888043864291, 1.24037357964499,
-0.281430904456412, 0.131818937883485, 0.55, -0.719614763342714,
0.550998703618177, 0.544186305993934, 99.5355907412666, 0.000836244171536608,
1.94430926571465, -0.0257193799040055, 0.416150984764389, 1.37,
-0.0282210271606256, 0.555419497495948, 0.896098693716998, 99.1947522785183,
0.101003572393597, 1.46332683826974, 0.31769128878179, 0.291669237773938,
1.38, 0.49590296062427, 0.547875112290879, 0.434729782581912,
99.5932992335663, 0.00222123852414899, 1.12416334634063, -0.0470428032565756,
0.0863565954877701, 1.39, -0.153760784650784, 0.547150976176223,
0.200998669805516, 99.4709911091794, 0.0141641891532711, 1.59059376071144,
0.116329451174651, 0.228675316214048, 1.4, 0.219516701382983,
0.561890864585373, 1.67233787175663, 99.6101282028459, 4.89511951388839e-05,
1.01185503718934, 0.00696857811593593, 0.00487016624957594,
0.56, 0.104770077072443, 0.545521670727813, 0.0652302740895396,
96.6531076025142, 0.189541618700232, 1.94610324798301, 0.433241063435495,
0.435891090911215, 1.41, 0.497378899130929, 0.552770756780389,
2.43450663477824, 98.2841315594552, 0.0785785146690848, 1.65278031635436,
-0.276631766710734, 0.261117411159917, 0.57, -0.474535581347138,
0.5616521618466, 2.62984693542172, 99.2035896033499, 0.0275132006462676,
1.08075796463495, -0.165875278301942, 0.0838397589656111, 1.42,
-0.54722102961504, 0.544143615567333, 0.211417609274386, 99.6192068072974,
0.0205765073499802, 1.39070981141705, 0.142141673161286, 0.160393878505634,
0.58, 0.314036681062748, 0.557746691131937, 0.982572824775827))
})
# Snapshot test: regression coefficient estimates, SEs, z/p values and CIs for
# every term of the full-factorial interaction model. Interaction labels embed
# "<unicode>" placeholders where the multiplication sign was captured as such;
# they must match byte-for-byte. Regenerate via jaspTools if the model changes.
test_that("Coefficients table results match - model interactions", {
table <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_coeffTable"]][["data"]]
expect_equal_tables(table,
list(-0.572718566787404, -1.09946121080177, "intercept", 0.0330861402214251,
0.268751184139131, -0.0459759227730375, -2.13103644034889, -0.011344235150078,
-0.642469295738674, "contcor1", 0.971896648131824, 0.322008497509155,
0.619780825438518, -0.0352296142425729, 0.155626762050519, -0.521177801399562,
"contcor2", 0.652219592593599, 0.345314794472984, 0.8324313255006,
0.450680841196032, 0.810936822812212, -0.191857623947587, "facGender (m)",
0.112971369848264, 0.511639219032911, 1.81373126957201, 1.58497783720534,
0.264826014631206, -0.800896697966652, "facExperim (experimental)",
0.626230075708153, 0.543746066944277, 1.33054872722907, 0.487039871606732,
-0.00739141244590683, -0.49584882566078, "contcor1<unicode><unicode><unicode>contcor2",
0.976339429043278, 0.249217544269026, 0.481066000768966, -0.0296584755603239,
0.383525909051226, -0.9186749397377, "contcor1<unicode><unicode><unicode>facGender (m)",
0.56376979016118, 0.664400393770777, 1.68572675784015, 0.577251176620381,
0.275164751760064, -0.673036347314896, "contcor1<unicode><unicode><unicode>facExperim (experimental)",
0.569508895924281, 0.48378495850712, 1.22336585083502, 0.568774921422064,
-0.837947033316145, -2.60129890526181, "contcor2<unicode><unicode><unicode>facGender (m)",
0.351658397211503, 0.899685850432921, 0.92540483862952, -0.931377361234404,
-0.0547440672776936, -1.47364099110235, "contcor2<unicode><unicode><unicode>facExperim (experimental)",
0.939721752166373, 0.723940301364412, 1.36415285654697, -0.0756195879335869,
-0.956967290543106, -2.47783031693304, "facGender (m)<unicode><unicode><unicode>facExperim (experimental)",
0.217478311350627, 0.775964778816294, 0.56389573584683, -1.233261246732,
0.895878849829479, -0.299117432378454, "contcor1<unicode><unicode><unicode>contcor2<unicode><unicode><unicode>facGender (m)",
0.141732777207294, 0.609703181496127, 2.09087513203741, 1.46936882899498,
0.0123442438993629, -1.50100672553294, "contcor1<unicode><unicode><unicode>contcor2<unicode><unicode><unicode>facExperim (experimental)",
0.987244588052306, 0.772132026284057, 1.52569521333167, 0.0159872191272398,
-0.105594581512077, -1.81983877265936, "contcor1<unicode><unicode><unicode>facGender (m)<unicode><unicode><unicode>facExperim (experimental)",
0.903904491260768, 0.874630450960592, 1.60864960963521, -0.12073051126462,
0.441981212201374, -1.83832285577444, "contcor2<unicode><unicode><unicode>facGender (m)<unicode><unicode><unicode>facExperim (experimental)",
0.704026240849918, 1.16344181628299, 2.72228528017719, 0.379891117901739,
-0.591170761891612, -2.65383196042148, "contcor1<unicode><unicode><unicode>contcor2<unicode><unicode><unicode>facGender (m)<unicode><unicode><unicode>facExperim (experimental)",
0.574295124344442, 1.05239749597266, 1.47149043663825, -0.561737142243226
))
})
# Snapshot test: the full parameter covariance matrix for the interaction
# model, flattened row by row with the term label embedded in each row.
# As above, "<unicode>" placeholders stand in for multiplication signs in
# interaction labels and must match exactly. Regenerate via jaspTools.
test_that("Parameter Covariances table results match - model interactions", {
table <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_covMatTable"]][["data"]]
expect_equal_tables(table,
list(0.0135925892343855, -0.030354391324863, 0.0303543913248631, 0.030354391324863,
-0.0303543913248631, -0.0135925892343855, -0.0135925892343855,
0.0135925892343855, 0.00155085613023708, -0.00155085613023707,
-0.00155085613023709, 0.00155085613023705, -0.0722271989761851,
-0.0722271989761851, 0.0722271989761851, 0.0722271989761851,
"intercept", 0.103689472468104, -0.0324857254108197, 0.0324857254108198,
0.0324857254108198, -0.0324857254108199, -0.103689472468104,
-0.103689472468104, 0.103689472468104, -0.0478536338503915,
0.0478536338503915, 0.0478536338503915, -0.0478536338503915,
-0.0135925892343854, -0.0135925892343856, 0.0135925892343855,
0.0135925892343855, "contcor1", -0.0478536338503915, -0.029279621587305,
0.0292796215873049, 0.0292796215873051, -0.0292796215873049,
0.0478536338503915, 0.0478536338503917, -0.0478536338503917,
0.119242307281919, -0.119242307281919, -0.119242307281919, 0.119242307281919,
-0.00155085613023713, -0.00155085613023705, 0.00155085613023709,
0.00155085613023708, "contcor2", -0.0135925892343856, 0.0303543913248631,
-0.0303543913248633, -0.153470866117865, 0.153470866117865,
0.0135925892343856, -0.0534785573193151, 0.0534785573193152,
-0.00155085613023705, 0.00155085613023687, 0.12474135270184,
-0.12474135270184, 0.0722271989761853, 0.261774690452607, -0.261774690452607,
-0.0722271989761851, "facGender (m)", -0.0135925892343854, 0.030354391324863,
-0.216078078653158, -0.0303543913248632, 0.216078078653158,
-0.00962318199687896, 0.0135925892343852, 0.00962318199687922,
-0.00155085613023713, -0.066597094865394, 0.00155085613023769,
0.0665970948653935, 0.29565978531737, 0.0722271989761853, -0.29565978531737,
-0.0722271989761851, "facExperim (experimental)", -0.0324857254108197,
0.0621093843714837, -0.0621093843714838, -0.0621093843714839,
0.0621093843714839, 0.0324857254108197, 0.0324857254108197,
-0.0324857254108197, -0.029279621587305, 0.029279621587305,
0.0292796215873051, -0.0292796215873051, 0.030354391324863,
0.0303543913248631, -0.0303543913248631, -0.030354391324863,
"contcor1<unicode><unicode><unicode>contcor2", -0.103689472468104, 0.0324857254108197,
-0.0324857254108197, -0.0390061038181272, 0.0390061038181271,
0.103689472468104, 0.441427883242763, -0.441427883242764, 0.0478536338503917,
-0.0478536338503913, -0.329079692471366, 0.329079692471366,
0.0135925892343852, -0.0534785573193151, 0.0534785573193155,
-0.0135925892343855, "contcor1<unicode><unicode><unicode>facGender (m)", -0.103689472468104,
0.0324857254108197, -0.086623183012083, -0.0324857254108199,
0.0866231830120831, 0.234047886077736, 0.103689472468104, -0.234047886077736,
0.0478536338503915, -0.141715989713044, -0.0478536338503914,
0.141715989713043, -0.00962318199687896, 0.0135925892343856,
0.00962318199687883, -0.0135925892343855, "contcor1<unicode><unicode><unicode>facExperim (experimental)",
0.0478536338503915, 0.0292796215873051, -0.0292796215873053,
-0.322505604097216, 0.322505604097216, -0.0478536338503914,
-0.329079692471366, 0.329079692471367, -0.119242307281919, 0.119242307281918,
0.809434629469209, -0.809434629469209, 0.00155085613023769,
0.12474135270184, -0.12474135270184, -0.00155085613023709, "contcor2<unicode><unicode><unicode>facGender (m)",
0.0478536338503915, 0.029279621587305, 0.0591596914802318, -0.0292796215873048,
-0.059159691480232, -0.141715989713044, -0.0478536338503913,
0.141715989713043, -0.119242307281919, 0.524089559939596, 0.119242307281918,
-0.524089559939596, -0.066597094865394, 0.00155085613023687,
0.0665970948653942, -0.00155085613023707, "contcor2<unicode><unicode><unicode>facExperim (experimental)",
0.0135925892343855, -0.0303543913248631, 0.216078078653158,
0.153470866117865, -0.428872615422987, 0.00962318199687883,
0.0534785573193155, -0.131087553796561, 0.00155085613023709,
0.0665970948653942, -0.12474135270184, 0.0749568287842243, -0.29565978531737,
-0.261774690452607, 0.60212133796342, 0.0722271989761851, "facGender (m)<unicode><unicode><unicode>facExperim (experimental)",
0.0324857254108198, -0.0621093843714839, 0.0621093843714841,
0.3717379695265, -0.3717379695265, -0.0324857254108199, -0.0390061038181272,
0.0390061038181273, 0.0292796215873051, -0.0292796215873048,
-0.322505604097216, 0.322505604097216, -0.0303543913248632,
-0.153470866117865, 0.153470866117865, 0.030354391324863, "contcor1<unicode><unicode><unicode>contcor2<unicode><unicode><unicode>facGender (m)",
0.0324857254108198, -0.0621093843714838, 0.596187866013523,
0.0621093843714841, -0.596187866013524, -0.086623183012083,
-0.0324857254108197, 0.0866231830120827, 0.0292796215873049,
0.0591596914802318, -0.0292796215873053, -0.0591596914802314,
-0.216078078653158, -0.0303543913248633, 0.216078078653158,
0.0303543913248631, "contcor1<unicode><unicode><unicode>contcor2<unicode><unicode><unicode>facExperim (experimental)",
0.103689472468104, -0.0324857254108197, 0.0866231830120827,
0.0390061038181273, -0.0129567418193662, -0.234047886077736,
-0.441427883242764, 0.764978425747528, -0.0478536338503917,
0.141715989713043, 0.329079692471367, -0.506301910727773, 0.00962318199687922,
0.0534785573193152, -0.131087553796561, 0.0135925892343855,
"contcor1:facGender (m)<unicode><unicode><unicode>facExperim (experimental)", -0.0478536338503915,
-0.0292796215873051, -0.0591596914802314, 0.322505604097216,
-0.219580324628296, 0.141715989713043, 0.329079692471366, -0.506301910727773,
0.119242307281919, -0.524089559939596, -0.809434629469209, 1.35359685987586,
0.0665970948653935, -0.12474135270184, 0.0749568287842243, 0.00155085613023705,
"contcor2:facGender (m)<unicode><unicode><unicode>facExperim (experimental)", -0.0324857254108199,
0.0621093843714839, -0.596187866013524, -0.3717379695265, 1.10754048952954,
0.0866231830120831, 0.0390061038181271, -0.0129567418193662,
-0.0292796215873049, -0.059159691480232, 0.322505604097216,
-0.219580324628296, 0.216078078653158, 0.153470866117865, -0.428872615422987,
-0.0303543913248631, "contcor1<unicode><unicode><unicode>contcor2<unicode><unicode><unicode>facGender (m)<unicode><unicode><unicode>facExperim (experimental)"
))
})
test_that("File Drawer Analysis table results match - model interactions", {
  # Fail-safe N (Rosenthal) row as produced by the seeded reference run.
  failSafe <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_failSafeTable"]][["data"]]
  expected <- list(0.05, 0, "Rosenthal", 0.0813278786958635)
  expect_equal_tables(failSafe, expected)
})
test_that("Fixed and Random Effects table results match - model interactions", {
  # Omnibus coefficient test and residual heterogeneity test, one row each:
  # df, label, p-value, test statistic.
  fixRand <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_fixRandTable"]][["data"]]
  expected <- list(
    15, "Omnibus test of Model Coefficients", 0.216907507488967, 18.9303488095121,
    84, "Test of Residual Heterogeneity", 0.117156071611725, 99.6347772817611
  )
  expect_equal_tables(fixRand, expected)
})
test_that("Diagnostic Plots matches - model interactions", {
  # Resolve the stored figure via the name recorded in the plot collection,
  # then compare it against the saved reference image.
  plotCollection <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_plots"]][["collection"]]
  plotName <- plotCollection[["modelContainer_plots_diagnosticPlot"]][["data"]]
  expect_equal_plots(results[["state"]][["figures"]][[plotName]][["obj"]], "diagnostic-plots-model")
})
test_that("Forest plot matches - model interactions", {
  # Look up the forest-plot figure by its recorded name and compare it
  # against the saved reference image.
  plotCollection <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_plots"]][["collection"]]
  plotName <- plotCollection[["modelContainer_plots_forest"]][["data"]]
  expect_equal_plots(results[["state"]][["figures"]][[plotName]][["obj"]], "forest-plot-model")
})
test_that("Funnel Plot matches - model interactions", {
  # Look up the funnel-plot figure by its recorded name and compare it
  # against the saved reference image.
  plotCollection <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_plots"]][["collection"]]
  plotName <- plotCollection[["modelContainer_plots_funnel"]][["data"]]
  expect_equal_plots(results[["state"]][["figures"]][[plotName]][["obj"]], "funnel-plot-model")
})
test_that("Profile plot matches - model interactions", {
  # Look up the profile-likelihood figure by its recorded name and compare
  # it against the saved reference image.
  plotCollection <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_plots"]][["collection"]]
  plotName <- plotCollection[["modelContainer_plots_profile"]][["data"]]
  expect_equal_plots(results[["state"]][["figures"]][[plotName]][["obj"]], "profile-model")
})
test_that("Trim-fill Analysis plot matches - model interactions", {
  # Look up the trim-and-fill figure by its recorded name and compare it
  # against the saved reference image.
  plotCollection <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_plots"]][["collection"]]
  plotName <- plotCollection[["modelContainer_plots_trimFill"]][["data"]]
  expect_equal_plots(results[["state"]][["figures"]][[plotName]][["obj"]], "trim-fill-analysis-model")
})
test_that("Rank correlation test for Funnel plot asymmetry table results match - model interactions", {
  # Kendall's tau and its p-value from the rank correlation asymmetry test.
  rankTable <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_rankTestTable"]][["data"]]
  expect_equal_tables(rankTable, list(0.0703030303030303, "Rank test", 0.302256516349067))
})
test_that("Regression test for Funnel plot asymmetry (\"Egger's test\") table results match - model interactions", {
  # Egger's regression test row: predictor, p-value, test statistic.
  eggerTable <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_regTestTable"]][["data"]]
  expect_equal_tables(eggerTable, list("sei", 0.794904131826642, -0.259947679937865))
})
# Snapshot test: residual heterogeneity estimates for the interaction model.
# "<unicode>" placeholders again stand in for Greek/superscript glyphs in the
# row labels and must be matched byte-for-byte.
test_that("Residual Heterogeneity Estimates table results match - model interactions", {
table <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_residualTable"]][["data"]]
expect_equal_tables(table,
list(0.544954299586419, 0, "<unicode><unicode><unicode><unicode>",
0.24418480479452, 0.738210200137074, 0, "<unicode><unicode>",
0.494150589187668, 34.1721304558822, 0, "I<unicode><unicode> (%)",
18.8710710587465, 1.51911341947623, 1, "H<unicode><unicode>",
1.23260594346576))
})
# test the diagnostic plot without the Q-Q plot
# Fixed-effects run on the built-in "BCG Vaccine" dataset with plotResidualsQQ
# disabled, so the diagnostic panel is rendered without the Q-Q subplot.
options <- jaspTools::analysisOptions("ClassicalMetaAnalysis")
# .meta marks which option fields refer to dataset columns (jaspTools bookkeeping).
options$.meta <- list(covariates = list(containsColumn = TRUE), dependent = list(
containsColumn = TRUE), factors = list(containsColumn = TRUE),
studyLabels = list(containsColumn = TRUE), wlsWeights = list(
containsColumn = TRUE))
options$dependent <- "ES"
options$method <- "Fixed Effects"
options$plotResidualsDependent <- TRUE
options$plotResidualsQQ <- FALSE
options$regressionCoefficientsEstimates <- FALSE
options$residualsParameters <- FALSE
options$wlsWeights <- "SE"
options$regressionCoefficientsConfidenceIntervalsInterval <- .95
options$test <- "z"
options$modelFit <- TRUE
options$regressionCoefficientsCovarianceMatrix <- TRUE
options$rSquaredChange <- TRUE
options$funnelPlotAsymmetryTest <- FALSE
options$residualsCasewiseDiagnostics <- TRUE
options$plotResidualsCovariates <- FALSE
options$forestPlot <- FALSE
options$plotResidualsPredicted <- FALSE
options$trimFillPlot <- FALSE
# Seed before running so the comparison against the stored plot is reproducible.
set.seed(1)
results <- jaspTools::runAnalysis("ClassicalMetaAnalysis", "BCG Vaccine", options)
test_that("Diagnostic Plots matches without Q-Q plot", {
  # Resolve the diagnostic figure (rendered without the Q-Q subplot) by its
  # recorded name and compare it against the saved reference image.
  plotCollection <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_plots"]][["collection"]]
  plotName <- plotCollection[["modelContainer_plots_diagnosticPlot"]][["data"]]
  expect_equal_plots(results[["state"]][["figures"]][[plotName]][["obj"]], "diagnostic-plots-no-qq")
})
|
/tests/testthat/test-classicalmetaanalysis.R
|
no_license
|
Owain-S/jaspMetaAnalysis
|
R
| false
| false
| 72,157
|
r
|
# Main scenario: Restricted ML meta-regression on debug.csv with one covariate
# and two factors, all output tables and plots enabled.  The `results` object
# produced here is shared by every test_that() block that follows.
context("Meta Analysis")
options <- jaspTools::analysisOptions("ClassicalMetaAnalysis")
options$covariates <- "contcor2"
options$dependent <- "contNormal"
options$factors <- c("facGender", "facExperim")
options$forestPlot <- TRUE
options$funnelPlot <- TRUE
options$funnelPlotAsymmetryTest <- TRUE
options$method <- "Restricted ML"
# Main-effects-only model: contcor2 + facGender + facExperim.
options$modelTerms <- list(list(components = "contcor2"),
  list(components = "facGender"),
  list(components = "facExperim"))
options$plotResidualsCovariates <- TRUE
options$plotResidualsDependent <- TRUE
options$plotResidualsPredicted <- TRUE
options$rSquaredChange <- TRUE
options$regressionCoefficientsConfidenceIntervals <- TRUE
options$regressionCoefficientsCovarianceMatrix <- TRUE
options$residualsCasewiseDiagnostics <- TRUE
options$residualsParameters <- TRUE
options$studyLabels <- "contBinom"
options$trimFillPlot <- TRUE
options$wlsWeights <- "debCollin1"  # standard errors / weights column
options$regressionCoefficientsEstimates <- TRUE
options$regressionCoefficientsConfidenceIntervalsInterval <- .95
options$test <- "z"
options$modelFit <- TRUE
options$plotResidualsQQ <- TRUE
# Seed fixed so the reference tables/plots below reproduce exactly.
set.seed(1)
results <- jaspTools::runAnalysis("ClassicalMetaAnalysis", "debug.csv", options)
# Snapshot test: casewise diagnostics table (enabled via
# options$residualsCasewiseDiagnostics).  Values were recorded from a
# reference run under set.seed(1).  NOTE(review): starred labels such as
# "0.33*" presumably mark cases flagged as influential -- confirm against
# the analysis/metafor output.
test_that("Influence Measures table results match", {
  table <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_casewiseTable"]][["data"]]
  expect_equal_tables(table,
    list(262.732350870067, 0.0400378184752688, 1.07230885151213, 0.199763610175039,
      0.0551419254175412, 1.1, 0.826671964662481, 0.714326031980455,
      1.02013544871556, 256.178327946014, 0.129653797670661, 0.992489685001572,
      -0.363356145031213, 0.0532361232571621, 0.1, -1.53026301584699,
      0.693044256062938, 1.17325184481961, 252.104071340083, 0.130107703282504,
      0.955799988714369, 0.365882892108392, 0.0448001488250868, 0.2,
      1.6848043442563, 0.684994293447751, 1.25741507559644, 259.540993791085,
      0.0624768342258867, 1.00380567459012, 0.250944606537475, 0.0345435263374786,
      1.2, 1.32453918608363, 0.70174247901759, 1.01909462525274, 263.230540141172,
      0.014909247768068, 1.06903418273161, -0.121661900735042, 0.0391217340610604,
      1.3, -0.604582056053913, 0.718254191512592, 1.08744711789683,
      256.90637912375, 0.0875666622802145, 0.983097211158387, -0.29825995693398,
      0.0381522318730287, 1.4, -1.49460948337919, 0.69482469344226,
      1.13683800695382, 264.391499077268, 0.00113847072121011, 1.06171002333134,
      -0.0336055771280454, 0.0248380518467288, 0.3, -0.209296627571492,
      0.720495080400654, 0.906538477607375, 263.185696184134, 0.022832125525009,
      1.07131544837111, -0.15070486684049, 0.0466571914708536, 0.4,
      -0.681930278940677, 0.716607810291418, 1.00865294840775, 263.503982057869,
      0.0150143574590837, 1.0633698594597, 0.122195250475624, 0.0374681406173809,
      0.5, 0.62007207281698, 0.717218478915165, 0.957568702046681,
      264.303646920765, 0.00491869083920503, 1.08903804851365, -0.0698406163632372,
      0.0489589452046736, 0.6, -0.305633358500751, 0.720600532353734,
      0.960061694175131, 263.130014054492, 0.0157520342175195, 1.04526955506304,
      0.125265510956865, 0.0273714728953213, 0.7, 0.746833568420508,
      0.715299483591345, 0.94081355402465, 263.923890851722, 0.00651024016783402,
      1.07064000024066, 0.0803106715475914, 0.0329679112274638, 1.5,
      0.434799690509734, 0.72050745705593, 1.07800888366839, 263.637416957089,
      0.0213816529890195, 1.08568632737589, -0.145790569069402, 0.0556091729001072,
      0.8, -0.599783062488506, 0.717713933250214, 0.9598516578749,
      264.064019536477, 0.00469439998031469, 1.05768256863823, -0.0682815782236001,
      0.0257115224456209, 0.9, -0.420585487425781, 0.719151799042604,
      0.913755734694117, 252.112140337978, 0.166554937076435, 0.887092570380186,
      -0.415553354189001, 0.0326029174346965, 0.1, -2.25734082389787,
      0.668058676936457, 0.956162544177124, 262.019669281644, 0.0492079174138656,
      1.08849500599352, -0.221367831739761, 0.0679451840487333, 0.11,
      -0.82025156155775, 0.714717406822812, 1.16008969471307, 263.940584701539,
      0.00984278052869501, 1.07818925856344, 0.0988032106876967, 0.0422108455857725,
      1.6, 0.469565366081506, 0.719767462515528, 1.03066477221473,
      262.635998813336, 0.0277884742151193, 1.0540039336839, 0.166447527593902,
      0.0394506405195474, 0.12, 0.821753833431341, 0.714111619826483,
      0.986166033035772, 259.7462165447, 0.0649675347550098, 1.00973469328686,
      -0.255798417913255, 0.0373051108291292, 0.13, -1.29793790855314,
      0.702593803717153, 1.01716197271916, 261.892920274869, 0.0276194775159346,
      1.03051207870342, 0.166153419014527, 0.0278737446530384, 0.14,
      0.981180886648714, 0.711111089152778, 1.02456732152875, 263.410997302875,
      0.0163818848063185, 1.04745366368733, -0.127806314244881, 0.0308685802106828,
      0.15, -0.716622116361629, 0.71486267251089, 0.780659988310177,
      264.486126634818, 9.42765967859871e-05, 1.09693876458671, -0.00966882346617827,
      0.0566452486183803, 1.7, -0.0376060978734504, 0.720329555584944,
      0.846581708463617, 264.467216113266, 0.000347663681632075, 1.12078961047271,
      -0.0185528845994789, 0.0733174843774056, 0.16, -0.0662837042013889,
      0.721393604591763, 0.949292948169384, 264.084401539423, 0.00582571672299131,
      1.08177342889251, 0.0760064288879191, 0.0434992819082006, 0.17,
      0.358511618579171, 0.720327554289507, 0.991719996768945, 249.807617980615,
      0.296071798669854, 0.862570603808377, 0.556996539324682, 0.0471343702938604,
      0.18, 2.51526893842518, 0.656354535075111, 0.965213760722877,
      263.237229906306, 0.0129781716807686, 1.06427327517358, -0.113487732485723,
      0.0340885708947719, 0.19, -0.605472911645839, 0.718471396576595,
      1.11383590369921, 264.477139925839, 0.000241363832875931, 1.10271858944973,
      0.0154535436569728, 0.0579092285640316, 0.2, 0.0620624629963332,
      0.721464770583211, 0.955843246357338, 261.592349033842, 0.0383826843075627,
      1.03821523152685, -0.195911090440106, 0.0366654104761845, 1.8,
      -1.00419180922472, 0.710646361209932, 1.08126598605143, 262.031531803146,
      0.0523221803493081, 1.05658703904295, 0.228688920569409, 0.0518091933978843,
      1.9, 0.978321396192137, 0.711127420763697, 0.986008111237554,
      264.334192024572, 0.00170018192868327, 1.06932394906259, 0.0410260087217109,
      0.0279703377923599, 0.21, 0.240971183816203, 0.721627084235482,
      1.02915920952368, 260.090898997263, 0.0586951920070977, 1.00372351040421,
      -0.243094816746173, 0.0323351912136825, 0.22, -1.32847951049045,
      0.702367557275806, 0.941311872170127, 264.473062127682, 3.45601878789042e-05,
      1.08736723880078, 0.00582826919393597, 0.0395764013571677, 0.23,
      0.0315044745520896, 0.722990864379933, 1.09520218639425, 260.156280370592,
      0.0443225530934775, 1.01040059762166, 0.211078164318548, 0.0296249670629047,
      1.1, 1.20721091819925, 0.705019002426001, 1.06577540862248,
      262.084543331013, 0.03095444230226, 1.03770604219735, 0.175870502580463,
      0.0332205524089201, 0.24, 0.948941127884395, 0.711519999060467,
      0.970747228146241, 264.462181817776, 0.000306366892144499, 1.07252279981121,
      -0.0174049333562665, 0.0297863138786156, 0.25, -0.0986702777455402,
      0.721946566469843, 1.00752893980872, 263.199487748789, 0.0178212903684757,
      1.06770348297196, 0.133045213981202, 0.0397098808383198, 1.11,
      0.653655734739846, 0.71772220224666, 1.09207612142027, 264.414307389591,
      0.000710358994776927, 1.0677671844022, 0.0265230345696218, 0.0271103785012906,
      1.12, 0.160102482611091, 0.721460471519471, 0.981144883255629,
      259.790203136184, 0.0725276301870289, 1.00590558899725, 0.270397742704451,
      0.0379613916590061, 1.13, 1.3627774206512, 0.701335856457346,
      0.993106748469139, 264.202563630793, 0.00353560503720366, 1.0671530651487,
      -0.0592518854324624, 0.0326923949591393, 1.14, -0.324217342937164,
      0.71964212751066, 0.892076466536199, 263.301053090431, 0.0117573333163135,
      1.05482541084631, -0.10808505531397, 0.0283239550278417, 0.26,
      -0.633912819941443, 0.717618997656188, 1.04555961161393, 264.282863064434,
      0.00251203339343546, 1.06980942367922, -0.0498848915151949,
      0.0299101371547695, 0.27, -0.282739864658242, 0.721184457990021,
      1.01000844076643, 259.231442911687, 0.0800389943663721, 0.996362396565014,
      -0.284340700289489, 0.0373293369426948, 1.15, -1.4444383827718,
      0.698843144833853, 0.977072351937748, 264.211959185435, 0.00357796893460874,
      1.07904630904712, -0.0595076833909259, 0.0368126541473998, 0.28,
      -0.304298802618047, 0.721607860439429, 1.07014601249758, 262.987216670837,
      0.0303266901175037, 1.07177258641339, -0.173788796983328, 0.0511268003882449,
      0.29, -0.749147803482661, 0.715388201289679, 0.983653989095089,
      263.91501713302, 0.00664985056785539, 1.05194732030687, -0.0813134513981273,
      0.0242324715305451, 1.16, -0.51470019483984, 0.718031767923289,
      0.881779902966192, 258.216307742723, 0.0850030233094573, 1.00033295743934,
      0.293231631247393, 0.0419618315044775, 0.3, 1.39710830452012,
      0.698590578221128, 1.08883920788334, 264.446586118049, 0.000620656638867984,
      1.09545268540313, -0.024756463092253, 0.0468874546121777, 1.17,
      -0.111726660749746, 0.722927124078898, 1.10284462812178, 264.018041777372,
      0.00576819964447564, 1.06477300042041, 0.0756412387925055, 0.0297146665445654,
      1.18, 0.430969687240779, 0.719889438740922, 0.999570587218623,
      249.000285930228, 0.181559824467493, 0.843572355106713, -0.436758829752031,
      0.0298260274797383, 1.19, -2.49536437982044, 0.65517932573377,
      0.997824926430928, 264.480335012354, 0.000103936465204631, 1.08854696180227,
      0.0101171093065668, 0.0422944749736182, 0.31, 0.0481546700990743,
      0.722482445252907, 1.04905179991346, 264.408346779589, 0.000951135479999111,
      1.05934358954801, -0.0307260165206774, 0.023998454197374, 1.2,
      -0.194848700543862, 0.720100769284271, 0.864343492689289, 258.368515845456,
      0.0691111885522944, 0.970133273970689, -0.264665895994432, 0.0268100449674393,
      1.21, -1.59510609803463, 0.694417443179183, 0.94731200407551,
      263.000840449961, 0.0311582839178547, 1.07241011399101, 0.176109573586821,
      0.0506342844037787, 0.32, 0.761665531377413, 0.715710235284731,
      1.03915875016335, 264.485607271366, 0.000137321650901439, 1.09880850775722,
      -0.0116419221733311, 0.0494264825407868, 1.22, -0.0485535247105303,
      0.723040168732542, 1.10048112594451, 244.993636623832, 0.435718059993702,
      0.793631102358943, 0.68125096457791, 0.0487189327131783, "0.33*",
      3.03868188160114, 0.63373502700228, 0.912003267964907, 264.397562424106,
      0.00176404366457001, 1.06896398992258, -0.0418402122071319,
      0.0319725289485183, 1.23, -0.227584134309606, 0.720339431614264,
      0.895359586946968, 263.682169614338, 0.0166991829795047, 1.08327845987416,
      0.128810513059293, 0.0519053135497598, 0.34, 0.551303096189933,
      0.718199004143768, 0.971586474203684, 264.471276026243, 0.000299985603565483,
      1.06995157967555, 0.0172455956482921, 0.0318437760394784, 0.35,
      0.0935650194031671, 0.720639681017737, 0.885500312230464, 264.333344669015,
      0.00176718059686404, 1.05551047547065, 0.0419102806677818, 0.0238178047538881,
      0.36, 0.269151514569511, 0.719114746158142, 0.80504829426005,
      264.365978722454, 0.0014877404704249, 1.0635056169653, 0.0384106859130233,
      0.0259347060824912, 0.37, 0.233976946388302, 0.720656931083091,
      0.931843387372788, 262.950252307175, 0.023293447381001, 1.04413964512889,
      0.152427562334511, 0.0314725911045964, 0.38, 0.844636046239098,
      0.713788423943177, 0.917615352513552, 262.786058067816, 0.0255309324155376,
      1.0452986996255, -0.159585299884791, 0.0332126721762813, 0.39,
      -0.860280337238193, 0.713593174038668, 0.954703676749585, 260.848882894022,
      0.0747437681320256, 1.03143034511094, -0.273986123720313, 0.0478484920151539,
      1.24, -1.22308868517711, 0.705487505287832, 0.968284159650685,
      249.37224560888, 0.163009895357697, 0.872694564502329, 0.413323792806685,
      0.0333305228438864, 1.25, 2.22332792760769, 0.663345642805233,
      1.11138539645608, 264.263285345332, 0.00408183844281455, 1.07300861486857,
      0.0636241017047156, 0.0352159607893979, 1.26, 0.330621622453422,
      0.720465569605701, 0.966467342682284, 263.547388650994, 0.0162131427195097,
      1.06577311255296, -0.126958713925477, 0.0389157120710873, 1.27,
      -0.629820389652029, 0.717434383359289, 0.983361653113583, 257.861519568661,
      0.100070328526072, 0.964539426610873, 0.31889937208111, 0.0334000114773794,
      1.28, 1.72260605012161, 0.690910625674687, 0.93054970941149,
      262.885206032293, 0.0226378429791355, 1.0755650462777, 0.14996441923553,
      0.0478939862307731, 0.4, 0.669841756057418, 0.717376530507227,
      1.11597018939174, 263.9311181682, 0.0118276744209341, 1.12528708660025,
      -0.108095096375888, 0.0746214113506653, 0.41, -0.380284802618594,
      0.72219617008024, 1.20750228957917, 263.633670771905, 0.00913876368490845,
      1.07363859297916, 0.0951248108009196, 0.035898770357793, 0.42,
      0.493211057261931, 0.720446513508, 1.15384630290158, 264.152766390337,
      0.00528576923429818, 1.07458459499069, -0.0724096968247506,
      0.0376987141569674, 1.29, -0.365625887153166, 0.720147675639769,
      0.968324830851101, 262.922668106312, 0.0169256038657695, 1.04418733369507,
      -0.129855235628153, 0.0274244241123892, 0.43, -0.773723038635124,
      0.714989687748589, 0.978363651951187, 264.155309044222, 0.00309215698110062,
      1.07354643115722, 0.0553000972711742, 0.0307631485502674, 1.3,
      0.31167808646752, 0.721940656629064, 1.11214391584654, 262.277720755017,
      0.023475745490142, 1.03261283467631, -0.153121187898894, 0.0264947988940786,
      0.44, -0.928234642676591, 0.712091456209771, 0.973600996531128,
      253.179693939302, 0.206116599950869, 0.972205811747802, -0.460423506077168,
      0.0650966047580989, 0.45, -1.73691425077676, 0.683755107393275,
      1.18247846263982, 264.250555889891, 0.00285924531675716, 1.05872482226843,
      0.0532835859361663, 0.0250483234782882, 0.46, 0.331467049198659,
      0.71962635396489, 0.887331840316315, 264.287096049129, 0.00315253252146909,
      1.06675661988464, -0.0559446519575265, 0.0316431285565131, 1.31,
      -0.307863825910765, 0.719845627039266, 0.891081194595183, 264.203813246495,
      0.00441388307908344, 1.06773329986483, 0.066200617267153, 0.0333447104550212,
      0.47, 0.356027469629469, 0.719604325652675, 0.902876328166279,
      259.283889896958, 0.0701175534425527, 1.00549028144472, -0.265928515613535,
      0.0379914266301231, 0.48, -1.33560926775154, 0.70120531529104,
      1.03491409134237, 264.450442631079, 0.000677088098861326, 1.08508617547386,
      0.0258967730060505, 0.0428901154205797, 0.49, 0.122363582611671,
      0.721380593474931, 0.960432884352045, 262.284905947309, 0.0298763914266522,
      1.04418036083254, -0.172704130056014, 0.0357084345169947, 0.5,
      -0.897894843186011, 0.712554793380592, 0.977865356709139, 263.326580913546,
      0.0459927390561056, 1.13941072614198, 0.213742141023718, 0.0999217667730841,
      0.51, 0.64111746871274, 0.717797896650162, 1.06964746833055,
      227.862349461133, 0.400100993527829, 0.627364704748626, 0.677764368512727,
      0.032970833075749, "0.52*", 3.59886579622404, 0.577355414942708,
      1.11025096934081, 264.422983828242, 0.000606774934210689, 1.06532003298717,
      -0.0245176416445016, 0.0257088358727732, 1.32, -0.151899096254993,
      0.721212844094056, 0.954896418186984, 264.210650343259, 0.00318800194189171,
      1.06227140287373, -0.0562364321411354, 0.0264013638569607, 1.33,
      -0.34051150352262, 0.720189866667162, 0.951323929225293, 264.146063808404,
      0.00792457287408484, 1.0852416102957, 0.0887461379005808, 0.0512159741611121,
      1.34, 0.382063155630509, 0.718909377739538, 0.848936406110151,
      264.429714720284, 0.00115798749927809, 1.0736205527411, 0.0338743573213564,
      0.0330364556565167, 0.53, 0.180835325886534, 0.721274933243793,
      0.966456809635037, 264.347049522363, 0.00148782840131322, 1.08788080390753,
      -0.0383354650110578, 0.0403447687234004, 1.35, -0.188712314819792,
      0.722899509307816, 1.12945313877843, 262.239015324698, 0.0405667829919693,
      1.05896686332706, -0.201218634823186, 0.0483763262407339, 1.36,
      -0.892662879169897, 0.71279328759091, 1.0057858458384, 264.339042830507,
      0.00249547211662034, 1.07669071647814, -0.0497100437361629,
      0.0349748727487329, 0.54, -0.258917851997594, 0.721522866081739,
      1.02632841888735, 258.98128252755, 0.114434455174354, 1.0305961710837,
      -0.339824991804473, 0.0613682115506481, 0.55, -1.32898304047584,
      0.70121109120504, 1.10538894706546, 264.289548545314, 0.00299269543653001,
      1.09195122599515, -0.0543447470555744, 0.0426950629004374, 1.37,
      -0.255182034524808, 0.723280164168046, 1.1976778549948, 263.942262202072,
      0.0259131692586968, 1.13258498678982, 0.160490129135178, 0.0925399977507914,
      1.38, 0.501882152795277, 0.718380871772597, 0.891464958254635,
      264.483134323259, 0.000223067552319245, 1.07748641736248, -0.014882056274052,
      0.0407250818906758, 1.39, -0.0699815779908251, 0.720003379348952,
      0.821984466864253, 264.477358812021, 0.000102824886037142, 1.06658971138016,
      0.0100839170365109, 0.0284390778273268, 1.4, 0.059315383267866,
      0.720746474038474, 0.892556075718011, 264.221965135642, 0.00399736923362332,
      1.07389749687345, 0.062945400623009, 0.0350651227776714, 0.56,
      0.328782282406387, 0.72074827307326, 0.996678038437571, 264.461935329477,
      0.000519052043396647, 1.10766878855957, 0.0226463298088812,
      0.0584995019584291, 1.41, 0.0907130585447701, 0.722588438082422,
      1.06664745485891, 264.485958211608, 1.18149369745156e-05, 1.06470799045691,
      -0.00340463293606924, 0.0285334915283407, 0.57, -0.0174794156711009,
      0.720209798234173, 0.840484566567, 260.887242493081, 0.0458911671569587,
      1.03696075095317, -0.214367307801245, 0.0404687705383334, 1.42,
      -1.04339288337071, 0.70918053539942, 1.08865793243946, 262.431804838242,
      0.0283445172487908, 1.04522443621758, 0.168194264235212, 0.0354266658559477,
      0.58, 0.878119135080372, 0.712921459645946, 0.964495696169567))
})
# Snapshot test: regression coefficient estimates (intercept, covariate, and
# factor contrasts) with z-test statistics and 95% CIs, as configured above.
test_that("Coefficients table results match", {
  table <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_coeffTable"]][["data"]]
  expect_equal_tables(table,
    list(-0.442978799963177, -0.796105934144409, "intercept", 0.0139453388413607,
      0.180170215078078, -0.0898516657819454, -2.4586683196843, 0.0627001759624519,
      -0.150376029750759, "contcor2", 0.564113469510082, 0.108714347030799,
      0.275776381675662, 0.576742423377559, 0.488477502169315, 0.0606088917506714,
      "facGender (m)", 0.0252472258519366, 0.218304321878368, 0.916346112587959,
      2.23759886183782, 0.011079948618749, -0.417996968163601, "facExperim (experimental)",
      0.95963496153271, 0.218920816042528, 0.440156865401099, 0.0506116723801931
    ))
})
# Snapshot test: variance-covariance matrix of the coefficient estimates
# (enabled via options$regressionCoefficientsCovarianceMatrix).
test_that("Parameter Covariances table results match", {
  table <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_covMatTable"]][["data"]]
  expect_equal_tables(table,
    list(-0.00359216662392862, -0.0207831006432576, -0.0204871467233023,
      0.0324613064012809, "intercept", 0.011818809250333, 0.00308945286134289,
      0.00252843577029779, -0.00359216662392862, "contcor2", 0.00252843577029779,
      -0.00715927260823057, 0.0476567769507742, -0.0204871467233023,
      "facGender (m)", 0.00308945286134289, 0.0479263236967265, -0.00715927260823057,
      -0.0207831006432576, "facExperim (experimental)"))
})
test_that("File Drawer Analysis table results match", {
  # Rosenthal fail-safe N snapshot for the fitted model.
  failSafeData <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_failSafeTable"]][["data"]]
  expect_equal_tables(
    failSafeData,
    list(0.05, 208, "Rosenthal", 0.00196593842759479)
  )
})
test_that("Fixed and Random Effects table results match", {
  # Omnibus coefficient test and residual heterogeneity test snapshots.
  fixRandData <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_fixRandTable"]][["data"]]
  expect_equal_tables(
    fixRandData,
    list(3, "Omnibus test of Model Coefficients", 0.154998519713527, 5.24067551299979,
      96, "Test of Residual Heterogeneity", 1.11540464463198e-17,
      264.486146695969)
  )
})
test_that("Diagnostic Plots matches", {
  # Compare the rendered diagnostic plot against the stored reference figure.
  figureKey <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_plots"]][["collection"]][["modelContainer_plots_diagnosticPlot"]][["data"]]
  expect_equal_plots(results[["state"]][["figures"]][[figureKey]][["obj"]], "diagnostic-plots")
})
test_that("Forest plot matches", {
  # Compare the rendered forest plot against the stored reference figure.
  figureKey <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_plots"]][["collection"]][["modelContainer_plots_forest"]][["data"]]
  expect_equal_plots(results[["state"]][["figures"]][[figureKey]][["obj"]], "forest-plot")
})
test_that("Funnel Plot matches", {
  # Compare the rendered funnel plot against the stored reference figure.
  figureKey <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_plots"]][["collection"]][["modelContainer_plots_funnel"]][["data"]]
  expect_equal_plots(results[["state"]][["figures"]][[figureKey]][["obj"]], "funnel-plot")
})
test_that("Profile plot matches", {
  # Compare the rendered profile plot against the stored reference figure.
  figureKey <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_plots"]][["collection"]][["modelContainer_plots_profile"]][["data"]]
  expect_equal_plots(results[["state"]][["figures"]][[figureKey]][["obj"]], "profile")
})
test_that("Trim-fill Analysis plot matches", {
  # Compare the rendered trim-and-fill plot against the stored reference figure.
  figureKey <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_plots"]][["collection"]][["modelContainer_plots_trimFill"]][["data"]]
  expect_equal_plots(results[["state"]][["figures"]][[figureKey]][["obj"]], "trim-fill-analysis")
})
test_that("Rank correlation test for Funnel plot asymmetry table results match", {
  # Funnel-plot asymmetry rank test snapshot.
  rankTestData <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_rankTestTable"]][["data"]]
  expect_equal_tables(
    rankTestData,
    list(0.00686868686868687, "Rank test", 0.921921630071705)
  )
})
test_that("Regression test for Funnel plot asymmetry (\"Egger's test\") table results match", {
  # Egger-style regression test snapshot.
  regTestData <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_regTestTable"]][["data"]]
  expect_equal_tables(
    regTestData,
    list("sei", 0.99470394721492, 0.00663766656825617)
  )
})
# Snapshot test: residual heterogeneity estimates for the main model fit.
# NOTE(review): the "<unicode>" runs in the row labels are mojibake for the
# Greek/superscript labels emitted by the analysis (presumably tau^2, tau,
# I^2 (%), H^2) -- confirm against the rendered jaspTools table.
test_that("Residual Heterogeneity Estimates table results match", {
  table <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_residualTable"]][["data"]]
  expect_equal_tables(table,
    list(0.71060416156282, 0.429926531732936, "<unicode><unicode><unicode><unicode>",
      1.0688514723806, 0.842973405015141, 0.655687831008733, "<unicode><unicode>",
      1.03385273244336, 62.9258791193162, 50.663389411549, "I<unicode><unicode> (%)",
      71.8546415573073, 2.69729929191932, 2.02689237884956, "H<unicode><unicode>",
      3.55298370790381))
})
test_that("Analysis handles errors", {
  # Each dependent/weights combination below must be rejected with a
  # validation error rather than producing a fitted model.
  baseOptions <- jaspTools::analysisOptions("ClassicalMetaAnalysis")
  expectValidationError <- function(dependentVar, weightsVar, checkLabel) {
    # Fresh copy of the base options per case; both varying fields are set
    # explicitly, exactly as each case did originally.
    caseOptions <- baseOptions
    caseOptions$dependent <- dependentVar
    caseOptions$wlsWeights <- weightsVar
    caseResults <- jaspTools::runAnalysis("ClassicalMetaAnalysis", "test.csv", caseOptions)
    expect_identical(caseResults[["status"]], "validationError", label = checkLabel)
  }
  expectValidationError("debInf",     "contGamma", "Inf dependent check")
  expectValidationError("contNormal", "debInf",    "Inf covariate check")
  expectValidationError("debSame",    "contGamma", "No variance dependent check")
  expectValidationError("contNormal", "debSame",   "No variance covariate check")
  expectValidationError("contGamma",  "contcor1",  "Negative wlsWeights check")
})
# Model interaction tests: full factorial model over two covariates and two
# factors, with every optional table and plot switched on.
options <- jaspTools::analysisOptions("ClassicalMetaAnalysis")
# Input variables
options$dependent <- "contNormal"
options$covariates <- c("contcor1", "contcor2")
options$factors <- c("facGender", "facExperim")
options$wlsWeights <- "contGamma"
options$studyLabels <- "contBinom"
# Estimation settings
options$method <- "Restricted ML"
options$test <- "z"
# All main effects plus every two-, three-, and four-way interaction
options$modelTerms <- list(list(components = "contcor1"),
                           list(components = "contcor2"),
                           list(components = "facGender"),
                           list(components = "facExperim"),
                           list(components = c("contcor1", "contcor2")),
                           list(components = c("contcor1", "facGender")),
                           list(components = c("contcor1", "facExperim")),
                           list(components = c("contcor2", "facGender")),
                           list(components = c("contcor2", "facExperim")),
                           list(components = c("facGender", "facExperim")),
                           list(components = c("contcor1", "contcor2", "facGender")),
                           list(components = c("contcor1", "contcor2", "facExperim")),
                           list(components = c("contcor1", "facGender", "facExperim")),
                           list(components = c("contcor2", "facGender", "facExperim")),
                           list(components = c("contcor1", "contcor2", "facGender", "facExperim")))
# Output tables
options$regressionCoefficientsEstimates <- TRUE
options$regressionCoefficientsConfidenceIntervals <- TRUE
options$regressionCoefficientsConfidenceIntervalsInterval <- .95
options$regressionCoefficientsCovarianceMatrix <- TRUE
options$modelFit <- TRUE
options$rSquaredChange <- TRUE
options$residualsCasewiseDiagnostics <- TRUE
options$residualsParameters <- TRUE
options$funnelPlotAsymmetryTest <- TRUE
# Plots
options$forestPlot <- TRUE
options$funnelPlot <- TRUE
options$trimFillPlot <- TRUE
options$plotResidualsDependent <- TRUE
options$plotResidualsCovariates <- TRUE
options$plotResidualsPredicted <- TRUE
options$plotResidualsQQ <- TRUE
set.seed(1)
results <- jaspTools::runAnalysis("ClassicalMetaAnalysis", "debug.csv", options)
test_that("Influence Measures table results match - model interactions", {
# Snapshot test: one row of casewise influence diagnostics per study.
# Labels suffixed with "*" (e.g. "0.8*") mark cases metafor flags as
# influential; the values below are a frozen regression baseline and must
# stay byte-identical.
table <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_casewiseTable"]][["data"]]
expect_equal_tables(table,
list(96.3371703556965, 0.456496281316976, 1.27649506279766, -0.679623585618869,
0.341081015672735, 1.1, -0.932891346420732, 0.530801439885553,
1.76415541098568, 98.8869537626651, 0.214460469386254, 1.35263522432983,
-0.462926817652488, 0.23958627443837, 0.1, -0.824663140989147,
0.5472761775865, 0.616549638005037, 98.7836293012193, 0.155565567986494,
1.32229378356002, 0.39322940594554, 0.146463806768648, 0.2,
0.937026335276387, 0.55496517291785, 1.05589972876348, 97.0944573721634,
0.47036090850364, 1.17995854425348, 0.687099824296088, 0.242902270135589,
1.2, 1.21055566596263, 0.535756620957678, 0.879081843682491,
99.1037587864346, 0.0384890582744572, 1.45753761691511, -0.195822541019073,
0.26799274622884, 1.3, -0.328309238028867, 0.550298193961395,
1.04618666564716, 99.0600932289584, 0.0281950438082037, 1.07607572896303,
-0.167898552504149, 0.0602583977501949, 1.4, -0.663789319470744,
0.545868926535264, 0.304649254940967, 99.6340850335343, 0.000332296511019668,
1.13490611446419, -0.0151894837403314, 0.0291409526101749, 0.3,
-0.0795160239759953, 0.552954450984663, 0.706176227824763, 96.0676246521706,
1.17370837636842, 1.59630523429472, -1.0944638594596, 0.466019258615557,
0.4, -1.16564848306904, 0.531383730902773, 3.05807895984599,
93.2286892187441, 0.573328603156085, 1.35012459102312, 0.762254245606347,
0.338135650558741, 0.5, 1.06350403520061, 0.535656951495965,
2.959867508508, 99.5178604370425, 0.0106861413113273, 1.17754308322083,
-0.103314555305367, 0.125694398261395, 0.6, -0.27309444953793,
0.54734600530051, 0.246596041183609, 99.5656476788789, 0.000376930858648799,
1.02157596036324, 0.0193582616299253, 0.00697383464961719, 0.7,
0.234356427630475, 0.546133783378388, 0.124521235886435, 99.2029169040868,
0.0187709703197116, 1.06626147896566, 0.137000209120695, 0.05550260054142,
1.5, 0.565926306351903, 0.545532533930859, 0.267977363041949,
93.1206764059604, 1.12756082326341, 1.56758687351851, -1.07563719041813,
0.494783994065642, "0.8*", -1.07841341580968, 0.525739308899614,
2.75272672808099, 99.4210133508834, 0.0113983912800917, 1.42522705898144,
-0.102908578591365, 0.112785714314819, 0.9, -0.289869345809962,
0.564517470995094, 2.027867800595, 98.5682083618731, 0.0782156209637672,
1.04301541288732, -0.279715326863375, 0.09531022742894, 0.1,
-0.854726675840007, 0.540213601785798, 0.293283297704831, 99.6320747060504,
2.7301314260226e-05, 1.07241375810413, -0.00408545696844653,
0.0378942701641663, 0.11, -0.0217070833702994, 0.547522607176888,
0.177555444554951, 99.5764305970416, 0.000377291643425515, 1.12541320390989,
-0.018408970252163, 0.0519442658855141, 1.6, -0.0903339836460815,
0.55028665216978, 0.490241587976079, 99.2930161352079, 0.0349023072917978,
1.19928024172504, 0.186616240236588, 0.119173039677954, 0.12,
0.507465481758718, 0.549469587486718, 0.558730889179477, 99.0629448765626,
0.0231823435583276, 1.06093884396815, -0.152260918404121, 0.0607645834476172,
0.13, -0.59809650349968, 0.544664429761423, 0.350666598359688,
96.4290438659607, 0.157813441696117, 0.87575501943706, 0.399211433692902,
0.0843934957585663, 0.14, 1.28482463436949, 0.527023370259163,
1.42089217183844, 99.1675666352809, 0.0301524997043968, 1.0787207586954,
-0.173646414487839, 0.074982991687078, 0.15, -0.609723587615874,
0.54477661744951, 0.237620583289403, 99.5054046630201, 0.0575353508633493,
1.87797032610779, -0.239638888106549, 0.435361694233239, 1.7,
-0.273180625888596, 0.549782640953181, 0.51872897763932, 99.3648160106486,
0.00600365797825088, 1.02971149303728, -0.0774817553108188,
0.0350887856127527, 0.16, -0.404857262747201, 0.544426458933545,
0.0773724959165598, 99.6281823087033, 0.00153245562928651, 1.34148720750426,
0.0376002818488803, 0.174592237863466, 0.17, 0.0805879001195639,
0.553391048642001, 0.828910828967494, 90.9854049688813, 1.84607929505174,
0.692576063120245, 1.41016014434955, 0.326118321243338, "0.18*",
2.01549039326494, 0.483800460304176, 2.47627536653868, 99.6064601057906,
8.21473452450623e-05, 1.04376378751542, 0.00896828250344568,
0.0284529816116606, 0.19, 0.0561366069131431, 0.546101364840013,
0.0931525802044637, 99.6232033716969, 8.61723687511721e-05,
1.03307688159099, 0.00921355512581654, 0.0211176630856855, 0.2,
0.0647543248713598, 0.54587422789875, 0.0750449829897274, 99.3336204763962,
0.010441833342158, 1.07378001183462, -0.102109309820828, 0.0380977072784802,
1.8, -0.514139056120247, 0.547611262901876, 0.332331821838211,
99.3650838472059, 0.0150542507763518, 1.0954924088389, 0.122669707561274,
0.0697867134428913, 1.9, 0.449079640234325, 0.546500910494261,
0.236820858009263, 99.1691992071566, 0.0295506382675599, 1.40252880817806,
0.169252029281289, 0.131974424374896, 0.21, 0.438727710096578,
0.561360549393711, 2.07762773309507, 99.4190807387288, 0.00200972309037426,
1.01664402751083, -0.0448268232355744, 0.0115560470788022, 0.22,
-0.416181218913605, 0.545355545571563, 0.127358337416194, 99.628955328974,
3.10961250103544e-05, 1.14179238875679, -0.000174083541188705,
0.0760760994921192, 0.23, 0.000683162611278819, 0.549353910306294,
0.368996044284243, 97.9526231098694, 0.0528303796145737, 1.03042806218711,
0.229940934221611, 0.0491530451406111, 1.1, 1.01029892042986,
0.543280739497788, 1.07682287939815, 97.6069588769302, 1.14165870955828,
2.90390978455085, 1.06054080731773, 0.619264115812464, "0.24*",
0.833523219229026, 0.553677799795685, 3.48929736333834, 99.5382412888392,
0.000751888352613661, 1.44150052269179, 0.00680577412927626,
0.097148981796659, 0.25, 0.0422701215403784, 0.566901721325923,
2.03164512796304, 99.5702842214053, 0.000226186802821931, 1.01324355086194,
0.0150290453702749, 0.00688068781480822, 1.11, 0.184605842242131,
0.54546805548156, 0.080519997727041, 99.6154678514371, 0.000159218669072343,
1.05280125566202, 0.0120854086033989, 0.016460159219516, 1.12,
0.0962278279608115, 0.547820760669214, 0.247143172174384, 97.3624395764905,
0.246769069341781, 1.06990854570643, 0.497399235584502, 0.159716810423891,
1.13, 1.13567541074509, 0.536313820499895, 0.704414958003459,
99.5844606001605, 0.000596158644937112, 1.03053750418902, -0.0243491617798623,
0.0128078199099073, 1.14, -0.214764666977019, 0.546367081796924,
0.125584142308918, 99.6137061390605, 0.00122087735052475, 1.3285440426295,
-0.030424056523754, 0.0950425615740762, 0.26, -0.0656441341463135,
0.560223109068849, 1.33315006499304, 99.5938741790235, 0.00916529487491505,
1.48655149966305, -0.0913572793212016, 0.139438947431113, 0.27,
-0.222938830278564, 0.56542127182227, 1.91000376904465, 98.4679924337193,
0.310243707252932, 1.7824078069519, -0.553165595793847, 0.351239014764223,
1.15, -0.750326686301846, 0.557113577898357, 2.20884421575698,
99.6260316072155, 0.0227333257484807, 1.99881856360405, -0.145624958973444,
0.335092928387113, 0.28, -0.199012432176368, 0.5690934228972,
2.61714045841573, 99.6104176549667, 0.0664069667083704, 1.56299086477304,
-0.257085202756782, 0.266200758245468, 0.29, -0.419255877416523,
0.556098793693554, 0.33343319233723, 99.571307396626, 0.00312879417339476,
1.12560832840723, -0.0553006087573252, 0.0402050354610894, 1.16,
-0.266621211738127, 0.551319894684875, 0.564317534610873, 99.5804050824252,
0.000784011546734139, 1.02949678562434, 0.0279457294729346,
0.0123412214716801, 0.3, 0.247738184840747, 0.546323561060829,
0.105988152586636, 99.5657312750271, 0.000434468448029113, 1.24161618298609,
0.0161849866701029, 0.07860466603684, 1.17, 0.0726056038316265,
0.556071328219657, 1.03214069342237, 99.5157594730634, 0.000938046169224952,
1.03329034804321, 0.0305768954130851, 0.0140216630203405, 1.18,
0.263821452295804, 0.546485541200398, 0.215613316714348, 61.2663424767168,
2.07108967579621, 0.00429837622820491, -1.82359502687641, 0.144266789586428,
"1.19*", -4.25666643238791, 0.200595920203848, 2.85625632021547,
99.3451304550568, 0.127600874599829, 2.46149788893631, 0.350377419125357,
0.479683702359611, 0.31, 0.365208049821577, 0.566453772157831,
3.20117262754815, 99.6332261985828, 0.00332240080553162, 1.53541576191297,
-0.0461572994184263, 0.0974958672139665, 1.2, -0.12037790957634,
0.572299781511991, 2.58036083183749, 98.601263435181, 0.335043058972649,
1.59983025465177, -0.575260740429679, 0.273690112953904, 1.21,
-0.929447783901487, 0.557443050372189, 1.92659360457737, 99.1120521548583,
0.497013352753578, 2.18722634696302, -0.704359414366081, 0.521106846753421,
"0.32*", -0.675415222916651, 0.548773898023907, 0.931821758340883,
99.6124302196893, 0.000991252743101212, 1.09761397512565, -0.0312961827397897,
0.058827339433405, 1.22, -0.126119252928827, 0.547628616038572,
0.246984087994497, 99.6347572447786, 0.0133261292051028, 2.02655578162516,
0.114444209599819, 0.437231266945347, 0.33, 0.128557116644964,
0.555684122393195, 0.627108142745137, 99.450090847031, 0.00406165929652207,
1.08313064810759, -0.0635945892837411, 0.0399341177025338, 1.23,
-0.316959118558204, 0.54816593100777, 0.363737283716105, 99.6227618318863,
0.00022546685059272, 1.03098118516384, 0.0148982055028093, 0.0123378249375784,
0.34, 0.130487562541352, 0.54644168341365, 0.108023507105491,
98.7751707447984, 0.0233634710322405, 1.33110838604415, 0.151793746496233,
0.146222396994535, 0.35, 0.383222346301919, 0.555524737754622,
1.60867398460893, 99.576515646394, 0.00277740549846082, 1.15350559696259,
0.0519371222937254, 0.0581162714457157, 0.36, 0.208109177859316,
0.551785169000919, 0.604194737474366, 99.4467755780838, 0.00217429014735891,
1.0435655944456, 0.0465878930117798, 0.021769297343813, 0.37,
0.319604011503614, 0.546649847287167, 0.286292957964169, 99.501457178244,
0.0196688491391593, 1.38877278238256, 0.139097405992478, 0.169801974960015,
0.38, 0.308358918542158, 0.55661646480645, 0.961021976172292,
99.2675530367414, 0.0550886767629268, 1.23863406309008, -0.234229824919644,
0.125409973169987, 0.39, -0.61713087103065, 0.551559255585927,
0.877392403328742, 98.5688274927965, 0.211969345798226, 1.27324322095087,
-0.460316904339828, 0.205130460495482, 1.24, -0.906166298499748,
0.545934957203253, 0.746182036887624, 98.7184626735135, 0.0186227885976608,
1.01030129180045, 0.136468538313633, 0.0239462116438353, 1.25,
0.868900116498524, 0.543806609165966, 0.264600761540305, 99.6031245851408,
0.184182623199955, 3.75170594046661, -0.422891595202222, 0.666165806714429,
"1.26*", -0.299165269922118, 0.564489288362896, 2.7823767662687,
99.6088353415526, 0.00274556962115147, 1.1117092087688, -0.052241257255033,
0.0660890200324689, 1.27, -0.1953293019361, 0.54804189068826,
0.261396118118749, 96.2738523954663, 1.0702295363524, 1.269017211009,
1.038507991662, 0.302461163622036, 1.28, 1.57986958280957, 0.534897240902579,
1.5801608043901, 99.577628702319, 0.00217589801292, 1.06483016208615,
0.0466156626925793, 0.0459610476766441, 0.4, 0.212759592416038,
0.546250087953023, 0.12315526358838, 99.6227551081177, 0.000118599841272146,
1.03521457150618, -0.0107570877833677, 0.0180991797930619, 0.41,
-0.0805669043929181, 0.546296415869489, 0.0914822540403977,
99.6340978532534, 5.58798661112545e-05, 1.02825406666424, 0.00732448248775069,
0.0129983904459344, 0.42, 0.0602817072921072, 0.54616866824168,
0.0813726569047265, 99.6341187186811, 0.000879859629407126,
1.15217560867823, -0.0284514832789621, 0.0572234014266447, 1.29,
-0.105517057545831, 0.551765966768637, 0.542213394621752, 99.4884554624027,
0.0345765111193272, 1.46051594996944, -0.182638661079046, 0.124512957796426,
0.43, -0.457011040873676, 0.565480738931074, 2.01397393247324,
96.4808338363115, 0.0403747962341611, 1.42409799123437, 0.196827752992612,
0.120502147420149, 1.3, 0.549280568660405, 0.563799107677805,
3.07480319718214, 99.5190477917848, 0.00283444533819313, 1.08533750387021,
-0.0528639822709547, 0.0229031279436326, 0.44, -0.3387394106602,
0.549790562816901, 0.436842429894619, 99.4020671628105, 0.028553521097423,
1.1688035635675, -0.168662860652799, 0.0689105137295757, 0.45,
-0.606964815814015, 0.551902620736052, 0.368926820205178, 99.6224394925908,
0.00169620299111446, 1.14930403794316, 0.040290609322695, 0.0561570021881634,
0.46, 0.158665624420202, 0.551652269916983, 0.537918874608041,
99.5562718444709, 0.00859946632947953, 1.29620680294048, -0.0917201837983387,
0.132492338870959, 1.31, -0.236470803298107, 0.554630086637897,
0.917409636023193, 99.4258231426553, 0.0551837319023138, 1.47153546305942,
0.232752676414493, 0.173672466495623, 0.47, 0.495822161529097,
0.561100605775342, 1.43633130079509, 99.5840714530907, 0.000126631711881769,
1.00454558832528, -0.0112517110705855, 0.00280527149195533,
0.48, -0.213008024940962, 0.545096151972286, 0.0263132970805883,
99.5322969859414, 0.00270699847829301, 1.05036226318412, 0.0519809735824607,
0.028037343198549, 0.49, 0.307299810087722, 0.546654014352046,
0.159977126685082, 99.1430224032566, 0.0243941270159084, 1.06560671522521,
-0.156190478133343, 0.0661916233652922, 0.5, -0.585982382063428,
0.544549221637668, 0.322384477320382, 99.4858259432433, 0.0176309640368555,
1.85820250218614, 0.132058514720384, 0.393304518579293, 0.51,
0.166387811806029, 0.554685770306653, 0.586520403982654, 78.2604881131105,
1.2272330348813, 0.0945186365202983, 1.17223525256392, 0.091318222640909,
0.52, 3.63223016187379, 0.36772976334388, 1.76390402293031,
98.7561644546767, 0.20955122456515, 1.63258819684821, -0.457310210946004,
0.361090324791359, 1.32, -0.6093051346917, 0.548420561438851,
1.12485834713176, 99.3864099059091, 0.0289596469932513, 1.66916443498145,
-0.164394672641393, 0.190009827771149, 1.33, -0.335059578148082,
0.570340948863684, 2.77391452333484, 99.3851483201073, 0.312467997025662,
2.43754376670483, 0.558749564850011, 0.567939047357311, "1.34*",
0.487276731333661, 0.54919514666715, 0.367346184690901, 99.4884399140513,
0.00346540027258657, 1.59246141303086, -0.0520237403161038,
0.188477833943534, 0.53, -0.119807121225751, 0.566382967484281,
2.14476260304108, 99.6260889556802, 4.07387879535877e-06, 1.01509644461308,
0.00187642969731744, 0.00675934204166654, 1.35, 0.0279267254927512,
0.545628315805731, 0.0582084948793067, 96.4827931880599, 0.281259163227303,
0.921411371627934, -0.532239483756629, 0.137807842274185, 1.36,
-1.30848083410423, 0.526339426225538, 0.946292346733219, 99.4909105441175,
0.00955389405271385, 1.18454386801651, -0.0973450939460735,
0.0887315260451843, 0.54, -0.314371854017346, 0.551231971433898,
0.586808053235012, 98.9898760150617, 0.0793888043864291, 1.24037357964499,
-0.281430904456412, 0.131818937883485, 0.55, -0.719614763342714,
0.550998703618177, 0.544186305993934, 99.5355907412666, 0.000836244171536608,
1.94430926571465, -0.0257193799040055, 0.416150984764389, 1.37,
-0.0282210271606256, 0.555419497495948, 0.896098693716998, 99.1947522785183,
0.101003572393597, 1.46332683826974, 0.31769128878179, 0.291669237773938,
1.38, 0.49590296062427, 0.547875112290879, 0.434729782581912,
99.5932992335663, 0.00222123852414899, 1.12416334634063, -0.0470428032565756,
0.0863565954877701, 1.39, -0.153760784650784, 0.547150976176223,
0.200998669805516, 99.4709911091794, 0.0141641891532711, 1.59059376071144,
0.116329451174651, 0.228675316214048, 1.4, 0.219516701382983,
0.561890864585373, 1.67233787175663, 99.6101282028459, 4.89511951388839e-05,
1.01185503718934, 0.00696857811593593, 0.00487016624957594,
0.56, 0.104770077072443, 0.545521670727813, 0.0652302740895396,
96.6531076025142, 0.189541618700232, 1.94610324798301, 0.433241063435495,
0.435891090911215, 1.41, 0.497378899130929, 0.552770756780389,
2.43450663477824, 98.2841315594552, 0.0785785146690848, 1.65278031635436,
-0.276631766710734, 0.261117411159917, 0.57, -0.474535581347138,
0.5616521618466, 2.62984693542172, 99.2035896033499, 0.0275132006462676,
1.08075796463495, -0.165875278301942, 0.0838397589656111, 1.42,
-0.54722102961504, 0.544143615567333, 0.211417609274386, 99.6192068072974,
0.0205765073499802, 1.39070981141705, 0.142141673161286, 0.160393878505634,
0.58, 0.314036681062748, 0.557746691131937, 0.982572824775827))
})
test_that("Coefficients table results match - model interactions", {
# Snapshot test: regression coefficients for every model term. The
# "<unicode><unicode><unicode>" runs are the snapshot encoding of the
# interaction separator (e.g. a multiplication sign) in term labels and
# must stay byte-identical.
table <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_coeffTable"]][["data"]]
expect_equal_tables(table,
list(-0.572718566787404, -1.09946121080177, "intercept", 0.0330861402214251,
0.268751184139131, -0.0459759227730375, -2.13103644034889, -0.011344235150078,
-0.642469295738674, "contcor1", 0.971896648131824, 0.322008497509155,
0.619780825438518, -0.0352296142425729, 0.155626762050519, -0.521177801399562,
"contcor2", 0.652219592593599, 0.345314794472984, 0.8324313255006,
0.450680841196032, 0.810936822812212, -0.191857623947587, "facGender (m)",
0.112971369848264, 0.511639219032911, 1.81373126957201, 1.58497783720534,
0.264826014631206, -0.800896697966652, "facExperim (experimental)",
0.626230075708153, 0.543746066944277, 1.33054872722907, 0.487039871606732,
-0.00739141244590683, -0.49584882566078, "contcor1<unicode><unicode><unicode>contcor2",
0.976339429043278, 0.249217544269026, 0.481066000768966, -0.0296584755603239,
0.383525909051226, -0.9186749397377, "contcor1<unicode><unicode><unicode>facGender (m)",
0.56376979016118, 0.664400393770777, 1.68572675784015, 0.577251176620381,
0.275164751760064, -0.673036347314896, "contcor1<unicode><unicode><unicode>facExperim (experimental)",
0.569508895924281, 0.48378495850712, 1.22336585083502, 0.568774921422064,
-0.837947033316145, -2.60129890526181, "contcor2<unicode><unicode><unicode>facGender (m)",
0.351658397211503, 0.899685850432921, 0.92540483862952, -0.931377361234404,
-0.0547440672776936, -1.47364099110235, "contcor2<unicode><unicode><unicode>facExperim (experimental)",
0.939721752166373, 0.723940301364412, 1.36415285654697, -0.0756195879335869,
-0.956967290543106, -2.47783031693304, "facGender (m)<unicode><unicode><unicode>facExperim (experimental)",
0.217478311350627, 0.775964778816294, 0.56389573584683, -1.233261246732,
0.895878849829479, -0.299117432378454, "contcor1<unicode><unicode><unicode>contcor2<unicode><unicode><unicode>facGender (m)",
0.141732777207294, 0.609703181496127, 2.09087513203741, 1.46936882899498,
0.0123442438993629, -1.50100672553294, "contcor1<unicode><unicode><unicode>contcor2<unicode><unicode><unicode>facExperim (experimental)",
0.987244588052306, 0.772132026284057, 1.52569521333167, 0.0159872191272398,
-0.105594581512077, -1.81983877265936, "contcor1<unicode><unicode><unicode>facGender (m)<unicode><unicode><unicode>facExperim (experimental)",
0.903904491260768, 0.874630450960592, 1.60864960963521, -0.12073051126462,
0.441981212201374, -1.83832285577444, "contcor2<unicode><unicode><unicode>facGender (m)<unicode><unicode><unicode>facExperim (experimental)",
0.704026240849918, 1.16344181628299, 2.72228528017719, 0.379891117901739,
-0.591170761891612, -2.65383196042148, "contcor1<unicode><unicode><unicode>contcor2<unicode><unicode><unicode>facGender (m)<unicode><unicode><unicode>facExperim (experimental)",
0.574295124344442, 1.05239749597266, 1.47149043663825, -0.561737142243226
))
})
test_that("Parameter Covariances table results match - model interactions", {
# Snapshot test: flattened variance-covariance matrix of the coefficient
# estimates, one run of values per row ending with that row's term label.
# The "<unicode>" runs encode the interaction separator in term labels;
# all values are a frozen regression baseline and must stay byte-identical.
table <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_covMatTable"]][["data"]]
expect_equal_tables(table,
list(0.0135925892343855, -0.030354391324863, 0.0303543913248631, 0.030354391324863,
-0.0303543913248631, -0.0135925892343855, -0.0135925892343855,
0.0135925892343855, 0.00155085613023708, -0.00155085613023707,
-0.00155085613023709, 0.00155085613023705, -0.0722271989761851,
-0.0722271989761851, 0.0722271989761851, 0.0722271989761851,
"intercept", 0.103689472468104, -0.0324857254108197, 0.0324857254108198,
0.0324857254108198, -0.0324857254108199, -0.103689472468104,
-0.103689472468104, 0.103689472468104, -0.0478536338503915,
0.0478536338503915, 0.0478536338503915, -0.0478536338503915,
-0.0135925892343854, -0.0135925892343856, 0.0135925892343855,
0.0135925892343855, "contcor1", -0.0478536338503915, -0.029279621587305,
0.0292796215873049, 0.0292796215873051, -0.0292796215873049,
0.0478536338503915, 0.0478536338503917, -0.0478536338503917,
0.119242307281919, -0.119242307281919, -0.119242307281919, 0.119242307281919,
-0.00155085613023713, -0.00155085613023705, 0.00155085613023709,
0.00155085613023708, "contcor2", -0.0135925892343856, 0.0303543913248631,
-0.0303543913248633, -0.153470866117865, 0.153470866117865,
0.0135925892343856, -0.0534785573193151, 0.0534785573193152,
-0.00155085613023705, 0.00155085613023687, 0.12474135270184,
-0.12474135270184, 0.0722271989761853, 0.261774690452607, -0.261774690452607,
-0.0722271989761851, "facGender (m)", -0.0135925892343854, 0.030354391324863,
-0.216078078653158, -0.0303543913248632, 0.216078078653158,
-0.00962318199687896, 0.0135925892343852, 0.00962318199687922,
-0.00155085613023713, -0.066597094865394, 0.00155085613023769,
0.0665970948653935, 0.29565978531737, 0.0722271989761853, -0.29565978531737,
-0.0722271989761851, "facExperim (experimental)", -0.0324857254108197,
0.0621093843714837, -0.0621093843714838, -0.0621093843714839,
0.0621093843714839, 0.0324857254108197, 0.0324857254108197,
-0.0324857254108197, -0.029279621587305, 0.029279621587305,
0.0292796215873051, -0.0292796215873051, 0.030354391324863,
0.0303543913248631, -0.0303543913248631, -0.030354391324863,
"contcor1<unicode><unicode><unicode>contcor2", -0.103689472468104, 0.0324857254108197,
-0.0324857254108197, -0.0390061038181272, 0.0390061038181271,
0.103689472468104, 0.441427883242763, -0.441427883242764, 0.0478536338503917,
-0.0478536338503913, -0.329079692471366, 0.329079692471366,
0.0135925892343852, -0.0534785573193151, 0.0534785573193155,
-0.0135925892343855, "contcor1<unicode><unicode><unicode>facGender (m)", -0.103689472468104,
0.0324857254108197, -0.086623183012083, -0.0324857254108199,
0.0866231830120831, 0.234047886077736, 0.103689472468104, -0.234047886077736,
0.0478536338503915, -0.141715989713044, -0.0478536338503914,
0.141715989713043, -0.00962318199687896, 0.0135925892343856,
0.00962318199687883, -0.0135925892343855, "contcor1<unicode><unicode><unicode>facExperim (experimental)",
0.0478536338503915, 0.0292796215873051, -0.0292796215873053,
-0.322505604097216, 0.322505604097216, -0.0478536338503914,
-0.329079692471366, 0.329079692471367, -0.119242307281919, 0.119242307281918,
0.809434629469209, -0.809434629469209, 0.00155085613023769,
0.12474135270184, -0.12474135270184, -0.00155085613023709, "contcor2<unicode><unicode><unicode>facGender (m)",
0.0478536338503915, 0.029279621587305, 0.0591596914802318, -0.0292796215873048,
-0.059159691480232, -0.141715989713044, -0.0478536338503913,
0.141715989713043, -0.119242307281919, 0.524089559939596, 0.119242307281918,
-0.524089559939596, -0.066597094865394, 0.00155085613023687,
0.0665970948653942, -0.00155085613023707, "contcor2<unicode><unicode><unicode>facExperim (experimental)",
0.0135925892343855, -0.0303543913248631, 0.216078078653158,
0.153470866117865, -0.428872615422987, 0.00962318199687883,
0.0534785573193155, -0.131087553796561, 0.00155085613023709,
0.0665970948653942, -0.12474135270184, 0.0749568287842243, -0.29565978531737,
-0.261774690452607, 0.60212133796342, 0.0722271989761851, "facGender (m)<unicode><unicode><unicode>facExperim (experimental)",
0.0324857254108198, -0.0621093843714839, 0.0621093843714841,
0.3717379695265, -0.3717379695265, -0.0324857254108199, -0.0390061038181272,
0.0390061038181273, 0.0292796215873051, -0.0292796215873048,
-0.322505604097216, 0.322505604097216, -0.0303543913248632,
-0.153470866117865, 0.153470866117865, 0.030354391324863, "contcor1<unicode><unicode><unicode>contcor2<unicode><unicode><unicode>facGender (m)",
0.0324857254108198, -0.0621093843714838, 0.596187866013523,
0.0621093843714841, -0.596187866013524, -0.086623183012083,
-0.0324857254108197, 0.0866231830120827, 0.0292796215873049,
0.0591596914802318, -0.0292796215873053, -0.0591596914802314,
-0.216078078653158, -0.0303543913248633, 0.216078078653158,
0.0303543913248631, "contcor1<unicode><unicode><unicode>contcor2<unicode><unicode><unicode>facExperim (experimental)",
0.103689472468104, -0.0324857254108197, 0.0866231830120827,
0.0390061038181273, -0.0129567418193662, -0.234047886077736,
-0.441427883242764, 0.764978425747528, -0.0478536338503917,
0.141715989713043, 0.329079692471367, -0.506301910727773, 0.00962318199687922,
0.0534785573193152, -0.131087553796561, 0.0135925892343855,
"contcor1:facGender (m)<unicode><unicode><unicode>facExperim (experimental)", -0.0478536338503915,
-0.0292796215873051, -0.0591596914802314, 0.322505604097216,
-0.219580324628296, 0.141715989713043, 0.329079692471366, -0.506301910727773,
0.119242307281919, -0.524089559939596, -0.809434629469209, 1.35359685987586,
0.0665970948653935, -0.12474135270184, 0.0749568287842243, 0.00155085613023705,
"contcor2:facGender (m)<unicode><unicode><unicode>facExperim (experimental)", -0.0324857254108199,
0.0621093843714839, -0.596187866013524, -0.3717379695265, 1.10754048952954,
0.0866231830120831, 0.0390061038181271, -0.0129567418193662,
-0.0292796215873049, -0.059159691480232, 0.322505604097216,
-0.219580324628296, 0.216078078653158, 0.153470866117865, -0.428872615422987,
-0.0303543913248631, "contcor1<unicode><unicode><unicode>contcor2<unicode><unicode><unicode>facGender (m)<unicode><unicode><unicode>facExperim (experimental)"
))
})
test_that("File Drawer Analysis table results match - model interactions", {
# Fail-safe N (Rosenthal) snapshot for the interaction model.
failSafeTable <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_failSafeTable"]][["data"]]
expect_equal_tables(
  failSafeTable,
  list(0.05, 0, "Rosenthal", 0.0813278786958635)
)
})
test_that("Fixed and Random Effects table results match - model interactions", {
# Omnibus coefficient test and residual-heterogeneity test snapshot.
fixRandTable <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_fixRandTable"]][["data"]]
expect_equal_tables(
  fixRandTable,
  list(15, "Omnibus test of Model Coefficients", 0.216907507488967, 18.9303488095121,
       84, "Test of Residual Heterogeneity", 0.117156071611725, 99.6347772817611)
)
})
test_that("Diagnostic Plots matches - model interactions", {
# Compare the rendered diagnostic plot against its stored reference image.
diagnosticName <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_plots"]][["collection"]][["modelContainer_plots_diagnosticPlot"]][["data"]]
diagnosticPlot <- results[["state"]][["figures"]][[diagnosticName]][["obj"]]
expect_equal_plots(diagnosticPlot, "diagnostic-plots-model")
})
test_that("Forest plot matches - model interactions", {
# Compare the rendered forest plot against its stored reference image.
forestName <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_plots"]][["collection"]][["modelContainer_plots_forest"]][["data"]]
forestPlot <- results[["state"]][["figures"]][[forestName]][["obj"]]
expect_equal_plots(forestPlot, "forest-plot-model")
})
test_that("Funnel Plot matches - model interactions", {
# Compare the rendered funnel plot against its stored reference image.
funnelName <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_plots"]][["collection"]][["modelContainer_plots_funnel"]][["data"]]
funnelPlot <- results[["state"]][["figures"]][[funnelName]][["obj"]]
expect_equal_plots(funnelPlot, "funnel-plot-model")
})
test_that("Profile plot matches - model interactions", {
# Compare the rendered profile plot against its stored reference image.
profileName <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_plots"]][["collection"]][["modelContainer_plots_profile"]][["data"]]
profilePlot <- results[["state"]][["figures"]][[profileName]][["obj"]]
expect_equal_plots(profilePlot, "profile-model")
})
test_that("Trim-fill Analysis plot matches - model interactions", {
# Compare the rendered trim-and-fill plot against its stored reference image.
trimFillName <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_plots"]][["collection"]][["modelContainer_plots_trimFill"]][["data"]]
trimFillPlot <- results[["state"]][["figures"]][[trimFillName]][["obj"]]
expect_equal_plots(trimFillPlot, "trim-fill-analysis-model")
})
test_that("Rank correlation test for Funnel plot asymmetry table results match - model interactions", {
# Begg & Mazumdar rank-correlation snapshot (Kendall's tau, p-value).
rankTestTable <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_rankTestTable"]][["data"]]
expect_equal_tables(
  rankTestTable,
  list(0.0703030303030303, "Rank test", 0.302256516349067)
)
})
test_that("Regression test for Funnel plot asymmetry (\"Egger's test\") table results match - model interactions", {
# Egger's regression snapshot (predictor "sei", p-value, z-value).
regTestTable <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_regTestTable"]][["data"]]
expect_equal_tables(
  regTestTable,
  list("sei", 0.794904131826642, -0.259947679937865)
)
})
test_that("Residual Heterogeneity Estimates table results match - model interactions", {
# Snapshot test: the "<unicode>" placeholders encode the Greek/superscript
# row labels (tau-squared, tau, I-squared, H-squared) and must stay
# byte-identical for the comparison to pass.
table <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_residualTable"]][["data"]]
expect_equal_tables(table,
list(0.544954299586419, 0, "<unicode><unicode><unicode><unicode>",
0.24418480479452, 0.738210200137074, 0, "<unicode><unicode>",
0.494150589187668, 34.1721304558822, 0, "I<unicode><unicode> (%)",
18.8710710587465, 1.51911341947623, 1, "H<unicode><unicode>",
1.23260594346576))
})
# Test the diagnostic plot without the Q-Q plot: fixed-effects fit on the
# BCG Vaccine data with the Q-Q panel explicitly disabled.
options <- jaspTools::analysisOptions("ClassicalMetaAnalysis")
options$.meta <- list(covariates = list(containsColumn = TRUE), dependent = list(
  containsColumn = TRUE), factors = list(containsColumn = TRUE),
  studyLabels = list(containsColumn = TRUE), wlsWeights = list(
    containsColumn = TRUE))
# Input variables and estimation settings
options$dependent <- "ES"
options$wlsWeights <- "SE"
options$method <- "Fixed Effects"
options$test <- "z"
options$regressionCoefficientsConfidenceIntervalsInterval <- .95
# Requested output
options$modelFit <- TRUE
options$regressionCoefficientsCovarianceMatrix <- TRUE
options$rSquaredChange <- TRUE
options$residualsCasewiseDiagnostics <- TRUE
options$plotResidualsDependent <- TRUE
# Everything else off — in particular the Q-Q panel under test
options$plotResidualsQQ <- FALSE
options$regressionCoefficientsEstimates <- FALSE
options$residualsParameters <- FALSE
options$funnelPlotAsymmetryTest <- FALSE
options$plotResidualsCovariates <- FALSE
options$plotResidualsPredicted <- FALSE
options$forestPlot <- FALSE
options$trimFillPlot <- FALSE
set.seed(1)
results <- jaspTools::runAnalysis("ClassicalMetaAnalysis", "BCG Vaccine", options)
test_that("Diagnostic Plots matches without Q-Q plot", {
# The diagnostic figure should render (and match the reference) even when
# the Q-Q residual panel is disabled.
diagnosticName <- results[["results"]][["modelContainer"]][["collection"]][["modelContainer_plots"]][["collection"]][["modelContainer_plots_diagnosticPlot"]][["data"]]
diagnosticPlot <- results[["state"]][["figures"]][[diagnosticName]][["obj"]]
expect_equal_plots(diagnosticPlot, "diagnostic-plots-no-qq")
})
# ---------------------------------------------------------------------------
#' Replace infinite values in every column of a data frame with NA
#'
#' @param df A data frame; non-numeric columns pass through unchanged
#'   (`is.infinite()` is `FALSE` for them elementwise).
#' @return A data frame of the same shape with `Inf`/`-Inf` replaced by `NA`.
replace_inf <- function(df) {
  # Assigning into df[] keeps the data.frame class, column names and row
  # names intact; the original do.call(data.frame, ...) re-coerced the
  # result and could mangle non-syntactic column names via check.names.
  df[] <- lapply(df, function(col) replace(col, is.infinite(col), NA))
  df
}
#' Compute a one-row tibble of dynamic/statistical features of a time series
#'
#' Features kept in the output: 5%/95% quantiles and IQR of the standardised
#' series, skewness, kurtosis, an EMA-ratio "acceleration", the Hurst
#' exponent (via HURST below), an SMA/EMA ratio (via r_sma_ema below),
#' series length, and a Box-Pierce serial-correlation p-value. Several
#' intermediates (trend, mean, median, min, max, sd, max Lyapunov exponent)
#' are still computed but commented out of the returned frame.
#'
#' @param x numeric vector (a univariate time series)
#' @return a 1-row tibble; infinite feature values are replaced with NA
dynamics_ts <-
  function(x) {
    # TTR supplies EMA() used for the acceleration feature below.
    require(TTR)
    # Standardised copy used for the scale-free features (quantiles, IQR).
    x_std <- as.vector(scale(x))
    cat("Computing trend ...\n")
    ts_trend <- trend(x)
    cat("Computing skewness ...\n")
    ts_skew <- moments::skewness(x)
    cat("Computing kurtosis ...\n")
    ts_kts <- moments::kurtosis(x)
    cat("acceleration .. \n")
    # Ratio of a fast to a slow exponential moving average, averaged over
    # the series; > 1 suggests recent values rising relative to trend.
    ts_accl <- mean(EMA(x,n = 5) / EMA(x,n = 15),na.rm=TRUE)
    cat("Computing mean ...\n")
    ts_mean <- mean(x)
    cat("Computing median ...\n")
    ts_median <- median(x)
    cat("Computing min ...\n")
    ts_min <- min(x_std)
    cat("Computing max ...\n")
    ts_max <- max(x_std)
    cat("Computing standard deviation ...\n")
    ts_stddev <- sd(x)
    cat("Computing maximum lyapunov exponent ...\n")
    # Only the most recent 500 points are used — the embedding search in
    # max_lyapunov_exp is expensive. Result currently unused (see below).
    ts_mle <- max_lyapunov_exp(tail(x,500))
    cat("Computing hurst ...\n")
    ts_hurst <- HURST(x)
    cat("Computing ratio sma ema ...\n")
    ts_r_sema <- r_sma_ema(x)
    cat("Computing serial correlation ...\n")
    # Box-Pierce test p-value; NA when the test cannot be computed.
    ts_serialcorr <- tryCatch(Box.test(x)$p.val,
                              error=function(e) NA)
    #ts_qt1 <- quantile(x_std, .1)
    #ts_qt9 <- quantile(x_std, .9)
    ts_qt05 <- quantile(x_std, .05)
    ts_qt95 <- quantile(x_std, .95)
    ts_iqr <- IQR(x_std)
    # Assemble the retained features; commented entries are computed above
    # but deliberately excluded from the output.
    ts_dyns <-
      data.frame(#ts_trend = ts_trend,
        #ts_qt1=ts_qt1,
        #ts_qt9=ts_qt9,
        ts_qt05=ts_qt05,
        ts_qt95=ts_qt95,
        ts_iqr=ts_iqr,
        ts_skew = ts_skew,
        ts_kts = ts_kts,
        #ts_median = ts_median,
        #ts_min = ts_min,
        #ts_max = ts_max,
        ts_accl = ts_accl,
        #ts_reldiff = ts_reldiff,
        #ts_mean = ts_mean,
        #ts_stddev = ts_stddev,
        #ts_mle = ts_mle,
        ts_hurst = ts_hurst,
        ts_r_sema = ts_r_sema,
        ts_n = length(x),
        #ts_selfsim = ts_selfsim,
        ts_serialcorr = ts_serialcorr)
    # Guard downstream modelling against Inf features (e.g. ratio features).
    ts_dyns <- replace_inf(ts_dyns)
    #has_na <- DMwR::manyNAs(t(ts_dyns), .4)
    #if (length(has_na) > 0) {
    #  ts_dyns <- subset(ts_dyns, select = -has_na)
    #}
    #ts_dyns <- tsensembler::soft.completion(ts_dyns)
    #nzv_cols <- caret::nearZeroVar(ts_dyns)
    #if (length(nzv_cols) > 0L) {
    #  ts_dyns <- subset(ts_dyns, select = -nzv_cols)
    #}
    # quantile() names its result; drop the inherited row name.
    rownames(ts_dyns) <- NULL
    #preproc <- caret::preProcess(dStats)
    #dStats <- predict(preproc, dStats)
    dplyr::as_tibble(ts_dyns)
  }
# "Trend" strength: dispersion of the raw series relative to the
# dispersion of its first differences (first difference dropped, matching
# the original definition).
trend <-
  function(x) {
    step_changes <- diff(x)[-1]
    sd(x) / sd(step_changes)
  }
# Largest value of the divergence curve of a maximum-Lyapunov-exponent
# estimate (nonlinearTseries). Embedding dimensions and radius are scaled
# from the series length.
max_lyapunov_exp <-
  function(x) {
    require(nonlinearTseries)
    n_obs <- length(x)
    lyap_fit <- nonlinearTseries::maxLyapunov(
      time.series = x,
      min.embedding.dim = ceiling(n_obs / 4),
      max.embedding.dim = ceiling(n_obs / 2),
      radius = ceiling(n_obs / 6),
      do.plot = FALSE
    )
    Reduce(max, nonlinearTseries::divergence(lyap_fit))
  }
#' Hurst exponent
#'
#' Estimates the Hurst exponent of a series from its wavelet power
#' spectrum (Rwave DOG wavelet transform).
#'
#' @param x numeric vector
HURST <-
  function(x) {
    require(Rwave)
    wavelet_tf <- DOG(x, 10, 3, 1, plot = FALSE)
    # Squared modulus of the transform = wavelet power.
    power <- Mod(wavelet_tf)^2
    avg_spectrum <- tfmean(power, plot = FALSE)
    # Second element of hurst.est() is the exponent estimate itself.
    hurst.est(avg_spectrum, 1:7, 3, plot = FALSE)[[2]]
  }
# Mean ratio of simple to exponential moving averages of the reversed
# series (TTR::SMA / TTR::EMA), with non-finite ratios excluded.
#
# x: numeric vector. Returns a single numeric value.
r_sma_ema <-
  function(x) {
    require(TTR)
    # Window size: 5 for longer series, 3 for very short ones.
    if (length(x) > 10)
      n <- 5
    else
      n <- 3
    ts_sma <- SMA(rev(x), n = n)
    ts_ema <- EMA(rev(x), n = n)
    # Both averages carry n-1 leading NAs; drop them before the ratio.
    ts_sma <- ts_sma[!is.na(ts_sma)]
    ts_ema <- ts_ema[!is.na(ts_ema)]
    sema <- ts_sma / ts_ema
    # Drop non-finite ratios (e.g. division by a zero EMA).
    sema <- sema[!(is.infinite(sema) | is.na(sema))]
    # BUG FIX: previously returned mean(ts_sma / ts_ema), which discarded
    # the Inf/NA filtering above and could return Inf or NaN.
    mean(sema)
  }
|
/src/dynamics.r
|
no_license
|
fabrizziosoares/performance_estimation
|
R
| false
| false
| 4,019
|
r
|
# Replace every infinite value in each column of `df` with NA.
# Non-numeric columns pass through unchanged (is.infinite() is all-FALSE
# for them). Returns a data.frame rebuilt from the cleaned columns.
replace_inf <- function(df) {
  cleaned_cols <- lapply(df, function(col) {
    col[is.infinite(col)] <- NA
    col
  })
  do.call(data.frame, cleaned_cols)
}
# Compute a one-row feature vector ("dynamics") summarising a univariate
# time series.
#
# x: numeric vector (the series).
# Returns a one-row tibble with: 5%/95% quantiles and IQR of the
# standardised series, skewness, kurtosis, an EMA(5)/EMA(15) ratio
# ("acceleration"), Hurst exponent, SMA/EMA ratio, series length, and a
# Box-test serial-correlation p-value. Infinite values are replaced by NA
# via replace_inf().
#
# NOTE(review): trend, mean, median, min, max, sd and the maximum Lyapunov
# exponent are computed below (the Lyapunov estimate is expensive) but are
# commented out of the returned data.frame, so that work is currently dead
# apart from its progress messages -- confirm whether it can be removed.
dynamics_ts <-
  function(x) {
    require(TTR)  # provides EMA() used below
    # Standardised copy of the series, used for scale-free features.
    x_std <- as.vector(scale(x))
    cat("Computing trend ...\n")
    ts_trend <- trend(x)
    cat("Computing skewness ...\n")
    ts_skew <- moments::skewness(x)
    cat("Computing kurtosis ...\n")
    ts_kts <- moments::kurtosis(x)
    cat("acceleration .. \n")
    # Ratio of short to long exponential moving averages.
    ts_accl <- mean(EMA(x,n = 5) / EMA(x,n = 15),na.rm=TRUE)
    cat("Computing mean ...\n")
    ts_mean <- mean(x)
    cat("Computing median ...\n")
    ts_median <- median(x)
    cat("Computing min ...\n")
    ts_min <- min(x_std)
    cat("Computing max ...\n")
    ts_max <- max(x_std)
    cat("Computing standard deviation ...\n")
    ts_stddev <- sd(x)
    cat("Computing maximum lyapunov exponent ...\n")
    # Capped at the last 500 observations to bound the cost.
    ts_mle <- max_lyapunov_exp(tail(x,500))
    cat("Computing hurst ...\n")
    ts_hurst <- HURST(x)
    cat("Computing ratio sma ema ...\n")
    ts_r_sema <- r_sma_ema(x)
    cat("Computing serial correlation ...\n")
    # Box.test can fail on degenerate input; fall back to NA.
    ts_serialcorr <- tryCatch(Box.test(x)$p.val,
                              error=function(e) NA)
    #ts_qt1 <- quantile(x_std, .1)
    #ts_qt9 <- quantile(x_std, .9)
    ts_qt05 <- quantile(x_std, .05)
    ts_qt95 <- quantile(x_std, .95)
    ts_iqr <- IQR(x_std)
    # Assemble the kept features; commented entries are deliberately excluded.
    ts_dyns <-
      data.frame(#ts_trend = ts_trend,
                 #ts_qt1=ts_qt1,
                 #ts_qt9=ts_qt9,
                 ts_qt05=ts_qt05,
                 ts_qt95=ts_qt95,
                 ts_iqr=ts_iqr,
                 ts_skew = ts_skew,
                 ts_kts = ts_kts,
                 #ts_median = ts_median,
                 #ts_min = ts_min,
                 #ts_max = ts_max,
                 ts_accl = ts_accl,
                 #ts_reldiff = ts_reldiff,
                 #ts_mean = ts_mean,
                 #ts_stddev = ts_stddev,
                 #ts_mle = ts_mle,
                 ts_hurst = ts_hurst,
                 ts_r_sema = ts_r_sema,
                 ts_n = length(x),
                 #ts_selfsim = ts_selfsim,
                 ts_serialcorr = ts_serialcorr)
    # Sanitise any Inf produced by the ratio features.
    ts_dyns <- replace_inf(ts_dyns)
    #has_na <- DMwR::manyNAs(t(ts_dyns), .4)
    #if (length(has_na) > 0) {
    #  ts_dyns <- subset(ts_dyns, select = -has_na)
    #}
    #ts_dyns <- tsensembler::soft.completion(ts_dyns)
    #nzv_cols <- caret::nearZeroVar(ts_dyns)
    #if (length(nzv_cols) > 0L) {
    #  ts_dyns <- subset(ts_dyns, select = -nzv_cols)
    #}
    # quantile() names its result; drop the row name it leaves behind.
    rownames(ts_dyns) <- NULL
    #preproc <- caret::preProcess(dStats)
    #dStats <- predict(preproc, dStats)
    dplyr::as_tibble(ts_dyns)
  }
# "Trend" strength: dispersion of the raw series relative to the
# dispersion of its first differences (first difference dropped, matching
# the original definition).
trend <-
  function(x) {
    step_changes <- diff(x)[-1]
    sd(x) / sd(step_changes)
  }
# Largest value of the divergence curve of a maximum-Lyapunov-exponent
# estimate (nonlinearTseries). Embedding dimensions and radius are scaled
# from the series length.
max_lyapunov_exp <-
  function(x) {
    require(nonlinearTseries)
    n_obs <- length(x)
    lyap_fit <- nonlinearTseries::maxLyapunov(
      time.series = x,
      min.embedding.dim = ceiling(n_obs / 4),
      max.embedding.dim = ceiling(n_obs / 2),
      radius = ceiling(n_obs / 6),
      do.plot = FALSE
    )
    Reduce(max, nonlinearTseries::divergence(lyap_fit))
  }
#' Hurst exponent
#'
#' Estimates the Hurst exponent of a series from its wavelet power
#' spectrum (Rwave DOG wavelet transform).
#'
#' @param x numeric vector
HURST <-
  function(x) {
    require(Rwave)
    wavelet_tf <- DOG(x, 10, 3, 1, plot = FALSE)
    # Squared modulus of the transform = wavelet power.
    power <- Mod(wavelet_tf)^2
    avg_spectrum <- tfmean(power, plot = FALSE)
    # Second element of hurst.est() is the exponent estimate itself.
    hurst.est(avg_spectrum, 1:7, 3, plot = FALSE)[[2]]
  }
# Mean ratio of simple to exponential moving averages of the reversed
# series (TTR::SMA / TTR::EMA), with non-finite ratios excluded.
#
# x: numeric vector. Returns a single numeric value.
r_sma_ema <-
  function(x) {
    require(TTR)
    # Window size: 5 for longer series, 3 for very short ones.
    if (length(x) > 10)
      n <- 5
    else
      n <- 3
    ts_sma <- SMA(rev(x), n = n)
    ts_ema <- EMA(rev(x), n = n)
    # Both averages carry n-1 leading NAs; drop them before the ratio.
    ts_sma <- ts_sma[!is.na(ts_sma)]
    ts_ema <- ts_ema[!is.na(ts_ema)]
    sema <- ts_sma / ts_ema
    # Drop non-finite ratios (e.g. division by a zero EMA).
    sema <- sema[!(is.infinite(sema) | is.na(sema))]
    # BUG FIX: previously returned mean(ts_sma / ts_ema), which discarded
    # the Inf/NA filtering above and could return Inf or NaN.
    mean(sema)
  }
|
setClass('xgb.DMatrix')

#' Get a new DMatrix containing the specified rows of an
#' original xgb.DMatrix object
#'
#' Get a new DMatrix containing the specified rows of an
#' original xgb.DMatrix object
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' train <- agaricus.train
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
#' dsub <- slice(dtrain, 1:3)
#' @rdname slice
#' @export
#'
slice <- function(object, ...) {
  UseMethod("slice")
}
#' @param object Object of class "xgb.DMatrix"
#' @param idxset an integer vector of indices of rows needed
#' @param ... other parameters
#' @rdname slice
#' @method slice xgb.DMatrix
setMethod("slice", signature = "xgb.DMatrix",
          definition = function(object, idxset, ...) {
            # inherits() rather than class() == ...: class() can return a
            # vector of classes, which makes a `!=` comparison unreliable.
            if (!inherits(object, "xgb.DMatrix")) {
              stop("slice: first argument dtrain must be xgb.DMatrix")
            }
            # Native row-slice of the DMatrix.
            ret <- .Call("XGDMatrixSliceDMatrix_R", object, idxset,
                         PACKAGE = "xgboost")
            # Carry over per-row attributes (e.g. label, weight): any
            # attribute whose length equals the row count is subset by
            # idxset so it stays aligned with the sliced rows.
            attr_list <- attributes(object)
            nr <- xgb.numrow(object)
            len <- vapply(attr_list, length, integer(1))
            ind <- which(len == nr)
            if (length(ind) > 0) {
              nms <- names(attr_list)[ind]
              for (i in seq_along(ind)) {
                attr(ret, nms[i]) <- attr(object, nms[i])[idxset]
              }
            }
            return(structure(ret, class = "xgb.DMatrix"))
          })
|
/src/external/xgboost/R-package/R/slice.xgb.DMatrix.R
|
permissive
|
Sitispeaks/turicreate
|
R
| false
| false
| 1,481
|
r
|
setClass('xgb.DMatrix')

#' Get a new DMatrix containing the specified rows of an
#' original xgb.DMatrix object
#'
#' Get a new DMatrix containing the specified rows of an
#' original xgb.DMatrix object
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' train <- agaricus.train
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
#' dsub <- slice(dtrain, 1:3)
#' @rdname slice
#' @export
#'
slice <- function(object, ...) {
  UseMethod("slice")
}
#' @param object Object of class "xgb.DMatrix"
#' @param idxset an integer vector of indices of rows needed
#' @param ... other parameters
#' @rdname slice
#' @method slice xgb.DMatrix
setMethod("slice", signature = "xgb.DMatrix",
          definition = function(object, idxset, ...) {
            # inherits() rather than class() == ...: class() can return a
            # vector of classes, which makes a `!=` comparison unreliable.
            if (!inherits(object, "xgb.DMatrix")) {
              stop("slice: first argument dtrain must be xgb.DMatrix")
            }
            # Native row-slice of the DMatrix.
            ret <- .Call("XGDMatrixSliceDMatrix_R", object, idxset,
                         PACKAGE = "xgboost")
            # Carry over per-row attributes (e.g. label, weight): any
            # attribute whose length equals the row count is subset by
            # idxset so it stays aligned with the sliced rows.
            attr_list <- attributes(object)
            nr <- xgb.numrow(object)
            len <- vapply(attr_list, length, integer(1))
            ind <- which(len == nr)
            if (length(ind) > 0) {
              nms <- names(attr_list)[ind]
              for (i in seq_along(ind)) {
                attr(ret, nms[i]) <- attr(object, nms[i])[idxset]
              }
            }
            return(structure(ret, class = "xgb.DMatrix"))
          })
|
# Joshua Alley
# Execute every script in the public-goods-test replication, in order.
project_scripts <- c(
  "data/setup-script.R",        # load packages, set seeds, manage conflicts
  "data/ratio-simulation.R",    # simulate problems with ratio outcomes
  "data/analysis.R",            # primary analyses
  "data/analysis-z-weight.R",   # weighted membership analyses
  "data/analysis-change.R",     # analyses of changes
  "data/analysis-avg-weight.R", # single-level regression results
  "data/simulation-check.R"     # simulate ML model
)
for (script in project_scripts) {
  source(script)
}
|
/do-all.R
|
no_license
|
joshuaalley/public-goods-test
|
R
| false
| false
| 499
|
r
|
# Joshua Alley
# Execute every script in the public-goods-test replication, in order.
project_scripts <- c(
  "data/setup-script.R",        # load packages, set seeds, manage conflicts
  "data/ratio-simulation.R",    # simulate problems with ratio outcomes
  "data/analysis.R",            # primary analyses
  "data/analysis-z-weight.R",   # weighted membership analyses
  "data/analysis-change.R",     # analyses of changes
  "data/analysis-avg-weight.R", # single-level regression results
  "data/simulation-check.R"     # simulate ML model
)
for (script in project_scripts) {
  source(script)
}
|
# Install the packages this pipeline's R scripts depend on, from the
# cloud CRAN mirror.
required_pkgs <- c("tidyverse", "grid", "gridExtra")
install.packages(required_pkgs, repos = "https://cloud.r-project.org")
|
/scripts/setupR.R
|
permissive
|
Kennedy-Lab-UW/Duplex-Seq-Pipeline
|
R
| false
| false
| 90
|
r
|
# Install the packages this pipeline's R scripts depend on, from the
# cloud CRAN mirror.
required_pkgs <- c("tidyverse", "grid", "gridExtra")
install.packages(required_pkgs, repos = "https://cloud.r-project.org")
|
#' Make a DHS API client
#'
#' @title Make a dhs client
#'
#' @param config config object, as created using \code{read_rdhs_config}
#' @param root Character for root directory to where client, caches,
#'   surveys etc. will be stored. Default = \code{rappdirs_rdhs()}
#' @param api_key Character for DHS API KEY. Default = NULL
#'
#' @template client_dhs_methods
#' @export
#'
#' @examples
#' \dontrun{
#' # create an rdhs config file at "rdhs.json"
#' conf <- set_rdhs_config(
#'   config_path = "rdhs.json", global = FALSE, prompt = FALSE
#' )
#' td <- tempdir()
#' cli <- rdhs::client_dhs(api_key = "DEMO_1234", config = conf, root = td)
#' }
client_dhs <- function(config=NULL,
                       root=rappdirs_rdhs(),
                       api_key=NULL) {

  # if api_key is NULL then set it to the package default
  if (is.null(api_key)) {
    api_key <- api_key_internal
  }

  # we need to have a config, so we create the temp one if not provided.
  # The .rdhs flag suppresses a recursive client update while the
  # temporary config is being written.
  if (is.null(config)) {
    .rdhs$internal_client_update <- FALSE
    config <- set_rdhs_config(prompt = FALSE)
    .rdhs$internal_client_update <- TRUE
  }

  # check rdhs cache age against the DHS API's last update time
  cache_date <- client_cache_date(root = root)
  if (last_api_update(config$timeout) > cache_date) {

    # create new client if DHS database has been updated
    client <- R6_client_dhs$new(config, api_key, root)

    # If there was already a client in your root (i.e. there was a DHS update)
    # then empty the api_call cache namespace and check package version
    if (cache_date > 0) {
      message("\nDHS API has been updated since you last set up a DHS client\n",
              "in this root directory.")
      message("Previous API / dataset requests will subsequently be rerun in\n",
              "order to ensure your results are up to date.\n")
      client$clear_namespace(namespace = "api_calls")
      client$clear_namespace(namespace = "available_datasets_calls")

      ## clear any now old dataset calls
      # ----------------------------------------------------

      # fetch the dataupdates api endpoint
      upd <- client$dhs_api_request(api_endpoint = "dataupdates")

      # are any of the listed updates more recent than the cache date
      if (max(mdy_hms(upd$UpdateDate)) > cache_date) {

        ## check which datasets have been downloaded in the past
        # first grab the private for readability
        private <- client$.__enclos_env__$private

        # get the surveyIds for all downloaded datasets
        # (cache keys are "<SurveyId>_<FileName>_..." - take the first part)
        downloaded_dataset_keys <- private$storr$list("downloaded_datasets")
        downloaded_surveyIds <- strsplit(downloaded_dataset_keys, "_") %>%
          lapply(function(x) x[1]) %>%
          unlist()

        # which of the updates have occured since our last client was created
        chge <- upd$SurveyId[mdy_hms(upd$UpdateDate) > client$get_cache_date()]
        datasets_to_clear <- which(downloaded_surveyIds %in% chge)

        # do any of them match those that have been updated since the
        # last cache_date; if so drop both the cached dataset and its
        # cached variable names so they are re-downloaded fresh
        if (length(datasets_to_clear) > 0) {
          for (key in downloaded_dataset_keys[datasets_to_clear]) {
            private$storr$del(key, "downloaded_datasets")
            private$storr$del(key, "downloaded_datasets_variable_names")
          }
        }
      }
    }
    return(client)

    # if no api updates have occurred then get the cached api client
  } else {

    # create client in the location rather than readRDS so that we
    # are using new config etc
    client <- R6_client_dhs$new(config, api_key, root)

    # load cached client
    private <- client$.__enclos_env__$private

    # spit out a message to say if the client is being updated from
    # a previous rdhs version
    if (packageVersion("rdhs") != private$package_version) {
      message("New version of rdhs detected.",
              "Your saved client will be updated.")
    }
    return(client)
  }
}
R6_client_dhs <- R6::R6Class(
classname = "client_dhs",
cloneable = FALSE,
# PUBLIC METHODS
public = list(
# INITIALISATION
initialize = function(config,
api_key,
root = rappdirs_rdhs()) {
if (!inherits(config, "rdhs_config")) {
stop ("config provided for client is not of class rdhs_config")
}
private$api_key <- api_key
private$root <- root
private$storr <- storr::storr_rds(file.path(root, "db"))
private$cache_date <- Sys.time()
saveRDS(self, file.path(root, client_file_name())) # save before config
private$config <- config
},
# API REQUESTS
# will either return your request as a parsed json (having cached the
# result), or will return an error
dhs_api_request = function(api_endpoint,
query = list(),
api_key = private$api_key,
num_results = 100,
just_results = TRUE) {
# Check api_endpoints first
if (!is.element(api_endpoint, private$api_endpoints)) {
stop(paste("Requested api_endpoint is invalid. Must be one one of:",
paste(api_endpoints, collapse = "\n"),
"For more information check the api website:",
"https://api.dhsprogram.com/#/index.html",
sep = "\n"))
}
# Collapse query list
query_param_lengths <- lapply(query, length) %>% unlist()
# collapse where lengths are greater than 1
for (i in which(query_param_lengths > 1)) {
query[[i]] <- paste0(query[[i]], collapse = ",")
}
# add the api key if it exists
query$apiKey <- private$api_key
# then build in pages. If they have asked for all results
# catch this and standardise
if (is.element(num_results, c("ALL", "all", "a", "al", "aL", "Al",
"A", "AL", "ALl", "All", "AlL", "aLL",
"aLl", "alL"))) {
query$perPage <- 100
num_results <- "ALL"
} else {
query$perPage <- num_results
}
# Build url query and associated key
url <- httr::modify_url(paste0(private$url, api_endpoint), query = query)
# SLight hacking to deal eith page numbers etc now
pp <- which(names(query) == "perPage")
key <- paste0(api_endpoint, "_",
paste0(
names(query)[-pp], unlist(query)[-pp], collapse = ","
),
",num_results", num_results, ",just_results", just_results)
key <- digest::digest(key)
# first check against cache
out <- tryCatch(private$storr$get(key, "api_calls"),
KeyError = function(e) NULL)
# check out agianst cache, if fine then return just that
if (!is.null(out)) {
return(out)
} else {
# Get request
resp <- httr::GET(url, httr::accept_json(),
httr::user_agent("https://github.com/ropensci/rdhs"),
encode = "json")
## pass to response parse
parsed_resp <- handle_api_response(resp, TRUE)
if (resp$status_code >= 400 && resp$status_code < 600) {
return(parsed_resp)
}
# put some message or return to let the user know if the data
# returned is empty
if (parsed_resp$RecordsReturned == 0) {
message("Records returned equal to 0. Most likely your query
terms are too specific or there is a typo that does not
trigger a 404 or 500 error")
}
# Now let's address the num_results argument
# if the first call has caught all the results then great
if (parsed_resp$RecordsReturned == parsed_resp$RecordCount) {
if (just_results) {
parsed_resp <- rbind_list_base(parsed_resp$Data)
}
} else {
# if not then query with either max possible or their requested amount
if (num_results == "ALL") {
query$perPage <- 5000
} else {
query$perPage <- num_results
}
# Create new request and parse this
url <- httr::modify_url(paste0(private$url, api_endpoint),
query = query)
resp <- httr::GET(
url, httr::accept_json(),
httr::user_agent("https://github.com/ropensci/rdhs"),
encode = "json"
)
parsed_resp <- handle_api_response(resp, TRUE)
# if this larger page query has returned all the results then
# return this else we will loop through
if (parsed_resp$TotalPages == 1) {
if (just_results) {
parsed_resp <- rbind_list_base(parsed_resp$Data)
}
} else {
# save the resp as a temp and then make parsed_resp the list we
# will loop requests into
temp_parsed_resp <- parsed_resp
parsed_resp <- list()
length(parsed_resp) <- temp_parsed_resp$TotalPages
parsed_resp[[1]] <- temp_parsed_resp
for (i in 2:length(parsed_resp)) {
query$page <- i
url <- httr::modify_url(paste0(private$url, api_endpoint),
query = query)
resp <- httr::GET(
url, httr::accept_json(),
httr::user_agent("https://github.com/ropensci/rdhs"),
encode = "json"
)
temp_parsed_resp <- handle_api_response(resp, TRUE)
parsed_resp[[i]] <- temp_parsed_resp
}
# and now concatenate the results
if (just_results) {
parsed_resp <- collapse_api_responses(parsed_resp)
}
}
}
## then cache the resp if we haven't stopped already and
# return the parsed resp
private$storr$set(key, parsed_resp, "api_calls")
return(parsed_resp)
}
},
# AVAILABLE DATASETS
# Creates data.frame of available datasets using \code{available_datasets}
# and caches it
available_datasets = function(clear_cache_first = FALSE) {
# check config are good
if (config_not_present(private$config)) {
handle_config(private$config$config_path)
}
# clear the cache for this if set to do so. This is only included
# here if the user has recently had a change to the datasets
# they have been allowed to access and want to ensure they
# are accessing their new available datasets
if (clear_cache_first) {
self$clear_namespace(namespace = "available_datasets_calls")
}
# create key for this
key <- digest::digest(paste0(private$config$project, ","))
# first check against cache
out <- tryCatch(private$storr$get(key, "available_datasets_calls"),
KeyError = function(e) NULL
)
# check out agianst cache, if fine then return just that
if (!is.null(out)) {
return(out)
} else {
# Get downloadable datasets
resp <- available_datasets(
config = private$config,
datasets_api_results = self$dhs_api_request("datasets",
num_results = "ALL"),
surveys_api_results = self$dhs_api_request("surveys",
num_results = "ALL")
)
## then cache the resp if we haven't stopped already and return
## the parsed resp
private$storr$set(key, resp, "available_datasets_calls")
return(resp)
}
},
# GET DATASETS
# Gets datasets provided, either by downloading or retrieving from the cache
get_datasets = function(dataset_filenames,
download_option="rds",
reformat=FALSE,
all_lower=TRUE,
output_dir_root=file.path(private$root, "datasets"),
clear_cache = FALSE,
...) {
# check config are good
if (config_not_present(private$config)) {
handle_config(private$config$config_path)
}
# if cache needs clearing
if (clear_cache){
avs <- self$available_datasets(TRUE)
}
# fetch which datasets you can download from your login
datasets <- private$check_available_datasets(dataset_filenames)
# results storage
res <- list()
# possible download options:
download_possibilities <- c("zip", "rds", "both")
dopt <- grep(paste0(strsplit(download_option, "") %>%
unlist(), collapse = "|"),
download_possibilities, ignore.case = TRUE)
download_option <- download_possibilities[dopt]
if (!is.element(download_option, download_possibilities)) {
stop("Download option provided is not valid")
}
# handle for more than one dataset specified
download_iterations <- length(res) <- dim(datasets)[1]
names(res) <- datasets$file
# iterate through download requests
for (i in 1:download_iterations) {
# if no url then place error message in results list
if (is.na(datasets$URLS[i])) {
res[[i]] <- "Dataset is not available with your DHS login credentials"
} else {
# create key for this
key <- paste0(datasets[i, ]$SurveyId, "_", datasets[i, ]$FileName,
"_", download_option, "_", reformat)
# first check against cache
out <- tryCatch(private$storr$get(key, "downloaded_datasets"),
KeyError = function(e) NULL)
# check out agianst cache, if fine then return just that
if (!is.null(out)) {
res[[i]] <- out
} else {
# Download dataset
resp <- download_datasets(
config = private$config,
desired_dataset = datasets[i, ],
output_dir_root = output_dir_root,
download_option = download_option,
all_lower = all_lower,
reformat = reformat,
...
)
# if there were 2 results returned with these names then we cache
# them into different namespace the reason for this is it's really
# helpful to have the questions in each dataset quickly accessible
# without having to load the dataset each time. And we cache the
# dataset path rather than the full dataset so that people can more
# quickly jump and grab a dataset from the rds iin the datasets
# directory rather than having to go into the db directory
if (identical(names(resp), c("dataset", "variable_names"))) {
private$storr$set(key, resp$dataset,
"downloaded_datasets")
private$storr$set(key, resp$variable_names,
"downloaded_dataset_variable_names")
res[[i]] <- resp$dataset
} else if (grepl("No support for reading in", resp)) {
res[[i]] <- resp
} else {
## then cache the resp and store it in the results list
private$storr$set(key, resp, "downloaded_datasets")
res[[i]] <- resp
}
}
}
}
# add the reformat as an attribute to make life easier is
# in survey questions/variables
attr(res, which = "reformat") <- reformat
return(res)
},
# SURVEY_QUESTIONS
# Creates data.frame of all survey variables and descriptions, with an
# option to filter by search terms
survey_questions = function(dataset_filenames,
search_terms = NULL,
essential_terms = NULL,
regex = NULL,
rm_na = TRUE,
...) {
# check config are good
if (config_not_present(private$config)) {
handle_config(private$config$config_path)
}
# download any datasets that need to be downloaded
download <- self$get_datasets(dataset_filenames, ...)
# fetch which datasets you can download from your login
datasets <- private$check_available_datasets(dataset_filenames)
datasets <- datasets[!is.na(datasets$URLS), ]
# handle the search terms
if (is.null(regex) & is.null(search_terms)) {
stop("One of search terms or regex must not be NULL")
}
if (is.null(search_terms)) {
pattern <- paste0(regex, essential_terms, collapse = "|")
} else {
pattern <- paste0(search_terms, essential_terms, collapse = "|")
if (!is.null(regex)) {
message(paste0(
"Both regex and search_terms were provided.",
"search_terms will be used.",
"To use regex for searching, do not specify search_terms"
))
}
}
# results storage
df <- data.frame("code" = character(0), "description" = character(0),
"dataset_filename" = character(0),
"dataset_path" = character(0),
"survey_id" = character(0))
res <- list()
download_iteration <- length(res) <- dim(datasets)[1]
names(res) <- datasets$file
# iterate through downloaded surveys
for (i in 1:download_iteration) {
# create key for this
key <- paste0(datasets[i, ]$SurveyId, "_",
datasets[i, ]$FileName, "_",
"rds", "_",
attr(download, which = "reformat"))
# first check against cache
out <- tryCatch(
private$storr$get(key, "downloaded_datasets"),
KeyError = function(e) NULL
)
out_desc <- tryCatch(
private$storr$get(key, "downloaded_dataset_variable_names"),
KeyError = function(e) NULL
)
# add the survey file path to the res list
res[[i]] <- out
# match on search terms and remove questions that have na's
matched_rows <- grep(pattern = pattern, out_desc$description,
ignore.case = TRUE)
if (rm_na) {
na_from_match <- grep(private$na_s,
out_desc$description[matched_rows],
ignore.case = TRUE)
if (length(na_from_match) > 0) {
matched_rows <- matched_rows[-grep(
private$na_s,
out_desc$description[matched_rows],
ignore.case = TRUE
)]
}
}
# only add if we have found any questions that match
if (length(matched_rows) > 0) {
# add the descriptions to the df object
df <- rbind(df, data.frame(
"variable" = out_desc$variable[matched_rows],
"description" = out_desc$description[matched_rows],
"dataset_filename" = rep(names(res[i]), length(matched_rows)),
"dataset_path" = rep(res[[i]], length(matched_rows)),
"survey_id" = rep(datasets[i, ]$SurveyId, length(matched_rows)),
stringsAsFactors = FALSE
))
}
}
# now remove datasets that do not have essential terms:
if (!is.null(essential_terms)) {
if (sum(is.na(grep(essential_terms, df$description))) > 0) {
df <- df[grepl(essential_terms, df$description), ]
}
}
# Return the questions, codes and surveys data.frame
return(df)
},
# SURVEY_VARIABLES
# Creates data.frame of wanted survey variables and descriptions
survey_variables = function(dataset_filenames,
variables,
essential_variables = NULL,
rm_na = TRUE,
...) {
# check config are good
if (config_not_present(private$config)) {
handle_config(private$config$config_path)
}
# first download any datasets needed
download <- self$get_datasets(dataset_filenames, ...)
# fetch which datasets you can download from your login
datasets <- private$check_available_datasets(dataset_filenames)
datasets <- datasets[!is.na(datasets$URLS), ]
# results storage
df <- data.frame(
"code" = character(0), "description" = character(0),
"dataset_filename" = character(0), "dataset_path" = character(0),
"survey_id" = character(0)
)
res <- list()
download_iteration <- length(res) <- dim(datasets)[1]
names(res) <- datasets$file
# iterate through datasets
for (i in 1:download_iteration) {
# create key for this
key <- paste0(
datasets[i, ]$SurveyId, "_", datasets[i, ]$FileName, "_", "rds",
"_", attr(download, which = "reformat")
)
# Get description and dataset path and find the matched_rows for
# the requested variables
out_desc <- private$storr$get(key, "downloaded_dataset_variable_names")
res[[i]] <- private$storr$get(key, "downloaded_datasets")
# handle for case mismatches - we'll do this rather than allow people to
# cache agianst the case they have specified with all_lower as that is
# ridiculous memory wastage.
# if the description first variable is upper then they all are and we'll
# force the variables and essential variables to be the same for for
# matching. If not then all lower and do the same
if (is_uppercase(out_desc$variable[1])) {
variables <- toupper(variables)
if (!is.null(essential_variables)) {
essential_variables <- toupper(essential_variables)
}
} else {
variables <- tolower(variables)
if (!is.null(essential_variables)) {
essential_variables <- tolower(essential_variables)
}
}
# now let's match
matched_rows <- na.omit(match(variables, out_desc$variable))
if (rm_na) {
# remove na results
na_from_match <- grep(private$na_s,
out_desc$description[matched_rows],
ignore.case = TRUE)
if (length(na_from_match) > 0) {
matched_rows <- matched_rows[-grep(
private$na_s,
out_desc$description[matched_rows],
ignore.case = TRUE
)]
}
}
# only add if we have found any questions that match
if (length(matched_rows) > 0) {
# add the descriptions to the df object
df <- rbind(df, data.frame(
"variable" = out_desc$variable[matched_rows],
"description" = out_desc$description[matched_rows],
"dataset_filename" = rep(names(res[i]), length(matched_rows)),
"dataset_path" = rep(res[[i]], length(matched_rows)),
"survey_id" = rep(datasets[i, ]$SurveyId, length(matched_rows)),
stringsAsFactors = FALSE
))
}
}
# now remove datasets that do not have essential codes:
if (!is.null(essential_variables)) {
for (i in unique(df$dataset_filename)) {
if (sum(is.na(match(essential_variables,
df$variable[df$dataset_filename == i]))) > 0) {
df <- df[-which(df$dataset_filename == i), ]
}
}
}
# return the finished df
return(df)
},
# EXTRACTION
extract = function(questions, add_geo=FALSE) {
if (dim(questions)[1] == 0) {
stop("questions argument is empty - check your
survey_questions/variables terms?")
}
# are the questions relating to the model datasets
if (all(substr(unique(questions$dataset_filename), 1, 2) == "zz")) {
datasets <- model_datasets
} else {
datasets <- self$available_datasets()
}
# append the filename as survey to the datasets for easier matching later
datasets$Survey <- strsplit(datasets$FileName, ".", fixed = TRUE) %>%
lapply(function(x) x[1]) %>%
unlist()
## get geo_surveys if needed
if (add_geo) {
hhs_geo <- which(datasets$FileType %in% c("Geographic Data"))
snm <- match(unique(questions$dataset_filename), datasets$Survey)
ge_match <- which(datasets$SurveyNum %in% datasets$SurveyNum[snm] &
datasets$FileType == "Geographic Data")
if (sum(!is.na(ge_match)) > 0) {
geo_surveys <- self$get_datasets(
dataset_filenames = datasets$FileName[ge_match],
download_option = "rds"
)
}
}
## fetch the results
res <- extraction(questions, datasets, geo_surveys, add_geo)
return(res)
},
# GETTERS
get_cache_date = function() private$cache_date,
get_root = function() private$root,
get_config = function() private$config,
# get a dataset's var labels
get_variable_labels = function(dataset_filenames=NULL,
dataset_paths=NULL,
rm_na = FALSE) {
# catch if both null
if (is.null(dataset_filenames) && is.null(dataset_paths)) {
stop("One of dataset_filenames or dataset_paths must not be null")
}
# catch if both provided
if (!is.null(dataset_filenames) && !is.null(dataset_paths)) {
message("Both of dataset_filenames and dataset_paths are provided.
The filenames will be used")
dataset_paths <- NULL
}
# grab these now
filenames <- dhs_datasets(client = self)$FileName
filenames <- c(filenames, model_datasets$FileName)
# get vars from dataset_paths
if (!is.null(dataset_paths)) {
# stop if all poor file paths
if (all(!file.exists(dataset_paths))) {
stop(
"All dataset file paths were not found:\n ",
paste0(dataset_paths[!file.exists(dataset_paths)], sep = "\n ")
)
}
# message any poor file paths first
if (any(!file.exists(dataset_paths))) {
message(
"Following dataset file paths were not found:\n ",
paste0(dataset_paths[!file.exists(dataset_paths)], sep = "\n ")
)
}
# what have we downloaded
downs <- self$get_downloaded_datasets()
# which file paths are these
mats <- match(dataset_paths[file.exists(dataset_paths)], downs)
# what keys do these belong to and what were the downloaded options
# (so we don't download extra files)
keys <- private$storr$list("downloaded_datasets")[mats]
options <- strsplit(keys, "_") %>% lapply(function(x) x[c(2, 4)])
options <- lapply(options, function(x) {
c(grep(x[1], filenames, value = TRUE), x[2])
})
vars <- lapply(options, function(x) {
self$survey_questions(dataset_filenames = x[1],
search_terms = "",
reformat = x[2],
rm_na = FALSE)
})
vars <- rbind_labelled(vars)
}
if (!is.null(dataset_filenames)) {
# just get the ones that exist
names_matched <- filenames[match(dataset_filenames, filenames)]
# stop if all poor file names
if (all(is.na(names_matched))) {
stop("All dataset file names are not valid:\n ",
paste0(dataset_filenames[is.na(names_matched)], sep = "\n "))
}
# message any poor file names
if (any(is.na(names_matched))) {
message("Following dataset file names are not valid:\n ",
paste0(dataset_filenames[is.na(names_matched)], sep = "\n "))
}
# grab the variables using a catch all variables term
vars <- self$survey_questions(
dataset_filenames = names_matched[!is.na(names_matched)],
search_terms = "",
rm_na = FALSE
)
}
return(vars)
},
## GET_DOWNLOADED_DATASETS
# Grab all downloaded datasets
get_downloaded_datasets = function() {
# grab the keys within the namespace for this
keys <- private$storr$list("downloaded_datasets")
# download paths
downloads <- private$storr$mget(keys, namespace = "downloaded_datasets")
names(downloads) <- strsplit(
basename(unlist(downloads)), ".rds", fixed = TRUE
) %>% lapply(function(x) x[1]) %>% unlist()
return(downloads)
},
# SETTERS
set_cache_date = function(date) private$cache_date <- date,
# SAVE CLIENT
save_client = function() saveRDS(self, file.path(
private$root,
client_file_name()
)),
# CLEAR NAMESPACE
clear_namespace = function(namespace) {
private$storr$clear(namespace = namespace)
private$storr$gc()
}
),
private = list(
  # DHS API key used for all API requests
  api_key = NULL,
  # root directory holding the client, caches and downloaded datasets
  root = NULL,
  # presumably the root exactly as declared by the user — not set in the
  # code visible here; TODO(review) confirm where this is assigned
  user_declared_root = NULL,
  # rdhs_config object with login credentials and options
  config = NULL,
  # time the client was created; compared against the DHS API update time
  cache_date = Sys.time(),
  # rdhs version that created this client (used to detect upgrades)
  package_version = packageVersion("rdhs"),
  # base URL for all DHS API endpoints
  url = "https://api.dhsprogram.com/rest/dhs/",
  # the set of valid DHS API endpoints accepted by dhs_api_request
  api_endpoints = c(
    "data", "indicators", "countries", "surveys",
    "surveycharacteristics", "publications", "datasets",
    "geometry", "tags", "dataupdates", "uiupdates", "info"
  ),
  # storr rds key-value store backing all of the caching namespaces
  storr = NULL,
  # regex matching variable descriptions that are just NA codings
  na_s = "^na -|^na-|.*-na$|.* - na$| \\{NA\\}$|.* NA$|.*NA$",
# CHECK_AVAILABLE_DATASETS
# Resolve requested dataset file names (or full rows from dhs_datasets())
# against the datasets available to the user's DHS login. Returns a
# data.frame of dataset info with a URLS column added; datasets the user
# cannot access get an NA URL and trigger an informative message.
check_available_datasets = function(filenames) {
  # catch whether the filenames requested are with or without the zip
  if (any(grepl("zip", filenames, ignore.case = TRUE))) {
    nm_type <- "FileName"
  } else {
    nm_type <- "file"
  }
  # amend our model_datasets first
  model_datasets <- create_new_filenames(model_datasets)
  # if they have only asked for model datasets then return those
  if (all(filenames %in% model_datasets[[nm_type]])) {
    return(model_datasets[match(filenames, model_datasets[[nm_type]]), ])
  }
  # fetch which datasets you can download from your login
  avs <- self$available_datasets()
  avs <- create_new_filenames(avs)
  avs <- rbind(avs, model_datasets)
  # fetch all the datasets so we can catch for the India matches by
  # using the country code catch
  datasets <- dhs_datasets(client = self)
  datasets <- rbind(datasets, model_datasets[, -c(14:15)])
  # create new filename argument that takes into account the India
  # difficulties where needed
  datasets <- create_new_filenames(datasets)
  # find all the duplicate filenames within the API results
  duplicates <- datasets[duplicated(datasets$FileName), nm_type]
  # because there are duplicate filenames in the API we allow/recommend
  # users to provide as the datasets argument the output of dhs_datasets
  # so that we have the full info about the dataset they want. As such we
  # now may have filenames that are filenames or the entire API output so
  # let's check this
  if (is.vector(filenames)) {
    # do their requested filenames include any of the duplicates
    duplicates_fnd <- match(toupper(duplicates), toupper(filenames))
    # if there are no duplicates matched then perfect
    if (sum(duplicates_fnd, na.rm = TRUE) == 0) {
      # what is the full set of datasets they have asked for
      potential <- datasets[match(toupper(filenames),
                                  toupper(datasets[, nm_type])), ]
      # now match the requested filenames that are available
      found_datasets <- match(toupper(filenames), toupper(avs[, nm_type]))
    } else {
      # let the user know there are duplicate matches and suggest that
      # they clarify using dhs_datasets()
      # (fix: the concatenated strings previously lacked separating
      # spaces, producing e.g. "accordingto the country code")
      message(paste0(
        "The following requested dataset file names are used ",
        "by more than one dataset:\n---\n",
        paste0(duplicates[which(!is.na(duplicates_fnd))], collapse = "\n"),
        "\n---\nBy default the above datasets will be downloaded ",
        "according to the country code indicated by the first 2 letters ",
        "of these datasets. If you wished for the above datasets to be ",
        "downloaded not based on just their first 2 letters then please ",
        "provide the desired rows from the output of dhs_datasets() for ",
        "the datasets argument. ",
        "See introductory vignette for more info about this issue."
      ))
      # unique match strings: country code prefix + file name
      fil_match <- paste0(toupper(substr(filenames, 1, 2)),
                          toupper(filenames))
      dat_match <- paste0(toupper(datasets$DHS_CountryCode),
                          toupper(datasets[, nm_type]))
      avs_match <- paste0(toupper(avs$DHS_CountryCode),
                          toupper(avs[, nm_type]))
      # what is the full set of datasets they have asked for based on
      # the country code assumption
      potential <- datasets[match(fil_match, dat_match), ]
      # if there are duplicates what we will do is assume that they want
      # the country versions
      found_datasets <- match(fil_match, avs_match)
    }
  } else {
    # they gave full dhs_datasets() rows, so we can build unique match
    # strings from the provided country code
    fil_match <- paste0(toupper(filenames$DHS_CountryCode),
                        toupper(filenames[, nm_type]))
    dat_match <- paste0(toupper(datasets$DHS_CountryCode),
                        toupper(datasets[, nm_type]))
    avs_match <- paste0(toupper(avs$DHS_CountryCode),
                        toupper(avs[, nm_type]))
    # what is the full set of datasets they have asked for
    potential <- datasets[match(fil_match, dat_match), ]
    # match against what their login can actually access
    found_datasets <- match(fil_match, avs_match)
  }
  # create the datasets data.frame that will
  # then be used to download datasets
  potential$URLS <- avs$URLS[found_datasets]
  # let them know about any datasets that they requested that aren't
  # available for them to download also
  if (sum(is.na(found_datasets)) > 0) {
    # which filenames have failed
    fail_names <- filenames
    if (is.data.frame(fail_names)) {
      fail_names <- filenames[, nm_type]
    }
    message(
      paste0(
        "These requested datasets are not available from your ",
        "DHS login credentials:\n---\n",
        paste0(fail_names[which(is.na(found_datasets))], collapse = ", "),
        "\n---\nPlease request permission for these datasets from ",
        "the DHS website to be able to download them"
      ))
  }
  return(potential)
}
)
)
|
/R/client.R
|
no_license
|
tXiao95/rdhs
|
R
| false
| false
| 35,476
|
r
|
#' Make a DHS API client
#'
#' @title Make a dhs client
#'
#' @param config config object, as created using \code{read_rdhs_config}
#' @param root Character for root directory to where client, caches,
#'   surveys etc. will be stored. Default = \code{rappdirs_rdhs()}
#' @param api_key Character for DHS API KEY. Default = NULL
#'
#' @template client_dhs_methods
#' @export
#'
#' @examples
#' \dontrun{
#' # create an rdhs config file at "rdhs.json"
#' conf <- set_rdhs_config(
#'   config_path = "rdhs.json", global = FALSE, prompt = FALSE
#' )
#' td <- tempdir()
#' cli <- rdhs::client_dhs(api_key = "DEMO_1234", config = conf, root = td)
#' }
client_dhs <- function(config = NULL,
                       root = rappdirs_rdhs(),
                       api_key = NULL) {
  # if api_key is NULL then set it to the default
  if (is.null(api_key)) {
    api_key <- api_key_internal
  }
  # we need to have a config, so we create the temp one if not provided
  if (is.null(config)) {
    .rdhs$internal_client_update <- FALSE
    config <- set_rdhs_config(prompt = FALSE)
    .rdhs$internal_client_update <- TRUE
  }
  # check rdhs against api last update time
  cache_date <- client_cache_date(root = root)
  if (last_api_update(config$timeout) > cache_date) {
    # create new client if DHS database has been updated
    client <- R6_client_dhs$new(config, api_key, root)
    # If there was already a client in your root (i.e. there was a DHS
    # update) then empty the api_call cache namespace and check package
    # version
    if (cache_date > 0) {
      message("\nDHS API has been updated since you last set up a DHS client\n",
              "in this root directory.")
      message("Previous API / dataset requests will subsequently be rerun in\n",
              "order to ensure your results are up to date.\n")
      client$clear_namespace(namespace = "api_calls")
      client$clear_namespace(namespace = "available_datasets_calls")
      ## clear any now old dataset calls
      # ----------------------------------------------------
      # fetch the dataupdates api endpoint
      upd <- client$dhs_api_request(api_endpoint = "dataupdates")
      # are any of the listed updates more recent than the cache date
      if (max(mdy_hms(upd$UpdateDate)) > cache_date) {
        ## check which datasets have been downloaded in the past
        # first grab the private for readability
        private <- client$.__enclos_env__$private
        # get the surveyIds for all downloaded datasets
        downloaded_dataset_keys <- private$storr$list("downloaded_datasets")
        downloaded_surveyIds <- strsplit(downloaded_dataset_keys, "_") %>%
          lapply(function(x) x[1]) %>%
          unlist()
        # which of the updates have occurred since our last client was made
        chge <- upd$SurveyId[mdy_hms(upd$UpdateDate) > client$get_cache_date()]
        datasets_to_clear <- which(downloaded_surveyIds %in% chge)
        # do any of them match those that have been updated since the
        # last cache_date
        if (length(datasets_to_clear) > 0) {
          for (key in downloaded_dataset_keys[datasets_to_clear]) {
            private$storr$del(key, "downloaded_datasets")
            # fix: namespace was misspelt "downloaded_datasets_variable_names"
            # (the rest of the class writes/reads
            # "downloaded_dataset_variable_names"), so the cached variable
            # names were never actually cleared after a DHS update
            private$storr$del(key, "downloaded_dataset_variable_names")
          }
        }
      }
    }
    return(client)
    # if no api updates have occurred then get the cached api client
  } else {
    # create client in the location rather than readRDS so that we
    # are using new config etc
    client <- R6_client_dhs$new(config, api_key, root)
    # grab the private environment to compare the saved package version
    private <- client$.__enclos_env__$private
    # spit out a message to say if the client is being updated from
    # a previous rdhs version
    # (fix: the two message strings previously concatenated without a
    # space, printing "detected.Your")
    if (packageVersion("rdhs") != private$package_version) {
      message("New version of rdhs detected. ",
              "Your saved client will be updated.")
    }
    return(client)
  }
}
R6_client_dhs <- R6::R6Class(
  classname = "client_dhs",
  cloneable = FALSE,
  # PUBLIC METHODS
  public = list(
    # INITIALISATION
    # Build a new client: store the api key and root, open the storr rds
    # key-value cache at <root>/db, stamp the cache date, save the client
    # to disk and finally attach the config.
    initialize = function(config,
                          api_key,
                          root = rappdirs_rdhs()) {
      # config must come from set_rdhs_config / read_rdhs_config
      if (!inherits(config, "rdhs_config")) {
        stop ("config provided for client is not of class rdhs_config")
      }
      private$api_key <- api_key
      private$root <- root
      # key-value store that backs all of the client's caching namespaces
      private$storr <- storr::storr_rds(file.path(root, "db"))
      private$cache_date <- Sys.time()
      # saved BEFORE the config is attached — presumably so the serialised
      # client does not carry the config (which can contain credentials);
      # NOTE(review): confirm this ordering is intentional before changing
      saveRDS(self, file.path(root, client_file_name())) # save before config
      private$config <- config
    },
# API REQUESTS
# Query a DHS API endpoint, returning the parsed JSON (cached in the
# "api_calls" storr namespace), or the parsed error response when the
# request fails with a 4xx/5xx status.
#
# api_endpoint: one of private$api_endpoints, e.g. "surveys"
# query: named list of API query parameters (vector values are collapsed
#   to comma-separated strings)
# api_key: DHS API key; defaults to the key stored in the client
# num_results: results per page, or "ALL" (any capitalisation) for all
# just_results: if TRUE return just the Data records, row-bound together
dhs_api_request = function(api_endpoint,
                           query = list(),
                           api_key = private$api_key,
                           num_results = 100,
                           just_results = TRUE) {
  # Check api_endpoints first
  if (!is.element(api_endpoint, private$api_endpoints)) {
    # fix: was bare `api_endpoints`, which is not in scope inside the
    # method (must be accessed via private$); also "one one of" typo
    stop(paste("Requested api_endpoint is invalid. Must be one of:",
               paste(private$api_endpoints, collapse = "\n"),
               "For more information check the api website:",
               "https://api.dhsprogram.com/#/index.html",
               sep = "\n"))
  }
  # Collapse query list
  query_param_lengths <- lapply(query, length) %>% unlist()
  # collapse where lengths are greater than 1
  for (i in which(query_param_lengths > 1)) {
    query[[i]] <- paste0(query[[i]], collapse = ",")
  }
  # add the api key (fix: use the argument — which defaults to the
  # client's stored key — so a caller-supplied key is honoured)
  query$apiKey <- api_key
  # then build in pages. If they have asked for all results
  # catch this and standardise (any capitalisation of "a"/"al"/"all";
  # replaces the explicit 14-variant list, which was equivalent)
  if (is.element(toupper(num_results), c("A", "AL", "ALL"))) {
    query$perPage <- 100
    num_results <- "ALL"
  } else {
    query$perPage <- num_results
  }
  # Build url query and associated key
  url <- httr::modify_url(paste0(private$url, api_endpoint), query = query)
  # Slight hacking to deal with page numbers etc now: the cache key must
  # not depend on perPage, only on endpoint, query and result options
  pp <- which(names(query) == "perPage")
  key <- paste0(api_endpoint, "_",
                paste0(
                  names(query)[-pp], unlist(query)[-pp], collapse = ","
                ),
                ",num_results", num_results, ",just_results", just_results)
  key <- digest::digest(key)
  # first check against cache
  out <- tryCatch(private$storr$get(key, "api_calls"),
                  KeyError = function(e) NULL)
  # if the result was cached then return it straight away
  if (!is.null(out)) {
    return(out)
  } else {
    # Get request
    resp <- httr::GET(url, httr::accept_json(),
                      httr::user_agent("https://github.com/ropensci/rdhs"),
                      encode = "json")
    ## pass to response parse
    parsed_resp <- handle_api_response(resp, TRUE)
    # return the parsed error body for client and server errors
    if (resp$status_code >= 400 && resp$status_code < 600) {
      return(parsed_resp)
    }
    # put some message or return to let the user know if the data
    # returned is empty
    if (parsed_resp$RecordsReturned == 0) {
      message("Records returned equal to 0. Most likely your query
          terms are too specific or there is a typo that does not
          trigger a 404 or 500 error")
    }
    # Now let's address the num_results argument
    # if the first call has caught all the results then great
    if (parsed_resp$RecordsReturned == parsed_resp$RecordCount) {
      if (just_results) {
        parsed_resp <- rbind_list_base(parsed_resp$Data)
      }
    } else {
      # if not then query with either max possible or their requested amount
      if (num_results == "ALL") {
        query$perPage <- 5000
      } else {
        query$perPage <- num_results
      }
      # Create new request and parse this
      url <- httr::modify_url(paste0(private$url, api_endpoint),
                              query = query)
      resp <- httr::GET(
        url, httr::accept_json(),
        httr::user_agent("https://github.com/ropensci/rdhs"),
        encode = "json"
      )
      parsed_resp <- handle_api_response(resp, TRUE)
      # if this larger page query has returned all the results then
      # return this else we will loop through the remaining pages
      if (parsed_resp$TotalPages == 1) {
        if (just_results) {
          parsed_resp <- rbind_list_base(parsed_resp$Data)
        }
      } else {
        # save the resp as a temp and then make parsed_resp the list we
        # will loop requests into
        temp_parsed_resp <- parsed_resp
        parsed_resp <- list()
        length(parsed_resp) <- temp_parsed_resp$TotalPages
        parsed_resp[[1]] <- temp_parsed_resp
        # TotalPages > 1 here, so 2:length() is safe
        for (i in 2:length(parsed_resp)) {
          query$page <- i
          url <- httr::modify_url(paste0(private$url, api_endpoint),
                                  query = query)
          resp <- httr::GET(
            url, httr::accept_json(),
            httr::user_agent("https://github.com/ropensci/rdhs"),
            encode = "json"
          )
          temp_parsed_resp <- handle_api_response(resp, TRUE)
          parsed_resp[[i]] <- temp_parsed_resp
        }
        # and now concatenate the results
        if (just_results) {
          parsed_resp <- collapse_api_responses(parsed_resp)
        }
      }
    }
    ## then cache the resp if we haven't stopped already and
    # return the parsed resp
    private$storr$set(key, parsed_resp, "api_calls")
    return(parsed_resp)
  }
},
# AVAILABLE DATASETS
# Return a data.frame of the datasets this login can download, built by
# the package-level available_datasets() helper and cached per project
# in the "available_datasets_calls" namespace.
#
# clear_cache_first: wipe that cache first — useful when the user's
# dataset permissions have recently changed.
available_datasets = function(clear_cache_first = FALSE) {
  # check config are good
  if (config_not_present(private$config)) {
    handle_config(private$config$config_path)
  }
  # clear the cache for this if set to do so. This is only included
  # here if the user has recently had a change to the datasets
  # they have been allowed to access and want to ensure they
  # are accessing their new available datasets
  if (clear_cache_first) {
    self$clear_namespace(namespace = "available_datasets_calls")
  }
  # cache key is derived from the project name in the config
  key <- digest::digest(paste0(private$config$project, ","))
  # first check against cache
  out <- tryCatch(private$storr$get(key, "available_datasets_calls"),
                  KeyError = function(e) NULL
  )
  # if the result was cached then return it straight away
  if (!is.null(out)) {
    return(out)
  } else {
    # Get downloadable datasets
    resp <- available_datasets(
      config = private$config,
      datasets_api_results = self$dhs_api_request("datasets",
                                                  num_results = "ALL"),
      surveys_api_results = self$dhs_api_request("surveys",
                                                 num_results = "ALL")
    )
    ## then cache the resp if we haven't stopped already and return
    ## the parsed resp
    private$storr$set(key, resp, "available_datasets_calls")
    return(resp)
  }
},
# GET DATASETS
# Download (or fetch from the cache) the requested datasets. Returns a
# named list, one element per requested dataset, containing the local
# path to the dataset (or an error string when it is unavailable), with
# the reformat flag attached as an attribute.
#
# dataset_filenames: file names (or rows of dhs_datasets()) to fetch
# download_option: "zip", "rds" or "both" (matched loosely)
# reformat: should the dataset be reformatted when read in
# all_lower: passed through to the dataset reader
# output_dir_root: directory the datasets are downloaded into
# clear_cache: clear the available-datasets cache before downloading
# ...: passed through to download_datasets()
get_datasets = function(dataset_filenames,
                        download_option="rds",
                        reformat=FALSE,
                        all_lower=TRUE,
                        output_dir_root=file.path(private$root, "datasets"),
                        clear_cache = FALSE,
                        ...) {
  # check config are good
  if (config_not_present(private$config)) {
    handle_config(private$config$config_path)
  }
  # if cache needs clearing (result not needed, only the side effect)
  if (clear_cache) {
    self$available_datasets(TRUE)
  }
  # fetch which datasets you can download from your login
  datasets <- private$check_available_datasets(dataset_filenames)
  # results storage
  res <- list()
  # possible download options, matched loosely against what was given
  download_possibilities <- c("zip", "rds", "both")
  dopt <- grep(paste0(strsplit(download_option, "") %>%
                        unlist(), collapse = "|"),
               download_possibilities, ignore.case = TRUE)
  download_option <- download_possibilities[dopt]
  if (!is.element(download_option, download_possibilities)) {
    stop("Download option provided is not valid")
  }
  # handle for more than one dataset specified
  download_iterations <- length(res) <- dim(datasets)[1]
  names(res) <- datasets$file
  # iterate through download requests
  # (fix: seq_len rather than 1:n so zero matched datasets loops 0 times)
  for (i in seq_len(download_iterations)) {
    # if no url then place error message in results list
    if (is.na(datasets$URLS[i])) {
      res[[i]] <- "Dataset is not available with your DHS login credentials"
    } else {
      # cache key: survey id, file name, download option and reformat flag
      key <- paste0(datasets[i, ]$SurveyId, "_", datasets[i, ]$FileName,
                    "_", download_option, "_", reformat)
      # first check against cache
      out <- tryCatch(private$storr$get(key, "downloaded_datasets"),
                      KeyError = function(e) NULL)
      # if it was cached then use that
      if (!is.null(out)) {
        res[[i]] <- out
      } else {
        # Download dataset
        resp <- download_datasets(
          config = private$config,
          desired_dataset = datasets[i, ],
          output_dir_root = output_dir_root,
          download_option = download_option,
          all_lower = all_lower,
          reformat = reformat,
          ...
        )
        # if there were 2 results returned with these names then we cache
        # them into different namespaces. The reason for this is it's
        # really helpful to have the questions in each dataset quickly
        # accessible without having to load the dataset each time. And we
        # cache the dataset path rather than the full dataset so that
        # people can more quickly grab a dataset from the rds in the
        # datasets directory rather than having to go into the db directory
        if (identical(names(resp), c("dataset", "variable_names"))) {
          private$storr$set(key, resp$dataset,
                            "downloaded_datasets")
          private$storr$set(key, resp$variable_names,
                            "downloaded_dataset_variable_names")
          res[[i]] <- resp$dataset
        } else if (grepl("No support for reading in", resp)) {
          res[[i]] <- resp
        } else {
          ## then cache the resp and store it in the results list
          private$storr$set(key, resp, "downloaded_datasets")
          res[[i]] <- resp
        }
      }
    }
  }
  # add the reformat as an attribute to make life easier
  # in survey questions/variables
  attr(res, which = "reformat") <- reformat
  return(res)
},
# SURVEY_QUESTIONS
# Creates a data.frame of survey variables and descriptions for the
# given datasets, filtered by search terms (or a regex).
#
# dataset_filenames: datasets to search within (downloaded if needed)
# search_terms: terms grepped for in the variable descriptions
# essential_terms: terms that must appear for a dataset's rows to be kept
# regex: regex used instead of search_terms when the latter is NULL
# rm_na: drop variables whose description is just an NA coding
# ...: passed through to get_datasets()
survey_questions = function(dataset_filenames,
                            search_terms = NULL,
                            essential_terms = NULL,
                            regex = NULL,
                            rm_na = TRUE,
                            ...) {
  # check config are good
  if (config_not_present(private$config)) {
    handle_config(private$config$config_path)
  }
  # download any datasets that need to be downloaded
  download <- self$get_datasets(dataset_filenames, ...)
  # fetch which datasets you can download from your login
  datasets <- private$check_available_datasets(dataset_filenames)
  datasets <- datasets[!is.na(datasets$URLS), ]
  # handle the search terms (fix: scalar condition, so && rather than &)
  if (is.null(regex) && is.null(search_terms)) {
    stop("One of search terms or regex must not be NULL")
  }
  if (is.null(search_terms)) {
    pattern <- paste0(regex, essential_terms, collapse = "|")
  } else {
    pattern <- paste0(search_terms, essential_terms, collapse = "|")
    if (!is.null(regex)) {
      # fix: strings previously concatenated without separating spaces
      message(paste0(
        "Both regex and search_terms were provided. ",
        "search_terms will be used. ",
        "To use regex for searching, do not specify search_terms"
      ))
    }
  }
  # results storage
  # (fix: first column was named "code" but the rows bound below use
  # "variable", which makes rbind's column names mismatch)
  df <- data.frame("variable" = character(0), "description" = character(0),
                   "dataset_filename" = character(0),
                   "dataset_path" = character(0),
                   "survey_id" = character(0))
  res <- list()
  download_iteration <- length(res) <- dim(datasets)[1]
  names(res) <- datasets$file
  # iterate through downloaded surveys
  # (fix: seq_len rather than 1:n so zero datasets loops 0 times)
  for (i in seq_len(download_iteration)) {
    # cache key matching the one written by get_datasets
    key <- paste0(datasets[i, ]$SurveyId, "_",
                  datasets[i, ]$FileName, "_",
                  "rds", "_",
                  attr(download, which = "reformat"))
    # first check against cache
    out <- tryCatch(
      private$storr$get(key, "downloaded_datasets"),
      KeyError = function(e) NULL
    )
    out_desc <- tryCatch(
      private$storr$get(key, "downloaded_dataset_variable_names"),
      KeyError = function(e) NULL
    )
    # add the survey file path to the res list
    res[[i]] <- out
    # match on search terms and remove questions that have na's
    matched_rows <- grep(pattern = pattern, out_desc$description,
                         ignore.case = TRUE)
    if (rm_na) {
      na_from_match <- grep(private$na_s,
                            out_desc$description[matched_rows],
                            ignore.case = TRUE)
      if (length(na_from_match) > 0) {
        # reuse the grep above rather than recomputing it
        matched_rows <- matched_rows[-na_from_match]
      }
    }
    # only add if we have found any questions that match
    if (length(matched_rows) > 0) {
      # add the descriptions to the df object
      df <- rbind(df, data.frame(
        "variable" = out_desc$variable[matched_rows],
        "description" = out_desc$description[matched_rows],
        "dataset_filename" = rep(names(res[i]), length(matched_rows)),
        "dataset_path" = rep(res[[i]], length(matched_rows)),
        "survey_id" = rep(datasets[i, ]$SurveyId, length(matched_rows)),
        stringsAsFactors = FALSE
      ))
    }
  }
  # now remove datasets that do not have essential terms:
  if (!is.null(essential_terms)) {
    if (sum(is.na(grep(essential_terms, df$description))) > 0) {
      df <- df[grepl(essential_terms, df$description), ]
    }
  }
  # Return the questions, codes and surveys data.frame
  return(df)
},
# SURVEY_VARIABLES
# Creates a data.frame of the requested survey variables and their
# descriptions across the given datasets.
#
# dataset_filenames: datasets to search within (downloaded if needed)
# variables: variable codes to look up (case-matched per dataset)
# essential_variables: variables that must all be present in a dataset
#   for any of its matches to be kept
# rm_na: drop variables whose description is just an NA coding
# ...: passed through to get_datasets()
survey_variables = function(dataset_filenames,
                            variables,
                            essential_variables = NULL,
                            rm_na = TRUE,
                            ...) {
  # check config are good
  if (config_not_present(private$config)) {
    handle_config(private$config$config_path)
  }
  # first download any datasets needed
  download <- self$get_datasets(dataset_filenames, ...)
  # fetch which datasets you can download from your login
  datasets <- private$check_available_datasets(dataset_filenames)
  datasets <- datasets[!is.na(datasets$URLS), ]
  # results storage
  # (fix: first column was named "code" but the rows bound below use
  # "variable", which makes rbind's column names mismatch)
  df <- data.frame(
    "variable" = character(0), "description" = character(0),
    "dataset_filename" = character(0), "dataset_path" = character(0),
    "survey_id" = character(0)
  )
  res <- list()
  download_iteration <- length(res) <- dim(datasets)[1]
  names(res) <- datasets$file
  # iterate through datasets
  # (fix: seq_len rather than 1:n so zero datasets loops 0 times)
  for (i in seq_len(download_iteration)) {
    # cache key matching the one written by get_datasets
    key <- paste0(
      datasets[i, ]$SurveyId, "_", datasets[i, ]$FileName, "_", "rds",
      "_", attr(download, which = "reformat")
    )
    # Get description and dataset path and find the matched_rows for
    # the requested variables
    out_desc <- private$storr$get(key, "downloaded_dataset_variable_names")
    res[[i]] <- private$storr$get(key, "downloaded_datasets")
    # handle for case mismatches - we'll do this rather than allow people
    # to cache against the case they have specified with all_lower as that
    # is ridiculous memory wastage.
    # if the first variable name is upper case then they all are, so force
    # the variables and essential variables to the same case for matching;
    # otherwise they are all lower case and we do the same
    if (is_uppercase(out_desc$variable[1])) {
      variables <- toupper(variables)
      if (!is.null(essential_variables)) {
        essential_variables <- toupper(essential_variables)
      }
    } else {
      variables <- tolower(variables)
      if (!is.null(essential_variables)) {
        essential_variables <- tolower(essential_variables)
      }
    }
    # now let's match
    matched_rows <- na.omit(match(variables, out_desc$variable))
    if (rm_na) {
      # remove na results
      na_from_match <- grep(private$na_s,
                            out_desc$description[matched_rows],
                            ignore.case = TRUE)
      if (length(na_from_match) > 0) {
        # reuse the grep above rather than recomputing it
        matched_rows <- matched_rows[-na_from_match]
      }
    }
    # only add if we have found any questions that match
    if (length(matched_rows) > 0) {
      # add the descriptions to the df object
      df <- rbind(df, data.frame(
        "variable" = out_desc$variable[matched_rows],
        "description" = out_desc$description[matched_rows],
        "dataset_filename" = rep(names(res[i]), length(matched_rows)),
        "dataset_path" = rep(res[[i]], length(matched_rows)),
        "survey_id" = rep(datasets[i, ]$SurveyId, length(matched_rows)),
        stringsAsFactors = FALSE
      ))
    }
  }
  # now remove datasets that do not have all the essential variables:
  if (!is.null(essential_variables)) {
    for (i in unique(df$dataset_filename)) {
      if (sum(is.na(match(essential_variables,
                          df$variable[df$dataset_filename == i]))) > 0) {
        df <- df[-which(df$dataset_filename == i), ]
      }
    }
  }
  # return the finished df
  return(df)
},
# EXTRACTION
# Extract the data for the given questions (rows from survey_questions /
# survey_variables), optionally adding geographic cluster data.
extract = function(questions, add_geo=FALSE) {
  if (dim(questions)[1] == 0) {
    stop("questions argument is empty - check your
survey_questions/variables terms?")
  }
  # are the questions relating to the model datasets
  if (all(substr(unique(questions$dataset_filename), 1, 2) == "zz")) {
    datasets <- model_datasets
  } else {
    datasets <- self$available_datasets()
  }
  # append the filename as survey to the datasets for easier matching later
  datasets$Survey <- strsplit(datasets$FileName, ".", fixed = TRUE) %>%
    lapply(function(x) x[1]) %>%
    unlist()
  ## get geo_surveys if needed
  # (fix: default to NULL so the extraction() call below never references
  # an unbound object — previously geo_surveys was left undefined when
  # add_geo was FALSE or no geographic dataset matched)
  geo_surveys <- NULL
  if (add_geo) {
    snm <- match(unique(questions$dataset_filename), datasets$Survey)
    ge_match <- which(datasets$SurveyNum %in% datasets$SurveyNum[snm] &
                        datasets$FileType == "Geographic Data")
    if (sum(!is.na(ge_match)) > 0) {
      geo_surveys <- self$get_datasets(
        dataset_filenames = datasets$FileName[ge_match],
        download_option = "rds"
      )
    }
  }
  ## fetch the results
  res <- extraction(questions, datasets, geo_surveys, add_geo)
  return(res)
},
# GETTERS
# Simple read-only accessors for the client's private fields.
get_cache_date = function() private$cache_date,
get_root = function() private$root,
get_config = function() private$config,
# get a dataset's var labels
# Return the variable names and labels (descriptions) for the given
# datasets, identified either by API file name or by local file path.
# Exactly one of dataset_filenames / dataset_paths should be given; if
# both are, the filenames win and a message is shown.
get_variable_labels = function(dataset_filenames=NULL,
                               dataset_paths=NULL,
                               rm_na = FALSE) {
  # catch if both null
  if (is.null(dataset_filenames) && is.null(dataset_paths)) {
    stop("One of dataset_filenames or dataset_paths must not be null")
  }
  # catch if both provided
  if (!is.null(dataset_filenames) && !is.null(dataset_paths)) {
    message("Both of dataset_filenames and dataset_paths are provided.
            The filenames will be used")
    dataset_paths <- NULL
  }
  # all valid file names from the API plus the model datasets
  filenames <- dhs_datasets(client = self)$FileName
  filenames <- c(filenames, model_datasets$FileName)
  # get vars from dataset_paths
  if (!is.null(dataset_paths)) {
    # stop if all poor file paths
    # NOTE(review): `sep` is not an argument of paste0 here, so it is
    # treated as one more string to paste onto each element; `collapse`
    # was probably intended — the printed output is similar by accident
    if (all(!file.exists(dataset_paths))) {
      stop(
        "All dataset file paths were not found:\n ",
        paste0(dataset_paths[!file.exists(dataset_paths)], sep = "\n ")
      )
    }
    # message any poor file paths first
    if (any(!file.exists(dataset_paths))) {
      message(
        "Following dataset file paths were not found:\n ",
        paste0(dataset_paths[!file.exists(dataset_paths)], sep = "\n ")
      )
    }
    # what have we downloaded
    downs <- self$get_downloaded_datasets()
    # which of the requested (existing) paths match downloaded datasets
    mats <- match(dataset_paths[file.exists(dataset_paths)], downs)
    # what keys do these belong to and what were the downloaded options
    # (so we don't download extra files)
    keys <- private$storr$list("downloaded_datasets")[mats]
    # key format is <SurveyId>_<FileName>_<download_option>_<reformat>,
    # so pull out the file name (2) and reformat flag (4)
    options <- strsplit(keys, "_") %>% lapply(function(x) x[c(2, 4)])
    options <- lapply(options, function(x) {
      c(grep(x[1], filenames, value = TRUE), x[2])
    })
    # grab all questions (empty search term) for each matched dataset
    vars <- lapply(options, function(x) {
      self$survey_questions(dataset_filenames = x[1],
                            search_terms = "",
                            reformat = x[2],
                            rm_na = FALSE)
    })
    vars <- rbind_labelled(vars)
  }
  if (!is.null(dataset_filenames)) {
    # just get the ones that exist
    names_matched <- filenames[match(dataset_filenames, filenames)]
    # stop if all poor file names
    if (all(is.na(names_matched))) {
      stop("All dataset file names are not valid:\n ",
           paste0(dataset_filenames[is.na(names_matched)], sep = "\n "))
    }
    # message any poor file names
    if (any(is.na(names_matched))) {
      message("Following dataset file names are not valid:\n ",
              paste0(dataset_filenames[is.na(names_matched)], sep = "\n "))
    }
    # grab the variables using a catch all variables term
    vars <- self$survey_questions(
      dataset_filenames = names_matched[!is.na(names_matched)],
      search_terms = "",
      rm_na = FALSE
    )
  }
  return(vars)
},
## GET_DOWNLOADED_DATASETS
# Grab all downloaded datasets: returns the cached file paths, named by
# the dataset file name with its ".rds" extension stripped.
get_downloaded_datasets = function() {
  # grab the keys within the namespace for this
  keys <- private$storr$list("downloaded_datasets")
  # download paths
  downloads <- private$storr$mget(keys, namespace = "downloaded_datasets")
  # name each path by the part of its basename before ".rds"
  names(downloads) <- strsplit(
    basename(unlist(downloads)), ".rds", fixed = TRUE
  ) %>% lapply(function(x) x[1]) %>% unlist()
  return(downloads)
},
# SETTERS
# Overwrite the client's cached DHS API update timestamp.
set_cache_date = function(date) private$cache_date <- date,
# SAVE CLIENT
# Serialise this client to <root>/<client_file_name()> for later sessions.
save_client = function() saveRDS(self, file.path(
  private$root,
  client_file_name()
)),
# CLEAR NAMESPACE
# Delete every key in the given storr namespace, then garbage collect the
# underlying rds store to reclaim disk space.
clear_namespace = function(namespace) {
  private$storr$clear(namespace = namespace)
  private$storr$gc()
}
),
  private = list(
    # API key credential -- presumably for the DHS API; assigned elsewhere
    api_key = NULL,
    # root directory used for the client's cache (see save_client)
    root = NULL,
    # root directory exactly as supplied by the user -- TODO confirm use
    user_declared_root = NULL,
    # login configuration -- set outside this chunk
    config = NULL,
    # timestamp of the last cache refresh
    cache_date = Sys.time(),
    # rdhs version that created this client object
    package_version = packageVersion("rdhs"),
    # base URL of the DHS REST API
    url = "https://api.dhsprogram.com/rest/dhs/",
    # API endpoints the client can query
    api_endpoints = c(
      "data", "indicators", "countries", "surveys",
      "surveycharacteristics", "publications", "datasets",
      "geometry", "tags", "dataupdates", "uiupdates", "info"
    ),
    # storr key-value store backing the cache
    storr = NULL,
    # regex identifying labels that represent missing values
    na_s = "^na -|^na-|.*-na$|.* - na$| \\{NA\\}$|.* NA$|.*NA$",
    # CHECK_AVAILABLE_DATASETS
check_available_datasets = function(filenames) {
# catch of the filenames requested are with or without the zip
if (any(grepl("zip", filenames, ignore.case = TRUE))) {
nm_type <- "FileName"
} else {
nm_type <- "file"
}
# ammend our model_datasets first
model_datasets <- create_new_filenames(model_datasets)
# if they have only asked for model datasets then return those
if (all(filenames %in% model_datasets[[nm_type]])){
return(model_datasets[match(filenames, model_datasets[[nm_type]]), ])
}
# fetch which datasets you can download from your login
avs <- self$available_datasets()
avs <- create_new_filenames(avs)
avs <- rbind(avs, model_datasets)
# fetch all the datasets so we can catch for the India matches by
# using the country code catch
datasets <- dhs_datasets(client = self)
datasets <- rbind(datasets, model_datasets[, -c(14:15)])
# create new filename argument that takes into account the india
# difficiulties where needed
datasets <- create_new_filenames(datasets)
# find all the duplicate filenames and what datasets they belong to
duplicates <- datasets[duplicated(datasets$FileName), nm_type]
duplicate_data <- datasets[which(datasets[,nm_type] %in% duplicates), ]
# because there are duplicate filenames in the API we allow/recommend
# users to provide as the datasets argument the output of dhs_datasets
# so that we have the full info about the dataset they want. As such we
# mow may have filenames that are filenames or the entire API output so
# let's check this
if (is.vector(filenames)) {
# do their requested filenames include any of the duplicates
duplicates_fnd <- match(toupper(duplicates), toupper(filenames))
# if there are no duplicates matched then perfect
if (sum(duplicates_fnd, na.rm = TRUE) == 0) {
# what is the full set of datasets they have asked for
potential <- datasets[match(toupper(filenames),
toupper(datasets[,nm_type])), ]
# now match the requested filenames are available
found_datasets <- match(toupper(filenames), toupper(avs[,nm_type]))
} else {
# let the user know there are duplicate matches and suggest that
# they clarify using dhs_datasets()
message(paste0(
"The following requested dataset file names are used
by more than one dataset:\n---\n",
paste0(duplicates[which(!is.na(duplicates_fnd))], collapse = "\n"),
"\n---\nBy default the above datasets will be downloaded according",
"to the country code indicated by the first 2 letters of these",
"datasets. If you wished for the the above datatasets to be",
"downloaded not based on just their first 2 letters then please",
"provide the desired rows from the output of dhs_datasets() for",
"the datasets argument.",
"See introductory vignette for more info about this issue.",
collapse = "\n"
))
# unique match strings
fil_match <- paste0(toupper(substr(filenames, 1, 2)),
toupper(filenames))
dat_match <- paste0(toupper(datasets$DHS_CountryCode),
toupper(datasets[,nm_type]))
avs_match <- paste0(toupper(avs$DHS_CountryCode),
toupper(avs[,nm_type]))
# what is the full set of datasets they have asked for based on
# the countrycode assumpotion
potential <- datasets[match(fil_match, dat_match), ]
# if there are duplicates what we will do is assume that they want
# the country versions
found_datasets <- match(fil_match, avs_match)
}
} else {
# unique match strings
fil_match <- paste0(toupper(filenames$DHS_CountryCode),
toupper(filenames[,nm_type]))
dat_match <- paste0(toupper(datasets$DHS_CountryCode),
toupper(datasets[,nm_type]))
avs_match <- paste0(toupper(avs$DHS_CountryCode),
toupper(avs[,nm_type]))
# what is the full set of datasets they have asked for
potential <- datasets[match(fil_match, dat_match), ]
# if they gave the full output then we can match with the
# provided country code
found_datasets <- match(fil_match, avs_match)
}
# create the datasets data.frame that will
# then be used to download datasets
potential$URLS <- avs$URLS[found_datasets]
# let them know about any datasets that they requested that aren't
# avaialable for them to download also
if (sum(is.na(found_datasets)) > 0) {
# which filenames have failed
fail_names <- filenames
if (is.data.frame(fail_names)) {
fail_names <- filenames[,nm_type]
}
message(
paste0(
"These requested datasets are not available from your ",
"DHS login credentials:\n---\n",
paste0(fail_names[which(is.na(found_datasets))], collapse = ", "),
"\n---\nPlease request permission for these datasets from ",
"the DHS website to be able to download them"
))
}
return(potential)
}
)
)
|
#' @keywords internal
#' @import shiny
#' @import shinydashboard
#' @importFrom sf st_linestring st_drop_geometry
#' @import mapview
#' @importFrom flexdashboard gauge gaugeSectors gaugeOutput
#' @importFrom esquisse esquisserUI esquisserServer
# The following block is used by usethis to automatically manage
# roxygen namespace tags. Modify with care!
## usethis namespace: start
## usethis namespace: end
NULL
|
/R/emdash-package.R
|
permissive
|
asiripanich/emdash
|
R
| false
| false
| 413
|
r
|
#' @keywords internal
#' @import shiny
#' @import shinydashboard
#' @importFrom sf st_linestring st_drop_geometry
#' @import mapview
#' @importFrom flexdashboard gauge gaugeSectors gaugeOutput
#' @importFrom esquisse esquisserUI esquisserServer
# The following block is used by usethis to automatically manage
# roxygen namespace tags. Modify with care!
## usethis namespace: start
## usethis namespace: end
NULL
|
# Unroot a phylogenetic tree and save the result.
library(ape)
# Parse the Newick tree from disk.
phylo_tree <- read.tree("2072_0.txt")
# Remove the root node and write the unrooted tree back out.
write.tree(unroot(phylo_tree), file = "2072_0_unrooted.txt")
|
/codeml_files/newick_trees_processed/2072_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false
| false
| 135
|
r
|
# Unroot a phylogenetic tree and save the result.
library(ape)
# Parse the Newick tree from disk.
phylo_tree <- read.tree("2072_0.txt")
# Remove the root node and write the unrooted tree back out.
write.tree(unroot(phylo_tree), file = "2072_0_unrooted.txt")
|
#' @importFrom graphics par rect text
plotlodmatlist <- function(lodmatlist, times, ylab="QTL position", xlab="Time", mval=0, ...) {
nlst <- length(lodmatlist)
nlsts <- lapply(lodmatlist, function(t) nrow(t))
chrs <- lapply(lodmatlist, function(t) t[1,1])
end <- NULL
start <- NULL
num = 0
nt <- ncol(lodmatlist[[1]]) - 2
z3 <- NULL;
for(i in 1:nlst) { # i=1
lods <- as.matrix(lodmatlist[[i]][,-c(1,2)])
z3 <- rbind(z3, rbind(lods, rep(mval,nt), rep(mval,nt)))
num = num + nlsts[[i]]
end <- c(end, num + (i-1)*2 )
start <- c(start, num - nlsts[[i]] + 1 + (i-1)*2 )
}
midpt <- (start + end)/2
z3 <- t(z3)
x <- 1:(dim(z3)[1])
y <- 1:(dim(z3)[2])
if(!missing(times) && !is.null(times)) {
if(length(times) != length(x))
stop("times should have length ", length(x))
x <- times
}
par(mar=c(5.1,6.1,2.1,2.1))
image.plot(x, y, z3, yaxt="n", xlab=xlab, ylab=ylab, ...)
last <- end[nlst]
start <- c(start, last + 2)
for(i in 1:nlst)
rect(x[1]-diff(x[1:2]),end[i], x[length(x)]+diff(x[1:2]),start[i+1]-0.5, col="white", border="white")
rect(x[1]-diff(x[1:2]),max(end), x[length(x)]+diff(x[1:2]),max(start)+1, col="white", border="white")
u <- par("usr") # plot ranges [left,right, bottom,top]
width <- 0.01*diff(u[1:2])
text(u[1]-5*width, midpt, unlist(chrs) , xpd=TRUE)
for(i in seq(along=start)) {
if(i %% 2)
rect(u[1]-width, start[i], u[1]-width*2, end[i], col="gray30", xpd=TRUE)
else
rect(u[1]-width*2, start[i], u[1]-width*3, end[i], col="gray30", xpd=TRUE)
}
}
|
/R/plotlodmatlist.R
|
no_license
|
ikwak2/funqtl
|
R
| false
| false
| 1,688
|
r
|
#' @importFrom graphics par rect text
plotlodmatlist <- function(lodmatlist, times, ylab="QTL position", xlab="Time", mval=0, ...) {
nlst <- length(lodmatlist)
nlsts <- lapply(lodmatlist, function(t) nrow(t))
chrs <- lapply(lodmatlist, function(t) t[1,1])
end <- NULL
start <- NULL
num = 0
nt <- ncol(lodmatlist[[1]]) - 2
z3 <- NULL;
for(i in 1:nlst) { # i=1
lods <- as.matrix(lodmatlist[[i]][,-c(1,2)])
z3 <- rbind(z3, rbind(lods, rep(mval,nt), rep(mval,nt)))
num = num + nlsts[[i]]
end <- c(end, num + (i-1)*2 )
start <- c(start, num - nlsts[[i]] + 1 + (i-1)*2 )
}
midpt <- (start + end)/2
z3 <- t(z3)
x <- 1:(dim(z3)[1])
y <- 1:(dim(z3)[2])
if(!missing(times) && !is.null(times)) {
if(length(times) != length(x))
stop("times should have length ", length(x))
x <- times
}
par(mar=c(5.1,6.1,2.1,2.1))
image.plot(x, y, z3, yaxt="n", xlab=xlab, ylab=ylab, ...)
last <- end[nlst]
start <- c(start, last + 2)
for(i in 1:nlst)
rect(x[1]-diff(x[1:2]),end[i], x[length(x)]+diff(x[1:2]),start[i+1]-0.5, col="white", border="white")
rect(x[1]-diff(x[1:2]),max(end), x[length(x)]+diff(x[1:2]),max(start)+1, col="white", border="white")
u <- par("usr") # plot ranges [left,right, bottom,top]
width <- 0.01*diff(u[1:2])
text(u[1]-5*width, midpt, unlist(chrs) , xpd=TRUE)
for(i in seq(along=start)) {
if(i %% 2)
rect(u[1]-width, start[i], u[1]-width*2, end[i], col="gray30", xpd=TRUE)
else
rect(u[1]-width*2, start[i], u[1]-width*3, end[i], col="gray30", xpd=TRUE)
}
}
|
# Principal component analysis (PCA) of the iris data, demonstrated first
# with prcomp() and then with princomp(), keeping the first two components.
# NOTE(review): this is an interactive tutorial script -- ?help and View()
# calls assume an interactive session.
# PCA with prcomp()
# to get more information about prcomp
?prcomp
# iris is lazily available from the datasets package, so dim() works even
# before the explicit data(iris) call below
dim(iris)
data(iris)
# remove class variable
pca.data <- iris[,-5]
#PCA with prcomp()
pca <- prcomp(pca.data)
# importance of variables - variance captured by each principal component
#old variables: Sepal.Length, Sepal.Width, Petal.Length, Petal.Width
#new variables (principal components): PC1, PC2, PC3, PC4
summary(pca)
names(iris)
#How many principal components: scree plot
plot(pca, type = "l")
screeplot(pca)
#newdata
# View() opens the data viewer (interactive sessions only)
View(pca$x)
dim(pca$x)
pca$x[1:10,] #print out the first 10 data points of the transformed data.
#Print out the first two principal components
pca$x[,1:2]
#scatter plot of the first two principal components
pairs(pca$x[,1:2])
# principal components are linear combinations of the original variables
#For example: PC1: 0.36138659 * Sepal.Length + -0.08452251 *Sepal.Width
# + 0.85667061 * Petal.Length + 0.35828920 * Petal.Width
pca$rotation #rotation or loadings
#reduced data has captured 98% of the variance in Iris data.
# We represent Iris data as 2 dimensional data
reduced.iris <- data.frame(pca$x[,1:2],Species=iris$Species)
#observe data, old variables and new variables(principal components)
biplot(pca,scale=0)
########################################################################################
#PCA with princomp()
#princomp()
?princomp
data(iris)
pca.data <- iris[,-5]
#PCA with princomp()
pca <- princomp(pca.data)
# principal components are linear combinations of the original variables
loadings(pca)
View(pca$scores) #data after transformation
dim(iris)
#adding class variable to new data set
reduced.iris <- data.frame(pca$scores[,1:2],Species=iris$Species)
dim(reduced.iris)
head(reduced.iris)
|
/AppliedDataMining/AppliedDataMining/HW2/2.2/pca.R
|
no_license
|
keithhickman08/IUH
|
R
| false
| false
| 1,736
|
r
|
# Principal component analysis (PCA) of the iris data, demonstrated first
# with prcomp() and then with princomp(), keeping the first two components.
# NOTE(review): this is an interactive tutorial script -- ?help and View()
# calls assume an interactive session.
# PCA with prcomp()
# to get more information about prcomp
?prcomp
# iris is lazily available from the datasets package, so dim() works even
# before the explicit data(iris) call below
dim(iris)
data(iris)
# remove class variable
pca.data <- iris[,-5]
#PCA with prcomp()
pca <- prcomp(pca.data)
# importance of variables - variance captured by each principal component
#old variables: Sepal.Length, Sepal.Width, Petal.Length, Petal.Width
#new variables (principal components): PC1, PC2, PC3, PC4
summary(pca)
names(iris)
#How many principal components: scree plot
plot(pca, type = "l")
screeplot(pca)
#newdata
# View() opens the data viewer (interactive sessions only)
View(pca$x)
dim(pca$x)
pca$x[1:10,] #print out the first 10 data points of the transformed data.
#Print out the first two principal components
pca$x[,1:2]
#scatter plot of the first two principal components
pairs(pca$x[,1:2])
# principal components are linear combinations of the original variables
#For example: PC1: 0.36138659 * Sepal.Length + -0.08452251 *Sepal.Width
# + 0.85667061 * Petal.Length + 0.35828920 * Petal.Width
pca$rotation #rotation or loadings
#reduced data has captured 98% of the variance in Iris data.
# We represent Iris data as 2 dimensional data
reduced.iris <- data.frame(pca$x[,1:2],Species=iris$Species)
#observe data, old variables and new variables(principal components)
biplot(pca,scale=0)
########################################################################################
#PCA with princomp()
#princomp()
?princomp
data(iris)
pca.data <- iris[,-5]
#PCA with princomp()
pca <- princomp(pca.data)
# principal components are linear combinations of the original variables
loadings(pca)
View(pca$scores) #data after transformation
dim(iris)
#adding class variable to new data set
reduced.iris <- data.frame(pca$scores[,1:2],Species=iris$Species)
dim(reduced.iris)
head(reduced.iris)
|
#' Add Two Elements
#'
#' `somar` returns the sum of its two arguments (element-wise for vectors).
#'
#' @param x first element
#' @param y second element
#'
#' @examples
#' x <- 1
#' y <- 2
#' somar(x, y)
#'
#' @export
somar <- function(x, y) {
  x + y
}
|
/R/somar.R
|
no_license
|
evelinangelica/Rackaton
|
R
| false
| false
| 264
|
r
|
#' Add Two Elements
#'
#' `somar` returns the sum of its two arguments (element-wise for vectors).
#'
#' @param x first element
#' @param y second element
#'
#' @examples
#' x <- 1
#' y <- 2
#' somar(x, y)
#'
#' @export
somar <- function(x, y) {
  x + y
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ext2spy.R
\name{ext2spy}
\alias{ext2spy}
\title{Convert Spatial Extent to Polygon}
\usage{
ext2spy(x, crs = "+init=epsg:4326", as_sf = TRUE)
}
\arguments{
\item{x}{An \code{Extent} object, or any object from which an \code{Extent}
can be extracted, e.g. \code{Raster*}.}
\item{crs}{Coordinate reference system passed to \code{\link{proj4string}}.}
\item{as_sf}{\code{logical}. If \code{TRUE} (default), the returned object is
of class \code{sf} rather than \code{Spatial*}.}
}
\value{
Depending on 'as_sf', either a \code{sf} or \code{SpatialPolygons}
object.
}
\description{
Convert a spatial extent to polygons.
}
\examples{
ext = extent(c(25, 70, -5, 30))
ext2spy(ext) # 'sf' (default)
ext2spy(ext, as_sf = FALSE) # 'Spatial*'
}
\seealso{
\code{\link{extent}}.
}
\author{
Florian Detsch
}
|
/fuzzedpackages/Orcs/man/ext2spy.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false
| true
| 876
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ext2spy.R
\name{ext2spy}
\alias{ext2spy}
\title{Convert Spatial Extent to Polygon}
\usage{
ext2spy(x, crs = "+init=epsg:4326", as_sf = TRUE)
}
\arguments{
\item{x}{An \code{Extent} object, or any object from which an \code{Extent}
can be extracted, e.g. \code{Raster*}.}
\item{crs}{Coordinate reference system passed to \code{\link{proj4string}}.}
\item{as_sf}{\code{logical}. If \code{TRUE} (default), the returned object is
of class \code{sf} rather than \code{Spatial*}.}
}
\value{
Depending on 'as_sf', either a \code{sf} or \code{SpatialPolygons}
object.
}
\description{
Convert a spatial extent to polygons.
}
\examples{
ext = extent(c(25, 70, -5, 30))
ext2spy(ext) # 'sf' (default)
ext2spy(ext, as_sf = FALSE) # 'Spatial*'
}
\seealso{
\code{\link{extent}}.
}
\author{
Florian Detsch
}
|
#' The Main App UI
#'
#' Build the complete dashboard page for the app.
#'
#' @return A \code{\link[shiny]{tagList}} containing the UI.
#' @keywords internal
.ui_main <- function() {
  dashboard_title <- "R4DS Mentor Tool"
  # .golem_add_external_resources() is deliberately left out of the UI
  # until external resources are actually used, to avoid bugs.
  page <- shinydashboard::dashboardPage(
    header = .ui_header(dashboard_title),
    sidebar = .ui_sidebar(),
    body = .ui_body(),
    title = dashboard_title,
    skin = "blue"
  )
  shiny::tagList(page)
}
#' The Dashboard Header
#'
#' @param dashboard_title Title text shown in the header bar.
#' @return A \code{\link[shinydashboard]{dashboardHeader}}.
#' @keywords internal
.ui_header <- function(dashboard_title) {
  shinydashboard::dashboardHeader(title = dashboard_title)
}
#' The Dashboard Sidebar
#'
#' @return A \code{\link[shinydashboard]{dashboardSidebar}} holding a
#'   placeholder message until real menus exist.
#' @keywords internal
.ui_sidebar <- function() {
  placeholder_text <- paste(
    "Eventually there will be menus here, for example to select channels",
    "to include in the report. It takes a while to load the data, sorry."
  )
  shinydashboard::dashboardSidebar(
    shiny::p(placeholder_text, style = "padding:4px")
  )
}
#' The Dashboard Body
#'
#' @return A \code{\link[shinydashboard]{dashboardBody}} holding the
#'   questions table and a refresh button.
#' @keywords internal
.ui_body <- function() {
  shinydashboard::dashboardBody(
    DT::dataTableOutput("questions"),
    shiny::actionButton("refresh", label = "Please wait...")
  )
}
#' Golem Extras
#'
#' Register the app's www/ directory and build the HTML head element.
#'
#' @return A \code{\link[shiny]{tags}} head element.
#' @keywords internal
.golem_add_external_resources <- function() {
  shiny::addResourcePath(
    "www", system.file("app/www", package = "mentordash")
  )
  shiny::tags$head(
    golem::activate_js(),
    golem::favicon()
    # Additional external resources go here, e.g. a stylesheet from
    # inst/app/www or shinyalert::useShinyalert():
    # tags$link(rel="stylesheet", type="text/css", href="www/custom.css")
  )
}
|
/R/app_ui.R
|
permissive
|
jmbuhr/mentordash
|
R
| false
| false
| 2,017
|
r
|
#' The Main App UI
#'
#' Build the complete dashboard page for the app.
#'
#' @return A \code{\link[shiny]{tagList}} containing the UI.
#' @keywords internal
.ui_main <- function() {
  dashboard_title <- "R4DS Mentor Tool"
  # .golem_add_external_resources() is deliberately left out of the UI
  # until external resources are actually used, to avoid bugs.
  page <- shinydashboard::dashboardPage(
    header = .ui_header(dashboard_title),
    sidebar = .ui_sidebar(),
    body = .ui_body(),
    title = dashboard_title,
    skin = "blue"
  )
  shiny::tagList(page)
}
#' The Dashboard Header
#'
#' @param dashboard_title Title text shown in the header bar.
#' @return A \code{\link[shinydashboard]{dashboardHeader}}.
#' @keywords internal
.ui_header <- function(dashboard_title) {
  shinydashboard::dashboardHeader(title = dashboard_title)
}
#' The Dashboard Sidebar
#'
#' @return A \code{\link[shinydashboard]{dashboardSidebar}} holding a
#'   placeholder message until real menus exist.
#' @keywords internal
.ui_sidebar <- function() {
  placeholder_text <- paste(
    "Eventually there will be menus here, for example to select channels",
    "to include in the report. It takes a while to load the data, sorry."
  )
  shinydashboard::dashboardSidebar(
    shiny::p(placeholder_text, style = "padding:4px")
  )
}
#' The Dashboard Body
#'
#' @return A \code{\link[shinydashboard]{dashboardBody}} holding the
#'   questions table and a refresh button.
#' @keywords internal
.ui_body <- function() {
  shinydashboard::dashboardBody(
    DT::dataTableOutput("questions"),
    shiny::actionButton("refresh", label = "Please wait...")
  )
}
#' Golem Extras
#'
#' Register the app's www/ directory and build the HTML head element.
#'
#' @return A \code{\link[shiny]{tags}} head element.
#' @keywords internal
.golem_add_external_resources <- function() {
  shiny::addResourcePath(
    "www", system.file("app/www", package = "mentordash")
  )
  shiny::tags$head(
    golem::activate_js(),
    golem::favicon()
    # Additional external resources go here, e.g. a stylesheet from
    # inst/app/www or shinyalert::useShinyalert():
    # tags$link(rel="stylesheet", type="text/css", href="www/custom.css")
  )
}
|
# Data
# URL: https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
# Note: assumed to be located in the parent directory
# Load the household power consumption data
load_data <- function(filepath) {
# read the dataset
data <- read.table(filepath, sep=";", header=TRUE, na.strings="?")
# combine date and time columns to a new datetime column
data$datetime <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
# filter by date per instructions
daterange <- c(as.Date("2007-02-01"), as.Date("2007-02-02"))
data[as.Date(data$datetime) %in% daterange,]
}
library("graphics")
#load the data
data <- load_data("../household_power_consumption.txt")
#open png to write out the histogram
png("plot4.png", width=480, height=480, bg="white")
#setup for 4 graphs
par(mfrow=c(2, 2))
#plot the line graph
with(data, {
##upper-left plot
plot(datetime, Global_active_power, type="l", col="black", xlab="", ylab="Global Active Power")
##upper-right plot
plot(datetime, Voltage, type="l", col="black", xlab="datetime", ylab="Voltage")
##bottom-left plot
# initialize the plot
plot(datetime, Sub_metering_1, type="n", xlab="", ylab="Energy sub metering")
#select the colors
colors <- c("black", "red", "blue")
# specify the variables to plot as lines
plotvars <- c("Sub_metering_1","Sub_metering_2","Sub_metering_3")
#draw the lines each plotvars
for (i in seq_along(plotvars)) {
plotvar <- plotvars[i]
column <- data[[plotvar]]
lines(datetime, column, col=colors[i])
}
#add the legend
legend("topright",
legend=plotvars,
col=colors,
lty="solid"
)
#bottom-right plot
plot(datetime, Global_reactive_power, type="l", col="black")
}) #end with
#close the device to write the png
thedev <- dev.off()
|
/plot4.R
|
no_license
|
rfiorillo/ExData_Plotting1
|
R
| false
| false
| 1,831
|
r
|
# Data
# URL: https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
# Note: assumed to be located in the parent directory
# Load the household power consumption data
load_data <- function(filepath) {
# read the dataset
data <- read.table(filepath, sep=";", header=TRUE, na.strings="?")
# combine date and time columns to a new datetime column
data$datetime <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
# filter by date per instructions
daterange <- c(as.Date("2007-02-01"), as.Date("2007-02-02"))
data[as.Date(data$datetime) %in% daterange,]
}
library("graphics")
#load the data
data <- load_data("../household_power_consumption.txt")
#open png to write out the histogram
png("plot4.png", width=480, height=480, bg="white")
#setup for 4 graphs
par(mfrow=c(2, 2))
#plot the line graph
with(data, {
##upper-left plot
plot(datetime, Global_active_power, type="l", col="black", xlab="", ylab="Global Active Power")
##upper-right plot
plot(datetime, Voltage, type="l", col="black", xlab="datetime", ylab="Voltage")
##bottom-left plot
# initialize the plot
plot(datetime, Sub_metering_1, type="n", xlab="", ylab="Energy sub metering")
#select the colors
colors <- c("black", "red", "blue")
# specify the variables to plot as lines
plotvars <- c("Sub_metering_1","Sub_metering_2","Sub_metering_3")
#draw the lines each plotvars
for (i in seq_along(plotvars)) {
plotvar <- plotvars[i]
column <- data[[plotvar]]
lines(datetime, column, col=colors[i])
}
#add the legend
legend("topright",
legend=plotvars,
col=colors,
lty="solid"
)
#bottom-right plot
plot(datetime, Global_reactive_power, type="l", col="black")
}) #end with
#close the device to write the png
thedev <- dev.off()
|
# Exploratory analysis of the survival::pbc trial data: a table1 summary
# followed by histograms, density plots, boxplots and barplots comparing
# the two treatment arms.
# NOTE(review): library() is preferred over require() for loading packages.
require(table1)
require(survival)
dat <- subset(survival::pbc, !is.na(trt)) # Exclude subjects not randomized
help(pbc) # see the description of the dataset
help(is.na) # learn the is.na function
dat$trt <- factor(dat$trt, levels=1:2, labels=c("D-penicillamine", "Placebo"))
dat$sex <- factor(dat$sex, levels=c("m", "f"), labels=c("Male", "Female"))
dat$stage <- factor(dat$stage, levels=1:4, labels=paste("Stage", 1:4))
dat$edema <- factor(dat$edema, levels=c(0, 0.5, 1),
                    labels=c("No edema",
                             "Untreated or successfully treated",
                             "Edema despite diuretic therapy"))
dat$spiders <- as.logical(dat$spiders)
dat$hepato <- as.logical(dat$hepato)
dat$ascites <- as.logical(dat$ascites)
# human-readable labels used by table1() in the summary table below
label(dat$stage) <- "Histologic stage of disease"
label(dat$edema) <- "Edema status"
label(dat$spiders) <- "Blood vessel malformations in the skin"
label(dat$hepato) <- "Presence of hepatomegaly or enlarged liver"
label(dat$ascites) <- "Presence of ascites"
label(dat$platelet) <- "Platelet count (× 10<sup>9</sup> per liter)"
label(dat$protime) <- "Standardised blood clotting time"
label(dat$albumin) <- "Serum albumin (g/dL)"
label(dat$chol) <- "Serum cholesterol (mg/dL)"
label(dat$copper) <- "Urine copper (μg/day)"
label(dat$trig) <- "Triglycerides (mg/dL)"
label(dat$age) <- "Age (y)"
label(dat$sex) <- "Sex"
label(dat$alk.phos) <- "Alkaline phosphotase (U/L)"
label(dat$ast) <- "Aspartate aminotransferase (U/mL)"
label(dat$bili) <- "Serum bilirubin (mg/dL)"
# baseline characteristics table, stratified by treatment arm
table1(~ age + sex + stage + edema + spiders + hepato + ascites +
         platelet + protime + albumin + alk.phos + ast + bili + chol +
         copper + trig | trt, data=dat)
# rows for the D-penicillamine arm; -indexD below selects the placebo arm
# NOTE(review): <- is preferred over = for assignment
indexD=which(dat$trt=="D-penicillamine")
# side-by-side histograms
par(mfrow=c(1,2)) #set the plotting area into a 1*2 array
hist(dat$age[indexD], breaks=10, main="Histogram of age in D-penicillamine group",
     xlab="age")
hist(dat$age[-indexD], breaks=10, main="Histogram of age in placebo group", xlab="age")
dev.off() #set par to the default
# overlapping histograms (semi-transparent colors via the alpha channel)
help(rgb)
hist(dat$age[indexD], breaks=10, main="Histogram of age", xlab="age", col=rgb(1,0,0,1/4),
     xlim=range(dat$age), ylim=c(0,30))
# NOTE(review): add=T should be add=TRUE (T is reassignable)
hist(dat$age[-indexD], breaks=10, xlab="age", col=rgb(0,0,1,1/4), add=T)
legend("topright", c("D-penicillamine","placebo"), fill=c(rgb(1,0,0,1/4),rgb(0,0,1,1/4)))
# side-by-side density plots
par(mfrow=c(1,2))
plot(density(dat$age[indexD]), main="Density plot of age in D-penicillamine group")
plot(density(dat$age[-indexD]), main="Density plot of age in placebo group")
dev.off()
# overlapping density plots
plot(density(dat$age[indexD]), main="Density plot of age", ylim=c(0,0.04), col="red")
lines(density(dat$age[-indexD]), col="blue")
legend("topright", c("D-penicillamine","placebo"), fill=c("red","blue"))
#boxplot
boxplot(age~trt, data=dat, main="Boxplot of age by treatment groups",
        col=c(rgb(1,0,0,1/4),rgb(0,0,1,1/4)), xlab="treatment")
#barplot of counts, with stacked bars
counts=table(dat$sex,dat$trt)
counts
barplot(counts, col=c(rgb(1,0,0,1/4),rgb(0,0,1,1/4)), legend=rownames(counts),
        main="Patient distribution (counts) by treatment and sex")
#barplot of proportions, with juxtaposed bars
proportions=prop.table(counts,2) # column percentages
proportions
barplot(proportions, col=c(rgb(1,0,0,1/4),rgb(0,0,1,1/4)), legend=rownames(proportions),
        beside=TRUE, main="Patient distribution (proportions) by treatment and sex")
# color palettes from RColorBrewer
require(RColorBrewer)
display.brewer.all()
help(brewer.pal)
counts2=table(dat$stage,dat$trt)
proportions2=prop.table(counts2,2)
display.brewer.pal(n=4,name="Blues")
barplot(proportions2, col=brewer.pal(n=4,name="Blues"), beside=TRUE,
        main="Patient distribution (proportions) by treatment and stage")
legend("topleft", rownames(proportions2), fill=brewer.pal(n=4,name="Blues"), cex=0.75)
|
/week3 discussion.R
|
no_license
|
c1rowe/math189
|
R
| false
| false
| 3,960
|
r
|
# Exploratory analysis of the survival::pbc trial data: a table1 summary
# followed by histograms, density plots, boxplots and barplots comparing
# the two treatment arms.
# NOTE(review): library() is preferred over require() for loading packages.
require(table1)
require(survival)
dat <- subset(survival::pbc, !is.na(trt)) # Exclude subjects not randomized
help(pbc) # see the description of the dataset
help(is.na) # learn the is.na function
dat$trt <- factor(dat$trt, levels=1:2, labels=c("D-penicillamine", "Placebo"))
dat$sex <- factor(dat$sex, levels=c("m", "f"), labels=c("Male", "Female"))
dat$stage <- factor(dat$stage, levels=1:4, labels=paste("Stage", 1:4))
dat$edema <- factor(dat$edema, levels=c(0, 0.5, 1),
                    labels=c("No edema",
                             "Untreated or successfully treated",
                             "Edema despite diuretic therapy"))
dat$spiders <- as.logical(dat$spiders)
dat$hepato <- as.logical(dat$hepato)
dat$ascites <- as.logical(dat$ascites)
# human-readable labels used by table1() in the summary table below
label(dat$stage) <- "Histologic stage of disease"
label(dat$edema) <- "Edema status"
label(dat$spiders) <- "Blood vessel malformations in the skin"
label(dat$hepato) <- "Presence of hepatomegaly or enlarged liver"
label(dat$ascites) <- "Presence of ascites"
label(dat$platelet) <- "Platelet count (× 10<sup>9</sup> per liter)"
label(dat$protime) <- "Standardised blood clotting time"
label(dat$albumin) <- "Serum albumin (g/dL)"
label(dat$chol) <- "Serum cholesterol (mg/dL)"
label(dat$copper) <- "Urine copper (μg/day)"
label(dat$trig) <- "Triglycerides (mg/dL)"
label(dat$age) <- "Age (y)"
label(dat$sex) <- "Sex"
label(dat$alk.phos) <- "Alkaline phosphotase (U/L)"
label(dat$ast) <- "Aspartate aminotransferase (U/mL)"
label(dat$bili) <- "Serum bilirubin (mg/dL)"
# baseline characteristics table, stratified by treatment arm
table1(~ age + sex + stage + edema + spiders + hepato + ascites +
         platelet + protime + albumin + alk.phos + ast + bili + chol +
         copper + trig | trt, data=dat)
# rows for the D-penicillamine arm; -indexD below selects the placebo arm
# NOTE(review): <- is preferred over = for assignment
indexD=which(dat$trt=="D-penicillamine")
# side-by-side histograms
par(mfrow=c(1,2)) #set the plotting area into a 1*2 array
hist(dat$age[indexD], breaks=10, main="Histogram of age in D-penicillamine group",
     xlab="age")
hist(dat$age[-indexD], breaks=10, main="Histogram of age in placebo group", xlab="age")
dev.off() #set par to the default
# overlapping histograms (semi-transparent colors via the alpha channel)
help(rgb)
hist(dat$age[indexD], breaks=10, main="Histogram of age", xlab="age", col=rgb(1,0,0,1/4),
     xlim=range(dat$age), ylim=c(0,30))
# NOTE(review): add=T should be add=TRUE (T is reassignable)
hist(dat$age[-indexD], breaks=10, xlab="age", col=rgb(0,0,1,1/4), add=T)
legend("topright", c("D-penicillamine","placebo"), fill=c(rgb(1,0,0,1/4),rgb(0,0,1,1/4)))
# side-by-side density plots
par(mfrow=c(1,2))
plot(density(dat$age[indexD]), main="Density plot of age in D-penicillamine group")
plot(density(dat$age[-indexD]), main="Density plot of age in placebo group")
dev.off()
# overlapping density plots
plot(density(dat$age[indexD]), main="Density plot of age", ylim=c(0,0.04), col="red")
lines(density(dat$age[-indexD]), col="blue")
legend("topright", c("D-penicillamine","placebo"), fill=c("red","blue"))
#boxplot
boxplot(age~trt, data=dat, main="Boxplot of age by treatment groups",
        col=c(rgb(1,0,0,1/4),rgb(0,0,1,1/4)), xlab="treatment")
#barplot of counts, with stacked bars
counts=table(dat$sex,dat$trt)
counts
barplot(counts, col=c(rgb(1,0,0,1/4),rgb(0,0,1,1/4)), legend=rownames(counts),
        main="Patient distribution (counts) by treatment and sex")
#barplot of proportions, with juxtaposed bars
proportions=prop.table(counts,2) # column percentages
proportions
barplot(proportions, col=c(rgb(1,0,0,1/4),rgb(0,0,1,1/4)), legend=rownames(proportions),
        beside=TRUE, main="Patient distribution (proportions) by treatment and sex")
# color palettes from RColorBrewer
require(RColorBrewer)
display.brewer.all()
help(brewer.pal)
counts2=table(dat$stage,dat$trt)
proportions2=prop.table(counts2,2)
display.brewer.pal(n=4,name="Blues")
barplot(proportions2, col=brewer.pal(n=4,name="Blues"), beside=TRUE,
        main="Patient distribution (proportions) by treatment and stage")
legend("topleft", rownames(proportions2), fill=brewer.pal(n=4,name="Blues"), cex=0.75)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ga_v4_get.R
\name{fetch_google_analytics_4}
\alias{fetch_google_analytics_4}
\title{Fetch multiple GAv4 requests}
\usage{
fetch_google_analytics_4(request_list, merge = FALSE,
useResourceQuotas = NULL)
}
\arguments{
\item{request_list}{A list of requests created by \link{make_ga_4_req}}
\item{merge}{If TRUE then will rbind that list of data.frames}
\item{useResourceQuotas}{If using GA360, access increased sampling limits.
Default \code{NULL}, set to \code{TRUE} or \code{FALSE} if you have access to this feature.}
}
\value{
A dataframe if one request, or a list of data.frames if multiple.
}
\description{
Fetch the GAv4 requests as created by \link{make_ga_4_req}
}
\details{
For same viewId, daterange, segments, samplingLevel and cohortGroup, v4 batches can be made
}
\examples{
\dontrun{
library(googleAnalyticsR)
## authenticate,
## or use the RStudio Addin "Google API Auth" with analytics scopes set
ga_auth()
## get your accounts
account_list <- ga_account_list()
## pick a profile with data to query
ga_id <- account_list[23,'viewId']
ga_req1 <- make_ga_4_req(ga_id,
date_range = c("2015-07-30","2015-10-01"),
dimensions=c('source','medium'),
metrics = c('sessions'))
ga_req2 <- make_ga_4_req(ga_id,
date_range = c("2015-07-30","2015-10-01"),
dimensions=c('source','medium'),
metrics = c('users'))
fetch_google_analytics_4(list(ga_req1, ga_req2))
}
}
\seealso{
Other GAv4 fetch functions: \code{\link{fetch_google_analytics_4_slow}},
\code{\link{google_analytics}},
\code{\link{make_ga_4_req}}
}
|
/man/fetch_google_analytics_4.Rd
|
no_license
|
Erinmitten/googleAnalyticsR
|
R
| false
| true
| 1,787
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ga_v4_get.R
\name{fetch_google_analytics_4}
\alias{fetch_google_analytics_4}
\title{Fetch multiple GAv4 requests}
\usage{
fetch_google_analytics_4(request_list, merge = FALSE,
useResourceQuotas = NULL)
}
\arguments{
\item{request_list}{A list of requests created by \link{make_ga_4_req}}
\item{merge}{If TRUE then will rbind that list of data.frames}
\item{useResourceQuotas}{If using GA360, access increased sampling limits.
Default \code{NULL}, set to \code{TRUE} or \code{FALSE} if you have access to this feature.}
}
\value{
A dataframe if one request, or a list of data.frames if multiple.
}
\description{
Fetch the GAv4 requests as created by \link{make_ga_4_req}
}
\details{
For same viewId, daterange, segments, samplingLevel and cohortGroup, v4 batches can be made
}
\examples{
\dontrun{
library(googleAnalyticsR)
## authenticate,
## or use the RStudio Addin "Google API Auth" with analytics scopes set
ga_auth()
## get your accounts
account_list <- ga_account_list()
## pick a profile with data to query
ga_id <- account_list[23,'viewId']
ga_req1 <- make_ga_4_req(ga_id,
date_range = c("2015-07-30","2015-10-01"),
dimensions=c('source','medium'),
metrics = c('sessions'))
ga_req2 <- make_ga_4_req(ga_id,
date_range = c("2015-07-30","2015-10-01"),
dimensions=c('source','medium'),
metrics = c('users'))
fetch_google_analytics_4(list(ga_req1, ga_req2))
}
}
\seealso{
Other GAv4 fetch functions: \code{\link{fetch_google_analytics_4_slow}},
\code{\link{google_analytics}},
\code{\link{make_ga_4_req}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{heart}
\alias{heart}
\title{Cleveland heart disease dataset}
\format{An object of class \code{data.frame} with 303 rows and 14 columns.}
\usage{
heart
}
\description{
The Cleveland heart disease dataset: 303 patient records with 14 clinical attributes.
}
\keyword{datasets}
|
/man/heart.Rd
|
no_license
|
klovens/pineplot
|
R
| false
| true
| 309
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{heart}
\alias{heart}
\title{Cleveland heart disease dataset}
\format{An object of class \code{data.frame} with 303 rows and 14 columns.}
\usage{
heart
}
\description{
The Cleveland heart disease dataset: 303 patient records with 14 clinical attributes.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sim01NA42-data.R
\docType{data}
\name{sim01NA42}
\alias{sim01NA42}
\title{Simulated Sperm-seq data 0/1/NA encoded}
\format{
A dataframe with column names, 4136 rows, and 51 columns:
\describe{
\item{positions}{positions, integer, SNP index pre-filtering for hetSNPs only}
\item{gami_}{gami_, 0, 1, or NA, the genotype at that SNP for gamete i}
}
}
\usage{
sim01NA42
}
\description{
Simulated Sperm-seq data from the generative model, with data 0/1/NA encoded,
with 50 gametes, 5000 beginning SNPs, 0.1 coverage,
average recombination rate of 1, sequencing error rate of 0.005.
Following filtering, there are 4136 hetSNPs. This data originated from the
generative model with a random seed of 42.
The first column is the SNP index positions and the following 50 columns
are the gamete genotypes which are encoded with 0/1/NA.
}
\keyword{datasets}
|
/man/sim01NA42.Rd
|
no_license
|
mccoy-lab/rhapsodi
|
R
| false
| true
| 923
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sim01NA42-data.R
\docType{data}
\name{sim01NA42}
\alias{sim01NA42}
\title{Simulated Sperm-seq data 0/1/NA encoded}
\format{
A dataframe with column names, 4136 rows, and 51 columns:
\describe{
\item{positions}{positions, integer, SNP index pre-filtering for hetSNPs only}
\item{gami_}{gami_, 0, 1, or NA, the genotype at that SNP for gamete i}
}
}
\usage{
sim01NA42
}
\description{
Simulated Sperm-seq data from the generative model, with data 0/1/NA encoded,
with 50 gametes, 5000 beginning SNPs, 0.1 coverage,
average recombination rate of 1, sequencing error rate of 0.005.
Following filtering, there are 4136 hetSNPs. This data originated from the
generative model with a random seed of 42.
The first column is the SNP index positions and the following 50 columns
are the gamete genotypes which are encoded with 0/1/NA.
}
\keyword{datasets}
|
# Week 10: web scraping with rvest — IMDb cast tables, TripAdvisor reviews,
# and Utah county-level election returns.
library(tidyverse)
library(magrittr)
library(rvest)
#Step 1: URL of the page to scrape (The Godfather, IMDb)
myurl<-"https://www.imdb.com/title/tt0068646/"
#Step 2: download and parse the page
myhtml<-read_html(myurl)
#Step 4: extract all <table> nodes and convert each to a data frame
mycast<-html_nodes(myhtml, "table") %>% html_table()
#Step 5: keep the first table (the cast list)
mydat<-mycast %>% extract2(1)
# drop the header row; keep actor (col 2) and role (col 4)
mydat<-mydat[-1,c(2,4)]
names(mydat)<-c("Actor","Role")
library(stringr)
# collapse internal runs of whitespace/newlines in the Role text
mydat<- mydat %>% mutate(Role=str_squish(Role))
#Your Turn: same pipeline for Gone with the Wind, in a single chain
#Step 1
mydat<-read_html("https://www.imdb.com/title/tt0032138/") %>%
html_nodes("table") %>%
html_table() %>%
extract2(1)
mydat<-mydat[-1,c(2,4)]
names(mydat)<-c("Actor","Role")
mydat<- mydat %>% mutate(Role=str_squish(Role))
#CN Tower: scrape review text by CSS class
# NOTE(review): ".cPQsENeY" is an auto-generated class name and is likely to
# break when TripAdvisor changes its markup.
myurl<-"https://www.tripadvisor.ca/Attraction_Review-g155019-d155483-Reviews-CN_Tower-Toronto_Ontario.html"
read_html(myurl) %>%
html_nodes(".cPQsENeY") %>%
html_text()
#Election returns: one county-count page, keep county (1), Trump (4), Biden (9)
myurl<-"https://electionresults.utah.gov/elections/countyCount/399789495"
mytable<-read_html(myurl) %>%
html_nodes("table") %>%
html_table(header=T) %>%
extract2(1)
mytable<-mytable[-c(1,2),c(1,4,9)]
names(mytable)<-c("county","trump","biden")
# one URL per congressional district
myurls<-c("https://electionresults.utah.gov/elections/countyCount/399792400",
"https://electionresults.utah.gov/elections/countyCount/399794673",
"https://electionresults.utah.gov/elections/countyCount/399794677",
"https://electionresults.utah.gov/elections/countyCount/399794680")
#first dist: promote the first data row to column names, drop two header rows,
# then select the county plus the REP/DEM vote columns by name pattern
mydat<-read_html(myurls[1]) %>%
html_nodes("table") %>%
html_table(header=T) %>%
extract2(1)
names(mydat)<-mydat[1,]
mydat<-mydat %>% slice(-c(1,2)) %>%
select(County, "Rep"=contains("REP", ignore.case = F),
"Dem"=contains("DEM", ignore.case = F))
# loop over all districts, stacking results row-wise
# NOTE(review): rbind inside a loop is O(n^2); fine for 4 pages.
mydata<-NULL
for (i in myurls) {
d<-read_html(i) %>%
html_nodes("table") %>%
html_table(header=T) %>%
extract2(1)
names(d)<-d[1,]
# NOTE(review): slice(-c(1,2)) is applied twice here (drops 4 rows),
# unlike the single-district code above which drops 2 — confirm intended.
d<-d %>% slice(-c(1,2)) %>% slice(-c(1,2)) %>%
select(County, "Rep"=contains("REP", ignore.case = F),
"Dem"=contains("DEM", ignore.case = F))
mydata<-rbind.data.frame(mydata,d)
}
|
/public/materials/scripts/Week10_Rscript.R
|
no_license
|
ochyzh/POLS478
|
R
| false
| false
| 2,016
|
r
|
# Week 10: web scraping with rvest — IMDb cast tables, TripAdvisor reviews,
# and Utah county-level election returns.
library(tidyverse)
library(magrittr)
library(rvest)
#Step 1: URL of the page to scrape (The Godfather, IMDb)
myurl<-"https://www.imdb.com/title/tt0068646/"
#Step 2: download and parse the page
myhtml<-read_html(myurl)
#Step 4: extract all <table> nodes and convert each to a data frame
mycast<-html_nodes(myhtml, "table") %>% html_table()
#Step 5: keep the first table (the cast list)
mydat<-mycast %>% extract2(1)
# drop the header row; keep actor (col 2) and role (col 4)
mydat<-mydat[-1,c(2,4)]
names(mydat)<-c("Actor","Role")
library(stringr)
# collapse internal runs of whitespace/newlines in the Role text
mydat<- mydat %>% mutate(Role=str_squish(Role))
#Your Turn: same pipeline for Gone with the Wind, in a single chain
#Step 1
mydat<-read_html("https://www.imdb.com/title/tt0032138/") %>%
html_nodes("table") %>%
html_table() %>%
extract2(1)
mydat<-mydat[-1,c(2,4)]
names(mydat)<-c("Actor","Role")
mydat<- mydat %>% mutate(Role=str_squish(Role))
#CN Tower: scrape review text by CSS class
# NOTE(review): ".cPQsENeY" is an auto-generated class name and is likely to
# break when TripAdvisor changes its markup.
myurl<-"https://www.tripadvisor.ca/Attraction_Review-g155019-d155483-Reviews-CN_Tower-Toronto_Ontario.html"
read_html(myurl) %>%
html_nodes(".cPQsENeY") %>%
html_text()
#Election returns: one county-count page, keep county (1), Trump (4), Biden (9)
myurl<-"https://electionresults.utah.gov/elections/countyCount/399789495"
mytable<-read_html(myurl) %>%
html_nodes("table") %>%
html_table(header=T) %>%
extract2(1)
mytable<-mytable[-c(1,2),c(1,4,9)]
names(mytable)<-c("county","trump","biden")
# one URL per congressional district
myurls<-c("https://electionresults.utah.gov/elections/countyCount/399792400",
"https://electionresults.utah.gov/elections/countyCount/399794673",
"https://electionresults.utah.gov/elections/countyCount/399794677",
"https://electionresults.utah.gov/elections/countyCount/399794680")
#first dist: promote the first data row to column names, drop two header rows,
# then select the county plus the REP/DEM vote columns by name pattern
mydat<-read_html(myurls[1]) %>%
html_nodes("table") %>%
html_table(header=T) %>%
extract2(1)
names(mydat)<-mydat[1,]
mydat<-mydat %>% slice(-c(1,2)) %>%
select(County, "Rep"=contains("REP", ignore.case = F),
"Dem"=contains("DEM", ignore.case = F))
# loop over all districts, stacking results row-wise
# NOTE(review): rbind inside a loop is O(n^2); fine for 4 pages.
mydata<-NULL
for (i in myurls) {
d<-read_html(i) %>%
html_nodes("table") %>%
html_table(header=T) %>%
extract2(1)
names(d)<-d[1,]
# NOTE(review): slice(-c(1,2)) is applied twice here (drops 4 rows),
# unlike the single-district code above which drops 2 — confirm intended.
d<-d %>% slice(-c(1,2)) %>% slice(-c(1,2)) %>%
select(County, "Rep"=contains("REP", ignore.case = F),
"Dem"=contains("DEM", ignore.case = F))
mydata<-rbind.data.frame(mydata,d)
}
|
#' Compute lagged or leading values
#'
#' Find the "previous" (`lag()`) or "next" (`lead()`) values in a vector.
#' Useful for comparing values behind of or ahead of the current values.
#'
#' @param x Vector of values
#' @param n Positive integer of length 1, giving the number of positions to
#' lead or lag by
#' @param default Value used for non-existent rows. Defaults to `NA`.
#' @param order_by Override the default ordering to use another vector or column
#' @param ... Needed for compatibility with lag generic.
#' @importFrom stats lag
#' @examples
#' lag(1:5)
#' lead(1:5)
#'
#' x <- 1:5
#' tibble(behind = lag(x), x, ahead = lead(x))
#'
#' # If you want to look more rows behind or ahead, use `n`
#' lag(1:5, n = 1)
#' lag(1:5, n = 2)
#'
#' lead(1:5, n = 1)
#' lead(1:5, n = 2)
#'
#' # If you want to define a value for non-existing rows, use `default`
#' lag(1:5)
#' lag(1:5, default = 0)
#'
#' lead(1:5)
#' lead(1:5, default = 6)
#'
#' # If data are not already ordered, use `order_by`
#' scrambled <- slice_sample(tibble(year = 2000:2005, value = (0:5) ^ 2), prop = 1)
#'
#' wrong <- mutate(scrambled, previous_year_value = lag(value))
#' arrange(wrong, year)
#'
#' right <- mutate(scrambled, previous_year_value = lag(value, order_by = year))
#' arrange(right, year)
#' @name lead-lag
NULL
#' @export
#' @rdname lead-lag
lag <- function(x, n = 1L, default = NA, order_by = NULL, ...) {
  # Delegate to the ordered variant when an explicit ordering is supplied.
  if (!is.null(order_by)) {
    return(with_order(order_by, lag, x, n = n, default = default))
  }

  # stats::lag() handles time series; this vector version does not.
  if (inherits(x, "ts")) {
    bad_args("x", "must be a vector, not a ts object, do you want `stats::lag()`?")
  }

  valid_n <- length(n) == 1 && is.numeric(n) && n >= 0
  if (!valid_n) {
    bad_args("n", "must be a nonnegative integer scalar, ",
      "not {friendly_type_of(n)} of length {length(n)}"
    )
  }
  if (n == 0) {
    return(x)
  }

  size <- vec_size(x)
  shift <- pmin(n, size)
  default <- vec_cast(default, x, x_arg = "default", to_arg = "x")

  # Pad the front with `default`, keeping the first `size - shift` values.
  vec_c(
    vec_rep(default, shift),
    vec_slice(x, seq_len(size - shift))
  )
}
#' @export
#' @rdname lead-lag
lead <- function(x, n = 1L, default = NA, order_by = NULL, ...) {
  # Delegate to the ordered variant when an explicit ordering is supplied.
  if (!is.null(order_by)) {
    return(with_order(order_by, lead, x, n = n, default = default))
  }

  valid_n <- length(n) == 1 && is.numeric(n) && n >= 0
  if (!valid_n) {
    bad_args("n", "must be a nonnegative integer scalar, ",
      "not {friendly_type_of(n)} of length {length(n)}"
    )
  }
  if (n == 0) {
    return(x)
  }

  size <- vec_size(x)
  shift <- pmin(n, size)
  default <- vec_cast(default, x, x_arg = "default", to_arg = "x")

  # Drop the first `shift` values and pad the tail with `default`.
  vec_c(
    vec_slice(x, -seq_len(shift)),
    vec_rep(default, shift)
  )
}
|
/R/lead-lag.R
|
permissive
|
torockel/dplyr
|
R
| false
| false
| 2,598
|
r
|
#' Compute lagged or leading values
#'
#' Find the "previous" (`lag()`) or "next" (`lead()`) values in a vector.
#' Useful for comparing values behind of or ahead of the current values.
#'
#' @param x Vector of values
#' @param n Positive integer of length 1, giving the number of positions to
#' lead or lag by
#' @param default Value used for non-existent rows. Defaults to `NA`.
#' @param order_by Override the default ordering to use another vector or column
#' @param ... Needed for compatibility with lag generic.
#' @importFrom stats lag
#' @examples
#' lag(1:5)
#' lead(1:5)
#'
#' x <- 1:5
#' tibble(behind = lag(x), x, ahead = lead(x))
#'
#' # If you want to look more rows behind or ahead, use `n`
#' lag(1:5, n = 1)
#' lag(1:5, n = 2)
#'
#' lead(1:5, n = 1)
#' lead(1:5, n = 2)
#'
#' # If you want to define a value for non-existing rows, use `default`
#' lag(1:5)
#' lag(1:5, default = 0)
#'
#' lead(1:5)
#' lead(1:5, default = 6)
#'
#' # If data are not already ordered, use `order_by`
#' scrambled <- slice_sample(tibble(year = 2000:2005, value = (0:5) ^ 2), prop = 1)
#'
#' wrong <- mutate(scrambled, previous_year_value = lag(value))
#' arrange(wrong, year)
#'
#' right <- mutate(scrambled, previous_year_value = lag(value, order_by = year))
#' arrange(right, year)
#' @name lead-lag
NULL
#' @export
#' @rdname lead-lag
lag <- function(x, n = 1L, default = NA, order_by = NULL, ...) {
  # Delegate to the ordered variant when an explicit ordering is supplied.
  if (!is.null(order_by)) {
    return(with_order(order_by, lag, x, n = n, default = default))
  }

  # stats::lag() handles time series; this vector version does not.
  if (inherits(x, "ts")) {
    bad_args("x", "must be a vector, not a ts object, do you want `stats::lag()`?")
  }

  valid_n <- length(n) == 1 && is.numeric(n) && n >= 0
  if (!valid_n) {
    bad_args("n", "must be a nonnegative integer scalar, ",
      "not {friendly_type_of(n)} of length {length(n)}"
    )
  }
  if (n == 0) {
    return(x)
  }

  size <- vec_size(x)
  shift <- pmin(n, size)
  default <- vec_cast(default, x, x_arg = "default", to_arg = "x")

  # Pad the front with `default`, keeping the first `size - shift` values.
  vec_c(
    vec_rep(default, shift),
    vec_slice(x, seq_len(size - shift))
  )
}
#' @export
#' @rdname lead-lag
lead <- function(x, n = 1L, default = NA, order_by = NULL, ...) {
  # Delegate to the ordered variant when an explicit ordering is supplied.
  if (!is.null(order_by)) {
    return(with_order(order_by, lead, x, n = n, default = default))
  }

  valid_n <- length(n) == 1 && is.numeric(n) && n >= 0
  if (!valid_n) {
    bad_args("n", "must be a nonnegative integer scalar, ",
      "not {friendly_type_of(n)} of length {length(n)}"
    )
  }
  if (n == 0) {
    return(x)
  }

  size <- vec_size(x)
  shift <- pmin(n, size)
  default <- vec_cast(default, x, x_arg = "default", to_arg = "x")

  # Drop the first `shift` values and pad the tail with `default`.
  vec_c(
    vec_slice(x, -seq_len(shift)),
    vec_rep(default, shift)
  )
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/indirect.R
\name{get_reverse_linkingto}
\alias{get_reverse_linkingto}
\title{get_reverse_linkingto}
\usage{
get_reverse_linkingto(packages, level = 1L)
}
\arguments{
\item{packages}{(non-empty character vector) Package names}
\item{level}{(positive integer) Depth of recursive dependency}
}
\value{
A tibble with three columns: `pkg_1`, `relation` and `pkg_2`
}
\description{
Get reverse dependencies
}
\examples{
pkggraph::init(local = TRUE)
pkggraph::get_reverse_linkingto("BH")
}
\seealso{
\code{\link{get_reverse_depends}},
\code{\link{get_reverse_imports}}, \code{\link{get_reverse_linkingto}},
\code{\link{get_reverse_suggests}}, \code{\link{get_reverse_enhances}},
\code{\link{get_all_reverse_dependencies}}, \code{\link{get_linkingto}}
}
\author{
Srikanth KS
}
|
/man/get_reverse_linkingto.Rd
|
no_license
|
talegari/pkggraph
|
R
| false
| true
| 854
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/indirect.R
\name{get_reverse_linkingto}
\alias{get_reverse_linkingto}
\title{get_reverse_linkingto}
\usage{
get_reverse_linkingto(packages, level = 1L)
}
\arguments{
\item{packages}{(non-empty character vector) Package names}
\item{level}{(positive integer) Depth of recursive dependency}
}
\value{
A tibble with three columns: `pkg_1`, `relation` and `pkg_2`
}
\description{
Get reverse dependencies
}
\examples{
pkggraph::init(local = TRUE)
pkggraph::get_reverse_linkingto("BH")
}
\seealso{
\code{\link{get_reverse_depends}},
\code{\link{get_reverse_imports}}, \code{\link{get_reverse_linkingto}},
\code{\link{get_reverse_suggests}}, \code{\link{get_reverse_enhances}},
\code{\link{get_all_reverse_dependencies}}, \code{\link{get_linkingto}}
}
\author{
Srikanth KS
}
|
# Auto-generated fuzzing regression case (AFL + valgrind harness) for the
# internal compiled routine myTAI:::cpp_bootMatrix. The extreme/denormal
# double values are deliberate edge-case inputs produced by the fuzzer;
# do not hand-edit them.
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.60630538515239e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
# Invoke the compiled routine with the fuzzed arguments; a crash or valgrind
# error here is the regression signal.
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result)
|
/myTAI/inst/testfiles/cpp_bootMatrix/AFL_cpp_bootMatrix/cpp_bootMatrix_valgrind_files/1615768287-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 1,803
|
r
|
# Auto-generated fuzzing regression case (AFL + valgrind harness) for the
# internal compiled routine myTAI:::cpp_bootMatrix. The extreme/denormal
# double values are deliberate edge-case inputs produced by the fuzzer;
# do not hand-edit them.
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.60630538515239e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
# Invoke the compiled routine with the fuzzed arguments; a crash or valgrind
# error here is the regression signal.
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result)
|
#' @title reset.sim
#' @description a function to create a "reasonable" set of parameters
#' to create simulated panel data that obeys a parallel trends assumption.
#' In particular, it provides parameters where the the effect of participating
#' in the treatment is equal to one in all post-treatment time periods.
#'
#' After calling this function, the user can change particular values of the
#' parameters in order to generate dynamics, heterogeneous effects across
#' groups, etc.
#'
#' @param time.periods The number of time periods to include
#' @param n The total number of observations
#' @param ipw If TRUE, sets parameters so that DGP is
#' compatible with recovering ATT(g,t)'s using IPW (i.e.,
#' where logit that just includes a linear term in X works). If
#' FALSE, sets parameters that will be incompatible with IPW.
#' Either way, these parameters can be specified by the user
#' if so desired.
#' @param reg If TRUE, sets parameters so that DGP is compatible
#' with recovering ATT(g,t)'s using regressions on untreated
#' untreated potential outcomes. If FALSE, sets parameters that
#' will be incompatible with using regressions (i.e., regressions
#' that include only linear term in X). Either way, these
#' parameters can be specified by the user if so desired.
#'
#' @return list of simulation parameters
#'
#' @export
reset.sim <- function(time.periods=4, n=5000, ipw=TRUE, reg=TRUE) {
  #---------------------------------------------------------------------------
  # set parameters
  #---------------------------------------------------------------------------
  # Coefficient on X in each period: 1, 2, ..., time.periods.
  # seq_len() replaces the original seq(1:time.periods) anti-pattern
  # (seq() applied to a vector falls back to seq_along(); same result,
  # clearer intent and safe by construction).
  bett <- seq_len(time.periods)
  # time fixed effects for treated potential outcomes
  thet <- seq_len(time.periods)
  # time fixed effects for untreated units; changing this away from `thet`
  # creates violations of parallel trends
  theu <- thet
  # covariate effect for untreated units; changing this away from `bett`
  # creates violations of conditional parallel trends
  betu <- bett
  #---------------------------------------------------------------------------
  # parameters for treated potential outcomes
  #---------------------------------------------------------------------------
  te.bet.ind <- rep(1,time.periods) # no selective treatment timing
  te.bet.X <- bett                  # no heterogeneous effects by X
  te.t <- thet                      # no calendar time effects
  te.e <- rep(0,time.periods)       # no dynamic effects
  te <- 1                           # overall basic treatment effect
  # parameters in the generalized propensity score; kept small, otherwise
  # the exponentials downstream can overflow / divide by 0
  gamG <- c(0, seq_len(time.periods))/(2*time.periods)
  # return list of parameters
  list(time.periods=time.periods,
       bett=bett,
       thet=thet,
       theu=theu,
       betu=betu,
       te.bet.ind=te.bet.ind,
       te.bet.X=te.bet.X,
       te.t=te.t,
       te.e=te.e,
       te=te,
       n=n,
       gamG=gamG,
       ipw=ipw,
       reg=reg
  )
}
#' @title build_sim_dataset
#'
#' @description A function for building simulated data
#'
#' @param sp_list A list of simulation parameters. See `reset.sim` to generate
#' some default values for parameters
#' @param panel whether to construct panel data (the default) or repeated
#' cross sections data
#'
#' @return a data.frame with the following columns
#' \itemize{
#' \item G observations group
#' \item X value of covariate
#' \item id observation's id
#' \item cluster observation's cluster (by construction there is no within-cluster correlation)
#' \item period time period for current observation
#' \item Y outcome
#' \item treat whether or not this unit is ever treated
#' }
#'
#' @export
build_sim_dataset <- function(sp_list, panel=TRUE) {
#-----------------------------------------------------------------------------
# build dataset
#-----------------------------------------------------------------------------
# Unpack simulation parameters produced by reset.sim().
time.periods <- sp_list$time.periods
# NOTE(review): reset.sim() does not set `nt` or `nu`, so these two reads are
# typically NULL; both are recomputed below before first use — confirm they
# can be dropped.
nt <- sp_list$nt
bett <- sp_list$bett
thet=sp_list$thet
nu <- sp_list$nu
theu <- sp_list$theu
betu <- sp_list$betu
te.bet.ind <- sp_list$te.bet.ind
te.bet.X <- sp_list$te.bet.X
te.t <- sp_list$te.t
te.e <- sp_list$te.e
te <- sp_list$te
n <- sp_list$n
gamG <- sp_list$gamG
ipw <- sp_list$ipw
reg <- sp_list$reg
# Covariate for all n units.
X <- rnorm(n)
# Generalized propensity score: softmax over groups 0..time.periods.
# With ipw=TRUE the score is linear in X (logit-compatible); otherwise a
# nonlinear transform of X is used so plain IPW is misspecified.
if (ipw) {
pr <- exp(outer(X,gamG)) / apply( exp(outer(X,gamG)), 1, sum)
} else {
pr <- exp(outer((pnorm(X)+0.5)^2,gamG)) / apply( exp(outer((pnorm(X)+0.5)^2,gamG)), 1, sum)
}
# Draw each unit's group: 0 = never treated, g>0 = first treated in period g.
G <- apply(pr, 1, function(pvec) sample(seq(0,time.periods), size=1, prob=pvec))
Gt <- G[G>0]
nt <- length(Gt)
# With reg=TRUE outcomes are linear in X (regression-compatible); otherwise
# they depend on X^2 so a linear-in-X regression is misspecified.
if (reg) {
Xmodel <- X
} else {
Xmodel <- X^2
}
Xt <- Xmodel[G>0]
# draw individual fixed effect
# NOTE(review): mean=G has length n while only nt draws are made, so rnorm
# recycles the first nt elements of G rather than using Gt — confirm intended.
Ct <- rnorm(nt, mean=G)
# generate untreated potential outcomes in each time period
Ynames <- paste0("Y",1:time.periods)
#Ynames <- paste0(1:time.periods)
Y0tmat <- sapply(1:time.periods, function(t) {
thet[t] + Ct + Xt*bett[t] + rnorm(nt)
})
Y0tdf <- as.data.frame(Y0tmat)
# generate treated potential outcomes
Y1tdf <- sapply(1:time.periods, function(t) {
te.t[t] + te.bet.ind[Gt]*Ct + Xt*te.bet.X[t] + (Gt <= t)*te.e[sapply(1:nt, function(i) max(t-Gt[i]+1,1))] + te + rnorm(nt) # hack for the dynamic effects but ok
})
# generate observed data: treated outcome once the unit's group period is
# reached, untreated outcome beforehand
Ytdf <- sapply(1:time.periods, function(t) {
(Gt<=t)*Y1tdf[,t] + (Gt>t)*Y0tdf[,t]
})
colnames(Ytdf) <- Ynames
# store observed data for treated group
dft <- cbind.data.frame(G=Gt,X=X[G>0],Ytdf)
# untreated units
# draw untreated covariate
nu <- sum(G==0)
Xu <- Xmodel[G==0]
# draw untreated fixed effect
Cu <- rnorm(nu, mean=0)
# generate untreated potential outcomes
Y0umat <- sapply(1:time.periods, function(t) {
theu[t] + Cu + rnorm(nu) + Xu*betu[t]
})
Y0udf <- as.data.frame(Y0umat)
colnames(Y0udf) <- Ynames
# store dataset of observed outcomes for untreated units
dfu <- cbind.data.frame(G=0,X=X[G==0],Y0udf)
# store overall dataset
df <- rbind.data.frame(dft, dfu)
# generate id variable
df$id <- 1:nrow(df)
# generate clusters (there's no actual within-cluster correlation)
df$cluster <- sample(1:50, size=nrow(df), replace=TRUE)
# convert data from wide to long format: one row per (id, period)
ddf <- tidyr::pivot_longer(df,
cols=tidyr::starts_with("Y"),
names_to="period",
names_prefix="Y",
values_to="Y")
ddf$period <- as.numeric(ddf$period)
ddf$treat <- 1*(ddf$G > 0)
ddf <- ddf[order(ddf$id, ddf$period),] # reorder data
# For repeated cross sections, keep exactly one randomly chosen period per id.
if (!panel) { # repeated cross sections
n <- nt+nu
Time <- sample(1:time.periods, size=n, replace=TRUE, prob=rep(1/time.periods, time.periods))
right.row <- sapply( unique(ddf$id), function(i) {
which(ddf$id==i & ddf$period==Time[i])
})
ddf <- ddf[right.row,]
}
# Drop units first treated in period 1 (no pre-treatment period available).
ddf <- subset(ddf, G != 1)
ddf
}
#' @title sim
#' @description An internal function that builds simulated data, computes
#' ATT(g,t)'s and some aggregations. It is useful for testing the inference
#' procedures in the `did` function.
#'
#' @inheritParams reset.sim
#' @inheritParams build_sim_dataset
#'
#' @param ret which type of results to return. The options are `Wpval` (returns
#' 1 if the p-value from a Wald test that all pre-treatment ATT(g,t)'s are equal
#' is less than .05),
#' `cband` (returns 1 if a uniform confidence band covers 0 for groups and times),
#' `simple` (returns 1 if, using the simple treatment effect aggregation results
#' in rejecting that this aggregated treatment effect parameter is equal to 0),
#' `dynamic` (returns 1 if the uniform confidence band from the dynamic treatment
#' effect aggregation covers 0 in all pre- and post-treatment periods). The default
#' value is NULL, and in this case the function will just return the results from
#' the call to `att_gt`.
#' @param bstrap whether or not to use the bootstrap to conduct inference (default is TRUE)
#' @param cband whether or not to compute uniform confidence bands in the call to `att_gt`
#' (the default is TRUE)
#' @param control_group Whether to use the "nevertreated" comparison group (the default)
#' or the "notyettreated" as the comparison group
#' @param xformla Formula for covariates in `att_gt` (default is `~X`)
#' @param est_method Which estimation method to use in `att_gt` (default is "dr")
#' @param clustervars Any additional variables which should be clustered on
#' @param panel whether to simulate panel data (the default) or otherwise repeated
#' cross sections data
#'
#' @return When `ret=NULL`, returns the results of the call to `att_gt`, otherwise returns
#' 1 if the specified test rejects or 0 if not.
#'
#' @export
sim <- function(sp_list,
ret=NULL,
bstrap=TRUE,
cband=TRUE,
control_group="nevertreated",
xformla=~X,
est_method="dr",
clustervars=NULL,
panel=TRUE) {
# Build a simulated dataset from the supplied parameters, estimate
# group-time ATTs with att_gt(), and — when `ret` names a check — reduce
# the results to a 0/1 indicator for use in a Monte Carlo study.
ddf <- build_sim_dataset(sp_list=sp_list,
panel=panel)
# True parameter values used for the coverage checks below.
time.periods <- sp_list$time.periods
te.e <- sp_list$te.e
te <- sp_list$te
# get results
res <- att_gt(yname="Y", xformla=xformla, data=ddf, tname="period", idname="id",
gname="G",
bstrap=bstrap, cband=cband, control_group=control_group,
est_method=est_method,
clustervars=clustervars,
panel=panel)
if (is.null(ret)) {
return(res)
} else if (ret=="Wpval") {
# 1 if the pre-test Wald p-value rejects at the 5% level
rej <- 1*(res$Wpval < .05)
return(rej)
} else if (ret=="cband") {
# 1 if the uniform confidence band covers 0 for every (g,t)
cu <- res$att + res$c*res$se
cl <- res$att - res$c*res$se
covers0 <- 1*(all( (cu > 0) & (cl < 0)))
return(covers0)
} else if (ret=="simple") {
# 1 if the simple aggregated ATT is significantly different from 0
agg <- aggte(res)
rej <- 1*( abs(agg$overall.att / agg$overall.se) > qnorm(.975) )
return(rej)
} else if (ret=="dynamic") {
# 1 if the event-study uniform band covers the true dynamic path
# (0 in pre-treatment event times, te + te.e afterwards)
truth <- c(rep(0,(time.periods-2)),te+te.e[1:(time.periods-1)])
agg <- aggte(res, type="dynamic")
cu <- agg$att.egt + agg$crit.val.egt * agg$se.egt
cl <- agg$att.egt - agg$crit.val.egt * agg$se.egt
coverstruth <- 1*(all( (cu > truth) & (cl < truth)))
return(coverstruth)
} else if (ret=="notyettreated") {
# same rejection rule as "simple"; the not-yet-treated comparison group
# is selected by the caller via `control_group`
agg <- aggte(res)
rej <- 1*( abs(agg$overall.att / agg$overall.se) > qnorm(.975) )
return(rej)
} else {
return(res)
}
}
## pretest_sim <- function(ret=NULL, bstrap=FALSE, cband=FALSE,
## control.group="nevertreated", panel=TRUE, xformla=~X, cores=1) {
## ddf <- build_ipw_dataset(panel=panel)
## # get results
## res <- conditional_did_pretest(yname="Y", xformla=xformla, data=ddf,
## tname="period", idname="id",
## first.treat.name="G", estMethod="ipw",
## printdetails=FALSE,
## bstrap=bstrap, cband=cband,
## control.group=control.group,
## panel=panel,
## pl=TRUE, cores=cores)
## res$CvMpval
## }
|
/R/simulate_data.R
|
no_license
|
bcallaway11/did
|
R
| false
| false
| 11,095
|
r
|
#' @title reset.sim
#' @description a function to create a "reasonable" set of parameters
#' to create simulated panel data that obeys a parallel trends assumption.
#' In particular, it provides parameters where the the effect of participating
#' in the treatment is equal to one in all post-treatment time periods.
#'
#' After calling this function, the user can change particular values of the
#' parameters in order to generate dynamics, heterogeneous effects across
#' groups, etc.
#'
#' @param time.periods The number of time periods to include
#' @param n The total number of observations
#' @param ipw If TRUE, sets parameters so that DGP is
#' compatible with recovering ATT(g,t)'s using IPW (i.e.,
#' where logit that just includes a linear term in X works). If
#' FALSE, sets parameters that will be incompatible with IPW.
#' Either way, these parameters can be specified by the user
#' if so desired.
#' @param reg If TRUE, sets parameters so that DGP is compatible
#' with recovering ATT(g,t)'s using regressions on untreated
#' untreated potential outcomes. If FALSE, sets parameters that
#' will be incompatible with using regressions (i.e., regressions
#' that include only linear term in X). Either way, these
#' parameters can be specified by the user if so desired.
#'
#' @return list of simulation parameters
#'
#' @export
reset.sim <- function(time.periods=4, n=5000, ipw=TRUE, reg=TRUE) {
  #---------------------------------------------------------------------------
  # set parameters
  #---------------------------------------------------------------------------
  # Coefficient on X in each period: 1, 2, ..., time.periods.
  # seq_len() replaces the original seq(1:time.periods) anti-pattern
  # (seq() applied to a vector falls back to seq_along(); same result,
  # clearer intent and safe by construction).
  bett <- seq_len(time.periods)
  # time fixed effects for treated potential outcomes
  thet <- seq_len(time.periods)
  # time fixed effects for untreated units; changing this away from `thet`
  # creates violations of parallel trends
  theu <- thet
  # covariate effect for untreated units; changing this away from `bett`
  # creates violations of conditional parallel trends
  betu <- bett
  #---------------------------------------------------------------------------
  # parameters for treated potential outcomes
  #---------------------------------------------------------------------------
  te.bet.ind <- rep(1,time.periods) # no selective treatment timing
  te.bet.X <- bett                  # no heterogeneous effects by X
  te.t <- thet                      # no calendar time effects
  te.e <- rep(0,time.periods)       # no dynamic effects
  te <- 1                           # overall basic treatment effect
  # parameters in the generalized propensity score; kept small, otherwise
  # the exponentials downstream can overflow / divide by 0
  gamG <- c(0, seq_len(time.periods))/(2*time.periods)
  # return list of parameters
  list(time.periods=time.periods,
       bett=bett,
       thet=thet,
       theu=theu,
       betu=betu,
       te.bet.ind=te.bet.ind,
       te.bet.X=te.bet.X,
       te.t=te.t,
       te.e=te.e,
       te=te,
       n=n,
       gamG=gamG,
       ipw=ipw,
       reg=reg
  )
}
#' @title build_sim_dataset
#'
#' @description A function for building simulated data
#'
#' @param sp_list A list of simulation parameters. See `reset.sim` to generate
#' some default values for parameters
#' @param panel whether to construct panel data (the default) or repeated
#' cross sections data
#'
#' @return a data.frame with the following columns
#' \itemize{
#' \item G observations group
#' \item X value of covariate
#' \item id observation's id
#' \item cluster observation's cluster (by construction there is no within-cluster correlation)
#' \item period time period for current observation
#' \item Y outcome
#' \item treat whether or not this unit is ever treated
#' }
#'
#' @export
build_sim_dataset <- function(sp_list, panel=TRUE) {
#-----------------------------------------------------------------------------
# build dataset
#-----------------------------------------------------------------------------
# Unpack simulation parameters produced by reset.sim().
time.periods <- sp_list$time.periods
# NOTE(review): reset.sim() does not set `nt` or `nu`, so these two reads are
# typically NULL; both are recomputed below before first use — confirm they
# can be dropped.
nt <- sp_list$nt
bett <- sp_list$bett
thet=sp_list$thet
nu <- sp_list$nu
theu <- sp_list$theu
betu <- sp_list$betu
te.bet.ind <- sp_list$te.bet.ind
te.bet.X <- sp_list$te.bet.X
te.t <- sp_list$te.t
te.e <- sp_list$te.e
te <- sp_list$te
n <- sp_list$n
gamG <- sp_list$gamG
ipw <- sp_list$ipw
reg <- sp_list$reg
# Covariate for all n units.
X <- rnorm(n)
# Generalized propensity score: softmax over groups 0..time.periods.
# With ipw=TRUE the score is linear in X (logit-compatible); otherwise a
# nonlinear transform of X is used so plain IPW is misspecified.
if (ipw) {
pr <- exp(outer(X,gamG)) / apply( exp(outer(X,gamG)), 1, sum)
} else {
pr <- exp(outer((pnorm(X)+0.5)^2,gamG)) / apply( exp(outer((pnorm(X)+0.5)^2,gamG)), 1, sum)
}
# Draw each unit's group: 0 = never treated, g>0 = first treated in period g.
G <- apply(pr, 1, function(pvec) sample(seq(0,time.periods), size=1, prob=pvec))
Gt <- G[G>0]
nt <- length(Gt)
# With reg=TRUE outcomes are linear in X (regression-compatible); otherwise
# they depend on X^2 so a linear-in-X regression is misspecified.
if (reg) {
Xmodel <- X
} else {
Xmodel <- X^2
}
Xt <- Xmodel[G>0]
# draw individual fixed effect
# NOTE(review): mean=G has length n while only nt draws are made, so rnorm
# recycles the first nt elements of G rather than using Gt — confirm intended.
Ct <- rnorm(nt, mean=G)
# generate untreated potential outcomes in each time period
Ynames <- paste0("Y",1:time.periods)
#Ynames <- paste0(1:time.periods)
Y0tmat <- sapply(1:time.periods, function(t) {
thet[t] + Ct + Xt*bett[t] + rnorm(nt)
})
Y0tdf <- as.data.frame(Y0tmat)
# generate treated potential outcomes
Y1tdf <- sapply(1:time.periods, function(t) {
te.t[t] + te.bet.ind[Gt]*Ct + Xt*te.bet.X[t] + (Gt <= t)*te.e[sapply(1:nt, function(i) max(t-Gt[i]+1,1))] + te + rnorm(nt) # hack for the dynamic effects but ok
})
# generate observed data: treated outcome once the unit's group period is
# reached, untreated outcome beforehand
Ytdf <- sapply(1:time.periods, function(t) {
(Gt<=t)*Y1tdf[,t] + (Gt>t)*Y0tdf[,t]
})
colnames(Ytdf) <- Ynames
# store observed data for treated group
dft <- cbind.data.frame(G=Gt,X=X[G>0],Ytdf)
# untreated units
# draw untreated covariate
nu <- sum(G==0)
Xu <- Xmodel[G==0]
# draw untreated fixed effect
Cu <- rnorm(nu, mean=0)
# generate untreated potential outcomes
Y0umat <- sapply(1:time.periods, function(t) {
theu[t] + Cu + rnorm(nu) + Xu*betu[t]
})
Y0udf <- as.data.frame(Y0umat)
colnames(Y0udf) <- Ynames
# store dataset of observed outcomes for untreated units
dfu <- cbind.data.frame(G=0,X=X[G==0],Y0udf)
# store overall dataset
df <- rbind.data.frame(dft, dfu)
# generate id variable
df$id <- 1:nrow(df)
# generate clusters (there's no actual within-cluster correlation)
df$cluster <- sample(1:50, size=nrow(df), replace=TRUE)
# convert data from wide to long format: one row per (id, period)
ddf <- tidyr::pivot_longer(df,
cols=tidyr::starts_with("Y"),
names_to="period",
names_prefix="Y",
values_to="Y")
ddf$period <- as.numeric(ddf$period)
ddf$treat <- 1*(ddf$G > 0)
ddf <- ddf[order(ddf$id, ddf$period),] # reorder data
# For repeated cross sections, keep exactly one randomly chosen period per id.
if (!panel) { # repeated cross sections
n <- nt+nu
Time <- sample(1:time.periods, size=n, replace=TRUE, prob=rep(1/time.periods, time.periods))
right.row <- sapply( unique(ddf$id), function(i) {
which(ddf$id==i & ddf$period==Time[i])
})
ddf <- ddf[right.row,]
}
# Drop units first treated in period 1 (no pre-treatment period available).
ddf <- subset(ddf, G != 1)
ddf
}
#' @title sim
#' @description An internal function that builds simulated data, computes
#' ATT(g,t)'s and some aggregations. It is useful for testing the inference
#' procedures in the `did` function.
#'
#' @inheritParams reset.sim
#' @inheritParams build_sim_dataset
#'
#' @param ret which type of results to return. The options are `Wpval` (returns
#' 1 if the p-value from a Wald test that all pre-treatment ATT(g,t)'s are equal
#' is less than .05),
#' `cband` (returns 1 if a uniform confidence band covers 0 for groups and times),
#' `simple` (returns 1 if, using the simple treatment effect aggregation results
#' in rejecting that this aggregated treatment effect parameter is equal to 0),
#' `dynamic` (returns 1 if the uniform confidence band from the dynamic treatment
#' effect aggregation covers 0 in all pre- and post-treatment periods). The default
#' value is NULL, and in this case the function will just return the results from
#' the call to `att_gt`.
#' @param bstrap whether or not to use the bootstrap to conduct inference (default is TRUE)
#' @param cband whether or not to compute uniform confidence bands in the call to `att_gt`
#' (the default is TRUE)
#' @param control_group Whether to use the "nevertreated" comparison group (the default)
#' or the "notyettreated" as the comparison group
#' @param xformla Formula for covariates in `att_gt` (default is `~X`)
#' @param est_method Which estimation method to use in `att_gt` (default is "dr")
#' @param clustervars Any additional variables which should be clustered on
#' @param panel whether to simulate panel data (the default) or otherwise repeated
#' cross sections data
#'
#' @return When `ret=NULL`, returns the results of the call to `att_gt`, otherwise returns
#' 1 if the specified test rejects or 0 if not.
#'
#' @export
sim <- function(sp_list,
                ret=NULL,
                bstrap=TRUE,
                cband=TRUE,
                control_group="nevertreated",
                xformla=~X,
                est_method="dr",
                clustervars=NULL,
                panel=TRUE) {
  # Build a simulated panel / repeated-cross-section dataset and estimate
  # group-time average treatment effects on it; optionally reduce the results
  # to a single 0/1 rejection-or-coverage indicator selected by `ret`.
  sim_data <- build_sim_dataset(sp_list=sp_list, panel=panel)
  time.periods <- sp_list$time.periods
  te.e <- sp_list$te.e
  te <- sp_list$te

  # estimate ATT(g,t)'s on the simulated data
  res <- att_gt(yname="Y", xformla=xformla, data=sim_data, tname="period",
                idname="id", gname="G",
                bstrap=bstrap, cband=cband, control_group=control_group,
                est_method=est_method,
                clustervars=clustervars,
                panel=panel)

  # With no `ret` (or a non-character one, which the original comparisons
  # treated as "no match"), hand back the raw att_gt results.
  if (is.null(ret) || !is.character(ret)) {
    return(res)
  }

  switch(ret,
         Wpval = {
           # reject if the pre-test Wald p-value falls below .05
           1*(res$Wpval < .05)
         },
         cband = {
           # does the uniform confidence band cover zero everywhere?
           upper <- res$att + res$c*res$se
           lower <- res$att - res$c*res$se
           1*(all( (upper > 0) & (lower < 0)))
         },
         simple = ,
         notyettreated = {
           # both keywords run the same overall-ATT z-test (as in the original)
           agg <- aggte(res)
           1*( abs(agg$overall.att / agg$overall.se) > qnorm(.975) )
         },
         dynamic = {
           # true event-study path: zeros pre-treatment, te + te.e afterwards
           truth <- c(rep(0,(time.periods-2)),te+te.e[1:(time.periods-1)])
           agg <- aggte(res, type="dynamic")
           upper <- agg$att.egt + agg$crit.val.egt * agg$se.egt
           lower <- agg$att.egt - agg$crit.val.egt * agg$se.egt
           1*(all( (upper > truth) & (lower < truth)))
         },
         # any other string: return the full att_gt results unchanged
         res)
}
## pretest_sim <- function(ret=NULL, bstrap=FALSE, cband=FALSE,
## control.group="nevertreated", panel=TRUE, xformla=~X, cores=1) {
## ddf <- build_ipw_dataset(panel=panel)
## # get results
## res <- conditional_did_pretest(yname="Y", xformla=xformla, data=ddf,
## tname="period", idname="id",
## first.treat.name="G", estMethod="ipw",
## printdetails=FALSE,
## bstrap=bstrap, cband=cband,
## control.group=control.group,
## panel=panel,
## pl=TRUE, cores=cores)
## res$CvMpval
## }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qstats.r
\name{qstats}
\alias{qstats}
\title{Summary statistics for a quantitative variable}
\usage{
qstats(data, x, ..., stats = c("n", "mean", "sd"), na.rm = TRUE, digits = 2)
}
\arguments{
\item{data}{data frame}
\item{x}{numeric variable in data (unquoted)}
\item{...}{list of grouping variables}
\item{stats}{statistics to calculate (any function that produces a
numeric value), Default: \code{c("n", "mean", "sd")}}
\item{na.rm}{if \code{TRUE}, delete cases with missing values on x and or grouping
variables, Default: \code{TRUE}}
\item{digits}{number of decimal digits to print, Default: 2}
}
\value{
a data frame, where columns are grouping variables (optional) and
statistics
}
\description{
This function provides descriptive statistics for a quantitative
variable alone or separately by groups. Any function that returns a single
numeric value can be used.
}
\examples{
# If no keyword arguments are provided, default values are used
qstats(mtcars, mpg, am, gear)
# You can supply as many (or no) grouping variables as needed
qstats(mtcars, mpg)
qstats(mtcars, mpg, am, cyl)
# You can specify your own functions (e.g., median,
# median absolute deviation, minimum, maximum)
qstats(mtcars, mpg, am, gear,
stats = c("median", "mad", "min", "max"))
}
|
/man/qstats.Rd
|
permissive
|
Rkabacoff/qacEDA
|
R
| false
| true
| 1,355
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qstats.r
\name{qstats}
\alias{qstats}
\title{Summary statistics for a quantitative variable}
\usage{
qstats(data, x, ..., stats = c("n", "mean", "sd"), na.rm = TRUE, digits = 2)
}
\arguments{
\item{data}{data frame}
\item{x}{numeric variable in data (unquoted)}
\item{...}{list of grouping variables}
\item{stats}{statistics to calculate (any function that produces a
numeric value), Default: \code{c("n", "mean", "sd")}}
\item{na.rm}{if \code{TRUE}, delete cases with missing values on x and or grouping
variables, Default: \code{TRUE}}
\item{digits}{number of decimal digits to print, Default: 2}
}
\value{
a data frame, where columns are grouping variables (optional) and
statistics
}
\description{
This function provides descriptive statistics for a quantitative
variable alone or separately by groups. Any function that returns a single
numeric value can be used.
}
\examples{
# If no keyword arguments are provided, default values are used
qstats(mtcars, mpg, am, gear)
# You can supply as many (or no) grouping variables as needed
qstats(mtcars, mpg)
qstats(mtcars, mpg, am, cyl)
# You can specify your own functions (e.g., median,
# median absolute deviation, minimum, maximum)
qstats(mtcars, mpg, am, gear,
stats = c("median", "mad", "min", "max"))
}
|
#!/usr/bin/env Rscript
# Combine a QIIME2 relative-abundance ASV table, representative sequences
# (FASTA), and taxonomy assignments into one tab-separated output table.
args <- commandArgs(trailingOnly=TRUE)
if(length(args) < 3){
  stop("Usage: combineTable.r <relASVTable.tsv> <repseq.fasta> <taxonomy.tsv>")
}
# Keep the file paths in their own variables: the originals overwrote
# `table`/`seq` with the parsed data, so the diagnostic messages below
# pasted entire data objects into the text instead of the file names.
table_file <- args[1]
seq_file <- args[2]
taxonomy_file <- args[3]
OUT <- "qiime2_ASV_table.csv"
#load packages, Biostrings_2.46.0
library("Biostrings")
#read required files
table <- read.table(file = table_file, sep = '\t', comment.char = "", skip=1, header=TRUE) #X.OTU.ID
tax <- read.table(file = taxonomy_file, sep = '\t', comment.char = "", header=TRUE) #Feature.ID
seq <- readDNAStringSet(seq_file)
seq <- data.frame(ID=names(seq), sequence=paste(seq))
#check if all ids match
# BUG FIX: the first check built a message with paste() but discarded it;
# emit it as a warning so the user actually sees it.
if(!all(seq$ID %in% tax$Feature.ID)) {
  warning(paste(seq_file,"and",taxonomy_file,"dont share all IDs, this is only ok when taxa were excluded."), call.=FALSE)
}
if(!all(seq$ID %in% table$X.OTU.ID)) {
  stop(paste(seq_file,"and",table_file,"dont share all IDs, exit"), call.=FALSE)
}
#merge: keep every sequence (all.y) and every ASV table row (all)
df <- merge(tax, seq, by.x="Feature.ID", by.y="ID", all.x=FALSE, all.y=TRUE)
df <- merge(df, table, by.x="Feature.ID", by.y="X.OTU.ID", all=TRUE)
#write
print (paste("write",OUT))
write.table(df, file = OUT, row.names=FALSE, sep="\t")
|
/bin/combineTable.r
|
permissive
|
apeltzer/rrna-ampliseq
|
R
| false
| false
| 1,105
|
r
|
#!/usr/bin/env Rscript
# Combine a QIIME2 relative-abundance ASV table, representative sequences
# (FASTA), and taxonomy assignments into one tab-separated output table.
args <- commandArgs(trailingOnly=TRUE)
if(length(args) < 3){
  stop("Usage: combineTable.r <relASVTable.tsv> <repseq.fasta> <taxonomy.tsv>")
}
# Keep the file paths in their own variables: the originals overwrote
# `table`/`seq` with the parsed data, so the diagnostic messages below
# pasted entire data objects into the text instead of the file names.
table_file <- args[1]
seq_file <- args[2]
taxonomy_file <- args[3]
OUT <- "qiime2_ASV_table.csv"
#load packages, Biostrings_2.46.0
library("Biostrings")
#read required files
table <- read.table(file = table_file, sep = '\t', comment.char = "", skip=1, header=TRUE) #X.OTU.ID
tax <- read.table(file = taxonomy_file, sep = '\t', comment.char = "", header=TRUE) #Feature.ID
seq <- readDNAStringSet(seq_file)
seq <- data.frame(ID=names(seq), sequence=paste(seq))
#check if all ids match
# BUG FIX: the first check built a message with paste() but discarded it;
# emit it as a warning so the user actually sees it.
if(!all(seq$ID %in% tax$Feature.ID)) {
  warning(paste(seq_file,"and",taxonomy_file,"dont share all IDs, this is only ok when taxa were excluded."), call.=FALSE)
}
if(!all(seq$ID %in% table$X.OTU.ID)) {
  stop(paste(seq_file,"and",table_file,"dont share all IDs, exit"), call.=FALSE)
}
#merge: keep every sequence (all.y) and every ASV table row (all)
df <- merge(tax, seq, by.x="Feature.ID", by.y="ID", all.x=FALSE, all.y=TRUE)
df <- merge(df, table, by.x="Feature.ID", by.y="X.OTU.ID", all=TRUE)
#write
print (paste("write",OUT))
write.table(df, file = OUT, row.names=FALSE, sep="\t")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query.R
\name{projects}
\alias{projects}
\alias{schema}
\alias{fields}
\alias{values}
\alias{query_graphql}
\title{Discover and query Gen3 resources}
\usage{
projects()
schema(as = c("brief", "full"))
fields(type_name, as = c("brief", "full"))
values(type_name, ..., .n = 10)
query_graphql(query)
}
\arguments{
\item{as}{for \code{fields()}, \code{"brief"} returns fields that do not start with
an underscore. \code{"full"} returns all fields.}
\item{type_name}{\code{character(1)} name of the type to be queried.}
\item{...}{\code{character(1)} field(s) to be queried.}
\item{.n}{integer(1) number of records to retrieve. The special
value \code{.n = 0} retrieves all records.}
\item{query}{character(1) valid graphql query to be evaluated by
the database.}
}
\value{
\code{projects()} returns a tibble with project_id, id, and
study_description. There are as many rows as there are projects
accessible to the current user.
\code{schema()} returns a tibble with a single column
(\code{"name"}) corresponding to the type names available in Gen3.
\code{fields()} returns a tibble with columns \code{type_name},
\code{field} (name of corresponding fields in type name) and \code{type}
(type of field, e.g., String, Int).
\code{values()} returns a tibble with type_name and field names
as columns, with one row for each record queried.
\code{query_graphql()} returns JSON-like list-of-lists following
the structure of the query, but with terminal data.frame-like
collections simplified to tibbles.
}
\description{
\code{projects()} returns projects available to the
currently authenticated user
\code{schema()} returns all type names (objects) defined in
the Gen3 schema. Type names form the basis of queries.
\code{fields()} returns fields defined on the type name. A
field has associated values that can be retrieved by queries.
\code{values()} returns values corresponding to fields of
\code{type_name}. Each row represents a record in the database.
\code{query_graphql()} allows arbitrary queries against the
graphql database.
}
\examples{
## Authenticate first
cache <- tools::R_user_dir("Gen3", "cache")
credentials <- file.path(cache, "credentials.json")
## only run examples if credentials file exists
stopifnot(
`no credentials file, cannot authenticate` = file.exists(credentials)
)
authenticate(credentials)
projects()
schema()
fields("subject")
values("subject", "id", "sex")
query <- '{
subject(
project_id: "open_access-1000Genomes"
first: 0
) {
id
sex
population
submitter_id
}
}'
result <- query_graphql(query)
result
}
|
/man/query.Rd
|
no_license
|
nturaga/Gen3
|
R
| false
| true
| 2,708
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query.R
\name{projects}
\alias{projects}
\alias{schema}
\alias{fields}
\alias{values}
\alias{query_graphql}
\title{Discover and query Gen3 resources}
\usage{
projects()
schema(as = c("brief", "full"))
fields(type_name, as = c("brief", "full"))
values(type_name, ..., .n = 10)
query_graphql(query)
}
\arguments{
\item{as}{for \code{fields()}, \code{"brief"} returns fields that do not start with
an underscore. \code{"full"} returns all fields.}
\item{type_name}{\code{character(1)} name of the type to be queried.}
\item{...}{\code{character(1)} field(s) to be queried.}
\item{.n}{integer(1) number of records to retrieve. The special
value \code{.n = 0} retrieves all records.}
\item{query}{character(1) valid graphql query to be evaluated by
the database.}
}
\value{
\code{projects()} returns a tibble with project_id, id, and
study_description. There are as many rows as there are projects
accessible to the current user.
\code{schema()} returns a tibble with a single column
(\code{"name"}) corresponding to the type names available in Gen3.
\code{fields()} returns a tibble with columns \code{type_name},
\code{field} (name of corresponding fields in type name) and \code{type}
(type of field, e.g., String, Int).
\code{values()} returns a tibble with type_name and field names
as columns, with one row for each record queried.
\code{query_graphql()} returns JSON-like list-of-lists following
the structure of the query, but with terminal data.frame-like
collections simplified to tibbles.
}
\description{
\code{projects()} returns projects available to the
currently authenticated user
\code{schema()} returns all type names (objects) defined in
the Gen3 schema. Type names form the basis of queries.
\code{fields()} returns fields defined on the type name. A
field has associated values that can be retrieved by queries.
\code{values()} returns values corresponding to fields of
\code{type_name}. Each row represents a record in the database.
\code{query_graphql()} allows arbitrary queries against the
graphql database.
}
\examples{
## Authenticate first
cache <- tools::R_user_dir("Gen3", "cache")
credentials <- file.path(cache, "credentials.json")
## only run examples if credentials file exists
stopifnot(
`no credentials file, cannot authenticate` = file.exists(credentials)
)
authenticate(credentials)
projects()
schema()
fields("subject")
values("subject", "id", "sex")
query <- '{
subject(
project_id: "open_access-1000Genomes"
first: 0
) {
id
sex
population
submitter_id
}
}'
result <- query_graphql(query)
result
}
|
# Final Project
# Shiny server for a movie-explorer app: filters a Kaggle movie data set by
# language, rating range, genre, and (optionally) release year, then shows up
# to 10 randomly sampled matches as a table and a budget-vs-revenue plot.
library(ggplot2)
library(dplyr)
library(shiny)
library(DT)
library(rsconnect)
library(shinycssloaders)
library(lubridate)
my.server <- function(input, output) {
# full movie data set, read once per session
data <- read.csv("data/movies.csv")
# Reactive subset of the data driven by the UI controls:
#   input$lang   - original language (exact match)
#   input$rating - numeric c(min, max) range for vote_average
#   input$year   - release year as text ("" means no year filter)
#   input$genre  - pattern matched against the genres column via grepl
#   input$action - "randomize" action-button click counter
changing_data <- reactive({
check_lang <- input$lang
rating_max <- input$rating[2]
rating_min <- input$rating[1]
check_year <- input$year
# release_date is month/day/year text; parse to Date for year() below
data$release_date <- lubridate::mdy(data$release_date)
curr_data <- filter(data, original_language == check_lang) %>%
filter(vote_average <= rating_max) %>% filter(vote_average >= rating_min) %>%
filter(grepl(input$genre, genres) == TRUE)
if (check_year != "") {
curr_data <- filter(curr_data, year(release_date) == check_year)
}
# re-sample once the action button has been pressed at least once
if(input$action != 0) {
num <- nrow(curr_data)
if (num > 10) {
num <- 10
}
curr_data <- sample_n(curr_data, num)
}
# NOTE(review): this second draw always runs, so the data are sampled twice
# whenever input$action != 0 -- presumably only one sample_n was intended;
# confirm before simplifying.
num <- nrow(curr_data)
if (num > 10) {
num <- 10
}
# last expression is the reactive's value: at most 10 randomly chosen rows
curr_data <- sample_n(curr_data, num)
})
# table of the sampled movies (title, overview, runtime, rating)
output$table <- renderDataTable({
DT::datatable((select(changing_data(), c("original_title", "overview", "runtime", "vote_average"))),
options = list(paging = FALSE), rownames = FALSE)
})
# budget vs revenue scatter (text labels only) for movies with known figures
output$plot <- renderPlot({
# a budget/revenue of 0 appears to mean "unknown" in this data set; drop those
plot_data <- filter(changing_data(), budget != 0) %>% filter(revenue != 0)
validate(
need(nrow(plot_data) != 0, "No budget/revenue data to plot.")
)
ggplot(plot_data, aes((x = budget), y = (revenue/1000000), label=title)) +
labs(y = "Revenue ($ in millions)", x = "Budget") +
#geom_point()+
geom_text(aes(label=title),hjust=-.1, vjust=.3) +
ggtitle("Budget vs Revenue Comparison") +
theme(plot.title = element_text(face="bold", size=32, hjust=0)) +
theme(axis.title = element_text(face="bold", size=22)) +
theme(panel.background = element_rect(fill = "lightblue",
colour = "lightblue",
size = 0.5, linetype = "solid"))
})
# static attribution / credits text
output$gather_info <- renderText({
paste("Data gathered from the movie data set found of kaggle")
})
output$our_info <- renderText({
paste("Created by: Naveen Janarthanan, Camille Munro, Andy Tillotson, Jose Santos")
})
}
shinyServer(my.server)
|
/server.R
|
no_license
|
navjanar/7654321098_finalproj
|
R
| false
| false
| 2,311
|
r
|
# Final Project
# Shiny server for a movie-explorer app: filters a Kaggle movie data set by
# language, rating range, genre, and (optionally) release year, then shows up
# to 10 randomly sampled matches as a table and a budget-vs-revenue plot.
library(ggplot2)
library(dplyr)
library(shiny)
library(DT)
library(rsconnect)
library(shinycssloaders)
library(lubridate)
my.server <- function(input, output) {
# full movie data set, read once per session
data <- read.csv("data/movies.csv")
# Reactive subset of the data driven by the UI controls:
#   input$lang   - original language (exact match)
#   input$rating - numeric c(min, max) range for vote_average
#   input$year   - release year as text ("" means no year filter)
#   input$genre  - pattern matched against the genres column via grepl
#   input$action - "randomize" action-button click counter
changing_data <- reactive({
check_lang <- input$lang
rating_max <- input$rating[2]
rating_min <- input$rating[1]
check_year <- input$year
# release_date is month/day/year text; parse to Date for year() below
data$release_date <- lubridate::mdy(data$release_date)
curr_data <- filter(data, original_language == check_lang) %>%
filter(vote_average <= rating_max) %>% filter(vote_average >= rating_min) %>%
filter(grepl(input$genre, genres) == TRUE)
if (check_year != "") {
curr_data <- filter(curr_data, year(release_date) == check_year)
}
# re-sample once the action button has been pressed at least once
if(input$action != 0) {
num <- nrow(curr_data)
if (num > 10) {
num <- 10
}
curr_data <- sample_n(curr_data, num)
}
# NOTE(review): this second draw always runs, so the data are sampled twice
# whenever input$action != 0 -- presumably only one sample_n was intended;
# confirm before simplifying.
num <- nrow(curr_data)
if (num > 10) {
num <- 10
}
# last expression is the reactive's value: at most 10 randomly chosen rows
curr_data <- sample_n(curr_data, num)
})
# table of the sampled movies (title, overview, runtime, rating)
output$table <- renderDataTable({
DT::datatable((select(changing_data(), c("original_title", "overview", "runtime", "vote_average"))),
options = list(paging = FALSE), rownames = FALSE)
})
# budget vs revenue scatter (text labels only) for movies with known figures
output$plot <- renderPlot({
# a budget/revenue of 0 appears to mean "unknown" in this data set; drop those
plot_data <- filter(changing_data(), budget != 0) %>% filter(revenue != 0)
validate(
need(nrow(plot_data) != 0, "No budget/revenue data to plot.")
)
ggplot(plot_data, aes((x = budget), y = (revenue/1000000), label=title)) +
labs(y = "Revenue ($ in millions)", x = "Budget") +
#geom_point()+
geom_text(aes(label=title),hjust=-.1, vjust=.3) +
ggtitle("Budget vs Revenue Comparison") +
theme(plot.title = element_text(face="bold", size=32, hjust=0)) +
theme(axis.title = element_text(face="bold", size=22)) +
theme(panel.background = element_rect(fill = "lightblue",
colour = "lightblue",
size = 0.5, linetype = "solid"))
})
# static attribution / credits text
output$gather_info <- renderText({
paste("Data gathered from the movie data set found of kaggle")
})
output$our_info <- renderText({
paste("Created by: Naveen Janarthanan, Camille Munro, Andy Tillotson, Jose Santos")
})
}
shinyServer(my.server)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/functions_targetHub.R
\name{parse_response}
\alias{parse_response}
\title{Function that checks the status and parses the response.}
\usage{
parse_response(req)
}
\arguments{
\item{req}{json object}
\item{...}{other arguments}
}
\description{
Function that checks the status and parses the response.
}
|
/miRNAtargetpackage.Rcheck/00_pkg_src/miRNAtargetpackage/man/parse_response.Rd
|
no_license
|
camgu844/miRNA
|
R
| false
| false
| 389
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/functions_targetHub.R
\name{parse_response}
\alias{parse_response}
\title{Function that checks the status and parses the response.}
\usage{
parse_response(req)
}
\arguments{
\item{req}{json object}
\item{...}{other arguments}
}
\description{
Function that checks the status and parses the response.
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.