content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# Attempt to buy animal `zwierzak` for the player.
# If the player's first five animal counts are worth at least the asking
# price, the purchase is delegated to kup_zwierze_yolo() (defined elsewhere);
# otherwise the unchanged player and herd state are returned column-bound.
zakup_zwierzecia <- function(stan_gracza, stan_stada, zwierzak) {
  # Price list keyed by animal code (presumably Super Farmer animals),
  # expressed in rabbit (R) units.
  wartosci_zwierzat <- c(R = 1, S = 6, P = 12, C = 36, H = 72, SD = 6, BD = 36)
  # Player's total wealth over the first five animal types.
  majatek <- sum(stan_gracza[1:5] * wartosci_zwierzat[1:5])
  if (majatek < wartosci_zwierzat[zwierzak]) {
    # Cannot afford the animal: return the state unmodified.
    return(cbind(stan_gracza, stan_stada))
  }
  kup_zwierze_yolo(zwierzak, stan_gracza, stan_stada)
}
|
/R/zakup_zwierzecia.R
|
no_license
|
nkneblewska/SuperFarmerRCNK
|
R
| false
| false
| 357
|
r
|
# Buys animal `zwierzak` if the player can afford it, otherwise returns the
# current state unchanged.
zakup_zwierzecia <- function(stan_gracza, stan_stada, zwierzak) {
# Price list keyed by animal code (presumably Super Farmer animals), in
# rabbit (R) units.
wartosci_zwierzat <- c(R = 1, S = 6, P = 12, C = 36, H = 72, SD = 6, BD = 36)
# Total worth of the player's first five animal counts vs. the asking price.
if (sum(stan_gracza[1:5]*wartosci_zwierzat[1:5]) >= wartosci_zwierzat[zwierzak]) {
# Affordable: delegate the actual purchase (function defined elsewhere).
return (kup_zwierze_yolo(zwierzak, stan_gracza, stan_stada))
} else {
# Not affordable: return player and herd state unchanged, column-bound.
return (cbind(stan_gracza, stan_stada))
}
}
|
# Data-wrangling and plotting libraries (readr/dplyr/ggplot2 are attached
# explicitly even though tidyverse already provides them).
library(tidyverse)
library(readr)
library(dplyr)
library(ggplot2)
library(ggthemes)  # provides theme_hc(); it was used below but never loaded
# Set your own working directory here.
# BUG FIX: "C:\Users\..." is not a valid R string ("\U" is an escape
# sequence), so the original line failed to parse — use forward slashes.
setwd("C:/Users/47958/Desktop/BED2056")
# Read the downloaded and unpacked data; remove the "#" to run.
#Birth17 <- read_csv("Birth17.txt")
#Birth18 <- read_csv("Birth18.txt")
#Birth19 <- read_csv("Birth19.txt")
################################################################
# Tag each data set with its year
Birth17 <- Birth17 %>% mutate(Year = 2017)
Birth18 <- Birth18 %>% mutate(Year = 2018)
Birth19 <- Birth19 %>% mutate(Year = 2019)
# Combine the three years into one data set
BirthTot <- bind_rows(Birth17, Birth18, Birth19)
# Make the variables numeric
BirthTot$BirthMonth <- as.numeric(BirthTot$BirthMonth)
BirthTot$BirthWeight <- as.numeric(BirthTot$BirthWeight)
# Check the structure
str(BirthTot)
# One row per year/sex group, carrying `count` = group size (the row whose
# running row number equals the group maximum).
allbirth <- BirthTot %>%
  group_by(Year, SexOfInfant) %>%
  mutate(count = row_number()) %>%
  filter(count == max(count))
# BUG FIX: the plot needs the `count` column, which exists in `allbirth`, not
# in `BirthTot`; also dropped the invalid `+ skip_empty_rows(=TRUE)` term
# (that is a read_csv() argument, not a ggplot layer, and did not parse).
dataplotSOF <- ggplot(data = allbirth,
                      aes(x = SexOfInfant, y = count, fill = SexOfInfant)) +
  geom_bar(stat = "identity") +
  theme_hc() +
  ylab(expression("Antall fødsler")) +
  xlab("Sex of Infant") +
  facet_wrap(~Year)
# Average birth weight per year and sex.
# BUG FIX: computed from the full data, not from `allbirth` (which holds only
# one row per group, so its mean was just that single row's weight).
gjbirth <- BirthTot %>%
  group_by(Year, SexOfInfant) %>%
  summarise(avgWeight = mean(BirthWeight, na.rm = TRUE))
gjbirth
# Births by weekday
# BUG FIX: start again from the full data — `allbirth` was already collapsed
# to one row per year/sex, so weekday counts would all have been ~1.
ukebasis <- BirthTot %>%
  arrange(Year, SexOfInfant, BirthDayOfWeek)
# One row per year/sex/weekday, carrying `count` = group size
ukebasis <- ukebasis %>%
  group_by(Year, SexOfInfant, BirthDayOfWeek) %>%
  mutate(count = row_number()) %>%
  filter(count == max(count))
# Plot births per weekday, one line per sex, faceting left as in the original
ukebasis %>%
  ggplot(aes(x = BirthDayOfWeek, y = count, group = SexOfInfant)) +
  geom_line(aes(color = SexOfInfant)) +
  ylab(expression("Fødsler")) +
  xlab("Weekday 1=Sunday,7=Monday")
|
/Assginment 7.R
|
no_license
|
edvardberg/homeworkbed2056
|
R
| false
| false
| 1,771
|
r
|
library(tidyverse)
library(readr)
library(dplyr)
library(ggplot2)
# Set your own working directory here
# NOTE(review): "C:\Users\..." contains invalid escape sequences ("\U") and
# will not parse in R — needs forward slashes or doubled backslashes.
setwd("C:\Users\47958\Desktop\BED2056")
# Read in the data, which was downloaded and unpacked. Remove the "#" to run.
#Birth17 <- read_csv("Birth17.txt")
#Birth18 <- read_csv("Birth18.txt")
#Birth19 <- read_csv("Birth19.txt")
################################################################
# Tag each data set with its year
Birth17 <- Birth17 %>% mutate(Year=2017)
Birth18 <- Birth18 %>% mutate(Year=2018)
Birth19 <- Birth19 %>% mutate(Year=2019)
# Combine the data into one data set
BirthTot <- bind_rows(Birth17,Birth18,Birth19)
# Make the variables numeric
BirthTot$BirthMonth <- as.numeric(BirthTot$BirthMonth)
BirthTot$BirthWeight <- as.numeric(BirthTot$BirthWeight)
# Check the structure
str(BirthTot)
# One row per year/sex group, with `count` holding the group size
allbirth <- BirthTot %>% group_by(Year, SexOfInfant) %>% mutate(count=row_number()) %>% filter(count==max(count))
# NOTE(review): BirthTot has no `count` column (allbirth does), theme_hc()
# needs ggthemes (not loaded), and `+skip_empty_rows(=TRUE)` is not a ggplot
# layer and does not parse — this plot cannot run as written.
dataplotSOF <- ggplot(data = BirthTot, aes(x=SexOfInfant,y=count, fill=SexOfInfant)) + geom_bar(stat="identity")+
theme_hc()+
ylab(expression("Antall fødsler")) +
xlab("Sex of Infant")+
facet_wrap(~Year) +skip_empty_rows(=TRUE)
# Average birth weight
# NOTE(review): allbirth holds one row per group, so this "mean" is just
# that single row's BirthWeight — presumably BirthTot was intended.
gjbirth <- allbirth %>%
group_by(Year,SexOfInfant)%>%
summarise(avgWeight=mean(BirthWeight))
gjbirth
# Births by weekday
# Re-arrange the rows
ukebasis <- allbirth %>%
arrange(Year,SexOfInfant,BirthDayOfWeek)
# Data wrangling: one row per year/sex/weekday with its group size
ukebasis<-ukebasis%>%
group_by(Year,SexOfInfant,BirthDayOfWeek) %>%
mutate(count=row_number()) %>%
filter(count==max(count))
# Plot births per weekday, one line per sex
ukebasis %>%
ggplot(aes(x=BirthDayOfWeek, y=count,group=SexOfInfant)) +
geom_line(aes(color=SexOfInfant))+
ylab(expression("Fødsler")) +
xlab("Weekday 1=Sunday,7=Monday")
|
# Set up graphics parameters for the simulation plots.
# Relies on globals defined upstream: nsp (number of species) and
# nyrs (number of simulated years) — TODO confirm against the caller.
colerz <- topo.colors(nsp)  # one colour per species
rcol <- "darkslateblue"     # colour used for the resource series (presumably)
linez <- rep(1:6, 100)      # line types; enough for 600 species for now
lspbyrs <- 1                # presumably line type for species-by-year curves
lresbyrs <- 2               # presumably line type for resource-by-year curves
lwd <- 2                    # was `lwd=2`; use `<-` for assignment
# Years to plot for within-year dynamics: roughly 8 evenly spaced years.
# max(1, ...) guards against nyrs < 8, where floor(nyrs/8) == 0 would make
# seq() fail with an invalid `by` argument.
plotyrs <- seq(1, nyrs, by = max(1, floor(nyrs / 8)))
|
/R/sourcefiles/zarchive/getGraphParms.R
|
no_license
|
lizzieinvancouver/temporalvar
|
R
| false
| false
| 251
|
r
|
# set up graphics parameters
# NOTE(review): depends on globals nsp and nyrs defined elsewhere.
colerz <- topo.colors(nsp)
rcol <- "darkslateblue"
linez <- rep(1:6, 100) # enough for 600 species for now
# Presumably line types for species-by-year and resource-by-year curves.
lspbyrs <- 1
lresbyrs <- 2
# NOTE(review): `=` used for top-level assignment; prefer `<-`.
lwd=2
#years to plot for within-year dynamics
# NOTE(review): floor(nyrs/8) is 0 when nyrs < 8, which makes seq() error.
plotyrs<- seq(1, nyrs, by=floor(nyrs/8))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extractModelStructure.R
\name{as.MxRAMModel}
\alias{as.MxRAMModel}
\title{as.MxRAMModel: Create an MxRAMModel from a lavaan or OpenMx model object}
\usage{
as.MxRAMModel(model, exogenous = TRUE, standardized = FALSE, ...)
}
\arguments{
\item{model}{a path modeling object (see details for supported types)}
\item{exogenous}{Include exogenous variables? (default TRUE)}
\item{standardized}{Transform all variables into standardized forms? (default FALSE)}
\item{...}{further arguments (not yet documented)}
}
\value{
An MxRAMModel containing the same path structure as the original model
}
\description{
Transforms a model into an MxRAMModel
}
\details{
This function is experimental, and may have bugs.
At present, it does not handle constraints, groups, or pretty much anything else
that's at all complicated.
Currently supported: OpenMx RAM models (easy!), lavaan and blavaan models
}
|
/man/as.MxRAMModel.Rd
|
no_license
|
trbrick/MICr
|
R
| false
| true
| 919
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extractModelStructure.R
\name{as.MxRAMModel}
\alias{as.MxRAMModel}
\title{as.MxRAMModel: Create an MxRAMModel from a lavaan or OpenMx model object}
\usage{
as.MxRAMModel(model, exogenous = TRUE, standardized = FALSE, ...)
}
\arguments{
\item{model}{a path modeling object (see details for supported types)}
\item{exogenous}{Include exogenous variables? (default TRUE)}
\item{standardized}{Transform all variables into standardized forms? (default FALSE)}
\item{...}{further arguments (not yet documented)}
}
\value{
An MxRAMModel containing the same path structure as the original model
}
\description{
Transforms a model into an MxRAMModel
}
\details{
This function is experimental, and may have bugs.
At present, it does not handle constraints, groups, or pretty much anything else
that's at all complicated.
Currently supported: OpenMx RAM models (easy!), lavaan and blavaan models
}
|
# Load in the R package
library(rpart)
# NOTE(review): `train` and `test` data frames must already exist in the
# workspace — this script does not read them itself.
# Grow a classification tree for Titanic survival. minsplit = 50 requires at
# least 50 observations in a node before a split is attempted; cp = 0 turns
# off complexity-based pruning, so the tree is limited only by minsplit.
my_tree_three <- rpart(Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked,
data = train, method = "class", control = rpart.control(minsplit = 50, cp = 0))
# Visualize the decision tree using plot() and text()
plot(my_tree_three)
text(my_tree_three)
# Load in the packages to build a fancy plot
library(rattle)
library(rpart.plot)
library(RColorBrewer)
# Visualize my_tree_three with rattle's prettier rendering
fancyRpartPlot(my_tree_three)
# Make class predictions on the test set
my_prediction <- predict(my_tree_three, test, type = "class")
# Assemble the submission data frame (presumably for a Kaggle competition)
my_solution <- data.frame(PassengerId = test$PassengerId, Survived = my_prediction)
# Sanity check: number of rows in the submission
nrow(my_solution)
# Write the submission file without row names
write.csv(my_solution, file = "5th_prediction.csv", row.names = FALSE)
|
/HW1/src/other/6.R
|
no_license
|
Sohrabbeig/DataMiningCourse
|
R
| false
| false
| 849
|
r
|
# Load in the R package
library(rpart)
# Fit a classification tree on survival; `train` must be preloaded elsewhere.
# minsplit = 50 limits node splitting; cp = 0 disables complexity pruning.
my_tree_three <- rpart(Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked,
data = train, method = "class", control = rpart.control(minsplit = 50, cp = 0))
# Visualize the decision tree using plot() and text()
plot(my_tree_three)
text(my_tree_three)
# Load in the packages to build a fancy plot
library(rattle)
library(rpart.plot)
library(RColorBrewer)
# Visualize my_tree_three (fancier rattle rendering)
fancyRpartPlot(my_tree_three)
# Make class predictions on the (preloaded) test set
my_prediction <- predict(my_tree_three, test, type = "class")
# Build the submission data frame
my_solution <- data.frame(PassengerId = test$PassengerId, Survived = my_prediction)
# Sanity check: row count of the submission
nrow(my_solution)
# Write the submission file without row names
write.csv(my_solution, file = "5th_prediction.csv", row.names = FALSE)
|
get_api2_collocations <- function(term = "consciousness", years = c(1700, 1799)) {
  # Build the collocations query URL for the ECCO search2 API.
  #
  # term  - search term inserted into the query string verbatim (not
  #         URL-encoded here; assumes a plain single-word term — TODO confirm)
  # years - length-2 numeric vector c(from, to); the publication-date filter
  #         becomes [from0000 TO to0000], i.e. the API's yyyymmdd convention
  #
  # Returns the assembled URL as a character string.
  api_call_start <- "https://vm0175.kaj.pouta.csc.fi/ecco-search2/collocations"
  api_call_term <- paste0("?term=", term)
  api_call_options <- "&sumScaling=DF&minSumFreq=100&limit=100&pretty&localScaling=FLAT"
  api_call_years <- paste0("&limitQuery=pubDate:[", years[1], "0000%20TO%20", years[2], "0000]")
  api_call <- paste0(api_call_start, api_call_term, api_call_options, api_call_years)
  # BUG FIX: the original ended on the assignment, so the URL was returned
  # invisibly; return the value visibly so interactive calls print it.
  api_call
}
|
/collocations-shinyapp/api2_collocatins_functions.R
|
no_license
|
COMHIS/estc-turin
|
R
| false
| false
| 479
|
r
|
# Builds the collocations query URL for the ECCO search2 API from a search
# term and a c(from, to) year range (dates padded to yyyymmdd with "0000").
# NOTE(review): the last expression is an assignment, so the URL is returned
# invisibly; a trailing `api_call` line would return it visibly.
get_api2_collocations <- function(term = "consciousness", years = c(1700, 1799)) {
# Fixed endpoint of the remote service.
api_call_start <- "https://vm0175.kaj.pouta.csc.fi/ecco-search2/collocations"
# Term is inserted verbatim — no URL encoding is applied here.
api_call_term <- paste0("?term=", term)
api_call_options <- "&sumScaling=DF&minSumFreq=100&limit=100&pretty&localScaling=FLAT"
# Publication-date window: [from0000 TO to0000], %20 = encoded space.
api_call_years <- paste0("&limitQuery=pubDate:[", years[1], "0000%20TO%20", years[2], "0000]")
api_call <- paste0(api_call_start, api_call_term, api_call_options, api_call_years)
}
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(ggplot2)
library(dplyr)
# select columns to be used in the analysis: mpg, cyl, wt, gear
cardata <- mtcars[,c(1:2,6,10)]
# Define server logic required to draw a plot
shinyServer(function(input, output) {
output$distPlot <- renderPlot({
# subset the car data according to the user's gear and cylinder choices
# (grepl treats the selected value as a pattern matched against each row)
cardata <- filter(cardata, grepl(input$gear, gear), grepl(input$cyl, cyl))
# build linear regression model of fuel economy (mpg) on weight (wt)
fit <- lm( mpg~wt, cardata)
# predict mpg at the weight chosen by the user
pred <- predict(fit, newdata = data.frame(wt = input$wt,
gear = input$gear,
cyl = input$cyl))
# Draw the plot using ggplot2: raw points, fitted line, and crosshairs
# at the user's weight (red) and the predicted mpg (green)
plot <- ggplot(data=cardata, aes(x=wt, y = mpg))+
geom_point(aes(color = gear), alpha = 0.3)+
geom_smooth(method = "lm")+
geom_vline(xintercept = input$wt, color = "red")+
geom_hline(yintercept = pred, color = "green")
plot
})
output$result <- renderText({
# renders the prediction text below the graph
# NOTE(review): this refits the same model as distPlot (but filters the
# full mtcars rather than the 4-column cardata) — a shared reactive()
# would avoid the duplication.
cardata <- filter(mtcars, grepl(input$gear, gear), grepl(input$cyl, cyl))
fit <- lm( mpg~wt, cardata)
pred <- predict(fit, newdata = data.frame(wt = input$wt,
gear = input$gear,
cyl = input$cyl))
res <- paste(round(pred, digits = 2), "mpg")
res
})
})
|
/server.R
|
no_license
|
Vmudsam/DevelopingDataProject-CourseProject
|
R
| false
| false
| 1,674
|
r
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(ggplot2)
library(dplyr)
# select columns to be used in the analysis (mpg, cyl, wt, gear)
cardata <- mtcars[,c(1:2,6,10)]
# Define server logic required to draw a plot
shinyServer(function(input, output) {
output$distPlot <- renderPlot({
# filter the car data on the user's gear/cylinder input (pattern match)
cardata <- filter(cardata, grepl(input$gear, gear), grepl(input$cyl, cyl))
# build linear regression model (mpg on weight)
fit <- lm( mpg~wt, cardata)
# predict mpg at the user-chosen weight
pred <- predict(fit, newdata = data.frame(wt = input$wt,
gear = input$gear,
cyl = input$cyl))
# Draw the plot using ggplot2 with crosshairs at input weight / prediction
plot <- ggplot(data=cardata, aes(x=wt, y = mpg))+
geom_point(aes(color = gear), alpha = 0.3)+
geom_smooth(method = "lm")+
geom_vline(xintercept = input$wt, color = "red")+
geom_hline(yintercept = pred, color = "green")
plot
})
output$result <- renderText({
# renders the text for the prediction below the graph
# NOTE(review): duplicates the model fit from distPlot, but filters the
# full mtcars instead of the 4-column cardata subset.
cardata <- filter(mtcars, grepl(input$gear, gear), grepl(input$cyl, cyl))
fit <- lm( mpg~wt, cardata)
pred <- predict(fit, newdata = data.frame(wt = input$wt,
gear = input$gear,
cyl = input$cyl))
res <- paste(round(pred, digits = 2), "mpg")
res
})
})
|
##########################
# Body condition analyses
##########################
# load required libraries
library(tidyverse);library(nlme);
library(MuMIn);library(stringi)
library(spdep);library(INLA)
library(marcoUtils);library(mgcv)
library(stringi);library(gtools)
library(XLConnect);library(ncf)
library(RColorBrewer)
# Source helper functions used below: model fitting (fitaddmod/fitintmod),
# scatterplot value extraction, and the spatial-autocorrelation wrapper.
marcofunctions <- c("fitaddmod.R", "fitintmod.R", "scattervalues.R", "spautowrap.R")
# seq_along() instead of 1:length(): safe even if the vector were empty.
for (f in seq_along(marcofunctions)) {
  source(marcofunctions[f])
}
# load data file (provides the `vulturedata` data frame)
load("vulturedata.rda")
# rescale body condition index: z-score ScaledMassIndex within each species
# (`english` is presumably the species' English name — confirm upstream)
vulturedata %>%
group_by(english) %>%
mutate(ScaledMassIndex=scale(ScaledMassIndex)) -> dat2
# rescale predictors: long format, average duplicate measurements per
# site/date, then z-score each predictor within species and go wide again.
# NOTE(review): gather/spread are superseded by pivot_longer/pivot_wider.
dat2 %>%
group_by(english) %>%
tidyr::gather(variable,value,-c(site.no,english,SiteID,X,Y,Date,ScaledMassIndex)) %>%
group_by(site.no,variable,SiteID,english,X,Y,Date,ScaledMassIndex) %>%
summarise(value=mean(value)) %>%
group_by(english,variable) %>%
mutate(value=scale(value)) %>%
spread(variable,value) ->dat3
# prepare wbv (White-backed vulture) data
subset(dat3,english=="White-backed vulture") ->wbv1
# promote to a SpatialPointsDataFrame and project from lat/lon to the `ml`
# CRS (latlon/ml are presumably defined in marcoUtils — confirm)
coordinates(wbv1)<-~X+Y
proj4string(wbv1)<-latlon
wbv1<-spTransform(wbv1,CRS(ml))
# projected easting/northing, renamed e/n
as.data.frame(coordinates(wbv1)) %>%
rename(e=X,n=Y) ->coorsml
# add quadratic and cubic terms for the NDVI windows, PA cover, and Year
data.frame(coorsml,as.data.frame(wbv1)) %>%
mutate(NDVI_36m2=NDVI_36m^2,NDVI_24m2=NDVI_24m^2,NDVI_12m2=NDVI_12m^2,
NDVI_3m2=NDVI_3m^2,NDVI_1m2=NDVI_1m^2,NDVI_36m3=NDVI_36m^3,
NDVI_24m3=NDVI_24m^3,NDVI_12m3=NDVI_12m^3,NDVI_3m3=NDVI_3m^3,
NDVI_1m3=NDVI_1m^3,PA_cover2=PA_cover^2,PA_cover3=PA_cover^3,
Year2=Year^2,Year3=Year^3) ->wbv1
# convert coordinates from metres to kilometres (presumably)
wbv1$e<-wbv1$e/1000
wbv1$n<-wbv1$n/1000
# prepare lfv (Lappet-faced vulture) data: same pipeline as above
subset(dat3,english=="Lappet-faced vulture") ->lfv1
coordinates(lfv1)<-~X+Y
proj4string(lfv1)<-latlon
lfv1<-spTransform(lfv1,CRS(ml))
as.data.frame(coordinates(lfv1)) %>%
rename(e=X,n=Y) ->coorsml
data.frame(coorsml,as.data.frame(lfv1)) %>%
mutate(NDVI_36m2=NDVI_36m^2,NDVI_24m2=NDVI_24m^2,NDVI_12m2=NDVI_12m^2,
NDVI_3m2=NDVI_3m^2,NDVI_1m2=NDVI_1m^2,NDVI_36m3=NDVI_36m^3,
NDVI_24m3=NDVI_24m^3,NDVI_12m3=NDVI_12m^3,NDVI_3m3=NDVI_3m^3,
NDVI_1m3=NDVI_1m^3,PA_cover2=PA_cover^2,PA_cover3=PA_cover^3,
Year2=Year^2,Year3=Year^3) ->lfv1
lfv1$e<-lfv1$e/1000
lfv1$n<-lfv1$n/1000
# Lapped-faced vulture
# construct the INLA triangulation mesh over the (e, n) point locations
mesh1<-inla.mesh.2d(as.matrix(lfv1[,c("e","n")]),max.edge =20,cutoff=40)
# projector matrix mapping mesh nodes to the observation locations
A5<-inla.spde.make.A(mesh1,loc=as.matrix(lfv1[,c("e","n")]))
# Matern SPDE model on the mesh (alpha = 2)
spde<-inla.spde2.matern(mesh1,alpha=2)
# index set for the latent spatial field "w"
w.index<-inla.spde.make.index(name="w",n.spde = spde$n.spde,
n.group=1,n.repl=1)
# data stack: response = scaled mass index; effects = intercept, all
# covariate columns, and the spatial field
lfvstack<-inla.stack(tag="fit",data=list(y=lfv1$ScaledMassIndex),
A=list(1,1,A5),effects=list(Intercept=rep(1,dim(lfv1)[1]),
X=lfv1[,c(names(lfv1))],w=w.index))
# Fit additive models for Lappet-faced vulture
# linear terms + siteID + gaussian random field (fitaddmod sourced above)
lfvres2<-fitaddmod(indat=lfvstack,ranef='+f(w,model=spde)+f(SiteID,model="iid")',
quad=FALSE)
save(lfvres2,file="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/Rfiles/lfvres2")
# Interaction models for Lappet-faced vulture (PA_cover *NDVI) (=input raw dataframes!)
# NOTE(review): `inplot=lfv` references `lfv`, which is not defined before
# this point (only lfv1 is) — confirm where lfv comes from.
lfvres3<-fitintmod(indat=lfv1,ranef='+f(w,model=spde)+f(SiteID,model="iid")',
inplot=lfv)
save(lfvres3,file="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/Rfiles/lfvres3")
# White-backed vulture
# construct mesh
mesh1<-inla.mesh.2d(as.matrix(wbv1[,c("e","n")]),max.edge =20,cutoff=40)
# define weight factors
A5<-inla.spde.make.A(mesh1,loc=as.matrix(wbv1[,c("e","n")]))
# define the spde
spde<-inla.spde2.matern(mesh1,alpha=2)
# define spatial field
w.index<-inla.spde.make.index(name="w",n.spde = spde$n.spde,
n.group=1,n.repl=1)
# define the stack
wbvstack<-inla.stack(tag="fit",data=list(y=wbv1$ScaledMassIndex),
A=list(1,1,A5),effects=list(Intercept=rep(1,dim(wbv1)[1]),
X=wbv1[,c(names(wbv1))],w=w.index))
# linear terms + siteID + gaussian random field
wbvres2<-fitaddmod(indat=wbvstack,ranef='+f(w,model=spde)+f(SiteID,model="iid")',
quad=FALSE)
save(wbvres2,file="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/Rfiles/wbvres2")
# ---Coefficient tables
wbvres2[[1]] %>%
group_by(modelset) %>%
filter(variable!="Intercept") %>%
dplyr::select(c(mean,variable,modelset,X0.025quant,X0.975quant)) %>%
rename(lowerCI=X0.025quant,higherCI=X0.975quant,model.name=modelset) %>%
mutate(Species="White-backed vulture") %>%
dplyr::select(Species,model.name,variable,mean,lowerCI,higherCI) ->lin.coef.wbv
writeWorksheetToFile(data=lin.coef.wbv,file="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/coefs/coefs1.xlsx",
sheet = "Sheet1", header = TRUE,startCol=1,
startRow=1,styleAction =XLC$"STYLE_ACTION.NONE")
# lappet-faced v.: additive models
lfvres2[[1]] %>%
group_by(modelset) %>%
filter(variable!="Intercept") %>%
dplyr::select(c(mean,variable,modelset,X0.025quant,X0.975quant)) %>%
rename(lowerCI=X0.025quant,higherCI=X0.975quant,model.name=modelset) %>%
mutate(Species="Lappet-faced vulture") %>%
dplyr::select(Species,model.name,variable,mean,lowerCI,higherCI) ->lin.coef.lfv
writeWorksheetToFile(data=lin.coef.lfv,file="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/coefs/coefs1.xlsx",
sheet = "Sheet1", header = TRUE,startCol=1,
startRow=16,styleAction =XLC$"STYLE_ACTION.NONE")
# lappet-faced v.: interaction models
lfvres3[[1]] %>%
group_by(modelset) %>%
filter(variable!="Intercept") %>%
dplyr::select(c(mean,variable,modelset,X0.025quant,X0.975quant)) %>%
rename(lowerCI=X0.025quant,higherCI=X0.975quant,model.name=modelset) %>%
mutate(Species="Lappet-faced vulture") %>%
dplyr::select(Species,model.name,variable,mean,lowerCI,higherCI) ->int.coef.lfv
writeWorksheetToFile(data=int.coef.lfv,file="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/coefs/coefs1.xlsx",
sheet = "Sheet1", header = TRUE,startCol=8,
startRow=1,styleAction =XLC$"STYLE_ACTION.NONE")
# --- Spatial autocorrelation analysis
# additive models
spautowrap(indat=lfvres2[[3]],group="model",coor=lfv1[,c("e","n")]) %>%
mutate(species="Lappet-faced vulture") %>%
bind_rows(spautowrap(indat=wbvres2[[3]],group="model",coor=wbv1[,c("e","n")]) %>%
mutate(species="White-backed vulture")) %>%
ggplot(data=.,aes(x=distance,y=correlation))+facet_grid(model~species)+
geom_line(size=0.9)+theme_bw()+ylim(-1,1)+xlab("Distance (km)")+
ylab("Correlation")+theme(text=element_text(size=12,colour="black"),axis.text=element_text(colour="black"))+
theme(strip.text.x=element_text(size=12,face="bold"),
strip.text.y=element_text(size=6.5,face="bold")) ->additivesac
ggsave(additivesac,file="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/figuresNOGRF/additivesac.png",
width=6,height=8,dpi=400)
# Interaction models
spautowrap(indat=lfvres3[[3]],group="model",coor=lfv1[,c("e","n")]) %>%
inner_join(modlookup2) %>%
mutate(species="Lappet-faced vulture") %>%
ggplot(data=.,aes(x=distance,y=correlation))+facet_grid(model.lab~species,scale="fixed")+
geom_line(size=0.9)+theme_bw()+ylim(-1,1)+xlab("Distance (km)")+
ylab("Correlation")+theme(text=element_text(size=12,colour="black"),axis.text=element_text(colour="black"))+
theme(strip.text.x=element_text(size=12,face="bold"),
strip.text.y=element_text(size=8,face="bold")) ->intsac
# combine plots together and save results
combined<-plot_grid(additivesac,intsac,ncol = 2)
ggsave(combined,file="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/figures/SACall.png",
width=9,height=8,dpi=400)
# --- Posterior distributions
# Lappet-faced vulture
lfvres2[[2]] %>%
filter(var.name!="Intercept") %>%
inner_join(modlookup) %>%
inner_join(varlookup) %>%
rename(variable.lab=label) %>%
bind_rows(lfvres3[[2]] %>%
inner_join(varlookup1) %>%
rename(variable.lab=model.lab) %>%
inner_join(modlookupint)) %>%
mutate(model.lab=factor(model.lab,levels=c(unique(model.lab)))) %>%
ggplot(data=.,aes(x=x,y=y))+facet_wrap(model.lab~variable.lab,scale="free",ncol=4)+
geom_line(size=0.9)+theme_bw()+geom_vline(xintercept =0, linetype="dotted")+
ylab("Density")+xlab("Predictor")+theme(strip.text=element_text(face="bold",size=9,
colour="black"),axis.text =element_text(face="bold",size=10,colour="black"),
axis.title=element_text(face="bold",size=12,colour="black")) ->post.lfv
# save plot
ggsave(post.lfv,file="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/figures/posteriorsLFV.png",
width=8,height=8.5,dpi=400)
# White-backed vulture
wbvres2[[2]] %>%
filter(var.name!="Intercept") %>%
inner_join(modlookup) %>%
inner_join(varlookup) %>%
ggplot(data=.,aes(x=x,y=y))+facet_wrap(model.lab~label,scale="free")+
geom_line(size=0.9)+theme_bw()+geom_vline(xintercept =0, linetype="dotted")+
ylab("Density")+xlab("Predictor")+theme(strip.text=element_text(face="bold",size=10,
colour="black"),axis.text =element_text(face="bold",size=12,colour="black"),
axis.title=element_text(face="bold",size=12,colour="black")) ->post.wbv.add
# save plot
ggsave(post.wbv.add,file="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/figures/posteriorsWBVadditivemodels.png",
width=8,height=8.5,dpi=400)
# ---scatterplots
# create dataframe with values
lfv.sct<-scattervalues(indat=lfv,ranef='+f(w,model=spde)+f(SiteID,model="iid")')
wbv.sct<-scattervalues(indat=wbv,ranef='+f(w,model=spde)+f(SiteID,model="iid")')
# PA cover
# fitted values
lfv.sct[[2]] %>%
filter(pred.name=="PA_cover") %>%
mutate(species="Lappet-faced vulture") %>%
bind_rows(wbv.sct[[2]] %>%
filter(pred.name=="PA_cover") %>%
mutate(species="White-backed vulture")) -> pa.fitted
# coefficients
lfv.sct[[1]] %>%
filter(pred.name=="PA_cover") %>%
mutate(species="Lappet-faced vulture") %>%
bind_rows(wbv.sct[[1]] %>%
filter(pred.name=="PA_cover") %>%
mutate(species="White-backed vulture")) -> pa.coefs
# raw data
lfv %>%
mutate(species="Lappet-faced vulture") %>%
bind_rows(wbv %>%
mutate(species="White-backed vulture")) %>%
dplyr::select(species,ScaledMassIndex,SiteID,PA_cover) ->pa.raw
# scatterplot
ggplot(data=pa.fitted,aes(x=predictor,y=pred))+
geom_point(data=pa.raw,aes(y=ScaledMassIndex,x=PA_cover),
size=4,col="black",alpha=0.3)+theme_bw()+
geom_abline(data=pa.coefs,aes(intercept=Int,slope=Slope),size=1)+
geom_ribbon(aes(ymin=min,ymax=max),linetype=2,alpha= 0.3)+
facet_grid(~species)+
theme(strip.text.x = element_text(size=15, face="bold"),
strip.text.y = element_text(size=12, face="bold"))+
scale_x_continuous(breaks = seq(from=0,to=100, by = 25))+
theme(text = element_text(size=15),axis.text.x = element_text(colour="black"),
axis.text.y = element_text(colour="black")) +xlab("PA cover")+
ylab("Scaled Body Mass Index")->pa.scat
# save plot
ggsave(filename="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/figures/MassPAJune18.png",
plot =pa.scat,width=10,height=8,dpi=400)
# NDVI
# fitted values
lfv.sct[[2]] %>%
filter(pred.name!="PA_cover") %>%
mutate(species="Lappet-faced vulture") %>%
bind_rows(wbv.sct[[2]] %>%
filter(pred.name!="PA_cover") %>%
mutate(species="White-backed vulture")) %>%
left_join(data.frame(pred.name=c("NDVI_1m","NDVI_3m","NDVI_12m","NDVI_24m","NDVI_36m"),
ndvilabel=factor(c("1 month","3 months","1 year"," 2 years"," 3 years"),
levels=c("1 month","3 months","1 year"," 2 years"," 3 years"))))-> ndvi.fitted
# coefficients
lfv.sct[[1]] %>%
filter(pred.name!="PA_cover") %>%
mutate(species="Lappet-faced vulture") %>%
bind_rows(wbv.sct[[1]] %>%
filter(pred.name!="PA_cover") %>%
mutate(species="White-backed vulture")) %>%
left_join(data.frame(pred.name=c("NDVI_1m","NDVI_3m","NDVI_12m","NDVI_24m","NDVI_36m"),
ndvilabel=factor(c("1 month","3 months","1 year"," 2 years"," 3 years"),
levels=c("1 month","3 months","1 year"," 2 years"," 3 years"))))-> ndvi.coefs
# raw data
lfv %>%
mutate(Species="Lappet-faced vulture") %>%
bind_rows(wbv %>%
mutate(Species="White-backed vulture")) %>%
dplyr::select(english,ScaledMassIndex,SiteID,NDVI_1m,NDVI_3m,NDVI_12m,NDVI_24m,NDVI_36m,PA_cover) %>%
tidyr::gather(ndviname,ndvivalue,-c(english,ScaledMassIndex,SiteID)) %>%
inner_join(data.frame(ndviname=c("NDVI_1m","NDVI_3m","NDVI_12m","NDVI_24m","NDVI_36m"),
ndvilabel=factor(c("1 month","3 months","1 year"," 2 years"," 3 years"),
levels=c("1 month","3 months","1 year"," 2 years"," 3 years")))) %>%
dplyr::rename(species=english)->combined.m2
# scatterplot
ggplot(data=ndvi.fitted,aes(x=predictor,y=pred))+
geom_point(data=combined.m2,aes(y=ScaledMassIndex,x=ndvivalue),
size=2,col="black",alpha=0.1)+theme_bw()+
facet_grid(ndvilabel~species,scales="free",space="free")+
geom_abline(data=ndvi.coefs,aes(intercept=Int,slope=Slope),size=0.8)+
geom_ribbon(aes(ymin=min,ymax=max),linetype=2,alpha= 0.3)+
theme(text = element_text(size=15),axis.text = element_text(colour="black"))+
theme(strip.text= element_text(size=12, face="bold"))+ylab("Scaled Mass Index")+xlab("NDVI")->ndvi.scat
# save plot
ggsave(filename="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/figures/MassNDVIJune18.png",
plot =ndvi.scat,width=7,height=8,dpi=400)
# ---Fancy interaction plot (NDVI*PA_cover)
cols <- colorRampPalette(rev(brewer.pal(11, "RdYlBu")))
lfvres3[[4]] %>%
inner_join(modlookup1) %>%
ggplot(data=.,aes(x=NDVI,y=PA_cover,z=pred))+
geom_raster(aes(fill=pred))+facet_wrap(~model.lab,scale="free",ncol=2)+
scale_fill_gradientn(colours = cols(30))+theme_bw()+
labs(fill = "Scaled Body Mass Index")+
ylab("Protected Area Cover")+xlab("NDVI")->intplot
ggsave(filename="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/figures/Interaction1.png",
plot =intplot,width=7.5,height=6.5,dpi=400)
|
/Analyses.R
|
no_license
|
drmarcogir/vultures
|
R
| false
| false
| 14,593
|
r
|
##########################
# Body condition analyses
##########################
# load required libraries
library(tidyverse);library(nlme);
library(MuMIn);library(stringi)
library(spdep);library(INLA)
library(marcoUtils);library(mgcv)
library(stringi);library(gtools)
library(XLConnect);library(ncf)
library(RColorBrewer)
# source required functions and info for labeling graphs
marcofunctions<-c("fitaddmod.R","fitintmod.R","scattervalues.R","spautowrap.R")
for (f in 1:length(marcofunctions)) {source(marcofunctions[f])}
# load data file
load("vulturedata.rda")
# rescale body condition index
vulturedata %>%
group_by(english) %>%
mutate(ScaledMassIndex=scale(ScaledMassIndex)) -> dat2
# rescale predictors
dat2 %>%
group_by(english) %>%
tidyr::gather(variable,value,-c(site.no,english,SiteID,X,Y,Date,ScaledMassIndex)) %>%
group_by(site.no,variable,SiteID,english,X,Y,Date,ScaledMassIndex) %>%
summarise(value=mean(value)) %>%
group_by(english,variable) %>%
mutate(value=scale(value)) %>%
spread(variable,value) ->dat3
# prepare wbv data
subset(dat3,english=="White-backed vulture") ->wbv1
coordinates(wbv1)<-~X+Y
proj4string(wbv1)<-latlon
wbv1<-spTransform(wbv1,CRS(ml))
as.data.frame(coordinates(wbv1)) %>%
rename(e=X,n=Y) ->coorsml
data.frame(coorsml,as.data.frame(wbv1)) %>%
mutate(NDVI_36m2=NDVI_36m^2,NDVI_24m2=NDVI_24m^2,NDVI_12m2=NDVI_12m^2,
NDVI_3m2=NDVI_3m^2,NDVI_1m2=NDVI_1m^2,NDVI_36m3=NDVI_36m^3,
NDVI_24m3=NDVI_24m^3,NDVI_12m3=NDVI_12m^3,NDVI_3m3=NDVI_3m^3,
NDVI_1m3=NDVI_1m^3,PA_cover2=PA_cover^2,PA_cover3=PA_cover^3,
Year2=Year^2,Year3=Year^3) ->wbv1
wbv1$e<-wbv1$e/1000
wbv1$n<-wbv1$n/1000
# prepare lfv data
subset(dat3,english=="Lappet-faced vulture") ->lfv1
coordinates(lfv1)<-~X+Y
proj4string(lfv1)<-latlon
lfv1<-spTransform(lfv1,CRS(ml))
as.data.frame(coordinates(lfv1)) %>%
rename(e=X,n=Y) ->coorsml
data.frame(coorsml,as.data.frame(lfv1)) %>%
mutate(NDVI_36m2=NDVI_36m^2,NDVI_24m2=NDVI_24m^2,NDVI_12m2=NDVI_12m^2,
NDVI_3m2=NDVI_3m^2,NDVI_1m2=NDVI_1m^2,NDVI_36m3=NDVI_36m^3,
NDVI_24m3=NDVI_24m^3,NDVI_12m3=NDVI_12m^3,NDVI_3m3=NDVI_3m^3,
NDVI_1m3=NDVI_1m^3,PA_cover2=PA_cover^2,PA_cover3=PA_cover^3,
Year2=Year^2,Year3=Year^3) ->lfv1
lfv1$e<-lfv1$e/1000
lfv1$n<-lfv1$n/1000
# Lapped-faced vulture
# construct mesh
mesh1<-inla.mesh.2d(as.matrix(lfv1[,c("e","n")]),max.edge =20,cutoff=40)
# define weight factors
A5<-inla.spde.make.A(mesh1,loc=as.matrix(lfv1[,c("e","n")]))
# define the spde
spde<-inla.spde2.matern(mesh1,alpha=2)
# define spatial field
w.index<-inla.spde.make.index(name="w",n.spde = spde$n.spde,
n.group=1,n.repl=1)
# define the stack
lfvstack<-inla.stack(tag="fit",data=list(y=lfv1$ScaledMassIndex),
A=list(1,1,A5),effects=list(Intercept=rep(1,dim(lfv1)[1]),
X=lfv1[,c(names(lfv1))],w=w.index))
# Fit additive models for Lappet-faced vulture
# linear terms + siteID + gaussian random field
lfvres2<-fitaddmod(indat=lfvstack,ranef='+f(w,model=spde)+f(SiteID,model="iid")',
quad=FALSE)
save(lfvres2,file="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/Rfiles/lfvres2")
# Interaction models for Lappet-faced vulture (PA_cover *NDVI) (=input raw dataframes!)
lfvres3<-fitintmod(indat=lfv1,ranef='+f(w,model=spde)+f(SiteID,model="iid")',
inplot=lfv)
save(lfvres3,file="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/Rfiles/lfvres3")
# White-backed vulture
# construct mesh
mesh1<-inla.mesh.2d(as.matrix(wbv1[,c("e","n")]),max.edge =20,cutoff=40)
# define weight factors
A5<-inla.spde.make.A(mesh1,loc=as.matrix(wbv1[,c("e","n")]))
# define the spde
spde<-inla.spde2.matern(mesh1,alpha=2)
# define spatial field
w.index<-inla.spde.make.index(name="w",n.spde = spde$n.spde,
n.group=1,n.repl=1)
# define the stack
wbvstack<-inla.stack(tag="fit",data=list(y=wbv1$ScaledMassIndex),
A=list(1,1,A5),effects=list(Intercept=rep(1,dim(wbv1)[1]),
X=wbv1[,c(names(wbv1))],w=w.index))
# linear terms + siteID + gaussian random field
wbvres2<-fitaddmod(indat=wbvstack,ranef='+f(w,model=spde)+f(SiteID,model="iid")',
quad=FALSE)
save(wbvres2,file="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/Rfiles/wbvres2")
# ---Coefficient tables
wbvres2[[1]] %>%
group_by(modelset) %>%
filter(variable!="Intercept") %>%
dplyr::select(c(mean,variable,modelset,X0.025quant,X0.975quant)) %>%
rename(lowerCI=X0.025quant,higherCI=X0.975quant,model.name=modelset) %>%
mutate(Species="White-backed vulture") %>%
dplyr::select(Species,model.name,variable,mean,lowerCI,higherCI) ->lin.coef.wbv
writeWorksheetToFile(data=lin.coef.wbv,file="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/coefs/coefs1.xlsx",
sheet = "Sheet1", header = TRUE,startCol=1,
startRow=1,styleAction =XLC$"STYLE_ACTION.NONE")
# lappet-faced v.: additive models
lfvres2[[1]] %>%
group_by(modelset) %>%
filter(variable!="Intercept") %>%
dplyr::select(c(mean,variable,modelset,X0.025quant,X0.975quant)) %>%
rename(lowerCI=X0.025quant,higherCI=X0.975quant,model.name=modelset) %>%
mutate(Species="Lappet-faced vulture") %>%
dplyr::select(Species,model.name,variable,mean,lowerCI,higherCI) ->lin.coef.lfv
writeWorksheetToFile(data=lin.coef.lfv,file="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/coefs/coefs1.xlsx",
sheet = "Sheet1", header = TRUE,startCol=1,
startRow=16,styleAction =XLC$"STYLE_ACTION.NONE")
# lappet-faced v.: interaction models
lfvres3[[1]] %>%
group_by(modelset) %>%
filter(variable!="Intercept") %>%
dplyr::select(c(mean,variable,modelset,X0.025quant,X0.975quant)) %>%
rename(lowerCI=X0.025quant,higherCI=X0.975quant,model.name=modelset) %>%
mutate(Species="Lappet-faced vulture") %>%
dplyr::select(Species,model.name,variable,mean,lowerCI,higherCI) ->int.coef.lfv
writeWorksheetToFile(data=int.coef.lfv,file="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/coefs/coefs1.xlsx",
sheet = "Sheet1", header = TRUE,startCol=8,
startRow=1,styleAction =XLC$"STYLE_ACTION.NONE")
# --- Spatial autocorrelation analysis
# additive models: compute spline correlograms of model residuals for both
# species, stack them, and facet the correlation-vs-distance curves.
spautowrap(indat=lfvres2[[3]],group="model",coor=lfv1[,c("e","n")]) %>%
mutate(species="Lappet-faced vulture") %>%
bind_rows(spautowrap(indat=wbvres2[[3]],group="model",coor=wbv1[,c("e","n")]) %>%
mutate(species="White-backed vulture")) %>%
ggplot(data=.,aes(x=distance,y=correlation))+facet_grid(model~species)+
geom_line(size=0.9)+theme_bw()+ylim(-1,1)+xlab("Distance (km)")+
ylab("Correlation")+theme(text=element_text(size=12,colour="black"),axis.text=element_text(colour="black"))+
theme(strip.text.x=element_text(size=12,face="bold"),
strip.text.y=element_text(size=6.5,face="bold")) ->additivesac
ggsave(additivesac,file="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/figuresNOGRF/additivesac.png",
width=6,height=8,dpi=400)
# Interaction models (lappet-faced vulture only); model labels come from
# the modlookup2 lookup table.
spautowrap(indat=lfvres3[[3]],group="model",coor=lfv1[,c("e","n")]) %>%
inner_join(modlookup2) %>%
mutate(species="Lappet-faced vulture") %>%
# FIX: the facet_grid argument is `scales`, not `scale`; the original
# relied on R's partial argument matching. Spelled out for robustness.
ggplot(data=.,aes(x=distance,y=correlation))+facet_grid(model.lab~species,scales="fixed")+
geom_line(size=0.9)+theme_bw()+ylim(-1,1)+xlab("Distance (km)")+
ylab("Correlation")+theme(text=element_text(size=12,colour="black"),axis.text=element_text(colour="black"))+
theme(strip.text.x=element_text(size=12,face="bold"),
strip.text.y=element_text(size=8,face="bold")) ->intsac
# combine plots together and save results
combined<-plot_grid(additivesac,intsac,ncol = 2)
ggsave(combined,file="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/figures/SACall.png",
width=9,height=8,dpi=400)
# --- Posterior distributions
# Lappet-faced vulture: stack the marginal posterior densities of the
# additive (lfvres2) and interaction (lfvres3) model coefficients and plot
# one density panel per model x variable combination.
lfvres2[[2]] %>%
filter(var.name!="Intercept") %>%
inner_join(modlookup) %>%
inner_join(varlookup) %>%
rename(variable.lab=label) %>%
bind_rows(lfvres3[[2]] %>%
inner_join(varlookup1) %>%
rename(variable.lab=model.lab) %>%
inner_join(modlookupint)) %>%
# Fix the facet ordering of the model labels to their order of appearance.
mutate(model.lab=factor(model.lab,levels=c(unique(model.lab)))) %>%
# FIX: the facet_wrap argument is `scales`, not `scale`; the original
# relied on R's partial argument matching. Spelled out for robustness.
ggplot(data=.,aes(x=x,y=y))+facet_wrap(model.lab~variable.lab,scales="free",ncol=4)+
geom_line(size=0.9)+theme_bw()+geom_vline(xintercept =0, linetype="dotted")+
ylab("Density")+xlab("Predictor")+theme(strip.text=element_text(face="bold",size=9,
colour="black"),axis.text =element_text(face="bold",size=10,colour="black"),
axis.title=element_text(face="bold",size=12,colour="black")) ->post.lfv
# save plot
ggsave(post.lfv,file="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/figures/posteriorsLFV.png",
width=8,height=8.5,dpi=400)
# White-backed vulture: additive models only.
wbvres2[[2]] %>%
filter(var.name!="Intercept") %>%
inner_join(modlookup) %>%
inner_join(varlookup) %>%
# FIX: `scales=` spelled out (was partially matched `scale=`).
ggplot(data=.,aes(x=x,y=y))+facet_wrap(model.lab~label,scales="free")+
geom_line(size=0.9)+theme_bw()+geom_vline(xintercept =0, linetype="dotted")+
ylab("Density")+xlab("Predictor")+theme(strip.text=element_text(face="bold",size=10,
colour="black"),axis.text =element_text(face="bold",size=12,colour="black"),
axis.title=element_text(face="bold",size=12,colour="black")) ->post.wbv.add
# save plot
ggsave(post.wbv.add,file="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/figures/posteriorsWBVadditivemodels.png",
width=8,height=8.5,dpi=400)
# ---scatterplots
# create dataframe with fitted values and coefficients: scattervalues()
# refits the models with the given random-effect specification (SPDE
# spatial field + site-level iid effect) and returns a list where
# [[1]] holds intercept/slope coefficients and [[2]] the fitted curves.
lfv.sct<-scattervalues(indat=lfv,ranef='+f(w,model=spde)+f(SiteID,model="iid")')
wbv.sct<-scattervalues(indat=wbv,ranef='+f(w,model=spde)+f(SiteID,model="iid")')
# PA cover
# fitted values for the PA_cover predictor, both species stacked
lfv.sct[[2]] %>%
filter(pred.name=="PA_cover") %>%
mutate(species="Lappet-faced vulture") %>%
bind_rows(wbv.sct[[2]] %>%
filter(pred.name=="PA_cover") %>%
mutate(species="White-backed vulture")) -> pa.fitted
# coefficients (intercept/slope for the regression line)
lfv.sct[[1]] %>%
filter(pred.name=="PA_cover") %>%
mutate(species="Lappet-faced vulture") %>%
bind_rows(wbv.sct[[1]] %>%
filter(pred.name=="PA_cover") %>%
mutate(species="White-backed vulture")) -> pa.coefs
# raw data points to overlay on the fitted lines
lfv %>%
mutate(species="Lappet-faced vulture") %>%
bind_rows(wbv %>%
mutate(species="White-backed vulture")) %>%
dplyr::select(species,ScaledMassIndex,SiteID,PA_cover) ->pa.raw
# scatterplot: raw points + regression line + credible-interval ribbon
ggplot(data=pa.fitted,aes(x=predictor,y=pred))+
geom_point(data=pa.raw,aes(y=ScaledMassIndex,x=PA_cover),
size=4,col="black",alpha=0.3)+theme_bw()+
geom_abline(data=pa.coefs,aes(intercept=Int,slope=Slope),size=1)+
geom_ribbon(aes(ymin=min,ymax=max),linetype=2,alpha= 0.3)+
facet_grid(~species)+
theme(strip.text.x = element_text(size=15, face="bold"),
strip.text.y = element_text(size=12, face="bold"))+
scale_x_continuous(breaks = seq(from=0,to=100, by = 25))+
theme(text = element_text(size=15),axis.text.x = element_text(colour="black"),
axis.text.y = element_text(colour="black")) +xlab("PA cover")+
ylab("Scaled Body Mass Index")->pa.scat
# save plot
ggsave(filename="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/figures/MassPAJune18.png",
plot =pa.scat,width=10,height=8,dpi=400)
# NDVI
# fitted values for every NDVI window (everything that is not PA_cover),
# joined to human-readable facet labels for the five time windows.
lfv.sct[[2]] %>%
filter(pred.name!="PA_cover") %>%
mutate(species="Lappet-faced vulture") %>%
bind_rows(wbv.sct[[2]] %>%
filter(pred.name!="PA_cover") %>%
mutate(species="White-backed vulture")) %>%
left_join(data.frame(pred.name=c("NDVI_1m","NDVI_3m","NDVI_12m","NDVI_24m","NDVI_36m"),
ndvilabel=factor(c("1 month","3 months","1 year"," 2 years"," 3 years"),
levels=c("1 month","3 months","1 year"," 2 years"," 3 years"))))-> ndvi.fitted
# coefficients for each NDVI window
lfv.sct[[1]] %>%
filter(pred.name!="PA_cover") %>%
mutate(species="Lappet-faced vulture") %>%
bind_rows(wbv.sct[[1]] %>%
filter(pred.name!="PA_cover") %>%
mutate(species="White-backed vulture")) %>%
left_join(data.frame(pred.name=c("NDVI_1m","NDVI_3m","NDVI_12m","NDVI_24m","NDVI_36m"),
ndvilabel=factor(c("1 month","3 months","1 year"," 2 years"," 3 years"),
levels=c("1 month","3 months","1 year"," 2 years"," 3 years"))))-> ndvi.coefs
# raw data reshaped to long form (one row per bird x NDVI window)
# NOTE(review): the Species column created by the two mutate() calls below
# is dropped by the subsequent select(); the species label actually comes
# from the existing 'english' column (renamed at the end), so those
# mutate() calls appear redundant.
# NOTE(review): gather() excludes only english/ScaledMassIndex/SiteID, so
# PA_cover is gathered too; its rows are then silently dropped by the
# inner_join with the NDVI label table.
lfv %>%
mutate(Species="Lappet-faced vulture") %>%
bind_rows(wbv %>%
mutate(Species="White-backed vulture")) %>%
dplyr::select(english,ScaledMassIndex,SiteID,NDVI_1m,NDVI_3m,NDVI_12m,NDVI_24m,NDVI_36m,PA_cover) %>%
tidyr::gather(ndviname,ndvivalue,-c(english,ScaledMassIndex,SiteID)) %>%
inner_join(data.frame(ndviname=c("NDVI_1m","NDVI_3m","NDVI_12m","NDVI_24m","NDVI_36m"),
ndvilabel=factor(c("1 month","3 months","1 year"," 2 years"," 3 years"),
levels=c("1 month","3 months","1 year"," 2 years"," 3 years")))) %>%
dplyr::rename(species=english)->combined.m2
# scatterplot: NDVI window (rows) x species (columns)
ggplot(data=ndvi.fitted,aes(x=predictor,y=pred))+
geom_point(data=combined.m2,aes(y=ScaledMassIndex,x=ndvivalue),
size=2,col="black",alpha=0.1)+theme_bw()+
facet_grid(ndvilabel~species,scales="free",space="free")+
geom_abline(data=ndvi.coefs,aes(intercept=Int,slope=Slope),size=0.8)+
geom_ribbon(aes(ymin=min,ymax=max),linetype=2,alpha= 0.3)+
theme(text = element_text(size=15),axis.text = element_text(colour="black"))+
theme(strip.text= element_text(size=12, face="bold"))+ylab("Scaled Mass Index")+xlab("NDVI")->ndvi.scat
# save plot
ggsave(filename="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/figures/MassNDVIJune18.png",
plot =ndvi.scat,width=7,height=8,dpi=400)
# ---Fancy interaction plot (NDVI*PA_cover)
# Heatmap of predicted Scaled Body Mass Index over the NDVI x PA-cover
# grid, one panel per interaction model, on a reversed RdYlBu palette.
cols <- colorRampPalette(rev(brewer.pal(11, "RdYlBu")))
lfvres3[[4]] %>%
inner_join(modlookup1) %>%
ggplot(data=.,aes(x=NDVI,y=PA_cover,z=pred))+
# FIX: the facet_wrap argument is `scales`, not `scale`; the original
# relied on R's partial argument matching. Spelled out for robustness.
geom_raster(aes(fill=pred))+facet_wrap(~model.lab,scales="free",ncol=2)+
scale_fill_gradientn(colours = cols(30))+theme_bw()+
labs(fill = "Scaled Body Mass Index")+
ylab("Protected Area Cover")+xlab("NDVI")->intplot
ggsave(filename="/mnt/data1tb/Dropbox/Andrea/ndvi/resultstochoose/figures/Interaction1.png",
plot =intplot,width=7.5,height=6.5,dpi=400)
|
#############################################
#### Chapter 13. Web data collection     ####
####       (static web crawling)         ####
#############################################
install.packages('rvest')
installed.packages()
library(rvest)
# Target page: Naver Movie user-review list.
url <- 'https://movie.naver.com/movie/point/af/list.nhn'
html <- read_html(url, encoding = 'utf-8')
html
# Movie title: CSS selector .title .movie (.color_b)
nodes <- html_nodes(html, '.title .movie')
as.character(nodes)
title <- html_text(nodes)
title
# Link to each movie's info page (href attribute of the title anchor)
movieInfo <- html_attr(nodes, 'href')
movieInfo <- paste0(url, movieInfo)
movieInfo
# Rating: CSS selector .list_netizen_score em
nodes <- html_nodes(html, '.list_netizen_score em')
nodes
point <- html_text(nodes)
point
# Review text: CSS selector td.title
nodes <- html_nodes(html, 'td.title')
as.character(nodes)[1]
text <- html_text(nodes)
text
# Strip tab/newline characters left over from the HTML layout.
text <- gsub('\t','',text)
text <- gsub('\n','',text)
text
# Each cell reads "<title> ... <rating pattern> <review> <report link>";
# splitting on the rating pattern and keeping every second piece isolates
# the 10 review texts of the page, then the trailing report-link word is
# removed.
review <- unlist(strsplit(text, '중[0-9]{1,2}'))[seq(2,20,2)]
review <- gsub('신고','',review)
review
df <- data.frame(title, movieInfo, point, review)
df
View(df)
page <- cbind(title, movieInfo)
page <- cbind(page, point)
page <- cbind(page, review)
View(page)
write.csv(page, 'outData/movie_review.csv')
#### Static crawling over multiple pages (movie review pages 1 to 100)
home <- 'https://movie.naver.com/movie/point/af/list.nhn'
site = 'https://movie.naver.com/movie/point/af/list.nhn?&page='
movie.review <- NULL
for(i in 1:100){
url <- paste0(site,i)
html <- read_html(url, encoding = 'utf-8')
# Movie title: CSS selector .title .movie (.color_b)
nodes <- html_nodes(html, '.title .movie')
title <- html_text(nodes)
# Link to each movie's info page
movieInfo <- html_attr(nodes, 'href')
movieInfo <- paste0(home, movieInfo)
# Rating: CSS selector .list_netizen_score em
nodes <- html_nodes(html, '.list_netizen_score em')
point <- html_text(nodes)
# Review text: CSS selector td.title
nodes <- html_nodes(html, 'td.title')
text <- html_text(nodes)
text <- gsub('\t','',text)
text <- gsub('\n','',text)
review <- unlist(strsplit(text, '중[0-9]{1,2}'))[seq(2,20,2)]
review <- gsub('신고','',review)
df <- data.frame(title, movieInfo, point, review)
# NOTE(review): growing a data frame with rbind inside a loop is O(n^2);
# collecting per-page frames in a list and binding once would be faster.
movie.review <- rbind(movie.review, df)
}
View(movie.review)
write.csv(movie.review, 'outData/movie_review.csv', row.names = F)
?write.csv
# Movie review analysis
library(KoNLP)
library(stringr)
library(ggplot2)
library(dplyr)
library(wordcloud)
class(movie.review)
movie <- movie.review
str(movie)
movie$point <- as.numeric(movie$point)
# Per-movie rating summary: mean/sum of ratings and review count, keeping
# only movies with more than 10 reviews, top 10 by mean rating.
result <- movie %>%
group_by(title) %>%
summarise(point.mean = mean(point),
point.sum = sum(point),
n=n()) %>%
arrange(-point.mean, -point.sum) %>%
filter(n>10) %>%
head(10)
result
# Bar chart of the top-10 movies, labelled with total and mean rating.
ggplot(result, aes(x=point.mean,y=reorder(title,point.mean))) +
geom_col(aes(fill=title)) +
geom_text(aes(label = paste('총점:',point.sum,'평균:',round(point.mean,1))),hjust=1) +
theme(legend.position = 'none')
# Take only the reviews of the 10 highest-rated movies and compute the
# most frequent words plus a word cloud.
result$title
movie1 <- movie %>% # keep only the movies listed in result$title
filter(title %in% result$title)
View(movie1)
nrow(movie1)
useNIADic()
# Remove special characters and stray Korean jamo from the review text.
movie1$review <- gsub('\\W',' ',movie1$review)
movie1$review <- gsub('[ㄱ-ㅎㅏ-ㅣ]',' ',movie1$review)
View(movie1)
# Extract nouns
nouns <- extractNoun(movie1$review)
# Word counts
wordcount <- table(unlist(nouns))
head(wordcount)
View(wordcount)
df_word <- as.data.frame(wordcount, stringsAsFactors = F)
df_word <- rename(df_word, word=Var1, freq=Freq)
# Keep words longer than one character and drop the word for "movie" itself.
df_word <- filter(df_word, nchar(word)>1& word!='영화')
head(df_word)
# Pick the 10 most frequent words
top10 <- df_word[order(df_word$freq, decreasing = T),][1:10,]
top10
pal <- brewer.pal(8, 'Dark2')
# Word cloud
wordcloud(words = df_word$word,
freq = df_word$freq,
min.freq = 5,
max.words = 150,
random.order = F,
rot.per = 0.1,
scale = c(5,0.5),
colors = pal)
|
/src/06_R/ch13_웹데이터수집.R
|
no_license
|
a124124/bigdata
|
R
| false
| false
| 4,047
|
r
|
#############################################
#### Chapter 13. Web data collection     ####
####       (static web crawling)         ####
#############################################
install.packages('rvest')
installed.packages()
library(rvest)
# Target page: Naver Movie user-review list.
url <- 'https://movie.naver.com/movie/point/af/list.nhn'
html <- read_html(url, encoding = 'utf-8')
html
# Movie title: CSS selector .title .movie (.color_b)
nodes <- html_nodes(html, '.title .movie')
as.character(nodes)
title <- html_text(nodes)
title
# Link to each movie's info page (href attribute of the title anchor)
movieInfo <- html_attr(nodes, 'href')
movieInfo <- paste0(url, movieInfo)
movieInfo
# Rating: CSS selector .list_netizen_score em
nodes <- html_nodes(html, '.list_netizen_score em')
nodes
point <- html_text(nodes)
point
# Review text: CSS selector td.title
nodes <- html_nodes(html, 'td.title')
as.character(nodes)[1]
text <- html_text(nodes)
text
# Strip tab/newline characters left over from the HTML layout.
text <- gsub('\t','',text)
text <- gsub('\n','',text)
text
# Each cell reads "<title> ... <rating pattern> <review> <report link>";
# splitting on the rating pattern and keeping every second piece isolates
# the 10 review texts of the page, then the trailing report-link word is
# removed.
review <- unlist(strsplit(text, '중[0-9]{1,2}'))[seq(2,20,2)]
review <- gsub('신고','',review)
review
df <- data.frame(title, movieInfo, point, review)
df
View(df)
page <- cbind(title, movieInfo)
page <- cbind(page, point)
page <- cbind(page, review)
View(page)
write.csv(page, 'outData/movie_review.csv')
#### Static crawling over multiple pages (movie review pages 1 to 100)
home <- 'https://movie.naver.com/movie/point/af/list.nhn'
site = 'https://movie.naver.com/movie/point/af/list.nhn?&page='
movie.review <- NULL
for(i in 1:100){
url <- paste0(site,i)
html <- read_html(url, encoding = 'utf-8')
# Movie title: CSS selector .title .movie (.color_b)
nodes <- html_nodes(html, '.title .movie')
title <- html_text(nodes)
# Link to each movie's info page
movieInfo <- html_attr(nodes, 'href')
movieInfo <- paste0(home, movieInfo)
# Rating: CSS selector .list_netizen_score em
nodes <- html_nodes(html, '.list_netizen_score em')
point <- html_text(nodes)
# Review text: CSS selector td.title
nodes <- html_nodes(html, 'td.title')
text <- html_text(nodes)
text <- gsub('\t','',text)
text <- gsub('\n','',text)
review <- unlist(strsplit(text, '중[0-9]{1,2}'))[seq(2,20,2)]
review <- gsub('신고','',review)
df <- data.frame(title, movieInfo, point, review)
# NOTE(review): growing a data frame with rbind inside a loop is O(n^2);
# collecting per-page frames in a list and binding once would be faster.
movie.review <- rbind(movie.review, df)
}
View(movie.review)
write.csv(movie.review, 'outData/movie_review.csv', row.names = F)
?write.csv
# Movie review analysis
library(KoNLP)
library(stringr)
library(ggplot2)
library(dplyr)
library(wordcloud)
class(movie.review)
movie <- movie.review
str(movie)
movie$point <- as.numeric(movie$point)
# Per-movie rating summary: mean/sum of ratings and review count, keeping
# only movies with more than 10 reviews, top 10 by mean rating.
result <- movie %>%
group_by(title) %>%
summarise(point.mean = mean(point),
point.sum = sum(point),
n=n()) %>%
arrange(-point.mean, -point.sum) %>%
filter(n>10) %>%
head(10)
result
# Bar chart of the top-10 movies, labelled with total and mean rating.
ggplot(result, aes(x=point.mean,y=reorder(title,point.mean))) +
geom_col(aes(fill=title)) +
geom_text(aes(label = paste('총점:',point.sum,'평균:',round(point.mean,1))),hjust=1) +
theme(legend.position = 'none')
# Take only the reviews of the 10 highest-rated movies and compute the
# most frequent words plus a word cloud.
result$title
movie1 <- movie %>% # keep only the movies listed in result$title
filter(title %in% result$title)
View(movie1)
nrow(movie1)
useNIADic()
# Remove special characters and stray Korean jamo from the review text.
movie1$review <- gsub('\\W',' ',movie1$review)
movie1$review <- gsub('[ㄱ-ㅎㅏ-ㅣ]',' ',movie1$review)
View(movie1)
# Extract nouns
nouns <- extractNoun(movie1$review)
# Word counts
wordcount <- table(unlist(nouns))
head(wordcount)
View(wordcount)
df_word <- as.data.frame(wordcount, stringsAsFactors = F)
df_word <- rename(df_word, word=Var1, freq=Freq)
# Keep words longer than one character and drop the word for "movie" itself.
df_word <- filter(df_word, nchar(word)>1& word!='영화')
head(df_word)
# Pick the 10 most frequent words
top10 <- df_word[order(df_word$freq, decreasing = T),][1:10,]
top10
pal <- brewer.pal(8, 'Dark2')
# Word cloud
wordcloud(words = df_word$word,
freq = df_word$freq,
min.freq = 5,
max.words = 150,
random.order = F,
rot.per = 0.1,
scale = c(5,0.5),
colors = pal)
|
# Teaching template: lines containing *** / **** are placeholders to be
# filled in by the student and are not runnable as-is.
# Load the required packages
install.packages("devtools")
library(devtools)
devtools::install_github("statsbomb/StatsBombR", force = TRUE)
devtools::install_github("FCrSTATS/SBpitch")
# If dplyr does not load, install it and then library(dplyr)
library(tidyverse)
library(StatsBombR)
library(SBpitch)
# All the data on the available competitions
Comp<-FreeCompetitions()
# Filter for the competition we are interested in
Comp<-FreeCompetitions()%>%
filter(competition_id==11, season_name=="2019/2020")
# Load the matches (this takes a long time)
Matches<-FreeMatches(Comp)
StatsBombData<-StatsBombFreeEvents(MatchesDF = Matches, Parallel = T)
# Clean the data
StatsBombData = allclean(StatsBombData)
# Filter for a single match (fill in the *** placeholders)
d1<-StatsBombData%>%
filter(match_id == ***, type.name == "Pass", team.name == "***")
# Draw the pitch
create_Pitch()
# Add the passes
geom_point(data = ***, aes(x = ****, y = ****))
# Connect the passes
geom_point(data = ***, aes(x = ****, y = ****))+
geom_segment(data = ***, aes(x = ****, y = ****, xend = ****, yend = ****))
# Where to add "arrow"
arrow = arrow(length = unit(0.08,"inches"))
# Adjust alpha
# Colour in red
# The y axis is not correct in the create_pitch function...
# so if you plot the passes of a left back they will show up on the right.
# Add this after geom_segment
scale_y_reverse()
# Add the titles
labs(title = "Aggiungi team 1",
subtitle = "vs team 2")
# Finally, you can filter for a single player
d1<-StatsBombData%>%
filter(match_id == 2275096, type.name == "Pass", team.name == "Arsenal WFC", player.name == "Leah Williamson")
|
/R_workshop/02_Lab2_StatsBomb_pass.R
|
no_license
|
FEM-modena/D4SI
|
R
| false
| false
| 1,648
|
r
|
# Teaching template: lines containing *** / **** are placeholders to be
# filled in by the student and are not runnable as-is.
# Load the required packages
install.packages("devtools")
library(devtools)
devtools::install_github("statsbomb/StatsBombR", force = TRUE)
devtools::install_github("FCrSTATS/SBpitch")
# If dplyr does not load, install it and then library(dplyr)
library(tidyverse)
library(StatsBombR)
library(SBpitch)
# All the data on the available competitions
Comp<-FreeCompetitions()
# Filter for the competition we are interested in
Comp<-FreeCompetitions()%>%
filter(competition_id==11, season_name=="2019/2020")
# Load the matches (this takes a long time)
Matches<-FreeMatches(Comp)
StatsBombData<-StatsBombFreeEvents(MatchesDF = Matches, Parallel = T)
# Clean the data
StatsBombData = allclean(StatsBombData)
# Filter for a single match (fill in the *** placeholders)
d1<-StatsBombData%>%
filter(match_id == ***, type.name == "Pass", team.name == "***")
# Draw the pitch
create_Pitch()
# Add the passes
geom_point(data = ***, aes(x = ****, y = ****))
# Connect the passes
geom_point(data = ***, aes(x = ****, y = ****))+
geom_segment(data = ***, aes(x = ****, y = ****, xend = ****, yend = ****))
# Where to add "arrow"
arrow = arrow(length = unit(0.08,"inches"))
# Adjust alpha
# Colour in red
# The y axis is not correct in the create_pitch function...
# so if you plot the passes of a left back they will show up on the right.
# Add this after geom_segment
scale_y_reverse()
# Add the titles
labs(title = "Aggiungi team 1",
subtitle = "vs team 2")
# Finally, you can filter for a single player
d1<-StatsBombData%>%
filter(match_id == 2275096, type.name == "Pass", team.name == "Arsenal WFC", player.name == "Leah Williamson")
|
#!/usr/bin/env Rscript
# Print the usage/help text for this command-line script.
usage <- function() {
cat(
'usage: mad.R <file>
mad.R
author: Colby Chiang (cc2qe@virginia.edu)
description: calculate median absolute deviation from a column of numbers
positional arguments:
file File with one column of numerical values [stdin]
')
}
# Parse command-line arguments (a single optional input-file path).
args <- commandArgs(trailingOnly=TRUE)
file <- args[1]
filename <- basename(file)
# Check input args
# Fall back to stdin if no file argument was given.
if (is.na(file)) {
# Interactive terminal with no piped input: print help and exit non-zero.
if (isatty(stdin())) {
usage()
quit(save='no', status=1)
}
else {
file <- file('stdin')
filename <- 'stdin'
}
}
# Read the input as a one-column character matrix, then coerce to numeric:
# assigning class 'numeric' changes the storage mode, converting the
# character values to numbers (see ?class).
x <- matrix(scan(file, what='raw', sep='\t', quiet=TRUE), byrow=TRUE, ncol=1)
class(x) <- 'numeric'
# Compute the median and the median absolute deviation of the column.
med <- median(x[,1])
mad <- mad(x[,1])
# Emit "median<TAB>mad" followed by a newline.
cat(med, mad, sep='\t')
cat('\n')
|
/bin/mad.R
|
permissive
|
xtmgah/voir
|
R
| false
| false
| 837
|
r
|
#!/usr/bin/env Rscript
# Print the usage/help text for this command-line script.
usage <- function() {
cat(
'usage: mad.R <file>
mad.R
author: Colby Chiang (cc2qe@virginia.edu)
description: calculate median absolute deviation from a column of numbers
positional arguments:
file File with one column of numerical values [stdin]
')
}
# Parse command-line arguments (a single optional input-file path).
args <- commandArgs(trailingOnly=TRUE)
file <- args[1]
filename <- basename(file)
# Check input args
# Fall back to stdin if no file argument was given.
if (is.na(file)) {
# Interactive terminal with no piped input: print help and exit non-zero.
if (isatty(stdin())) {
usage()
quit(save='no', status=1)
}
else {
file <- file('stdin')
filename <- 'stdin'
}
}
# Read the input as a one-column character matrix, then coerce to numeric:
# assigning class 'numeric' changes the storage mode, converting the
# character values to numbers (see ?class).
x <- matrix(scan(file, what='raw', sep='\t', quiet=TRUE), byrow=TRUE, ncol=1)
class(x) <- 'numeric'
# Compute the median and the median absolute deviation of the column.
med <- median(x[,1])
mad <- mad(x[,1])
# Emit "median<TAB>mad" followed by a newline.
cat(med, mad, sep='\t')
cat('\n')
|
#' Create a history tibble for a onenode tree.
#'
#' A helper function to create_onenode_tree to create clean,
#' uniform tibbles that keep track of the history of the tree.
#'
#' @param z a list containing vectors q and p and a stepsize h representing a point in phase space plus a stepsize.
#' @param depth the depth in the whole NUTS tree that this node sits at
#' @param H0 the reference Hamiltonian value recorded alongside H (presumably the value at the trajectory's initial point -- TODO confirm)
#' @param H the Hamiltonian value at this point in phase space
#' @param valid_subtree whether the subtree at that depth was valid
#' @param uturn whether a uturn occurred at that node
#' @param integrator_error either NA, "divergence", or "newton"
#' @param num_grad number of likelihood gradient evaluations it took to get that step
#' @param num_hess number of likelihood hessian evaluations it took to get that step
#' @param num_hess_vec number of likelihood hessian-vector product evaluations it took to get that step
#' @param num_newton number of Newton iterations it took to get that step
#'
#' @return A tibble with a single row that represents a node in the tree including its depth, energy value, position, and whether the step was invalid
#' @export
#'
#' @examples
create_onenode_hist <- function(z, depth, H0, H, valid_subtree, uturn, integrator_error, num_grad, num_hess, num_hess_vec, num_newton) {
D <- length(z$q)
# Spread the position and momentum vectors into one-row tibbles with
# columns q1..qD and p1..pD respectively.
q <- matrix(z$q, nrow = 1) %>% as_tibble %>% set_names(paste0("q",1:D))
p <- matrix(z$p, nrow = 1) %>% as_tibble %>% set_names(paste0("p",1:D))
# One row of scalar diagnostics followed by the coordinate columns.
tibble(depth = depth, h = z$h, H0 = H0, H = H,
valid_subtree = valid_subtree,
uturn = uturn,
integrator_error = integrator_error) %>%
mutate(num_grad = num_grad,
num_hess = num_hess,
num_hess_vec = num_hess_vec,
num_newton = num_newton) %>%
bind_cols(bind_cols(q, p))
}
#' Create one node tree.
#'
#' This function is akin to a constructor. It makes sure
#' we create trees that uniformly have the same entries with the same names.
#'
#' @param z a list containing vectors q and p and a stepsize h representing a point in phase space plus a stepsize.
#' @param depth the depth in the whole NUTS tree that this node sits at
#' @param H0 the reference Hamiltonian value; the node's log weight is computed as H0 - H
#' @param H the Hamiltonian value at this point in phase space
#' @param valid_subtree whether the subtree at that depth was valid
#' @param uturn whether a uturn occurred at that node
#' @param integrator_error either NA, "divergence", or "newton"
#' @param num_grad number of likelihood gradient evaluations it took to get that step
#' @param num_hess number of likelihood hessian evaluations it took to get that step
#' @param num_hess_vec number of likelihood hessian-vector product evaluations it took to get that step
#' @param num_newton number of Newton iterations it took to get that step
#' @param DEBUG if this is on a tibble keeping track of the history of the node will be returned as well
#'
#' @return A one node tree which is essentially a list with several attributes such as depth and whether the tree is valid.
#' @export
#'
#' @examples
create_onenode_tree <- function(z, depth, H0, H, valid_subtree, uturn, integrator_error, num_grad, num_hess, num_hess_vec, num_newton, DEBUG) {
hist <- NULL
# Only record a history row when debugging is requested.
if (DEBUG) {
hist <- create_onenode_hist(z, depth, H0, H, valid_subtree, uturn, integrator_error, num_grad, num_hess, num_hess_vec, num_newton)
}
# A single-node tree: the node is simultaneously the representative sample
# (z_rep) and both trajectory endpoints (z_minus / z_plus). The *_1 / *_2
# slots (the one- and two-steps-inside neighbours of each endpoint) do not
# exist yet for a single node, hence NULL.
list(depth = depth,
valid = valid_subtree,
integrator_error = integrator_error,
coordinate_uturn = rep(FALSE, length(z$q)),  # per-coordinate u-turn flags, all FALSE initially
log_w = H0-H,  # log weight of the node relative to the reference energy
rho = z$p,  # momentum accumulator, seeded with this node's momentum
z_rep = z,
z_minus = z,
z_minus_1 = NULL,
z_minus_2 = NULL,
z_plus = z,
z_plus_1 = NULL,
z_plus_2 = NULL,
num_grad = num_grad,
num_hess = num_hess,
num_hess_vec = num_hess_vec,
num_newton = num_newton,
hist = hist)
}
#' Build tree
#'
#' Build a NUTS tree starting from z0. If depth is 0, then this is just a single node.
#' If depth is 1, then it's two nodes. Depth is j then 2^j nodes. Trees are built recursively
#' e.g. if we need a depth 2 tree which has 4 nodes we'll build 2 trees of depth 1 and join
#' them together.
#'
#' Sometimes a tree can be invalid, either because there was a problem with the
#' integrator or because a U-Turn was detected. In this case the tree is marked as invalid and building
#' of the tree ceases.
#'
#' @param depth Number of levels of tree.
#' @param z0 Initial point to start from. Should contain q, p, h
#' @param z_1 z_{-1} Previous point. Useful for determining a guess of z_{1}
#' @param z_2 z_{-2} Previous, previous point. Useful for determining a guess of z_{1}
#' @param direction Direction we'd like to build tree in (forwards = 1, backwards = -1)
#' @param ham_system The Hamiltonian system; must provide compute_H() and is forwarded to the u-turn checks
#' @param H0 Reference Hamiltonian value, passed unchanged to every node that is created
#' @param integrate_step a function that integrates a single step
#' @param DEBUG Flag to determine whether we return tibble that includes history of points
#'
#' @return A list representing a tree
#' @export
#'
#' @examples
build_tree <- function(depth, z0, z_1, z_2, direction, ham_system, H0, integrate_step, DEBUG = FALSE) {
new_tree <- NULL
# base case (take a single integrator step and wrap it as a one-node tree)
if(depth == 0){
integrator_result <- integrate_step(z0, z_1, z_2, direction, ham_system, H0)
new_tree <- create_onenode_tree(z = integrator_result$z1,
depth = depth,
H0 = H0,
H = ham_system$compute_H(integrator_result$z1),
valid_subtree = is.na(integrator_result$integrator_error),
uturn = FALSE,
integrator_error = integrator_result$integrator_error,
num_grad = integrator_result$num_grad,
num_hess = integrator_result$num_hess,
num_hess_vec = integrator_result$num_hess_vec,
num_newton = integrator_result$num_newton,
DEBUG = DEBUG)
}
# recursion: build two half-size subtrees and join them
else{
inner_subtree <- build_tree(depth-1, z0, z_1, z_2, direction, ham_system, H0, integrate_step, DEBUG)
# only build outer subtree and tack it on if inner subtree was valid. otherwise
# just return the inner_subtree
if (inner_subtree$valid) {
# assume direction is forward in which case we build outer subtree starting
# from z_plus. If direction is backward then we start from z_minus
z0.outer <- inner_subtree$z_plus
if (direction == -1) {
z0.outer <- inner_subtree$z_minus
}
# build outer_subtree and join even if it's invalid because we might
# want to view its history.
outer_subtree <- build_tree(depth-1, z0.outer, z_1, z_2, direction, ham_system, H0, integrate_step, DEBUG)
new_tree <- join_subtrees(inner_subtree, outer_subtree, direction, biased_progressive_sampling = FALSE, ham_system, DEBUG)
} else {
new_tree <- inner_subtree
}
}
# if we're in debug mode update the depth of the subtree in the history tibble. note that this has to be
# done here in build_tree because join_trees is also used in the main NUTS call where we don't want these depths to be updated in this manner
if (DEBUG) {
depth_ <- depth
new_tree$hist <- new_tree$hist %>%
mutate(depth = depth_) %>%
mutate(valid_subtree = new_tree$valid)
}
new_tree
}
#' Join two subtrees.
#'
#' Joins an inner subtree and an outer subtree.
#'
#' This function is called either in sampling where we tack on a new subtree to our current tree.
#' The other place where it's called is in build_tree() where we create two trees and join them.
#'
#' We can always assume both trees passed in are non-null but we can't always assume they're the
#' same size. We can assume the inner_tree is valid, but not the outer. The outer subtree could have stopped early because it was invalid. In which case
#' the representative sample will just be from the inner_subtree.
#'
#' The representative sample is in accordance with Betancourt's continuous sampling of subtrees,
#' where every tree has a representative node z_rep. For a 1-node tree, the representative is that one node.
#' For a two node tree we sample a representative from the two nodes. This sampling is
#' described in the function sample_new_represenative().
#'
#' @param inner_subtree The valid and non-null subtree created first.
#' @param outer_subtree The subtree created second as an extension of the end of the inner subtree.
#' @param direction the direction the trees were built; determines if outer is added on the plus side or minus side
#' @param ham_system the hamiltonian system of the problem
#' @param biased_progressive_sampling whether to biasedly sample the new tree (should only be done in the original call to build_tree() called from NUTS)
#' @param DEBUG whether to include the history tibble
#'
#' @return A new joined tree.
#' @export
#'
#' @examples
join_subtrees <- function(inner_subtree, outer_subtree, direction, biased_progressive_sampling, ham_system, DEBUG) {
# returned tree starts as copy of inner_subtree.
tree <- inner_subtree
tree$depth <- outer_subtree$depth + 1
# Total weight is the sum of the subtree weights (stored on log scale).
# NOTE(review): computed as log(exp+exp); a log-sum-exp formulation would
# be more robust to overflow/underflow for extreme log weights.
tree$log_w <- log(exp(inner_subtree$log_w) + exp(outer_subtree$log_w))
# only update representative if outer_subtree is valid
if(outer_subtree$valid) {
if(biased_progressive_sampling) {
tree$z_rep <- sample_new_representative_biasedly(inner_subtree, outer_subtree)
} else{
tree$z_rep <- sample_new_representative_uniformly(inner_subtree, outer_subtree)
}
}
# update z_plus, z_plus_1, z_plus_2, z_minus, z_minus_1, z_minus_2. note if depth of the
# new joined tree == 1 then these are set to NULL so we manually set them. if depth == 2
# we'll have four nodes but neither of the subtrees we join will have z_plus_2 and z_minus_2
# so we need to set those manually as well
if (direction == 1) {
tree$z_plus = outer_subtree$z_plus
tree$z_plus_1 = outer_subtree$z_plus_1
tree$z_plus_2 = outer_subtree$z_plus_2
} else {
tree$z_minus = outer_subtree$z_minus
tree$z_minus_1 = outer_subtree$z_minus_1
tree$z_minus_2 = outer_subtree$z_minus_2
}
if (tree$depth == 1) {
# Two-node tree: each endpoint's inside neighbour is the other endpoint.
tree$z_plus_1 = tree$z_minus
tree$z_minus_1 = tree$z_plus
}
if (tree$depth == 2) {
# Four-node tree: the two-steps-inside neighbours straddle the join point.
if (direction == 1) {
tree$z_plus_2 = inner_subtree$z_plus
tree$z_minus_2 = outer_subtree$z_minus
} else {
tree$z_plus_2 = outer_subtree$z_plus
tree$z_minus_2 = inner_subtree$z_minus
}
}
# check to see if new joined tree is valid. This only happens if both subtrees
# are valid and if the u-turn criteria is met between z_plus and z_minus
both_subtrees_valid <- inner_subtree$valid & outer_subtree$valid
tree$coordinate_uturn <- update_coordinate_uturn(tree, ham_system)
nouturn_criteria_met <- check_uturn_criteria(tree, ham_system)
#nouturn_criteria_met <- !all(tree$coordinate_uturn)
if(both_subtrees_valid & nouturn_criteria_met) {
tree$valid = TRUE
} else {
tree$valid = FALSE
}
# update the integrator error if there was one
# NOTE(review): when the outer subtree is valid this resets the error to NA
# even if the inner subtree carried an integrator error -- confirm that an
# invalid inner subtree can never reach this point.
if(!outer_subtree$valid & !is.na(outer_subtree$integrator_error)) {
tree$integrator_error <- outer_subtree$integrator_error
} else {
tree$integrator_error <- as.character(NA)
}
# update number of evals (na.rm so a missing counter doesn't poison the sum)
tree$num_grad <- sum(inner_subtree$num_grad, outer_subtree$num_grad, na.rm = TRUE)
tree$num_hess <- sum(inner_subtree$num_hess, outer_subtree$num_hess, na.rm = TRUE)
tree$num_hess_vec <- sum(inner_subtree$num_hess_vec, outer_subtree$num_hess_vec, na.rm = TRUE)
tree$num_newton <- sum(inner_subtree$num_newton, outer_subtree$num_newton, na.rm = TRUE)
# update hist if we're in debug mode
if (DEBUG) {
tree$hist <- join_tree_histories(inner_subtree, outer_subtree, direction, nouturn_criteria_met, both_subtrees_valid)
}
tree
}
# Merge the history tibbles of two joined subtrees (debug mode only).
# Rows are ordered from the minus end of the trajectory to the plus end,
# and the span is marked invalid/U-turned when this join is what failed.
join_tree_histories <- function(inner_subtree, outer_subtree, direction, nouturn_criteria_met, both_subtrees_valid) {
  # Concatenate so rows always run minus-end -> plus-end, regardless of the
  # direction we integrated in.
  if (direction == 1) {
    new_hist <- bind_rows(inner_subtree$hist, outer_subtree$hist)
  } else {
    new_hist <- bind_rows(outer_subtree$hist, inner_subtree$hist)
  }
  # A U-Turn detected here (both halves valid, criterion failed) is what
  # invalidated this subtree: mark the whole span invalid and flag the two
  # endpoint rows. If a half was already invalid, the failure happened in an
  # earlier, deeper subtree and was recorded there.
  if (!nouturn_criteria_met && both_subtrees_valid) {
    new_hist$valid_subtree <- FALSE
    new_hist$uturn[c(1, nrow(new_hist))] <- TRUE
  }
  new_hist
}
#' Take a uniform sample over the joined trajectory.
#'
#' Samples a representative for the joined tree: the outer subtree's
#' representative is adopted with probability w_new / (w_old + w_new) and the
#' inner subtree's kept otherwise, so every node of the joined trajectory is
#' equally likely to end up as the representative.
#'
#' @param inner_subtree a valid inner subtree
#' @param outer_subtree a valid outer subtree
#'
#' @return either the representative sample from the inner or outer subtree
#' @export
#'
#' @examples
sample_new_representative_uniformly <- function(inner_subtree, outer_subtree) {
  new_z_rep <- inner_subtree$z_rep
  log_w_old <- inner_subtree$log_w
  log_w_new <- outer_subtree$log_w
  # log(w_old + w_new) via log-sum-exp so that large |log_w| values don't
  # overflow/underflow the way the naive log(exp(a) + exp(b)) does.
  log_w_max <- max(log_w_old, log_w_new)
  if (is.finite(log_w_max)) {
    log_w_total <- log_w_max + log1p(exp(min(log_w_old, log_w_new) - log_w_max))
  } else {
    log_w_total <- log_w_max
  }
  if (log_w_new >= log_w_total) {
    # The outer tree carries (essentially) all the weight; take its
    # representative outright. Also covers the degenerate all-zero-weight case.
    new_z_rep <- outer_subtree$z_rep
  } else {
    # Bernoulli draw with success probability w_new / (w_old + w_new); on
    # success the representative comes from the outer subtree, otherwise it
    # stays the inner subtree's representative.
    if (runif(1) <= exp(log_w_new - log_w_total)) {
      new_z_rep <- outer_subtree$z_rep
    }
  }
  new_z_rep
}
#' Take a biased sample over the joined trajectory.
#'
#' Biased progressive sampling: when the new (outer) subtree has at least the
#' weight of the old (inner) one, its representative is adopted with
#' probability one. Otherwise the outer representative is adopted with
#' probability w_new / w_old, and the inner representative is kept with the
#' complementary probability. This deliberately favours later states, pushing
#' samples away from the trajectory's starting point.
#'
#' @param inner_subtree a valid inner_subtree
#' @param outer_subtree a valid outer_Subtree
#'
#' @return either representative sample from the left or right subtree
#' @export
#'
#' @examples
sample_new_representative_biasedly <- function(inner_subtree, outer_subtree) {
  log_w_old <- inner_subtree$log_w
  log_w_new <- outer_subtree$log_w
  # New subtree dominates: take its representative with probability one.
  if (log_w_new >= log_w_old) {
    return(outer_subtree$z_rep)
  }
  # Otherwise flip a coin with success probability w_new / w_old.
  if (runif(1) <= exp(log_w_new - log_w_old)) {
    return(outer_subtree$z_rep)
  }
  inner_subtree$z_rep
}
#' Check that there's NO U-Turn
#'
#' Returns TRUE if there's no U-Turn in either direction, i.e. the classic
#' NUTS termination criterion: both trajectory endpoints are still moving
#' apart when projected onto the chord between them.
#'
#' @param tree the joined tree we're checking the U-Turn of
#' @param ham_system the Hamiltonian system for the problem (currently unused
#'   here; kept so all U-Turn checks share the same signature)
#'
#' @return TRUE if there's no U-Turn and FALSE if there is
#' @export
#'
#' @examples
check_uturn_criteria <- function(tree, ham_system) {
  q_plus <- tree$z_plus$q
  q_minus <- tree$z_minus$q
  # NOTE(review): the endpoint momenta are used directly here rather than the
  # velocities M^{-1} p (contrast update_coordinate_uturn, which does
  # solve(M, p)). That matches the standard criterion when M is the identity;
  # confirm this is intended for general mass matrices.
  p_plus <- tree$z_plus$p
  p_minus <- tree$z_minus$p
  # p_plus . (q_plus - q_minus) > 0 : the plus end is still moving away from
  # the minus end; the mirrored expression covers the backward direction.
  no_uturn_forward <- as.numeric(p_plus %*% (q_plus - q_minus)) > 0
  no_uturn_backward <- as.numeric(-p_minus %*% (q_minus - q_plus)) > 0
  no_uturn_forward & no_uturn_backward
}
#' Check that there's NO U-Turn using the new generalized criteria.
#'
#' Returns TRUE if there's no U-Turn in either direction.
#'
#' NOTE(review): this is currently the same computation as
#' check_uturn_criteria (endpoint momenta dotted with the chord between the
#' endpoints); the intended generalization does not appear to be implemented
#' yet.
#'
#' @param tree the joined tree we're checking the U-Turn of
#' @param ham_system the Hamiltonian system for the problem (currently unused
#'   here; kept so all U-Turn checks share the same signature)
#'
#' @return TRUE if there's no U-Turn and FALSE if there is
#' @export
#'
#' @examples
check_generalized_uturn_criteria <- function(tree, ham_system) {
  q_plus <- tree$z_plus$q
  q_minus <- tree$z_minus$q
  p_plus <- tree$z_plus$p
  p_minus <- tree$z_minus$p
  # Endpoints must still be moving apart along the chord joining them.
  no_uturn_forward <- as.numeric(p_plus %*% (q_plus - q_minus)) > 0
  no_uturn_backward <- as.numeric(-p_minus %*% (q_minus - q_plus)) > 0
  no_uturn_forward & no_uturn_backward
}
#' Check for U-Turns at the coordinate level
#'
#' For each coordinate, the sign of q_plus - q_minus says which way that
#' dimension has travelled; multiplying by the velocity in that dimension
#' says whether it is still heading the same way. A negative product means
#' that single dimension has U-Turned. The per-dimension flags are
#' accumulated with a sticky OR so that once a coordinate U-Turns it stays
#' flagged. Tracked because requiring ALL dimensions to U-Turn may be a
#' useful alternative stopping criterion.
#'
#' @param tree the newly joined tree we're going to check
#' @param ham_system the Hamiltonian system for the problem
#'
#' @return a logical vector tracking the U-Turn status of each coordinate
#' @export
#'
#' @examples
update_coordinate_uturn <- function(tree, ham_system) {
  delta_q <- tree$z_plus$q - tree$z_minus$q
  # Convert endpoint momenta to velocities via the mass matrix: v = M^{-1} p.
  vel_plus <- solve(ham_system$M, tree$z_plus$p)
  vel_minus <- solve(ham_system$M, tree$z_minus$p)
  # TRUE in a dimension means that dimension has U-Turned at this join.
  uturned_forward <- vel_plus * delta_q < 0
  uturned_backward <- vel_minus * delta_q < 0
  tree$coordinate_uturn | uturned_forward | uturned_backward
}
|
/R/build_tree.R
|
no_license
|
pourzanj/RNUTS
|
R
| false
| false
| 18,288
|
r
|
#' Create a history tibble for a onenode tree.
#'
#' A helper function to create_onenode_tree to create clean,
#' uniform tibbles that keep track of the history of the tree.
#'
#' @param z a list containing vectors q and p and a stepsize h representing a point in phase space plus a stepsize.
#' @param depth the depth in the whole NUTS tree that this node sits at
#' @param H0 the Hamiltonian value at the start of the trajectory (reference energy)
#' @param H the Hamiltonian value at this point in phase space
#' @param valid_subtree whether the subtree at that depth was valid
#' @param uturn whether a uturn occurred at that node
#' @param integrator_error either NA, "divergence", or "newton"
#' @param num_grad number of likelihood gradient evaluations it took to get that step
#' @param num_hess number of likelihood hessian evaluations it took to get that step
#' @param num_hess_vec number of likelihood hessian-vector product evaluations it took to get that step
#' @param num_newton number of Newton iterations it took to get that step
#'
#' @return A tibble with a single row that represents a node in the tree including its depth, energy value, position (columns q1..qD), momentum (columns p1..pD), and whether the step was invalid
#' @export
#'
#' @examples
create_onenode_hist <- function(z, depth, H0, H, valid_subtree, uturn, integrator_error, num_grad, num_hess, num_hess_vec, num_newton) {
  D <- length(z$q)
  # Spread position and momentum vectors into one-row tibbles whose columns
  # are named q1..qD and p1..pD respectively.
  q <- matrix(z$q, nrow = 1) %>% as_tibble %>% set_names(paste0("q",1:D))
  p <- matrix(z$p, nrow = 1) %>% as_tibble %>% set_names(paste0("p",1:D))
  # One row of metadata for this node, with eval counters and the q/p columns
  # bound on the right.
  tibble(depth = depth, h = z$h, H0 = H0, H = H,
         valid_subtree = valid_subtree,
         uturn = uturn,
         integrator_error = integrator_error) %>%
    mutate(num_grad = num_grad,
           num_hess = num_hess,
           num_hess_vec = num_hess_vec,
           num_newton = num_newton) %>%
    bind_cols(bind_cols(q, p))
}
#' Create one node tree.
#'
#' Constructor-style helper that guarantees every tree carries the same named
#' fields. A single node is its own representative sample and is both the
#' minus and plus endpoint of its (trivial) trajectory; the one- and two-in
#' neighbour slots (z_*_1, z_*_2) start out NULL and are filled in when
#' trees are joined.
#'
#' @param z a list containing vectors q and p and a stepsize h representing a point in phase space plus a stepsize.
#' @param depth the depth in the whole NUTS tree that this node sits at
#' @param H0 the Hamiltonian value at the start of the trajectory
#' @param H the Hamiltonian value at this point in phase space
#' @param valid_subtree whether the subtree at that depth was valid
#' @param uturn whether a uturn occurred at that node
#' @param integrator_error either NA, "divergence", or "newton"
#' @param num_grad number of likelihood gradient evaluations it took to get that step
#' @param num_hess number of likelihood hessian evaluations it took to get that step
#' @param num_hess_vec number of likelihood hessian-vector product evaluations it took to get that step
#' @param num_newton number of Newton iterations it took to get that step
#' @param DEBUG if TRUE a tibble tracking the history of the node is attached as well
#'
#' @return A one node tree: a list with depth, validity, log weight, endpoint
#'   bookkeeping and work counters.
#' @export
#'
#' @examples
create_onenode_tree <- function(z, depth, H0, H, valid_subtree, uturn, integrator_error, num_grad, num_hess, num_hess_vec, num_newton, DEBUG) {
  # History is only materialised in debug mode; otherwise it stays NULL.
  node_hist <- if (DEBUG) {
    create_onenode_hist(z, depth, H0, H, valid_subtree, uturn,
                        integrator_error, num_grad, num_hess,
                        num_hess_vec, num_newton)
  } else {
    NULL
  }
  list(depth = depth,
       valid = valid_subtree,
       integrator_error = integrator_error,
       coordinate_uturn = rep(FALSE, length(z$q)),
       log_w = H0 - H,  # log of this node's weight exp(H0 - H)
       rho = z$p,
       z_rep = z,
       z_minus = z,
       z_minus_1 = NULL,
       z_minus_2 = NULL,
       z_plus = z,
       z_plus_1 = NULL,
       z_plus_2 = NULL,
       num_grad = num_grad,
       num_hess = num_hess,
       num_hess_vec = num_hess_vec,
       num_newton = num_newton,
       hist = node_hist)
}
#' Build tree
#'
#' Build a NUTS tree starting from z0. If depth is 0, then this is just a single node.
#' If depth is 1, then it's two nodes. Depth is j then 2^j nodes. Trees are built recursively
#' e.g. if we need a depth 2 tree which has 4 nodes we'll build 2 trees of depth 1 and join
#' them together.
#'
#' Sometimes a tree can be invalid, either because there was a problem with the
#' integrator or because a U-Turn was detected. In this case the tree is marked as invalid and building
#' of the tree ceases.
#'
#' @param depth Number of levels of tree (the tree has 2^depth nodes).
#' @param z0 Initial point to start from. Should contain q, p, h
#' @param z_1 z_{-1} Previous point. Useful for determining a guess of z_{1}
#' @param z_2 z_{-2} Previous, previous point. Useful for determining a guess of z_{1}
#' @param direction Direction we'd like to build tree in (+1 forwards, -1 backwards)
#' @param ham_system the Hamiltonian system (provides compute_H and the mass matrix M)
#' @param H0 the Hamiltonian value at the trajectory's starting point (reference energy)
#' @param integrate_step a function that integrates a single step
#' @param DEBUG Flag to determine whether we return tibble that includes history of points
#'
#' @return A list representing a tree
#' @export
#'
#' @examples
build_tree <- function(depth, z0, z_1, z_2, direction, ham_system, H0, integrate_step, DEBUG = FALSE) {
  new_tree <- NULL
  # base case (take a single integrator step and wrap it as a one-node tree)
  if(depth == 0){
    integrator_result <- integrate_step(z0, z_1, z_2, direction, ham_system, H0)
    # the step is valid only when the integrator reported no error (NA error field)
    new_tree <- create_onenode_tree(z = integrator_result$z1,
                                    depth = depth,
                                    H0 = H0,
                                    H = ham_system$compute_H(integrator_result$z1),
                                    valid_subtree = is.na(integrator_result$integrator_error),
                                    uturn = FALSE,
                                    integrator_error = integrator_result$integrator_error,
                                    num_grad = integrator_result$num_grad,
                                    num_hess = integrator_result$num_hess,
                                    num_hess_vec = integrator_result$num_hess_vec,
                                    num_newton = integrator_result$num_newton,
                                    DEBUG = DEBUG)
  }
  # recursion: build two (depth-1) subtrees and join them
  else{
    inner_subtree <- build_tree(depth-1, z0, z_1, z_2, direction, ham_system, H0, integrate_step, DEBUG)
    # only build outer subtree and tack it on if inner subtree was valid. otherwise
    # just return the inner_subtree
    if (inner_subtree$valid) {
      # assume direction is forward in which case we build outer subtree starting
      # from z_plus. If direction is backward then we start from z_minus
      z0.outer <- inner_subtree$z_plus
      if (direction == -1) {
        z0.outer <- inner_subtree$z_minus
      }
      # build outer_subtree and join even if it's invalid because we might
      # want to view its history.
      outer_subtree <- build_tree(depth-1, z0.outer, z_1, z_2, direction, ham_system, H0, integrate_step, DEBUG)
      new_tree <- join_subtrees(inner_subtree, outer_subtree, direction, biased_progressive_sampling = FALSE, ham_system, DEBUG)
    } else {
      new_tree <- inner_subtree
    }
  }
  # if we're in debug mode update the depth of the subtree in the history tibble. note that this has to be
  # done here in build_tree because join_trees is also used in the main NUTS call where we don't want these depths to be updated in this manner
  if (DEBUG) {
    # local copy so mutate() refers to the argument rather than the hist column named `depth`
    depth_ <- depth
    new_tree$hist <- new_tree$hist %>%
      mutate(depth = depth_) %>%
      mutate(valid_subtree = new_tree$valid)
  }
  new_tree
}
#' Join two subtrees.
#'
#' Joins an inner subtree and an outer subtree.
#'
#' This function is called either in sampling, where we tack a new subtree onto
#' the current tree, or in build_tree() where we create two trees and join them.
#'
#' Both trees passed in are non-null but not necessarily the same size. The
#' inner tree can be assumed valid, but not the outer: if the outer subtree
#' stopped early because it was invalid, the representative sample stays the
#' inner subtree's.
#'
#' The representative sample follows Betancourt's continuous sampling of
#' subtrees, where every tree has a representative node z_rep. For a 1-node
#' tree the representative is that one node; for larger trees it is sampled as
#' described in the sample_new_representative_* functions.
#'
#' @param inner_subtree The valid and non-null subtree created first.
#' @param outer_subtree The subtree created second as an extension of the end of the inner subtree.
#' @param direction the direction the trees were built; determines if outer is added on the plus side or minus side
#' @param biased_progressive_sampling whether to sample the new representative biasedly (should only be done in the original call from NUTS)
#' @param ham_system the hamiltonian system of the problem
#' @param DEBUG whether to include the history tibble
#'
#' @return A new joined tree.
#' @export
#'
#' @examples
join_subtrees <- function(inner_subtree, outer_subtree, direction, biased_progressive_sampling, ham_system, DEBUG) {
  # The joined tree starts life as a copy of the inner subtree; fields that
  # differ (depth, weight, endpoints, validity, counters) are patched below.
  tree <- inner_subtree
  tree$depth <- outer_subtree$depth + 1
  # Combine the two subtree weights on the log scale with the log-sum-exp
  # trick; the naive log(exp(a) + exp(b)) overflows/underflows once |log w|
  # grows large. If both weights are zero (-Inf) the total is -Inf as before.
  log_w_max <- max(inner_subtree$log_w, outer_subtree$log_w)
  if (is.finite(log_w_max)) {
    tree$log_w <- log_w_max +
      log1p(exp(min(inner_subtree$log_w, outer_subtree$log_w) - log_w_max))
  } else {
    tree$log_w <- log_w_max
  }
  # Only resample the representative if the outer subtree is valid; an
  # invalid outer subtree contributes no states to sample from.
  if (outer_subtree$valid) {
    if (biased_progressive_sampling) {
      tree$z_rep <- sample_new_representative_biasedly(inner_subtree, outer_subtree)
    } else {
      tree$z_rep <- sample_new_representative_uniformly(inner_subtree, outer_subtree)
    }
  }
  # Update endpoint bookkeeping: z_plus/z_minus plus the one-in (z_*_1) and
  # two-in (z_*_2) neighbours. Depth-0 subtrees carry NULL neighbours, so
  # joined trees of depth 1 and 2 have theirs filled in explicitly below.
  if (direction == 1) {
    tree$z_plus <- outer_subtree$z_plus
    tree$z_plus_1 <- outer_subtree$z_plus_1
    tree$z_plus_2 <- outer_subtree$z_plus_2
  } else {
    tree$z_minus <- outer_subtree$z_minus
    tree$z_minus_1 <- outer_subtree$z_minus_1
    tree$z_minus_2 <- outer_subtree$z_minus_2
  }
  if (tree$depth == 1) {
    # Two nodes total: each endpoint's inward neighbour is the other endpoint.
    tree$z_plus_1 <- tree$z_minus
    tree$z_minus_1 <- tree$z_plus
  }
  if (tree$depth == 2) {
    # Four nodes total: neither depth-1 half carried a two-in neighbour yet.
    if (direction == 1) {
      tree$z_plus_2 <- inner_subtree$z_plus
      tree$z_minus_2 <- outer_subtree$z_minus
    } else {
      tree$z_plus_2 <- outer_subtree$z_plus
      tree$z_minus_2 <- inner_subtree$z_minus
    }
  }
  # The joined tree is valid only if both halves are valid and no U-Turn is
  # detected between the new z_minus and z_plus endpoints.
  both_subtrees_valid <- inner_subtree$valid && outer_subtree$valid
  tree$coordinate_uturn <- update_coordinate_uturn(tree, ham_system)
  nouturn_criteria_met <- check_uturn_criteria(tree, ham_system)
  tree$valid <- both_subtrees_valid && nouturn_criteria_met
  # Propagate an integrator error from the outer subtree if that is what
  # stopped it; otherwise reset the flag (typed character NA, not logical NA).
  if (!outer_subtree$valid && !is.na(outer_subtree$integrator_error)) {
    tree$integrator_error <- outer_subtree$integrator_error
  } else {
    tree$integrator_error <- NA_character_
  }
  # Work counters are additive across the two halves.
  tree$num_grad <- sum(inner_subtree$num_grad, outer_subtree$num_grad, na.rm = TRUE)
  tree$num_hess <- sum(inner_subtree$num_hess, outer_subtree$num_hess, na.rm = TRUE)
  tree$num_hess_vec <- sum(inner_subtree$num_hess_vec, outer_subtree$num_hess_vec, na.rm = TRUE)
  tree$num_newton <- sum(inner_subtree$num_newton, outer_subtree$num_newton, na.rm = TRUE)
  # Merge the step-by-step histories only in debug mode.
  if (DEBUG) {
    tree$hist <- join_tree_histories(inner_subtree, outer_subtree, direction, nouturn_criteria_met, both_subtrees_valid)
  }
  tree
}
# Merge the history tibbles of two joined subtrees (debug mode only).
# Rows are ordered from the minus end of the trajectory to the plus end,
# and the span is marked invalid/U-turned when this join is what failed.
join_tree_histories <- function(inner_subtree, outer_subtree, direction, nouturn_criteria_met, both_subtrees_valid) {
  # Concatenate so rows always run minus-end -> plus-end, regardless of the
  # direction we integrated in.
  if (direction == 1) {
    new_hist <- bind_rows(inner_subtree$hist, outer_subtree$hist)
  } else {
    new_hist <- bind_rows(outer_subtree$hist, inner_subtree$hist)
  }
  # A U-Turn detected here (both halves valid, criterion failed) is what
  # invalidated this subtree: mark the whole span invalid and flag the two
  # endpoint rows. If a half was already invalid, the failure happened in an
  # earlier, deeper subtree and was recorded there.
  if (!nouturn_criteria_met && both_subtrees_valid) {
    new_hist$valid_subtree <- FALSE
    new_hist$uturn[c(1, nrow(new_hist))] <- TRUE
  }
  new_hist
}
#' Take a uniform sample over the joined trajectory.
#'
#' Samples a representative for the joined tree: the outer subtree's
#' representative is adopted with probability w_new / (w_old + w_new) and the
#' inner subtree's kept otherwise, so every node of the joined trajectory is
#' equally likely to end up as the representative.
#'
#' @param inner_subtree a valid inner subtree
#' @param outer_subtree a valid outer subtree
#'
#' @return either the representative sample from the inner or outer subtree
#' @export
#'
#' @examples
sample_new_representative_uniformly <- function(inner_subtree, outer_subtree) {
  new_z_rep <- inner_subtree$z_rep
  log_w_old <- inner_subtree$log_w
  log_w_new <- outer_subtree$log_w
  # log(w_old + w_new) via log-sum-exp so that large |log_w| values don't
  # overflow/underflow the way the naive log(exp(a) + exp(b)) does.
  log_w_max <- max(log_w_old, log_w_new)
  if (is.finite(log_w_max)) {
    log_w_total <- log_w_max + log1p(exp(min(log_w_old, log_w_new) - log_w_max))
  } else {
    log_w_total <- log_w_max
  }
  if (log_w_new >= log_w_total) {
    # The outer tree carries (essentially) all the weight; take its
    # representative outright. Also covers the degenerate all-zero-weight case.
    new_z_rep <- outer_subtree$z_rep
  } else {
    # Bernoulli draw with success probability w_new / (w_old + w_new); on
    # success the representative comes from the outer subtree, otherwise it
    # stays the inner subtree's representative.
    if (runif(1) <= exp(log_w_new - log_w_total)) {
      new_z_rep <- outer_subtree$z_rep
    }
  }
  new_z_rep
}
#' Take a biased sample over the joined trajectory.
#'
#' Biased progressive sampling: when the new (outer) subtree has at least the
#' weight of the old (inner) one, its representative is adopted with
#' probability one. Otherwise the outer representative is adopted with
#' probability w_new / w_old, and the inner representative is kept with the
#' complementary probability. This deliberately favours later states, pushing
#' samples away from the trajectory's starting point.
#'
#' @param inner_subtree a valid inner_subtree
#' @param outer_subtree a valid outer_Subtree
#'
#' @return either representative sample from the left or right subtree
#' @export
#'
#' @examples
sample_new_representative_biasedly <- function(inner_subtree, outer_subtree) {
  log_w_old <- inner_subtree$log_w
  log_w_new <- outer_subtree$log_w
  # New subtree dominates: take its representative with probability one.
  if (log_w_new >= log_w_old) {
    return(outer_subtree$z_rep)
  }
  # Otherwise flip a coin with success probability w_new / w_old.
  if (runif(1) <= exp(log_w_new - log_w_old)) {
    return(outer_subtree$z_rep)
  }
  inner_subtree$z_rep
}
#' Check that there's NO U-Turn
#'
#' Returns TRUE if there's no U-Turn in either direction, i.e. the classic
#' NUTS termination criterion: both trajectory endpoints are still moving
#' apart when projected onto the chord between them.
#'
#' @param tree the joined tree we're checking the U-Turn of
#' @param ham_system the Hamiltonian system for the problem (currently unused
#'   here; kept so all U-Turn checks share the same signature)
#'
#' @return TRUE if there's no U-Turn and FALSE if there is
#' @export
#'
#' @examples
check_uturn_criteria <- function(tree, ham_system) {
  q_plus <- tree$z_plus$q
  q_minus <- tree$z_minus$q
  # NOTE(review): the endpoint momenta are used directly here rather than the
  # velocities M^{-1} p (contrast update_coordinate_uturn, which does
  # solve(M, p)). That matches the standard criterion when M is the identity;
  # confirm this is intended for general mass matrices.
  p_plus <- tree$z_plus$p
  p_minus <- tree$z_minus$p
  # p_plus . (q_plus - q_minus) > 0 : the plus end is still moving away from
  # the minus end; the mirrored expression covers the backward direction.
  no_uturn_forward <- as.numeric(p_plus %*% (q_plus - q_minus)) > 0
  no_uturn_backward <- as.numeric(-p_minus %*% (q_minus - q_plus)) > 0
  no_uturn_forward & no_uturn_backward
}
#' Check that there's NO U-Turn using the new generalized criteria.
#'
#' Returns TRUE if there's no U-Turn in either direction.
#'
#' NOTE(review): this is currently the same computation as
#' check_uturn_criteria (endpoint momenta dotted with the chord between the
#' endpoints); the intended generalization does not appear to be implemented
#' yet.
#'
#' @param tree the joined tree we're checking the U-Turn of
#' @param ham_system the Hamiltonian system for the problem (currently unused
#'   here; kept so all U-Turn checks share the same signature)
#'
#' @return TRUE if there's no U-Turn and FALSE if there is
#' @export
#'
#' @examples
check_generalized_uturn_criteria <- function(tree, ham_system) {
  q_plus <- tree$z_plus$q
  q_minus <- tree$z_minus$q
  p_plus <- tree$z_plus$p
  p_minus <- tree$z_minus$p
  # Endpoints must still be moving apart along the chord joining them.
  no_uturn_forward <- as.numeric(p_plus %*% (q_plus - q_minus)) > 0
  no_uturn_backward <- as.numeric(-p_minus %*% (q_minus - q_plus)) > 0
  no_uturn_forward & no_uturn_backward
}
#' Check for U-Turns at the coordinate level
#'
#' For each coordinate, the sign of q_plus - q_minus says which way that
#' dimension has travelled; multiplying by the velocity in that dimension
#' says whether it is still heading the same way. A negative product means
#' that single dimension has U-Turned. The per-dimension flags are
#' accumulated with a sticky OR so that once a coordinate U-Turns it stays
#' flagged. Tracked because requiring ALL dimensions to U-Turn may be a
#' useful alternative stopping criterion.
#'
#' @param tree the newly joined tree we're going to check
#' @param ham_system the Hamiltonian system for the problem
#'
#' @return a logical vector tracking the U-Turn status of each coordinate
#' @export
#'
#' @examples
update_coordinate_uturn <- function(tree, ham_system) {
  delta_q <- tree$z_plus$q - tree$z_minus$q
  # Convert endpoint momenta to velocities via the mass matrix: v = M^{-1} p.
  vel_plus <- solve(ham_system$M, tree$z_plus$p)
  vel_minus <- solve(ham_system$M, tree$z_minus$p)
  # TRUE in a dimension means that dimension has U-Turned at this join.
  uturned_forward <- vel_plus * delta_q < 0
  uturned_backward <- vel_minus * delta_q < 0
  tree$coordinate_uturn | uturned_forward | uturned_backward
}
|
# Tutorial script: visualising distributions with ggplot2 (bar charts,
# histograms, density plots). Each plot call is a deliberate incremental
# refinement of the previous one, so lines are meant to be run one at a time.
library(tidyverse)
## Data
library(babynames)
head(babynames)
tail(babynames)
# Split the data by sex and restrict to recent years for later examples.
girls <- subset(babynames, sex=="F")
girls
boys <- subset(babynames, sex=="M")
boys
since1950 <- subset(babynames, year>=1950)
since1950
## Number of unique names per year.
# geom_bar() counts rows per x value, and each row of babynames is one
# name/sex/year combination, so bar height = number of distinct names.
ggplot(boys)
ggplot(boys, aes(x=year))
ggplot(boys, aes(x=year)) + geom_bar()
ggplot(girls, aes(x=year)) + geom_bar()
ggplot(babynames, aes(x=year)) + geom_bar() + facet_wrap(~sex)
ggplot(babynames, aes(x=year)) + geom_bar(width=.7) + facet_wrap(~sex)
## Same data can be presented in other ways:
# density plot
ggplot(babynames, aes(x=year)) + geom_density() + facet_wrap(~sex)
# histograms
ggplot(babynames, aes(x=year)) + geom_histogram() + facet_wrap(~sex)
ggplot(babynames, aes(x=year)) + geom_histogram(binwidth = 2) + facet_wrap(~sex)
ggplot(babynames, aes(x=year)) + geom_histogram(binwidth = 1) + facet_wrap(~sex)
ggplot(babynames, aes(x=year)) +
  geom_histogram(binwidth = 1, color="darkseagreen", fill="white") +
  facet_wrap(~sex)
#girls boys on same chart: stacked by default, side-by-side with "dodge"
ggplot(since1950, aes(x=year, fill=sex)) + geom_bar()
ggplot(since1950, aes(x=year, fill=sex)) + geom_bar(position='dodge')
## If counts are pre-known use stat= identity (bar height taken from y, not counted)
patricia <- babynames %>% filter(name=="Patricia")
patricia
patricia %>% arrange(-n)
ggplot(patricia, aes(x=year, y=n)) + geom_bar(stat='identity')
ggplot(patricia, aes(x=year, y=n)) + geom_bar(stat='identity', color='black', lwd=.5, fill='gray33',width=.9)
# Can also use geom_col() for short
ggplot(patricia, aes(x=year, y=n)) + geom_col()
ggplot(patricia, aes(x=year, y=n)) + geom_col(color='firebrick', lwd=.3, fill='mistyrose',width=.9)
### Histograms ----
library(Lahman)
head(Batting)
# Career batting average per player: total hits over total at-bats.
Batting_sum <- Batting %>%
  group_by(playerID) %>%
  summarise(totalH = sum(H),
            totalAB = sum(AB),
            avg = totalH/totalAB
  )
# Drop small-sample players so the averages are meaningful.
Batting_sum <- Batting_sum %>% filter(totalAB>200)
hist(Batting_sum$avg) #quick look using base-r
ggplot(Batting_sum, aes(x=avg)) + geom_histogram()
ggplot(Batting_sum, aes(x=avg)) + geom_histogram(color='darkgreen',fill='lightgreen')
ggplot(Batting_sum, aes(x=avg)) + geom_histogram(bins = 150,color='darkgreen',fill='lightgreen')
ggplot(Batting_sum, aes(x=avg)) + geom_histogram(binwidth = .005, color='darkgreen',fill='lightgreen')
ggplot(Batting_sum, aes(x=avg)) + geom_density()
ggplot(Batting_sum, aes(x=avg)) + geom_density(fill='mistyrose')
# default for histogram is count, but can make it density like this
# NOTE(review): ..density.. is the superseded spelling; recent ggplot2
# prefers after_stat(density).
ggplot(Batting_sum, aes(x=avg)) + geom_histogram(aes(y=..density..),color='darkgreen',fill='lightgreen')
## Add a line (dashed reference at a .300 average)
ggplot(Batting_sum, aes(x=avg)) +
  geom_histogram(bins = 150,color='darkgreen',fill='lightgreen') +
  geom_vline(aes(xintercept=0.3), color="black", linetype="dashed", size=1)
### Side by side Histograms----
# e.g. players prior to 1920 vs players after 1990
Batting_early <- Batting %>%
  filter(yearID<=1920) %>%
  group_by(playerID) %>%
  summarise(totalH = sum(H),
            totalAB = sum(AB),
            avg = totalH/totalAB
  ) %>%
  mutate(period='early')
Batting_late <- Batting %>%
  filter(yearID>=1990) %>%
  group_by(playerID) %>%
  summarise(totalH = sum(H),
            totalAB = sum(AB),
            avg = totalH/totalAB
  ) %>%
  mutate(period='late')
Batting_early
Batting_late
# Stack the two cohorts so `period` can be mapped to fill.
Batting_all <- rbind(Batting_early, Batting_late)
Batting_all <- Batting_all %>% filter(totalAB>100)
# Overlaid histograms
ggplot(Batting_all, aes(x=avg, fill=period)) + geom_histogram(position="identity")
ggplot(Batting_all, aes(x=avg, fill=period)) + geom_histogram(position="identity", alpha=.7, binwidth=.005)
ggplot(Batting_all, aes(x=avg, fill=period)) + geom_histogram(position="identity", alpha=.7, binwidth=.005) + facet_wrap(~period)
# NOTE(review): geom_density() has no binwidth argument; it is ignored (with
# a warning in current ggplot2). Bandwidth is controlled via bw/adjust.
ggplot(Batting_all, aes(x=avg, fill=period)) + geom_density(alpha=.7, binwidth=.005)
# Interleaved histograms
ggplot(Batting_all, aes(x=avg, fill=period)) + geom_histogram(position="dodge")
### Extra: Back to Back histograms (see advanced tutorial)
# e.g. popularity of unisex names like Skylar over decades
# e.g. population pyramids
|
/ggplot/003_ggplot_distributions.R
|
no_license
|
jalapic/learnR
|
R
| false
| false
| 4,339
|
r
|
library(tidyverse)
## Data
library(babynames)
head(babynames)
tail(babynames)
girls <- subset(babynames, sex=="F")
girls
boys <- subset(babynames, sex=="M")
boys
since1950 <- subset(babynames, year>=1950)
since1950
## Number of unique names per year ----
# Build the plot up layer by layer: data, then aesthetics, then geometry.
ggplot(data = boys)
ggplot(data = boys, mapping = aes(x = year))
ggplot(data = boys, mapping = aes(x = year)) + geom_bar()
ggplot(data = girls, mapping = aes(x = year)) + geom_bar()
ggplot(data = babynames, mapping = aes(x = year)) + geom_bar() + facet_wrap(~sex)
ggplot(data = babynames, mapping = aes(x = year)) + geom_bar(width = 0.7) + facet_wrap(~sex)

## Same data can be presented in other ways:
# density plot
ggplot(data = babynames, mapping = aes(x = year)) + geom_density() + facet_wrap(~sex)
# histograms
ggplot(data = babynames, mapping = aes(x = year)) + geom_histogram() + facet_wrap(~sex)
ggplot(data = babynames, mapping = aes(x = year)) + geom_histogram(binwidth = 2) + facet_wrap(~sex)
ggplot(data = babynames, mapping = aes(x = year)) + geom_histogram(binwidth = 1) + facet_wrap(~sex)
ggplot(data = babynames, mapping = aes(x = year)) +
  geom_histogram(binwidth = 1, color = "darkseagreen", fill = "white") +
  facet_wrap(~sex)

# Girls and boys on the same chart: stacked, then side by side.
ggplot(data = since1950, mapping = aes(x = year, fill = sex)) + geom_bar()
ggplot(data = since1950, mapping = aes(x = year, fill = sex)) + geom_bar(position = "dodge")

## If counts are pre-computed, use stat = "identity" (or geom_col()).
patricia <- babynames %>% filter(name == "Patricia")
patricia
patricia %>% arrange(desc(n))
ggplot(data = patricia, mapping = aes(x = year, y = n)) + geom_bar(stat = "identity")
ggplot(data = patricia, mapping = aes(x = year, y = n)) +
  geom_bar(stat = "identity", color = "black", lwd = 0.5, fill = "gray33", width = 0.9)
# geom_col() is shorthand for geom_bar(stat = "identity")
ggplot(data = patricia, mapping = aes(x = year, y = n)) + geom_col()
ggplot(data = patricia, mapping = aes(x = year, y = n)) +
  geom_col(color = "firebrick", lwd = 0.3, fill = "mistyrose", width = 0.9)
### Histograms ----
library(Lahman)  # season-level batting statistics
head(Batting)

# Career batting average per player, keeping only players with > 200 at-bats.
Batting_sum <- Batting %>%
  group_by(playerID) %>%
  summarise(
    totalH = sum(H),
    totalAB = sum(AB),
    avg = totalH / totalAB
  ) %>%
  filter(totalAB > 200)

hist(Batting_sum$avg)  # quick look using base R

ggplot(Batting_sum, aes(x = avg)) + geom_histogram()
ggplot(Batting_sum, aes(x = avg)) + geom_histogram(color = "darkgreen", fill = "lightgreen")
ggplot(Batting_sum, aes(x = avg)) + geom_histogram(bins = 150, color = "darkgreen", fill = "lightgreen")
ggplot(Batting_sum, aes(x = avg)) + geom_histogram(binwidth = 0.005, color = "darkgreen", fill = "lightgreen")
ggplot(Batting_sum, aes(x = avg)) + geom_density()
ggplot(Batting_sum, aes(x = avg)) + geom_density(fill = "mistyrose")
# the default y for a histogram is count; map density instead like this
ggplot(Batting_sum, aes(x = avg)) + geom_histogram(aes(y = ..density..), color = "darkgreen", fill = "lightgreen")
## Add a reference line
ggplot(Batting_sum, aes(x = avg)) +
  geom_histogram(bins = 150, color = "darkgreen", fill = "lightgreen") +
  geom_vline(aes(xintercept = 0.3), color = "black", linetype = "dashed", size = 1)
### Side-by-side histograms ----
# Compare players active up to 1920 against players active from 1990 on.
Batting_early <- Batting %>%
  filter(yearID <= 1920) %>%
  group_by(playerID) %>%
  summarise(totalH = sum(H),
            totalAB = sum(AB),
            avg = totalH/totalAB
  ) %>%
  mutate(period = 'early')
Batting_late <- Batting %>%
  filter(yearID >= 1990) %>%
  group_by(playerID) %>%
  summarise(totalH = sum(H),
            totalAB = sum(AB),
            avg = totalH/totalAB
  ) %>%
  mutate(period = 'late')
Batting_early
Batting_late
# Stack the two periods and drop players with only a few at-bats.
Batting_all <- rbind(Batting_early, Batting_late)
Batting_all <- Batting_all %>% filter(totalAB > 100)
# Overlaid histograms
ggplot(Batting_all, aes(x = avg, fill = period)) + geom_histogram(position = "identity")
ggplot(Batting_all, aes(x = avg, fill = period)) + geom_histogram(position = "identity", alpha = .7, binwidth = .005)
ggplot(Batting_all, aes(x = avg, fill = period)) + geom_histogram(position = "identity", alpha = .7, binwidth = .005) + facet_wrap(~period)
# FIX: geom_density() has no `binwidth` argument (its smoothing is controlled by
# `bw`/`adjust`); the original `binwidth = .005` was ignored by ggplot2 with an
# "Ignoring unknown parameters" warning, so it is dropped here.
ggplot(Batting_all, aes(x = avg, fill = period)) + geom_density(alpha = .7)
# Interleaved histograms
ggplot(Batting_all, aes(x = avg, fill = period)) + geom_histogram(position = "dodge")
### Extra: back-to-back histograms (see advanced tutorial)
# e.g. popularity of unisex names like Skylar over decades
# e.g. population pyramids
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ts_normalization.R
\name{ts_normalization}
\alias{ts_normalization}
\title{Normalize univariate timeseries}
\usage{
ts_normalization(
data,
length_val,
length_test,
value_col = "value",
joined = TRUE,
metrics = FALSE
)
}
\arguments{
\item{data}{univariate time series (data.frame / data.table)}
\item{length_val}{length for validation set}
\item{length_test}{length for test set}
\item{value_col}{column(s) to normalize, searched by starting pattern. E.g.
\code{value_col = "index"} will catch column "index" and "index_2" but not
"2_index"}
\item{joined}{joined normalization for same pattern? TRUE by default. See
section "Joined value columns" for details}
\item{metrics}{return data only or list of data and metrics?}
}
\value{
Depending on \code{metrics}, processed DT object or list of "data" and
"metrics" (center and scale)
}
\description{
Normalize univariate timeseries
}
\section{Joined value columns}{
Joined means to normalize all columns detected by pattern with the one column
exactly matching. Watch out for this condition to hold if \code{joined = TRUE}.\cr
\code{joined} is of particular use for lagged time series. E.g. column "value"
should be used to normalize not only column "value" but also "value_lag1" etc.
}
\examples{
# without metrics
DT_norm <- ts_normalization(tsRNN::DT_apple, 10, 10); DT_norm
# with metrics
ts_normalization(tsRNN::DT_apple, 10, 10, metrics = TRUE)
}
|
/man/ts_normalization.Rd
|
permissive
|
thfuchs/tsRNN
|
R
| false
| true
| 1,500
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ts_normalization.R
\name{ts_normalization}
\alias{ts_normalization}
\title{Normalize univariate timeseries}
\usage{
ts_normalization(
data,
length_val,
length_test,
value_col = "value",
joined = TRUE,
metrics = FALSE
)
}
\arguments{
\item{data}{univariate time series (data.frame / data.table)}
\item{length_val}{length for validation set}
\item{length_test}{length for test set}
\item{value_col}{column(s) to normalize, searched by starting pattern. E.g.
\code{value_col = "index"} will catch column "index" and "index_2" but not
"2_index"}
\item{joined}{joined normalization for same pattern? TRUE by default. See
section "Joined value columns" for details}
\item{metrics}{return data only or list of data and metrics?}
}
\value{
Depending on \code{metrics}, processed DT object or list of "data" and
"metrics" (center and scale)
}
\description{
Normalize univariate timeseries
}
\section{Joined value columns}{
Joined means to normalize all columns detected by pattern with the one column
exactly matching. Watch out for this condition to hold if \code{joined = TRUE}.\cr
\code{joined} is of particular use for lagged time series. E.g. column "value"
should be used to normalize not only column "value" but also "value_lag1" etc.
}
\examples{
# without metrics
DT_norm <- ts_normalization(tsRNN::DT_apple, 10, 10); DT_norm
# with metrics
ts_normalization(tsRNN::DT_apple, 10, 10, metrics = TRUE)
}
|
gr_ll_flexrsurv_fromto_GA0B0ABE0Br0PeriodControl<-function(allparam,
Y, X0, X, Z, W,
BX0,
Id, FirstId, LastId=NULL,
expected_rate,
expected_logit_end,
expected_logit_enter,
expected_logit_end_byperiod,
expected_logit_enter_byperiod,
weights_byperiod,
Id_byperiod,
weights=NULL,
Ycontrol, BX0control,
weightscontrol=NULL,
Idcontrol, FirstIdcontrol,
expected_ratecontrol,
expected_logit_endcontrol,
expected_logit_entercontrol,
expected_logit_end_byperiodcontrol,
expected_logit_enter_byperiodcontrol,
weights_byperiodcontrol,
Id_byperiodcontrol,
step, Nstep,
intTD=intTDft_NC, intweightsfunc=intweights_CAV_SIM,
intTD_base=intTDft_base_NC,
intTD_WCEbase=intTDft_WCEbase_NC,
nT0basis,
Spline_t0=BSplineBasis(knots=NULL, degree=3, keep.duplicates=TRUE), Intercept_t0=TRUE,
ialpha0, nX0,
ibeta0, nX,
ialpha,
ibeta,
nTbasis,
ieta0, iWbeg, iWend, nW,
Spline_t =BSplineBasis(knots=NULL, degree=3, keep.duplicates=TRUE),
Intercept_t_NPH=rep(TRUE, nX),
ISpline_W =MSplineBasis(knots=NULL, degree=3, keep.duplicates=TRUE),
Intercept_W=TRUE,
nBbasis,
Spline_B, Intercept_B=TRUE,
ibrass0, nbrass0,
ibalpha0, nBX0,
debug.gr=TRUE, ...){
# same as ll_flexrsurv_fromto_GA0B0ABE0Br0.R but with a control group
# compute log likelihood of the relative survival model
# excess rate = exp( f(t)%*%gamma + X0%*%alpha0 + X%*%beta0(t) + sum( alphai(zi)betai(t) + sum ( wce(Wi , eta0i)(t)) ))
#################################################################################################################
#################################################################################################################
# the coef of the first t-basis is constraint to 1 for nat-spline, and n-sum(other beta) if bs using expand() method
#################################################################################################################
#################################################################################################################
#################################################################################################################
# allparam ; vector of all coefs
# gamma0 = allparam[1:nY0basis]
# alpha0= allparam[ialpha0]
# beta0= matrix(allparam[ibeta0], ncol=nX, nrow=nTbasis)
# alpha= diag(allparam[ialpha])
# beta= expand(matrix(allparam[ibeta], ncol=Z@nZ, nrow=nTbasis-1))
# beta does not contains coef for the first t-basis
# eta0 = allparam[ieta0]
# brass0 = allparam[ibrass0]
# balpha0 = allparam[ibalpha0]
# corection of lifetable according to generalized brass method
# Cohort-independent generalized Brass model in an age-cohort table
# stratified brass model according to fixed effects BX0 (one brass function per combination)
# for control group
# rate = brass0(expected-ratecontrol, expected_logitcontrol)*exp(BX0control balpha0)
# but for exposed
# rate = brass0(expected-rate, expected_logit)*exp(BX0 balpha0) + exp(gamma0(t) + time-independent effect(LL + NLL)(X0) + NPH(X) + NPHNLL(Z) + WCE(W))
# brass0 : BRASS model wiht parameter Spline_B
# logit(F) = evaluate(Spline_B, logit(F_pop), brass0) * exp(Balpha %*% BX0)
# HCum(t_1, t_2) = log(1 + exp(evaluate(Spline_B, logit(F_pop(t_2)), brass0)) - log(1 + exp(evaluate(Spline_B, logit(F_pop(t_1)), brass0))
# rate(t_1) = rate_ref * (1 + exp(-logit(F_pop(t)))/(1 + exp(evaluate(Spline_B, logit(F_pop(t)), brass0)))*
# evaluate(deriv(Spline_B), logit(F_pop(t)), brass0)
# expected_logit_end = logit(F_pop(Y[,2]))
# expected_logit_enter = logit(F_pop(Y[,1]))
# brass0 = allparam[ibrass0]
# Spline_B : object of class "AnySplineBasis" (suitable for Brass model) with method deriv() and evaluate()
# IMPORTANT : the coef of the first basis is constraints to one and evaluate(deriv(spline_B), left_boundary_knots) == 1 for Brass transform
#
# parameters for exposed group
#################################################################################################################
# Y : object of class Surv but the matrix has 4 columns :
# Y[,1] beginning(1) , fromT
# Y[,2] end(2), toT,
# Y[,3] status(3) fail
# Y[,4] end of followup(4)
# end of followup is assumed constant by Id
# X0 : non-time dependante variable (may contain spline bases expended for non-loglinear terms)
# X : log lineair but time dependante variable
# Z : object of class "DesignMatrixNPHNLL" time dependent variables (spline basis expended)
# W : Exposure variables used in Weighted Cumulative Exposure Models
# BX0 : non-time dependante variable for the correction of life table (may contain spline bases expended for non-loglinear terms)
# Id : variable indicating individuals' Id; lines with the same Id are considered to be from the same individual
# FirstId : all lines in FirstId[iT]:iT in the data comes from the same individual
# expected_rate : expected rate at event time T
# expected_logit_end : logit of the expected survival at the end of the followup
# expected_logit_enter : logit of the expected survival at the beginning of the followup
# weights : vector of weights : LL = sum_i w_i ll_i
# expected_logit_end_byperiod, : expected logit of periode survival at exit of each period (used in the Brass model
# expected_logit_enter_byperiod, : expected logit of periode survival at entry of each period (used in the Brass model
# weights_byperiod, : weight of each period (used in the Brass model weights_byperiod = weight[Id_byperiod]
# Id_byperiod, : index in the Y object : XX_byperiod[i] corrsponds to the row Id_byperiod[i] of Y, X, Z, ...
# parameters for exposd population
#################################################################################################################
# parameters for exposed group
#################################################################################################################
# Ycontrol : object of class Surv but the matrix has 4 columns :
# Ycontrol[,1] beginning(1) , fromT
# Ycontrol[,2] end(2), toT,
# Ycontrol[,3] status(3) fail
# Ycontrol[,4] end of followup(4)
# end of followup is assumed constant by Id
# BX0control : non-time dependante variable for the correction of life table (may contain spline bases expended for non-loglinear terms)
# Idcontrol : variable indicating individuals' Id; lines with the same Id are considered to be from the same individual
# FirstIdcontrol : all lines in FirstId[iT]:iT in the data comes from the same individual
# expected_ratecontrol : expected rate at event time T
# expected_logit_endcontrol : logit of the expected survival at the end of the followup
# expected_logit_entercontrol : logit of the expected survival at the beginning of the followup
# weightscontrol : vector of weights : LL = sum_i w_i ll_i
# expected_logit_end_byperiodcontrol, : expected logit of periode survival at exit of each period (used in the Brass model
# expected_logit_enter_byperiodcontrol, : expected logit of periode survival at entry of each period (used in the Brass model
# weights_byperiodcontrol, : weight of each period (used in the Brass model weights_byperiod = weight[Id_byperiod]
# Id_byperiodcontrol, : index in the Y object : XX_byperiod[i] corrsponds to the row Id_byperiod[i] of Y, X, Z, ...
#################################################################################################################
# model parameters
# step : object of class "NCLagParam" or "GLMLagParam"
# Nstep : number of lag for each observation
# intTD : function to perform numerical integration
# intweightfunc : function to compute weightsfor numerical integration
# nT0basis : number of spline basis
# Spline_t0, spline object for baseline hazard, with evaluate() method
# Intercept_t0=FALSE, option for evaluate, = TRUE all the basis, =FALSE all but first basis
# nTbasis : number of time spline basis for NPH or NLL effects
# nX0 : nb of PH variables dim(X0)=c(nobs, nX0)
# nX : nb of NPHLIN variables dim(X)=c(nobs, nX)
# Spline_t, spline object for time dependant effects, with evaluate() method
# Intercept_t_NPH vector of intercept option for NPH spline (=FALSE when X is NLL too, ie in case of remontet additif NLLNPH)
# nW : nb of WCE variables dim(W)=c(nobs, nW)
# iWbeg, iWend : coef of the ith WCE variable is eta0[iWbeg[i]:iWend[i]]
# ISpline_W, list of nW spline object for WCE effects, with evaluate() method
# ISpline is already integreted
# ... not used args
# the function does not check the concordance between the length of the parameter vectors and the number of knots and the Z.signature
# returned value : the log likelihood of the model
#cat("************gr_flexrsurv_fromto_1WCEaddBr0Control ")
#print(format(allparam, scientific = TRUE, digits=12))
################################################################################
# excess rate
if(is.null(Z)){
nZ <- 0
Zalphabeta <- NULL
} else {
nZ <- Z@nZ
}
# LastId
if(is.null(LastId)){
first <- unique(FirstId)
nline <- c(first[-1],length(FirstId)+1)-first
LastId <- FirstId+rep(nline, nline)-1
}
if(is.null(Spline_t0)){
YT0 <- NULL
YT0Gamma0 <- 0.0
Spt0g <- NULL
igamma0 <- NULL
}
else {
igamma0 <- 1:nT0basis
if(Intercept_t0){
tmpgamma0 <- allparam[igamma0]
}
else {
tmpgamma0 <- c(0, allparam[igamma0])
}
# baseline hazard at the end of the interval
Spt0g <- Spline_t0*tmpgamma0
YT0Gamma0 <- predictSpline(Spt0g, Y[,2])
YT0 <- fevaluate(Spline_t0, Y[,2], intercept=Intercept_t0)
}
# contribution of non time dependant variables
if( nX0){
PHterm <-exp(X0 %*% allparam[ialpha0])
} else {
PHterm <- 1
}
# contribution of time-dependent effects
# parenthesis are important for efficiency
if(nZ) {
# add a row for the first basis
tBeta <- t(ExpandAllCoefBasis(allparam[ibeta], ncol=nZ, value=1))
# Zalpha est la matrice des alpha(Z)
# parenthesis important for speed ?
Zalpha <- Z@DM %*%( diag(allparam[ialpha]) %*% Z@signature )
Zalphabeta <- Zalpha %*% tBeta
if(nX) {
# add a row of 0 for the first T-basis when !Intercept_T_NPH
Zalphabeta <- Zalphabeta + X %*% t(ExpandCoefBasis(allparam[ibeta0],
ncol=nX,
splinebasis=Spline_t,
expand=!Intercept_t_NPH,
value=0))
}
} else {
if(nX) {
Zalphabeta <- X %*% t(ExpandCoefBasis(allparam[ibeta0],
ncol=nX,
splinebasis=Spline_t,
expand=!Intercept_t_NPH,
value=0))
}
else {
Zalphabeta <- NULL
}
}
if(nW) {
IS_W <- ISpline_W
eta0 <- allparam[ieta0]
for(iW in 1:nW){
if(Intercept_W[iW]){
IS_W[[iW]] <- ISpline_W[[iW]] * eta0[iWbeg[iW]:iWend[iW]]
}
else {
IS_W[[iW]]<- ISpline_W[[iW]] * c(0, eta0[iWbeg[iW]:iWend[iW]])
}
IntbW <- list()
}
if(identical(intweightsfunc , intweights_CAV_SIM, ignore.srcref=TRUE) ){
degree <- 2L
}
else if(identical(intweightsfunc , intweights_SIM_3_8, ignore.srcref=TRUE) ){
degree <- 3L
}
else if(identical(intweightsfunc , intweights_BOOLE, ignore.srcref=TRUE) ){
degree <- 4L
}
else {
degree <- 0L
}
if(nX + nZ) {
NPHterm <- intTD(rateTD_gamma0alphabetaeta0, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
step=step, Nstep=Nstep,
intweightsfunc=intweightsfunc,
fromT=Y[,1], toT=Y[,2], FirstId=FirstId, LastId=LastId,
gamma0=allparam[igamma0], Zalphabeta=Zalphabeta,
nW = nW, W = W, eta0=allparam[ieta0], iWbeg=iWbeg, iWend=iWend,
Spline_t0=Spt0g, Intercept_t0=Intercept_t0,
Spline_t = Spline_t, Intercept_t=TRUE,
ISpline_W = IS_W, Intercept_W=Intercept_W)
if(is.null(Spline_t0)){
Intb0 <- rep(0.0, dim(Y)[1])
} else {
Intb0 <- intTD_base(func=rateTD_gamma0alphabetaeta0, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
Spline=Spline_t0,
step=step, Nstep=Nstep, intweightsfunc=intweightsfunc,
fromT=Y[,1], toT=Y[,2], FirstId=FirstId, LastId=LastId,
gamma0=allparam[igamma0], Zalphabeta=Zalphabeta,
nW = nW, W = W, eta0=allparam[ieta0], iWbeg=iWbeg, iWend=iWend,
Spline_t0=Spt0g, Intercept_t0=Intercept_t0,
Spline_t = Spline_t, Intercept_t=TRUE,
ISpline_W = IS_W, Intercept_W=Intercept_W,
debug=debug.gr)
}
if( identical(Spline_t0, Spline_t)){
Intb <- Intb0
}
else {
Intb <- intTD_base(func=rateTD_gamma0alphabetaeta0, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
Spline=Spline_t,
step=step, Nstep=Nstep, intweightsfunc=intweightsfunc,
fromT=Y[,1], toT=Y[,2], FirstId=FirstId, LastId=LastId,
gamma0=allparam[igamma0], Zalphabeta=Zalphabeta,
nW = nW, W = W, eta0=allparam[ieta0], iWbeg=iWbeg, iWend=iWend,
Spline_t0=Spt0g, Intercept_t0=Intercept_t0,
Spline_t = Spline_t, Intercept_t=TRUE,
ISpline_W = IS_W, Intercept_W=Intercept_W,
debug=debug.gr)
}
if(!Intercept_t0 & !is.null(Spline_t0)){
Intb0<- Intb0[,-1]
}
indx_without_intercept <- 2:getNBases(Spline_t)
for(iW in 1:nW){
# in IntbW, the integrated WCE splines (parameter named Spline) are not scaled by eta0
IntbW[[iW]] <- intTD_WCEbase(func=rateTD_gamma0alphabetaeta0, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
Spline=ISpline_W[[iW]], intercept=Intercept_W[iW], theW=W[,iW],
step=step, Nstep=Nstep, degree=degree, intweightsfunc=intweightsfunc,
fromT=Y[,1], toT=Y[,2], FirstId=FirstId, LastId=LastId,
gamma0=allparam[igamma0], Zalphabeta=Zalphabeta,
nW = nW, W = W, eta0=allparam[ieta0], iWbeg=iWbeg, iWend=iWend,
Spline_t0=Spt0g, Intercept_t0=Intercept_t0,
Spline_t = Spline_t, Intercept_t=TRUE,
ISpline_W = IS_W, Intercept_W=Intercept_W,
debug=debug.gr)
}
}
else {
NPHterm <- intTD(rateTD_gamma0eta0, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
step=step, Nstep=Nstep,
intweightsfunc=intweightsfunc,
fromT=Y[,1], toT=Y[,2], FirstId=FirstId, LastId=LastId,
gamma0=allparam[igamma0],
nW = nW, W = W, eta0=allparam[ieta0], iWbeg=iWbeg, iWend=iWend,
Spline_t0=Spt0g, Intercept_t0=Intercept_t0,
ISpline_W = IS_W, Intercept_W=Intercept_W)
if(is.null(Spline_t0)){
Intb0 <- rep(0.0, dim(Y)[1])
} else {
Intb0 <- intTD_base(func=rateTD_gamma0eta0, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
Spline=Spline_t0,
step=step, Nstep=Nstep, intweightsfunc=intweightsfunc,
fromT=Y[,1], toT=Y[,2], FirstId=FirstId, LastId=LastId,
gamma0=allparam[igamma0], Zalphabeta=Zalphabeta,
nW = nW, W = W, eta0=allparam[ieta0], iWbeg=iWbeg, iWend=iWend,
Spline_t0=Spt0g, Intercept_t0=Intercept_t0,
Spline_t = Spline_t, Intercept_t=TRUE,
ISpline_W = IS_W, Intercept_W=Intercept_W,
debug=debug.gr)
if(!Intercept_t0){
Intb0<- Intb0[,-1]
}
}
Intb <- NULL
for(iW in 1:nW){
# in IntbW, the integrated WCE splines are not scaled by eta0
IntbW[[iW]] <- intTD_WCEbase(func=rateTD_gamma0eta0, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
Spline=ISpline_W[[iW]], intercept=Intercept_W[iW], theW=W[,iW],
step=step, Nstep=Nstep, degree=degree, intweightsfunc=intweightsfunc,
fromT=Y[,1], toT=Y[,2], FirstId=FirstId, LastId=LastId,
gamma0=allparam[igamma0],
nW = nW, W = W, eta0=allparam[ieta0], iWbeg=iWbeg, iWend=iWend,
Spline_t0=Spt0g, Intercept_t0=Intercept_t0,
Spline_t = Spline_t, Intercept_t=TRUE,
ISpline_W = IS_W, Intercept_W=Intercept_W,
debug=debug.gr)
}
}
}
else {
# no VCE effect, same NPH term than ll_flexrsurv_fromto_G0A0B0AB
if(nX + nZ) {
NPHterm <- intTD(rateTD_gamma0alphabeta, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
step=step, Nstep=Nstep,
intweightsfunc=intweightsfunc,
gamma0=allparam[igamma0], Zalphabeta=Zalphabeta,
Spline_t0=Spt0g, Intercept_t0=Intercept_t0,
Spline_t = Spline_t, Intercept_t=TRUE)
if(is.null(Spline_t0)){
Intb0 <- rep(0.0, dim(Y)[1])
} else {
Intb0 <- intTD_base(func=rateTD_gamma0alphabeta, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
Spline=Spline_t0,
step=step, Nstep=Nstep, intweightsfunc=intweightsfunc,
gamma0=allparam[igamma0], Zalphabeta=Zalphabeta,
Spline_t0=Spt0g, Intercept_t0=Intercept_t0,
Spline_t = Spline_t, Intercept_t=TRUE,
debug=debug.gr)
}
if( identical(Spline_t0, Spline_t)){
Intb <- Intb0
}
else {
Intb <- intTD_base(func=rateTD_gamma0alphabeta, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
Spline=Spline_t,
step=step, Nstep=Nstep, intweightsfunc=intweightsfunc,
gamma0=allparam[igamma0], Zalphabeta=Zalphabeta,
Spline_t0=Spt0g, Intercept_t0=Intercept_t0,
Spline_t = Spline_t, Intercept_t=TRUE)
}
if(!Intercept_t0 & !is.null(Spline_t0)){
Intb0<- Intb0[,-1]
}
indx_without_intercept <- 2:getNBases(Spline_t)
}
else {
NPHterm <- intTD(rateTD_gamma0, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
step=step, Nstep=Nstep, intweightsfunc=intweightsfunc,
gamma0=allparam[igamma0],
Spline_t0=Spt0g, Intercept_t0=Intercept_t0)
if(is.null(Spline_t0)){
Intb0 <- rep(0.0, dim(Y)[1])
} else {
Intb0 <- intTD_base(func=rateTD_gamma0, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
Spline=Spline_t0,
step=step, Nstep=Nstep, intweightsfunc=intweightsfunc,
gamma0=allparam[igamma0],
Spline_t0=Spt0g, Intercept_t0=Intercept_t0,
debug=debug.gr)
if(!Intercept_t0){
Intb0<- Intb0[,-1]
}
}
Intb <- NULL
}
}
################################################################################
################################################################################
################################################################################
################################################################################
################################################################################
################################################################################
################################################################################
################################################################################
################################################################################
################################################################################
################################################################################
#*****
# the contribution of the WCE to the excess rate at TFinal is in WCEContrib[LastId, ]
if(nW){
# eta0 = NULL because IS_W = ISpline_W * eta0
# WCEcontrib <- weighted_cummulative_exposure_old(Increment=W, fromT=Y[,1], finalT=Y[,4], Id=Id,
# eta0=NULL, iWbeg=iWbeg, iWend=iWend, ISpline_W = IS_W)
WCEcontrib <- weighted_cummulative_exposure(Increment=W, fromT=Y[,1], toT=, Y[,2], FirstId=FirstId, LastId=LastId,
theT=Y[,4], tId=LastId,
eta0=NULL, iWbeg=iWbeg, iWend=iWend, ISpline_W = IS_W, Intercept_W=Intercept_W)
}
else {
WCEcontrib <- NULL
}
################################################################################
# control group
# only Brass model
if(!is.null(Ycontrol)){
# computes intermediates
if(is.null(Spline_B)){
if( nBX0){
BX0_byperiodcontrol <- BX0control[Id_byperiodcontrol,]
BPHtermcontrol <-exp(BX0control %*% allparam[ibalpha0])
modified_ratecontrol <- expected_ratecontrol * BPHtermcontrol
modified_cumratecontrol <- log((1 + exp( expected_logit_endcontrol))/(1 + exp(expected_logit_entercontrol))) * BPHtermcontrol
BPHtermbyPcontrol <-exp(BX0_byperiodcontrol %*% allparam[ibalpha0])
modified_cumratebyPcontrol <- log((1 + exp( expected_logit_end_byperiodcontrol))/(1 + exp(expected_logit_enter_byperiodcontrol))) * BPHtermbyPcontrol
}
else {
BPHtermcontrol <-1.0
modified_ratecontrol <- expected_ratecontrol
modified_cumratecontrol <- log((1 + exp( expected_logit_endcontrol))/(1 + exp(expected_logit_entercontrol)))
modified_cumratebyPcontrol <- log((1 + exp( expected_logit_end_byperiodcontrol))/(1 + exp(expected_logit_enter_byperiodcontrol)))
BPHtermbyPcontrol <-1.0
}
}
else {
# parameter of the first basis is one
brass0 <- c(1.0, allparam[ibrass0])
S_B <- Spline_B * brass0
Y2C <- exp(predictSpline(S_B, expected_logit_endcontrol))
Y1C <- exp(predictSpline(S_B, expected_logit_entercontrol))
evalderivbrasscontrol <- predictSpline(deriv(S_B), expected_logit_endcontrol)
# E(x2) spline bases of the brass transformation at exit
E2C <- evaluate(Spline_B, expected_logit_endcontrol)[,-1]
# E(x1) spline bases of the brass transformation at enter
E1C <- evaluate(Spline_B, expected_logit_entercontrol)[,-1]
# E'(x2) derivative of the spline bases of the brass transformation at exit
DE2C <- evaluate(deriv(Spline_B), expected_logit_endcontrol)[,-1]
# contribution of non time dependant variables
modified_ratecontrol <- expected_ratecontrol * (1 + exp(-expected_logit_endcontrol))/(1+ 1/Y2C) * evalderivbrasscontrol
# by period
Y2CbyP <- exp(predictSpline(S_B, expected_logit_end_byperiodcontrol))
Y1CbyP <- exp(predictSpline(S_B, expected_logit_enter_byperiodcontrol))
evalderivbrassbyPcontrol <- predictSpline(deriv(S_B), expected_logit_end_byperiodcontrol)
# E(x2) spline bases of the brass transformation at exit
E2CbyP <- evaluate(Spline_B, expected_logit_end_byperiodcontrol)[,-1]
# E(x1) spline bases of the brass transformation at enter
E1CbyP <- evaluate(Spline_B, expected_logit_enter_byperiodcontrol)[,-1]
# E'(x2) derivative of the spline bases of the brass transformation at exit
DE2CbyP <- evaluate(deriv(Spline_B), expected_logit_end_byperiodcontrol)[,-1] # contribution of non time dependant variables
modified_cumratebyPcontrol <- log((1 + Y2CbyP)/(1 + Y1CbyP))
# modified cumrate is computed once for each individual (aggregated accors periods from t_enter to t_end of folowup)
# modified_cumratecontrol <- log((1 + Y2C)/(1 + Y1C))
modified_cumratecontrol <- tapply(modified_cumratebyPcontrol, as.factor(Id_byperiodcontrol), FUN=sum)
if( nBX0){
BPHtermcontrol <-exp(BX0control %*% allparam[ibalpha0])
BPHtermbyPcontrol <-exp(BX0_byperiodcontrol %*% allparam[ibalpha0])
modified_ratecontrol <- modified_ratecontrol * BPHtermcontrol
# modified cumrate is computed once for each individual (from t_enter to t_end of folowup)
modified_cumratecontrol <- modified_cumratecontrol * BPHtermcontrol
# by period
modified_cumratebyPcontrol <- modified_cumratebyPcontrol * BPHtermcontrol
} else {
BPHtermcontrol <- 1
BPHtermbyPcontrol <-1.0
}
if(sum(is.na(modified_ratecontrol)) | sum(is.na(modified_cumratecontrol))){
warning(paste0(sum(is.na(modified_ratecontrol)),
" NA rate control and ",
sum(is.na(modified_cumratecontrol)),
" NA cumrate control with Brass coef",
paste(format(brass0), collapse = " ")))
}
if(min(modified_ratecontrol, na.rm=TRUE)<0 | min(modified_cumratecontrol, na.rm=TRUE)<0){
warning(paste0(sum(modified_ratecontrol<0, na.rm=TRUE),
" negative rate control and ",
sum(modified_cumratecontrol<0, na.rm=TRUE),
" negative cumrate control with Brass coef",
paste(format(brass0), collapse = " ")))
}
}
###################
# compute dL/d brass0
if(is.null(Spline_B)){
dLdbrass0 <- NULL
}
else {
if (!is.null(weightscontrol)) {
dLdbrass0 <- crossprod(DE2C, Ycontrol[,3]*weightscontrol/evalderivbrasscontrol) +
crossprod(E2C, Ycontrol[,3] * weightscontrol /(1+ Y2C) ) +
# cumulative part
crossprod(E1CbyP, Y1CbyP * BPHtermbyPcontrol * weights_byperiodcontrol /(1+ Y1CbyP) ) -
crossprod(E2CbyP, ( Y2CbyP * BPHtermbyPcontrol)* weights_byperiodcontrol /(1+ Y2CbyP) )
} else {
dLdbrass0 <- crossprod(DE2C, Ycontrol[,3]/evalderivbrasscontrol) +
crossprod(E2C, Ycontrol[,3]/(1+ Y2C) ) +
# cumulative part
crossprod(E1CbyP, Y1CbyP * BPHtermbyPcontrol /(1+ Y1CbyP) ) -
crossprod(E2CbyP, (Y2CbyP * BPHtermbyPcontrol)/(1+ Y2CbyP) )
}
}
if( nBX0){
# compute dL/d balpha0
if (!is.null(weightscontrol)) {
dLdbalpha0 <- crossprod(BX0control ,(Ycontrol[,3] * weightscontrol) ) - crossprod(BX0_byperiodcontrol , modified_cumratebyPcontrol * weights_byperiodcontrol)
} else {
dLdbalpha0 <- crossprod(BX0control ,Ycontrol[,3]) - crossprod(BX0_byperiodcontrol ,modified_cumratebyPcontrol )
}
}
else {
dLdbalpha0 <- NULL
}
gr_control <- c(rep(0, length(allparam) - nbrass0 - nBX0),
dLdbrass0,
dLdbalpha0)
}
else {
modified_ratecontrol <- NULL
modified_cumratecontrol <- NULL
modified_cumratebyPcontrol <- NULL
gr_control <- 0.0
}
# print("*************************************************gr_control")
# print(gr_control)
################################################################################
# exposed group
# Brass model
# computes intermediates
if(is.null(Spline_B)){
modified_rate <- expected_rate
modified_cumrate <- log((1 + exp( expected_logit_end))/(1 + exp(expected_logit_enter)))
modified_cumratebyP <- log((1 + exp( expected_logit_end_byperiod))/(1 + exp(expected_logit_enter_byperiod)))
}
else {
# parameter of the first basis is one
brass0 <- c(1.0, allparam[ibrass0])
S_B <- Spline_B * brass0
Y2E <- exp(predictSpline(S_B, expected_logit_end))
Y1E <- exp(predictSpline(S_B, expected_logit_enter))
evalderivbrass <- predictSpline(deriv(S_B), expected_logit_end)
# E(x2) spline bases of the brass transformation at exit
E2E <- evaluate(Spline_B, expected_logit_end)[,-1]
# E(x1) spline bases of the brass transformation at enter
E1E <- evaluate(Spline_B, expected_logit_enter)[,-1]
# E'(x2) derivative of the spline bases of the brass transformation at exit
DE2E <- evaluate(deriv(Spline_B), expected_logit_end)[,-1]
# contribution of non time dependant variables
modified_rate <- expected_rate * (1 + exp(-expected_logit_end))/(1+ 1/Y2E) * evalderivbrass
# by period
Y2EbyP <- exp(predictSpline(S_B, expected_logit_end_byperiod))
Y1EbyP <- exp(predictSpline(S_B, expected_logit_enter_byperiod))
evalderivbrassbyP <- predictSpline(deriv(S_B), expected_logit_end_byperiod)
# E(x2) spline bases of the brass transformation at exit
E2EbyP <- evaluate(Spline_B, expected_logit_end_byperiod)[,-1]
# E(x1) spline bases of the brass transformation at enter
E1EbyP <- evaluate(Spline_B, expected_logit_enter_byperiod)[,-1]
# E'(x2) derivative of the spline bases of the brass transformation at exit
DE2EbyP <- evaluate(deriv(Spline_B), expected_logit_end_byperiod)[,-1]
# contribution of non time dependant variables
modified_cumratebyP <- log((1 + Y2EbyP)/(1 + Y1EbyP))
# modified_cumratecontrol <- log((1 + Y2C)/(1 + Y1C))
modified_cumrate <- tapply(modified_cumratebyP, as.factor(Id_byperiod), FUN=sum)
}
if( nBX0){
BPHterm <-exp(BX0 %*% allparam[ibalpha0])
modified_rate <- modified_rate * BPHterm
modified_cumrate <- modified_cumrate * BPHterm
BX0_byperiod <- BX0[Id_byperiod,]
BPHtermbyP <-exp(BX0_byperiod %*% allparam[ibalpha0])
modified_cumratebyP <- modified_cumratebyP * BPHtermbyP
}
else {
BPHterm <- 1.0
BPHtermbyP <- 1.0
}
if(sum(is.na(modified_rate)) | sum(is.na(modified_cumrate))){
warning(paste0(sum(is.na(modified_rate)),
" NA rate and ",
sum(is.na(modified_cumrate)),
" NA cumrate with Brass coef",
paste(format(brass0), collapse = " ")))
}
if(min(modified_rate, na.rm=TRUE)<0 | min(modified_cumrate, na.rm=TRUE)<0){
warning(paste0(sum(modified_rate<0, na.rm=TRUE),
" negative rate and ",
sum(modified_cumrate<0, na.rm=TRUE),
" negative cumrate with Brass coef",
paste(format(brass0), collapse = " ")))
}
# spline bases for each TD effect
if(nX + nZ){
# spline bases for each TD effect at the end of the interval
YT <- evaluate(Spline_t, Y[,2], intercept=TRUE)
if(nW){
RatePred <- ifelse(Y[,3] ,
PHterm * exp(YT0Gamma0 + apply(YT * Zalphabeta, 1, sum) + apply(WCEcontrib, 1, sum)),
0)
}
else {
RatePred <- ifelse(Y[,3] ,
PHterm * exp(YT0Gamma0 + apply(YT * Zalphabeta, 1, sum)),
0)
}
} else {
if(nW){
RatePred <- ifelse(Y[,3] ,
PHterm * exp(YT0Gamma0 + apply(WCEcontrib, 1, sum)),
0)
}
else {
RatePred <- ifelse(Y[,3] ,
PHterm * exp(YT0Gamma0),
0)
}
}
F <- ifelse(Y[,3] ,
RatePred/(RatePred + modified_rate ),
0)
Ftable <- ifelse(Y[,3] ,
modified_rate/(RatePred + modified_rate ),
0)
# for each row i of an Id, FId[i] <- F[final_time of the id]
FId <- F[LastId]
if(nX + nZ) {
if(nX0>0) {
Intb <- Intb * c(PHterm)
}
IntbF <- YT*F - Intb
}
else {
IntbF <- NULL
}
Intb0 <- Intb0 * c(PHterm)
WF <- list()
if(nW){
for(i in 1:nW){
if(nX0>0) {
# rescale IndbW by PHterm
IntbW[[i]] <- IntbW[[i]] * c(PHterm)
}
WF[[i]] <- evaluate(ISpline_W[[i]], Y[,4] - Y[,1], intercept=Intercept_W[i]) * FId
}
}
else {
WF <- NULL
}
#####################################################################"
# now computes the mean score and the gradients
# parameters of the correction of the life table
if(is.null(Spline_B)){
dLdbrass0 <- NULL
}
else {
if (!is.null(weights)) {
# compute dL/d brass0
dLdbrass0 <- crossprod(DE2E , Ftable *weights/evalderivbrass) +
crossprod(E2E, Ftable * weights /(1+ Y2E) ) +
# cumulative part
crossprod(E1EbyP, (Y1EbyP * BPHtermbyP) * weights_byperiod /(1+ Y1EbyP) ) -
crossprod(E2EbyP, (Y2EbyP * BPHtermbyP) * weights_byperiod /(1+ Y2EbyP) )
} else {
# compute dL/d brass0
dLdbrass0 <- crossprod(DE2E, Ftable / evalderivbrass) +
crossprod(E2E, Ftable/(1+ Y2E) ) +
# cumulative part
crossprod(E1EbyP, (Y1EbyP * BPHtermbyP) /(1+ Y1EbyP) ) -
crossprod(E2EbyP, (Y2EbyP * BPHtermbyP) /(1+ Y2EbyP) )
}
}
if( nBX0){
# compute dL/d balpha0
if (!is.null(weights)) {
dLdbalpha0 <- crossprod(BX0 ,( Ftable - modified_cumrate )* weights )
} else {
dLdbalpha0 <- crossprod(BX0 , ( Ftable - modified_cumrate ) )
}
}
else {
dLdbalpha0 <- NULL
}
if (!is.null(weights)) {
# dLdgamma0
if(is.null(Spline_t0)){
dLdgamma0 <- NULL
}
else {
dLdgamma0 <- crossprod( YT0 * F - Intb0 , weights)
}
if (nX0) {
dLdalpha0 <- crossprod(X0 , (F - PHterm * NPHterm) * weights )
}
else {
dLdalpha0 <- NULL
}
if (nX){
# traiter les Intercept_t_NPH
dLdbeta0 <- NULL
for(i in 1:nX){
if ( Intercept_t_NPH[i] ){
dLdbeta0 <- c(dLdbeta0, crossprod(X[,i] , IntbF * weights))
}
else {
dLdbeta0 <- c(dLdbeta0, crossprod(X[,i] , IntbF[,indx_without_intercept] * weights))
}
}
}
else {
dLdbeta0 <- NULL
}
if (nZ) {
baseIntbF <- IntbF %*% t(tBeta)
dLdalpha <- rep(0,getNparam(Z) )
indZ <- getIndex(Z)
for(iZ in 1:nZ){
if ( debug.gr > 200 ){
}
dLdalpha[indZ[iZ,1]:indZ[iZ,2]] <- crossprod(Z@DM[,indZ[iZ,1]:indZ[iZ,2]], baseIntbF[,iZ] * weights )
}
dLdbeta <- c(crossprod((IntbF[,-1, drop=FALSE]),Zalpha * weights))
}
else {
dLdalpha <- NULL
dLdbeta <- NULL
}
if(nW){
dLdeta0 <- NULL
for(i in 1:nW){
dLdeta0 <- cbind(dLdeta0, crossprod(weights, W[,i] * WF[[i]] - IntbW[[i]]))
}
}
else{
dLdeta0 <- NULL
}
} # end weights!=NULL
else {
# dLdgamma0
if(is.null(Spline_t0)){
dLdgamma0 <- NULL
}
else {
dLdgamma0 <- apply( YT0 * F - Intb0 , 2, sum)
}
if (nX0) {
dLdalpha0 <- crossprod(X0 , F - PHterm* NPHterm )
}
else {
dLdalpha0 <- NULL
}
if (nX){
# traiter les Intercept_t_NPH
dLdbeta0 <- NULL
for(i in 1:nX){
if ( Intercept_t_NPH[i] ){
dLdbeta0 <- c(dLdbeta0, crossprod(X[,i] , IntbF))
}
else {
dLdbeta0 <- c(dLdbeta0, crossprod(X[,i] , IntbF[,indx_without_intercept]))
}
}
}
else {
dLdbeta0 <- NULL
}
if (nZ) {
baseIntbF <- IntbF %*% t(tBeta)
dLdalpha <- rep(0,getNparam(Z) )
indZ <- getIndex(Z)
for(iZ in 1:nZ){
dLdalpha[indZ[iZ,1]:indZ[iZ,2]] <- crossprod(Z@DM[,indZ[iZ,1]:indZ[iZ,2]], baseIntbF[,iZ] )
}
dLdbeta <- c(crossprod((IntbF[,-1, drop=FALSE]),Zalpha ))
}
else {
dLdalpha <- NULL
dLdbeta <- NULL
}
# WCE effects
if(nW){
dLdeta0 <- NULL
for(i in 1:nW){
dLdeta0 <- c(dLdeta0, crossprod(W[,i] , WF[[i]]) - apply(IntbW[[i]], 2, sum))
}
}
else{
dLdeta0 <- NULL
}
} # end weights==NULL
gr_exposed <- c(dLdgamma0,
dLdalpha0,
dLdbeta0,
dLdalpha,
dLdbeta,
dLdeta0,
dLdbrass0,
dLdbalpha0)
# print("debdLdeta0grad")
# print(summary(F))
# print(summary(PHterm))
# print(summary(NPHterm ))
# print(summary(X0))
# print(summary(c(PHterm)* NPHterm ) )
# print(summary(( F - c(PHterm)* NPHterm ) ))
# print(summary(( F - c(PHterm)* NPHterm ) * X0))
# print("findLdeta0grad")
# print("*************************************************gr_exposed")
# print(gr_exposed)
ret <- gr_control + gr_exposed
#cat("gr ")
#print(ret)
#cat("gC ")
#print(gr_control)
#cat("gE ")
#print(gr_exposed)
if(debug.gr){
attr(rep, "intb0") <- Intb0
attr(rep, "F") <- F
attr(rep, "YT0") <- YT0
if(nX+nZ){
attr(rep, "YT") <- YT
attr(rep, "intb") <- Intb
attr(rep, "intbF") <- IntbF
}
if(nW){
attr(rep, "intbW") <- IntbW
}
attr(rep, "RatePred") <- RatePred
if(debug.gr > 1000){
cat("grad value and parameters :", "\n")
print(cbind( rep, allparam))
}
}
if ( debug.gr) {
attr(ret, "PHterm") <- PHterm
attr(ret, "NPHterm") <- NPHterm
attr(ret, "WCEcontrib") <- WCEcontrib
attr(ret, "modified_rate") <- modified_rate
attr(ret, "modified_cumrate") <- modified_cumrate
attr(ret, "modified_cumratebyP") <- modified_cumratebyP
attr(ret, "gr_exposed") <- gr_exposed
attr(ret, "modified_ratecontrol") <- modified_ratecontrol
attr(ret, "modified_cumratecontrol") <- modified_cumratecontrol
attr(ret, "modified_cumratebyPcontrol") <- modified_cumratebyPcontrol
attr(ret, "gr_control") <- gr_control
if ( debug.gr > 1000) cat("fin gr_flexrsurv_GA0B0ABE0Br0Control **", ret, "++ \n")
}
#cat("************gr_flexrsurv_fromto_1WCEaddBr0Control ")
#print(cbind(allparam, ret), digits=12)
ret
}
|
/R/gr_ll_flexrsurv_fromto_GA0B0ABE0Br0PeriodControl.R
|
no_license
|
cran/flexrsurv
|
R
| false
| false
| 35,639
|
r
|
gr_ll_flexrsurv_fromto_GA0B0ABE0Br0PeriodControl<-function(allparam,
Y, X0, X, Z, W,
BX0,
Id, FirstId, LastId=NULL,
expected_rate,
expected_logit_end,
expected_logit_enter,
expected_logit_end_byperiod,
expected_logit_enter_byperiod,
weights_byperiod,
Id_byperiod,
weights=NULL,
Ycontrol, BX0control,
weightscontrol=NULL,
Idcontrol, FirstIdcontrol,
expected_ratecontrol,
expected_logit_endcontrol,
expected_logit_entercontrol,
expected_logit_end_byperiodcontrol,
expected_logit_enter_byperiodcontrol,
weights_byperiodcontrol,
Id_byperiodcontrol,
step, Nstep,
intTD=intTDft_NC, intweightsfunc=intweights_CAV_SIM,
intTD_base=intTDft_base_NC,
intTD_WCEbase=intTDft_WCEbase_NC,
nT0basis,
Spline_t0=BSplineBasis(knots=NULL, degree=3, keep.duplicates=TRUE), Intercept_t0=TRUE,
ialpha0, nX0,
ibeta0, nX,
ialpha,
ibeta,
nTbasis,
ieta0, iWbeg, iWend, nW,
Spline_t =BSplineBasis(knots=NULL, degree=3, keep.duplicates=TRUE),
Intercept_t_NPH=rep(TRUE, nX),
ISpline_W =MSplineBasis(knots=NULL, degree=3, keep.duplicates=TRUE),
Intercept_W=TRUE,
nBbasis,
Spline_B, Intercept_B=TRUE,
ibrass0, nbrass0,
ibalpha0, nBX0,
debug.gr=TRUE, ...){
# same as ll_flexrsurv_fromto_GA0B0ABE0Br0.R but with a control group
# compute log likelihood of the relative survival model
# excess rate = exp( f(t)%*%gamma + X0%*%alpha0 + X%*%beta0(t) + sum( alphai(zi)betai(t) + sum ( wce(Wi , eta0i)(t)) ))
#################################################################################################################
#################################################################################################################
# the coef of the first t-basis is constraint to 1 for nat-spline, and n-sum(other beta) if bs using expand() method
#################################################################################################################
#################################################################################################################
#################################################################################################################
# allparam ; vector of all coefs
# gamma0 = allparam[1:nY0basis]
# alpha0= allparam[ialpha0]
# beta0= matrix(allparam[ibeta0], ncol=nX, nrow=nTbasis)
# alpha= diag(allparam[ialpha])
# beta= expand(matrix(allparam[ibeta], ncol=Z@nZ, nrow=nTbasis-1))
# beta does not contains coef for the first t-basis
# eta0 = allparam[ieta0]
# brass0 = allparam[ibrass0]
# balpha0 = allparam[ibalpha0]
# corection of lifetable according to generalized brass method
# Cohort-independent generalized Brass model in an age-cohort table
# stratified brass model according to fixed effects BX0 (one brass function per combination)
# for control group
# rate = brass0(expected-ratecontrol, expected_logitcontrol)*exp(BX0control balpha0)
# but for exposed
# rate = brass0(expected-rate, expected_logit)*exp(BX0 balpha0) + exp(gamma0(t) + time-independent effect(LL + NLL)(X0) + NPH(X) + NPHNLL(Z) + WCE(W))
# brass0 : BRASS model wiht parameter Spline_B
# logit(F) = evaluate(Spline_B, logit(F_pop), brass0) * exp(Balpha %*% BX0)
# HCum(t_1, t_2) = log(1 + exp(evaluate(Spline_B, logit(F_pop(t_2)), brass0)) - log(1 + exp(evaluate(Spline_B, logit(F_pop(t_1)), brass0))
# rate(t_1) = rate_ref * (1 + exp(-logit(F_pop(t)))/(1 + exp(evaluate(Spline_B, logit(F_pop(t)), brass0)))*
# evaluate(deriv(Spline_B), logit(F_pop(t)), brass0)
# expected_logit_end = logit(F_pop(Y[,2]))
# expected_logit_enter = logit(F_pop(Y[,1]))
# brass0 = allparam[ibrass0]
# Spline_B : object of class "AnySplineBasis" (suitable for Brass model) with method deriv() and evaluate()
# IMPORTANT : the coef of the first basis is constraints to one and evaluate(deriv(spline_B), left_boundary_knots) == 1 for Brass transform
#
# parameters for exposed group
#################################################################################################################
# Y : object of class Surv but the matrix has 4 columns :
# Y[,1] beginning(1) , fromT
# Y[,2] end(2), toT,
# Y[,3] status(3) fail
# Y[,4] end of followup(4)
# end of followup is assumed constant by Id
# X0 : non-time-dependent variable (may contain spline bases expanded for non-loglinear terms)
# X : log-linear but time-dependent variable
# Z : object of class "DesignMatrixNPHNLL" time dependent variables (spline basis expended)
# W : Exposure variables used in Weighted Cumulative Exposure Models
# BX0 : non-time dependante variable for the correction of life table (may contain spline bases expended for non-loglinear terms)
# Id : variable indicating the individual's Id; lines with the same Id are considered to come from the same individual
# FirstId : all lines in FirstId[iT]:iT in the data comes from the same individual
# expected_rate : expected rate at event time T
# expected_logit_end : logit of the expected survival at the end of the followup
# expected_logit_enter : logit of the expected survival at the beginning of the followup
# weights : vector of weights : LL = sum_i w_i ll_i
# expected_logit_end_byperiod, : expected logit of periode survival at exit of each period (used in the Brass model
# expected_logit_enter_byperiod, : expected logit of periode survival at entry of each period (used in the Brass model
# weights_byperiod, : weight of each period (used in the Brass model weights_byperiod = weight[Id_byperiod]
# Id_byperiod, : index in the Y object : XX_byperiod[i] corrsponds to the row Id_byperiod[i] of Y, X, Z, ...
# parameters for exposd population
#################################################################################################################
# parameters for exposed group
#################################################################################################################
# Ycontrol : object of class Surv but the matrix has 4 columns :
# Ycontrol[,1] beginning(1) , fromT
# Ycontrol[,2] end(2), toT,
# Ycontrol[,3] status(3) fail
# Ycontrol[,4] end of followup(4)
# end of followup is assumed constant by Id
# BX0control : non-time dependante variable for the correction of life table (may contain spline bases expended for non-loglinear terms)
# Idcontrol : variable indicating the individual's Id; lines with the same Id are considered to come from the same individual
# FirstIdcontrol : all lines in FirstId[iT]:iT in the data comes from the same individual
# expected_ratecontrol : expected rate at event time T
# expected_logit_endcontrol : logit of the expected survival at the end of the followup
# expected_logit_entercontrol : logit of the expected survival at the beginning of the followup
# weightscontrol : vector of weights : LL = sum_i w_i ll_i
# expected_logit_end_byperiodcontrol, : expected logit of periode survival at exit of each period (used in the Brass model
# expected_logit_enter_byperiodcontrol, : expected logit of periode survival at entry of each period (used in the Brass model
# weights_byperiodcontrol, : weight of each period (used in the Brass model weights_byperiod = weight[Id_byperiod]
# Id_byperiodcontrol, : index in the Y object : XX_byperiod[i] corrsponds to the row Id_byperiod[i] of Y, X, Z, ...
#################################################################################################################
# model parameters
# step : object of class "NCLagParam" or "GLMLagParam"
# Nstep : number of lag for each observation
# intTD : function to perform numerical integration
# intweightfunc : function to compute weightsfor numerical integration
# nT0basis : number of spline basis
# Spline_t0, spline object for baseline hazard, with evaluate() method
# Intercept_t0=FALSE, option for evaluate, = TRUE all the basis, =FALSE all but first basis
# nTbasis : number of time spline basis for NPH or NLL effects
# nX0 : nb of PH variables dim(X0)=c(nobs, nX0)
# nX : nb of NPHLIN variables dim(X)=c(nobs, nX)
# Spline_t, spline object for time dependant effects, with evaluate() method
# Intercept_t_NPH vector of intercept option for NPH spline (=FALSE when X is NLL too, ie in case of remontet additif NLLNPH)
# nW : nb of WCE variables dim(W)=c(nobs, nW)
# iWbeg, iWend : coef of the ith WCE variable is eta0[iWbeg[i]:iWend[i]]
# ISpline_W, list of nW spline object for WCE effects, with evaluate() method
# ISpline is already integreted
# ... not used args
# the function does not check the concordance between the length of the parameter vectors and the number of knots and the Z.signature
# returned value : the log likelihood of the model
#cat("************gr_flexrsurv_fromto_1WCEaddBr0Control ")
#print(format(allparam, scientific = TRUE, digits=12))
################################################################################
# excess rate
if(is.null(Z)){
nZ <- 0
Zalphabeta <- NULL
} else {
nZ <- Z@nZ
}
# LastId
if(is.null(LastId)){
first <- unique(FirstId)
nline <- c(first[-1],length(FirstId)+1)-first
LastId <- FirstId+rep(nline, nline)-1
}
if(is.null(Spline_t0)){
YT0 <- NULL
YT0Gamma0 <- 0.0
Spt0g <- NULL
igamma0 <- NULL
}
else {
igamma0 <- 1:nT0basis
if(Intercept_t0){
tmpgamma0 <- allparam[igamma0]
}
else {
tmpgamma0 <- c(0, allparam[igamma0])
}
# baseline hazard at the end of the interval
Spt0g <- Spline_t0*tmpgamma0
YT0Gamma0 <- predictSpline(Spt0g, Y[,2])
YT0 <- fevaluate(Spline_t0, Y[,2], intercept=Intercept_t0)
}
# contribution of non time dependant variables
if( nX0){
PHterm <-exp(X0 %*% allparam[ialpha0])
} else {
PHterm <- 1
}
# contribution of time-dependent effects
# parenthesis are important for efficiency
if(nZ) {
# add a row for the first basis
tBeta <- t(ExpandAllCoefBasis(allparam[ibeta], ncol=nZ, value=1))
# Zalpha est la matrice des alpha(Z)
# parenthesis important for speed ?
Zalpha <- Z@DM %*%( diag(allparam[ialpha]) %*% Z@signature )
Zalphabeta <- Zalpha %*% tBeta
if(nX) {
# add a row of 0 for the first T-basis when !Intercept_T_NPH
Zalphabeta <- Zalphabeta + X %*% t(ExpandCoefBasis(allparam[ibeta0],
ncol=nX,
splinebasis=Spline_t,
expand=!Intercept_t_NPH,
value=0))
}
} else {
if(nX) {
Zalphabeta <- X %*% t(ExpandCoefBasis(allparam[ibeta0],
ncol=nX,
splinebasis=Spline_t,
expand=!Intercept_t_NPH,
value=0))
}
else {
Zalphabeta <- NULL
}
}
if(nW) {
IS_W <- ISpline_W
eta0 <- allparam[ieta0]
for(iW in 1:nW){
if(Intercept_W[iW]){
IS_W[[iW]] <- ISpline_W[[iW]] * eta0[iWbeg[iW]:iWend[iW]]
}
else {
IS_W[[iW]]<- ISpline_W[[iW]] * c(0, eta0[iWbeg[iW]:iWend[iW]])
}
IntbW <- list()
}
if(identical(intweightsfunc , intweights_CAV_SIM, ignore.srcref=TRUE) ){
degree <- 2L
}
else if(identical(intweightsfunc , intweights_SIM_3_8, ignore.srcref=TRUE) ){
degree <- 3L
}
else if(identical(intweightsfunc , intweights_BOOLE, ignore.srcref=TRUE) ){
degree <- 4L
}
else {
degree <- 0L
}
if(nX + nZ) {
NPHterm <- intTD(rateTD_gamma0alphabetaeta0, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
step=step, Nstep=Nstep,
intweightsfunc=intweightsfunc,
fromT=Y[,1], toT=Y[,2], FirstId=FirstId, LastId=LastId,
gamma0=allparam[igamma0], Zalphabeta=Zalphabeta,
nW = nW, W = W, eta0=allparam[ieta0], iWbeg=iWbeg, iWend=iWend,
Spline_t0=Spt0g, Intercept_t0=Intercept_t0,
Spline_t = Spline_t, Intercept_t=TRUE,
ISpline_W = IS_W, Intercept_W=Intercept_W)
if(is.null(Spline_t0)){
Intb0 <- rep(0.0, dim(Y)[1])
} else {
Intb0 <- intTD_base(func=rateTD_gamma0alphabetaeta0, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
Spline=Spline_t0,
step=step, Nstep=Nstep, intweightsfunc=intweightsfunc,
fromT=Y[,1], toT=Y[,2], FirstId=FirstId, LastId=LastId,
gamma0=allparam[igamma0], Zalphabeta=Zalphabeta,
nW = nW, W = W, eta0=allparam[ieta0], iWbeg=iWbeg, iWend=iWend,
Spline_t0=Spt0g, Intercept_t0=Intercept_t0,
Spline_t = Spline_t, Intercept_t=TRUE,
ISpline_W = IS_W, Intercept_W=Intercept_W,
debug=debug.gr)
}
if( identical(Spline_t0, Spline_t)){
Intb <- Intb0
}
else {
Intb <- intTD_base(func=rateTD_gamma0alphabetaeta0, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
Spline=Spline_t,
step=step, Nstep=Nstep, intweightsfunc=intweightsfunc,
fromT=Y[,1], toT=Y[,2], FirstId=FirstId, LastId=LastId,
gamma0=allparam[igamma0], Zalphabeta=Zalphabeta,
nW = nW, W = W, eta0=allparam[ieta0], iWbeg=iWbeg, iWend=iWend,
Spline_t0=Spt0g, Intercept_t0=Intercept_t0,
Spline_t = Spline_t, Intercept_t=TRUE,
ISpline_W = IS_W, Intercept_W=Intercept_W,
debug=debug.gr)
}
if(!Intercept_t0 & !is.null(Spline_t0)){
Intb0<- Intb0[,-1]
}
indx_without_intercept <- 2:getNBases(Spline_t)
for(iW in 1:nW){
# in IntbW, the integrated WCE splines (parameter named Spline) are not scaled by eta0
IntbW[[iW]] <- intTD_WCEbase(func=rateTD_gamma0alphabetaeta0, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
Spline=ISpline_W[[iW]], intercept=Intercept_W[iW], theW=W[,iW],
step=step, Nstep=Nstep, degree=degree, intweightsfunc=intweightsfunc,
fromT=Y[,1], toT=Y[,2], FirstId=FirstId, LastId=LastId,
gamma0=allparam[igamma0], Zalphabeta=Zalphabeta,
nW = nW, W = W, eta0=allparam[ieta0], iWbeg=iWbeg, iWend=iWend,
Spline_t0=Spt0g, Intercept_t0=Intercept_t0,
Spline_t = Spline_t, Intercept_t=TRUE,
ISpline_W = IS_W, Intercept_W=Intercept_W,
debug=debug.gr)
}
}
else {
NPHterm <- intTD(rateTD_gamma0eta0, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
step=step, Nstep=Nstep,
intweightsfunc=intweightsfunc,
fromT=Y[,1], toT=Y[,2], FirstId=FirstId, LastId=LastId,
gamma0=allparam[igamma0],
nW = nW, W = W, eta0=allparam[ieta0], iWbeg=iWbeg, iWend=iWend,
Spline_t0=Spt0g, Intercept_t0=Intercept_t0,
ISpline_W = IS_W, Intercept_W=Intercept_W)
if(is.null(Spline_t0)){
Intb0 <- rep(0.0, dim(Y)[1])
} else {
Intb0 <- intTD_base(func=rateTD_gamma0eta0, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
Spline=Spline_t0,
step=step, Nstep=Nstep, intweightsfunc=intweightsfunc,
fromT=Y[,1], toT=Y[,2], FirstId=FirstId, LastId=LastId,
gamma0=allparam[igamma0], Zalphabeta=Zalphabeta,
nW = nW, W = W, eta0=allparam[ieta0], iWbeg=iWbeg, iWend=iWend,
Spline_t0=Spt0g, Intercept_t0=Intercept_t0,
Spline_t = Spline_t, Intercept_t=TRUE,
ISpline_W = IS_W, Intercept_W=Intercept_W,
debug=debug.gr)
if(!Intercept_t0){
Intb0<- Intb0[,-1]
}
}
Intb <- NULL
for(iW in 1:nW){
# in IntbW, the integrated WCE splines are not scaled by eta0
IntbW[[iW]] <- intTD_WCEbase(func=rateTD_gamma0eta0, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
Spline=ISpline_W[[iW]], intercept=Intercept_W[iW], theW=W[,iW],
step=step, Nstep=Nstep, degree=degree, intweightsfunc=intweightsfunc,
fromT=Y[,1], toT=Y[,2], FirstId=FirstId, LastId=LastId,
gamma0=allparam[igamma0],
nW = nW, W = W, eta0=allparam[ieta0], iWbeg=iWbeg, iWend=iWend,
Spline_t0=Spt0g, Intercept_t0=Intercept_t0,
Spline_t = Spline_t, Intercept_t=TRUE,
ISpline_W = IS_W, Intercept_W=Intercept_W,
debug=debug.gr)
}
}
}
else {
# no WCE effect, same NPH term as ll_flexrsurv_fromto_G0A0B0AB
if(nX + nZ) {
NPHterm <- intTD(rateTD_gamma0alphabeta, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
step=step, Nstep=Nstep,
intweightsfunc=intweightsfunc,
gamma0=allparam[igamma0], Zalphabeta=Zalphabeta,
Spline_t0=Spt0g, Intercept_t0=Intercept_t0,
Spline_t = Spline_t, Intercept_t=TRUE)
if(is.null(Spline_t0)){
Intb0 <- rep(0.0, dim(Y)[1])
} else {
Intb0 <- intTD_base(func=rateTD_gamma0alphabeta, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
Spline=Spline_t0,
step=step, Nstep=Nstep, intweightsfunc=intweightsfunc,
gamma0=allparam[igamma0], Zalphabeta=Zalphabeta,
Spline_t0=Spt0g, Intercept_t0=Intercept_t0,
Spline_t = Spline_t, Intercept_t=TRUE,
debug=debug.gr)
}
if( identical(Spline_t0, Spline_t)){
Intb <- Intb0
}
else {
Intb <- intTD_base(func=rateTD_gamma0alphabeta, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
Spline=Spline_t,
step=step, Nstep=Nstep, intweightsfunc=intweightsfunc,
gamma0=allparam[igamma0], Zalphabeta=Zalphabeta,
Spline_t0=Spt0g, Intercept_t0=Intercept_t0,
Spline_t = Spline_t, Intercept_t=TRUE)
}
if(!Intercept_t0 & !is.null(Spline_t0)){
Intb0<- Intb0[,-1]
}
indx_without_intercept <- 2:getNBases(Spline_t)
}
else {
NPHterm <- intTD(rateTD_gamma0, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
step=step, Nstep=Nstep, intweightsfunc=intweightsfunc,
gamma0=allparam[igamma0],
Spline_t0=Spt0g, Intercept_t0=Intercept_t0)
if(is.null(Spline_t0)){
Intb0 <- rep(0.0, dim(Y)[1])
} else {
Intb0 <- intTD_base(func=rateTD_gamma0, intFrom=Y[,1], intTo=Y[,2], intToStatus=Y[,3],
Spline=Spline_t0,
step=step, Nstep=Nstep, intweightsfunc=intweightsfunc,
gamma0=allparam[igamma0],
Spline_t0=Spt0g, Intercept_t0=Intercept_t0,
debug=debug.gr)
if(!Intercept_t0){
Intb0<- Intb0[,-1]
}
}
Intb <- NULL
}
}
################################################################################
################################################################################
################################################################################
################################################################################
################################################################################
################################################################################
################################################################################
################################################################################
################################################################################
################################################################################
################################################################################
#*****
# the contribution of the WCE to the excess rate at TFinal is in WCEContrib[LastId, ]
if(nW){
# eta0 = NULL because IS_W = ISpline_W * eta0
# WCEcontrib <- weighted_cummulative_exposure_old(Increment=W, fromT=Y[,1], finalT=Y[,4], Id=Id,
# eta0=NULL, iWbeg=iWbeg, iWend=iWend, ISpline_W = IS_W)
# NOTE(review): in the call below, 'toT=, Y[,2]' leaves toT missing and passes Y[,2]
# positionally -- likely a typo for 'toT=Y[,2]'; confirm against weighted_cummulative_exposure().
WCEcontrib <- weighted_cummulative_exposure(Increment=W, fromT=Y[,1], toT=, Y[,2], FirstId=FirstId, LastId=LastId,
theT=Y[,4], tId=LastId,
eta0=NULL, iWbeg=iWbeg, iWend=iWend, ISpline_W = IS_W, Intercept_W=Intercept_W)
}
else {
WCEcontrib <- NULL
}
################################################################################
# control group
# only Brass model
if(!is.null(Ycontrol)){
# computes intermediates
if(is.null(Spline_B)){
if( nBX0){
BX0_byperiodcontrol <- BX0control[Id_byperiodcontrol,]
BPHtermcontrol <-exp(BX0control %*% allparam[ibalpha0])
modified_ratecontrol <- expected_ratecontrol * BPHtermcontrol
modified_cumratecontrol <- log((1 + exp( expected_logit_endcontrol))/(1 + exp(expected_logit_entercontrol))) * BPHtermcontrol
BPHtermbyPcontrol <-exp(BX0_byperiodcontrol %*% allparam[ibalpha0])
modified_cumratebyPcontrol <- log((1 + exp( expected_logit_end_byperiodcontrol))/(1 + exp(expected_logit_enter_byperiodcontrol))) * BPHtermbyPcontrol
}
else {
BPHtermcontrol <-1.0
modified_ratecontrol <- expected_ratecontrol
modified_cumratecontrol <- log((1 + exp( expected_logit_endcontrol))/(1 + exp(expected_logit_entercontrol)))
modified_cumratebyPcontrol <- log((1 + exp( expected_logit_end_byperiodcontrol))/(1 + exp(expected_logit_enter_byperiodcontrol)))
BPHtermbyPcontrol <-1.0
}
}
else {
# parameter of the first basis is one
brass0 <- c(1.0, allparam[ibrass0])
S_B <- Spline_B * brass0
Y2C <- exp(predictSpline(S_B, expected_logit_endcontrol))
Y1C <- exp(predictSpline(S_B, expected_logit_entercontrol))
evalderivbrasscontrol <- predictSpline(deriv(S_B), expected_logit_endcontrol)
# E(x2) spline bases of the brass transformation at exit
E2C <- evaluate(Spline_B, expected_logit_endcontrol)[,-1]
# E(x1) spline bases of the brass transformation at enter
E1C <- evaluate(Spline_B, expected_logit_entercontrol)[,-1]
# E'(x2) derivative of the spline bases of the brass transformation at exit
DE2C <- evaluate(deriv(Spline_B), expected_logit_endcontrol)[,-1]
# contribution of non time dependant variables
modified_ratecontrol <- expected_ratecontrol * (1 + exp(-expected_logit_endcontrol))/(1+ 1/Y2C) * evalderivbrasscontrol
# by period
Y2CbyP <- exp(predictSpline(S_B, expected_logit_end_byperiodcontrol))
Y1CbyP <- exp(predictSpline(S_B, expected_logit_enter_byperiodcontrol))
evalderivbrassbyPcontrol <- predictSpline(deriv(S_B), expected_logit_end_byperiodcontrol)
# E(x2) spline bases of the brass transformation at exit
E2CbyP <- evaluate(Spline_B, expected_logit_end_byperiodcontrol)[,-1]
# E(x1) spline bases of the brass transformation at enter
E1CbyP <- evaluate(Spline_B, expected_logit_enter_byperiodcontrol)[,-1]
# E'(x2) derivative of the spline bases of the brass transformation at exit
DE2CbyP <- evaluate(deriv(Spline_B), expected_logit_end_byperiodcontrol)[,-1] # contribution of non time dependant variables
modified_cumratebyPcontrol <- log((1 + Y2CbyP)/(1 + Y1CbyP))
# modified cumrate is computed once for each individual (aggregated across periods from t_enter to t_end of follow-up)
# modified_cumratecontrol <- log((1 + Y2C)/(1 + Y1C))
modified_cumratecontrol <- tapply(modified_cumratebyPcontrol, as.factor(Id_byperiodcontrol), FUN=sum)
if( nBX0){
BPHtermcontrol <-exp(BX0control %*% allparam[ibalpha0])
BPHtermbyPcontrol <-exp(BX0_byperiodcontrol %*% allparam[ibalpha0])
modified_ratecontrol <- modified_ratecontrol * BPHtermcontrol
# modified cumrate is computed once for each individual (from t_enter to t_end of folowup)
modified_cumratecontrol <- modified_cumratecontrol * BPHtermcontrol
# by period
modified_cumratebyPcontrol <- modified_cumratebyPcontrol * BPHtermcontrol
} else {
BPHtermcontrol <- 1
BPHtermbyPcontrol <-1.0
}
if(sum(is.na(modified_ratecontrol)) | sum(is.na(modified_cumratecontrol))){
warning(paste0(sum(is.na(modified_ratecontrol)),
" NA rate control and ",
sum(is.na(modified_cumratecontrol)),
" NA cumrate control with Brass coef",
paste(format(brass0), collapse = " ")))
}
if(min(modified_ratecontrol, na.rm=TRUE)<0 | min(modified_cumratecontrol, na.rm=TRUE)<0){
warning(paste0(sum(modified_ratecontrol<0, na.rm=TRUE),
" negative rate control and ",
sum(modified_cumratecontrol<0, na.rm=TRUE),
" negative cumrate control with Brass coef",
paste(format(brass0), collapse = " ")))
}
}
###################
# compute dL/d brass0
if(is.null(Spline_B)){
dLdbrass0 <- NULL
}
else {
if (!is.null(weightscontrol)) {
dLdbrass0 <- crossprod(DE2C, Ycontrol[,3]*weightscontrol/evalderivbrasscontrol) +
crossprod(E2C, Ycontrol[,3] * weightscontrol /(1+ Y2C) ) +
# cumulative part
crossprod(E1CbyP, Y1CbyP * BPHtermbyPcontrol * weights_byperiodcontrol /(1+ Y1CbyP) ) -
crossprod(E2CbyP, ( Y2CbyP * BPHtermbyPcontrol)* weights_byperiodcontrol /(1+ Y2CbyP) )
} else {
dLdbrass0 <- crossprod(DE2C, Ycontrol[,3]/evalderivbrasscontrol) +
crossprod(E2C, Ycontrol[,3]/(1+ Y2C) ) +
# cumulative part
crossprod(E1CbyP, Y1CbyP * BPHtermbyPcontrol /(1+ Y1CbyP) ) -
crossprod(E2CbyP, (Y2CbyP * BPHtermbyPcontrol)/(1+ Y2CbyP) )
}
}
if( nBX0){
# compute dL/d balpha0
if (!is.null(weightscontrol)) {
dLdbalpha0 <- crossprod(BX0control ,(Ycontrol[,3] * weightscontrol) ) - crossprod(BX0_byperiodcontrol , modified_cumratebyPcontrol * weights_byperiodcontrol)
} else {
dLdbalpha0 <- crossprod(BX0control ,Ycontrol[,3]) - crossprod(BX0_byperiodcontrol ,modified_cumratebyPcontrol )
}
}
else {
dLdbalpha0 <- NULL
}
gr_control <- c(rep(0, length(allparam) - nbrass0 - nBX0),
dLdbrass0,
dLdbalpha0)
}
else {
modified_ratecontrol <- NULL
modified_cumratecontrol <- NULL
modified_cumratebyPcontrol <- NULL
gr_control <- 0.0
}
# print("*************************************************gr_control")
# print(gr_control)
################################################################################
# exposed group
# Brass model
# computes intermediates
if(is.null(Spline_B)){
modified_rate <- expected_rate
modified_cumrate <- log((1 + exp( expected_logit_end))/(1 + exp(expected_logit_enter)))
modified_cumratebyP <- log((1 + exp( expected_logit_end_byperiod))/(1 + exp(expected_logit_enter_byperiod)))
}
else {
# parameter of the first basis is one
brass0 <- c(1.0, allparam[ibrass0])
S_B <- Spline_B * brass0
Y2E <- exp(predictSpline(S_B, expected_logit_end))
Y1E <- exp(predictSpline(S_B, expected_logit_enter))
evalderivbrass <- predictSpline(deriv(S_B), expected_logit_end)
# E(x2) spline bases of the brass transformation at exit
E2E <- evaluate(Spline_B, expected_logit_end)[,-1]
# E(x1) spline bases of the brass transformation at enter
E1E <- evaluate(Spline_B, expected_logit_enter)[,-1]
# E'(x2) derivative of the spline bases of the brass transformation at exit
DE2E <- evaluate(deriv(Spline_B), expected_logit_end)[,-1]
# contribution of non time dependant variables
modified_rate <- expected_rate * (1 + exp(-expected_logit_end))/(1+ 1/Y2E) * evalderivbrass
# by period
Y2EbyP <- exp(predictSpline(S_B, expected_logit_end_byperiod))
Y1EbyP <- exp(predictSpline(S_B, expected_logit_enter_byperiod))
evalderivbrassbyP <- predictSpline(deriv(S_B), expected_logit_end_byperiod)
# E(x2) spline bases of the brass transformation at exit
E2EbyP <- evaluate(Spline_B, expected_logit_end_byperiod)[,-1]
# E(x1) spline bases of the brass transformation at enter
E1EbyP <- evaluate(Spline_B, expected_logit_enter_byperiod)[,-1]
# E'(x2) derivative of the spline bases of the brass transformation at exit
DE2EbyP <- evaluate(deriv(Spline_B), expected_logit_end_byperiod)[,-1]
# contribution of non time dependant variables
modified_cumratebyP <- log((1 + Y2EbyP)/(1 + Y1EbyP))
# modified_cumratecontrol <- log((1 + Y2C)/(1 + Y1C))
modified_cumrate <- tapply(modified_cumratebyP, as.factor(Id_byperiod), FUN=sum)
}
if( nBX0){
BPHterm <-exp(BX0 %*% allparam[ibalpha0])
modified_rate <- modified_rate * BPHterm
modified_cumrate <- modified_cumrate * BPHterm
BX0_byperiod <- BX0[Id_byperiod,]
BPHtermbyP <-exp(BX0_byperiod %*% allparam[ibalpha0])
modified_cumratebyP <- modified_cumratebyP * BPHtermbyP
}
else {
BPHterm <- 1.0
BPHtermbyP <- 1.0
}
if(sum(is.na(modified_rate)) | sum(is.na(modified_cumrate))){
warning(paste0(sum(is.na(modified_rate)),
" NA rate and ",
sum(is.na(modified_cumrate)),
" NA cumrate with Brass coef",
paste(format(brass0), collapse = " ")))
}
if(min(modified_rate, na.rm=TRUE)<0 | min(modified_cumrate, na.rm=TRUE)<0){
warning(paste0(sum(modified_rate<0, na.rm=TRUE),
" negative rate and ",
sum(modified_cumrate<0, na.rm=TRUE),
" negative cumrate with Brass coef",
paste(format(brass0), collapse = " ")))
}
# spline bases for each TD effect
if(nX + nZ){
# spline bases for each TD effect at the end of the interval
YT <- evaluate(Spline_t, Y[,2], intercept=TRUE)
if(nW){
RatePred <- ifelse(Y[,3] ,
PHterm * exp(YT0Gamma0 + apply(YT * Zalphabeta, 1, sum) + apply(WCEcontrib, 1, sum)),
0)
}
else {
RatePred <- ifelse(Y[,3] ,
PHterm * exp(YT0Gamma0 + apply(YT * Zalphabeta, 1, sum)),
0)
}
} else {
if(nW){
RatePred <- ifelse(Y[,3] ,
PHterm * exp(YT0Gamma0 + apply(WCEcontrib, 1, sum)),
0)
}
else {
RatePred <- ifelse(Y[,3] ,
PHterm * exp(YT0Gamma0),
0)
}
}
F <- ifelse(Y[,3] ,
RatePred/(RatePred + modified_rate ),
0)
Ftable <- ifelse(Y[,3] ,
modified_rate/(RatePred + modified_rate ),
0)
# for each row i of an Id, FId[i] <- F[final_time of the id]
FId <- F[LastId]
if(nX + nZ) {
if(nX0>0) {
Intb <- Intb * c(PHterm)
}
IntbF <- YT*F - Intb
}
else {
IntbF <- NULL
}
Intb0 <- Intb0 * c(PHterm)
WF <- list()
if(nW){
for(i in 1:nW){
if(nX0>0) {
# rescale IndbW by PHterm
IntbW[[i]] <- IntbW[[i]] * c(PHterm)
}
WF[[i]] <- evaluate(ISpline_W[[i]], Y[,4] - Y[,1], intercept=Intercept_W[i]) * FId
}
}
else {
WF <- NULL
}
#####################################################################"
# now computes the mean score and the gradients
#^parameters of the correction of the life table
if(is.null(Spline_B)){
dLdbrass0 <- NULL
}
else {
if (!is.null(weights)) {
# compute dL/d brass0
dLdbrass0 <- crossprod(DE2E , Ftable *weights/evalderivbrass) +
crossprod(E2E, Ftable * weights /(1+ Y2E) ) +
# cumulative part
crossprod(E1EbyP, (Y1EbyP * BPHtermbyP) * weights_byperiod /(1+ Y1EbyP) ) -
crossprod(E2EbyP, (Y2EbyP * BPHtermbyP) * weights_byperiod /(1+ Y2EbyP) )
} else {
# compute dL/d brass0
dLdbrass0 <- crossprod(DE2E, Ftable / evalderivbrass) +
crossprod(E2E, Ftable/(1+ Y2E) ) +
# cumulative part
crossprod(E1EbyP, (Y1EbyP * BPHtermbyP) /(1+ Y1EbyP) ) -
crossprod(E2EbyP, (Y2EbyP * BPHtermbyP) /(1+ Y2EbyP) )
}
}
if( nBX0){
# compute dL/d balpha0
if (!is.null(weights)) {
dLdbalpha0 <- crossprod(BX0 ,( Ftable - modified_cumrate )* weights )
} else {
dLdbalpha0 <- crossprod(BX0 , ( Ftable - modified_cumrate ) )
}
}
else {
dLdbalpha0 <- NULL
}
if (!is.null(weights)) {
# dldgamma0
if(is.null(Spline_t0)){
dLdgamma0 <- NULL
}
else {
dLdgamma0 <- crossprod( YT0 * F - Intb0 , weights)
}
if (nX0) {
dLdalpha0 <- crossprod(X0 , (F - PHterm * NPHterm) * weights )
}
else {
dLdalpha0 <- NULL
}
if (nX){
# traiter les Intercept_t_NPH
dLdbeta0 <- NULL
for(i in 1:nX){
if ( Intercept_t_NPH[i] ){
dLdbeta0 <- c(dLdbeta0, crossprod(X[,i] , IntbF * weights))
}
else {
dLdbeta0 <- c(dLdbeta0, crossprod(X[,i] , IntbF[,indx_without_intercept] * weights))
}
}
}
else {
dLdbeta0 <- NULL
}
if (nZ) {
baseIntbF <- IntbF %*% t(tBeta)
dLdalpha <- rep(0,getNparam(Z) )
indZ <- getIndex(Z)
for(iZ in 1:nZ){
if ( debug.gr > 200 ){
}
dLdalpha[indZ[iZ,1]:indZ[iZ,2]] <- crossprod(Z@DM[,indZ[iZ,1]:indZ[iZ,2]], baseIntbF[,iZ] * weights )
}
dLdbeta <- c(crossprod((IntbF[,-1, drop=FALSE]),Zalpha * weights))
}
else {
dLdalpha <- NULL
dLdbeta <- NULL
}
if(nW){
dLdeta0 <- NULL
for(i in 1:nW){
dLdeta0 <- cbind(dLdeta0, crossprod(weights, W[,i] * WF[[i]] - IntbW[[i]]))
}
}
else{
dLdeta0 <- NULL
}
} # end weights!=NULL
else {
# d<dgamma0
if(is.null(Spline_t0)){
dLdgamma0 <- NULL
}
else {
dLdgamma0 <- apply( YT0 * F - Intb0 , 2, sum)
}
if (nX0) {
dLdalpha0 <- crossprod(X0 , F - PHterm* NPHterm )
}
else {
dLdalpha0 <- NULL
}
if (nX){
# traiter les Intercept_t_NPH
dLdbeta0 <- NULL
for(i in 1:nX){
if ( Intercept_t_NPH[i] ){
dLdbeta0 <- c(dLdbeta0, crossprod(X[,i] , IntbF))
}
else {
dLdbeta0 <- c(dLdbeta0, crossprod(X[,i] , IntbF[,indx_without_intercept]))
}
}
}
else {
dLdbeta0 <- NULL
}
if (nZ) {
baseIntbF <- IntbF %*% t(tBeta)
dLdalpha <- rep(0,getNparam(Z) )
indZ <- getIndex(Z)
for(iZ in 1:nZ){
dLdalpha[indZ[iZ,1]:indZ[iZ,2]] <- crossprod(Z@DM[,indZ[iZ,1]:indZ[iZ,2]], baseIntbF[,iZ] )
}
dLdbeta <- c(crossprod((IntbF[,-1, drop=FALSE]),Zalpha ))
}
else {
dLdalpha <- NULL
dLdbeta <- NULL
}
# WCE effects
if(nW){
dLdeta0 <- NULL
for(i in 1:nW){
dLdeta0 <- c(dLdeta0, crossprod(W[,i] , WF[[i]]) - apply(IntbW[[i]], 2, sum))
}
}
else{
dLdeta0 <- NULL
}
} # end weights==NULL
gr_exposed <- c(dLdgamma0,
dLdalpha0,
dLdbeta0,
dLdalpha,
dLdbeta,
dLdeta0,
dLdbrass0,
dLdbalpha0)
# print("debdLdeta0grad")
# print(summary(F))
# print(summary(PHterm))
# print(summary(NPHterm ))
# print(summary(X0))
# print(summary(c(PHterm)* NPHterm ) )
# print(summary(( F - c(PHterm)* NPHterm ) ))
# print(summary(( F - c(PHterm)* NPHterm ) * X0))
# print("findLdeta0grad")
# print("*************************************************gr_exposed")
# print(gr_exposed)
ret <- gr_control + gr_exposed
#cat("gr ")
#print(ret)
#cat("gC ")
#print(gr_control)
#cat("gE ")
#print(gr_exposed)
if(debug.gr){
attr(rep, "intb0") <- Intb0
attr(rep, "F") <- F
attr(rep, "YT0") <- YT0
if(nX+nZ){
attr(rep, "YT") <- YT
attr(rep, "intb") <- Intb
attr(rep, "intbF") <- IntbF
}
if(nW){
attr(rep, "intbW") <- IntbW
}
attr(rep, "RatePred") <- RatePred
if(debug.gr > 1000){
cat("grad value and parameters :", "\n")
print(cbind( rep, allparam))
}
}
if ( debug.gr) {
attr(ret, "PHterm") <- PHterm
attr(ret, "NPHterm") <- NPHterm
attr(ret, "WCEcontrib") <- WCEcontrib
attr(ret, "modified_rate") <- modified_rate
attr(ret, "modified_cumrate") <- modified_cumrate
attr(ret, "modified_cumratebyP") <- modified_cumratebyP
attr(ret, "gr_exposed") <- gr_exposed
attr(ret, "modified_ratecontrol") <- modified_ratecontrol
attr(ret, "modified_cumratecontrol") <- modified_cumratecontrol
attr(ret, "modified_cumratebyPcontrol") <- modified_cumratebyPcontrol
attr(ret, "gr_control") <- gr_control
if ( debug.gr > 1000) cat("fin gr_flexrsurv_GA0B0ABE0Br0Control **", ret, "++ \n")
}
#cat("************gr_flexrsurv_fromto_1WCEaddBr0Control ")
#print(cbind(allparam, ret), digits=12)
ret
}
|
# dl dataset
# Download the SAFI teaching datasets used in the lessons into ./data.
# Ensure the destination directory exists first, otherwise download.file()
# fails with "cannot open destfile".
if (!dir.exists("./data")) {
  dir.create("./data", recursive = TRUE)
}
# xlsx files are binary and must be fetched with mode = "wb" so Windows does
# not corrupt them with text-mode newline translation; the csv files are text.
download.file(url = "https://mq-software-carpentry.github.io/R-git-for-research/data/SAFI_messy.xlsx",
              destfile = "./data/SAFI_messy.xlsx", mode = "wb")
download.file(url = "https://mq-software-carpentry.github.io/R-git-for-research/data/SAFI_clean.csv",
              destfile = "./data/SAFI_clean.csv")
download.file(url = "https://mq-software-carpentry.github.io/R-git-for-research/data/SAFI_dates.xlsx",
              destfile = "./data/SAFI_dates.xlsx", mode = "wb")
download.file(url = "https://mq-software-carpentry.github.io/R-git-for-research/data/SAFI_openrefine.csv",
              destfile = "./data/SAFI_openrefine.csv", mode = "wb")
|
/scripts/data_downloads.R
|
no_license
|
Sambam210/data-carpentry-r
|
R
| false
| false
| 683
|
r
|
# dl dataset
# Fetch the four SAFI teaching datasets into ./data. Binary (xlsx) files are
# transferred with mode = "wb"; the clean csv keeps download.file()'s default
# text mode, exactly matching the original explicit calls.
safi_base  <- "https://mq-software-carpentry.github.io/R-git-for-research/data/"
safi_files <- c("SAFI_messy.xlsx", "SAFI_clean.csv", "SAFI_dates.xlsx", "SAFI_openrefine.csv")
safi_binary <- c(TRUE, FALSE, TRUE, TRUE)
for (k in seq_along(safi_files)) {
  src <- paste0(safi_base, safi_files[k])
  dst <- paste0("./data/", safi_files[k])
  if (safi_binary[k]) {
    download.file(url = src, destfile = dst, mode = "wb")
  } else {
    download.file(url = src, destfile = dst)
  }
}
|
#calculating within vs. between species variance, following Anderegg et al. 2018 Ecology Letters.
# Fits random-intercept models for six leaf/root N and P traits and decomposes
# trait variance into family, genus, species and residual (within-species)
# components, reported as proportions and saved to output.path.
# NOTE(review): rm(list = ls()) wipes the global environment; acceptable in a
# stand-alone analysis script, but this file should never be source()d from
# other code.
rm(list=ls())
source('paths.r')
library(lme4)
library(caper)   # caper Depends on ape, which provides read.tree() -- confirm
library(MuMIn)
#set output path.----
output.path <- variance_decomp_output.path
#load data.----
# Intra-specific trait observations (multiple records per species).
d <- readRDS(intra_specific_analysis_data.path)
#Filter based on interspecific observations actually used in the analysis.
inter <- readRDS(inter_specific_analysis_data.path)
phy <- read.tree(phylogeny_raw.path) #'colin_2018-12--2.tre'
#Some data manipulation so I can match intra-specific observations to species included in interspecific analysis.
inter$biome_trop <- ifelse(inter$biome3 == 'b_tropical',1,0)
inter$biome_bore <- ifelse(inter$biome3 == 'c_boreal' ,1,0)
# Capitalize the first letter and replace underscores with spaces so phylogeny
# tip labels match the format of the Species column.
phy$tip.label <- paste0(toupper(substr(phy$tip.label, 1, 1)), substr(phy$tip.label, 2, nchar(phy$tip.label)))
phy$tip.label <- gsub('_',' ',phy$tip.label)
phy$node.label <- NULL
inter <- inter[inter$Species %in% phy$tip.label,]
# Species with no trait observations at all are excluded below via anti-match.
drop <- inter[is.na(inter$Ngreen) & is.na(inter$Nsenes) & is.na(inter$Nroots) & is.na(inter$Pgreen) & is.na(inter$Psenes) & is.na(inter$Proots) & is.na(inter$log.LL) & is.na(inter$root_lifespan),]
inter <- inter[,c('tpl.Species','biome_trop','biome_bore','MYCO_ASSO','nfix','pgf','mat.c','map.c','deciduous')]
inter <- inter[complete.cases(inter),]
inter <- inter[!(inter$tpl.Species %in% drop$tpl.Species),]
#Filter intra-specific observations.
d <- d[d$tpl.Species %in% inter$tpl.Species,]
#subset to species that have at least 3 observations.
drop <- table(d$tpl.Species)
drop <- drop[drop >= 3]
d <- d[d$tpl.Species %in% names(drop),]
#fit lme models.----
# Nested random intercepts partition log10-trait variance across taxonomic
# levels; the residual variance is the within-species component.
Ngreen <- lmer(log10(Ngreen) ~ 1 + (1|tpl.Species) + (1|tpl.Genus) + (1|tpl.Family), data = d)
Nsenes <- lmer(log10(Nsenes) ~ 1 + (1|tpl.Species) + (1|tpl.Genus) + (1|tpl.Family), data = d)
Nroots <- lmer(log10(Nroots) ~ 1 + (1|tpl.Species) + (1|tpl.Genus) + (1|tpl.Family), data = d)
Pgreen <- lmer(log10(Pgreen) ~ 1 + (1|tpl.Species) + (1|tpl.Genus) + (1|tpl.Family), data = d)
Psenes <- lmer(log10(Psenes) ~ 1 + (1|tpl.Species) + (1|tpl.Genus) + (1|tpl.Family), data = d)
Proots <- lmer(log10(Proots) ~ 1 + (1|tpl.Species) + (1|tpl.Genus) + (1|tpl.Family), data = d)
#get variances.----
# data.frame(VarCorr(fit))[, 4] is the 'vcov' (variance) column.
# NOTE(review): the rownames assigned below assume the component order is
# Species, Genus, Family, then Residual -- TODO confirm this ordering holds
# for these fits (lme4 orders grouping factors by number of levels).
Ngreen_var <- data.frame(VarCorr(Ngreen))[,4]
Nsenes_var <- data.frame(VarCorr(Nsenes))[,4]
Nroots_var <- data.frame(VarCorr(Nroots))[,4]
Pgreen_var <- data.frame(VarCorr(Pgreen))[,4]
Psenes_var <- data.frame(VarCorr(Psenes))[,4]
Proots_var <- data.frame(VarCorr(Proots))[,4]
all <- data.frame(Ngreen_var,Nsenes_var,Nroots_var,Pgreen_var,Psenes_var,Proots_var)
rownames(all) <- c('inter_species','inter_genus','inter_family','intra_species')
#Normalize variances to proportions, set order.----
for(i in seq_len(ncol(all))){
  all[,i] <- all[,i] / sum(all[,i])
}
my_order <- c('intra_species','inter_species','inter_genus','inter_family')
all <- all[match(my_order, rownames(all)),]
#Save output.----
saveRDS(all, output.path)
|
/data_analysis/4._trait_variance_decomposition.r
|
no_license
|
colinaverill/Averill_et_al_2019_myco.traits
|
R
| false
| false
| 2,979
|
r
|
#calculating within vs. between species variance, following Anderegg et al. 2018 Ecology Letters.
# Fits random-intercept models for six leaf/root N and P traits and decomposes
# trait variance into taxonomic components (family, genus, species) plus the
# residual within-species component, saved as proportions.
rm(list=ls())
source('paths.r')
library(lme4)
library(caper)
library(MuMIn)
#set output path.----
output.path <- variance_decomp_output.path
#load data.----
# Intra-specific trait observations (multiple records per species).
d <- readRDS(intra_specific_analysis_data.path)
#Filter based on interspecific observations actually used in the analysis.
inter <- readRDS(inter_specific_analysis_data.path)
phy <- read.tree(phylogeny_raw.path) #'colin_2018-12--2.tre'
#Some data manipulation so I can match intra-specific observations to species included in interspecific analysis.
inter$biome_trop <- ifelse(inter$biome3 == 'b_tropical',1,0)
inter$biome_bore <- ifelse(inter$biome3 == 'c_boreal' ,1,0)
# Capitalize the first letter and replace underscores with spaces so phylogeny
# tip labels match the format of the Species column.
phy$tip.label <- paste0(toupper(substr(phy$tip.label, 1, 1)), substr(phy$tip.label, 2, nchar(phy$tip.label)))
phy$tip.label <- gsub('_',' ',phy$tip.label)
phy$node.label <- NULL
inter <- inter[inter$Species %in% phy$tip.label,]
# Species with no trait observations at all; excluded below via anti-match.
drop <- inter[is.na(inter$Ngreen) & is.na(inter$Nsenes) & is.na(inter$Nroots) & is.na(inter$Pgreen) & is.na(inter$Psenes) & is.na(inter$Proots) & is.na(inter$log.LL) & is.na(inter$root_lifespan),]
inter <- inter[,c('tpl.Species','biome_trop','biome_bore','MYCO_ASSO','nfix','pgf','mat.c','map.c','deciduous')]
inter <- inter[complete.cases(inter),]
inter <- inter[!(inter$tpl.Species %in% drop$tpl.Species),]
#Filter intra-specific observations.
d <- d[d$tpl.Species %in% inter$tpl.Species,]
#subset to species that have at least 3 observations.
drop <- table(d$tpl.Species)
drop <- drop[drop >= 3]
d <- d[d$tpl.Species %in% names(drop),]
#fit lme models.----
# Nested random intercepts partition log10-trait variance across taxonomic
# levels; the residual variance is the within-species component.
Ngreen <- lmer(log10(Ngreen) ~ 1 + (1|tpl.Species) + (1|tpl.Genus) + (1|tpl.Family), data = d)
Nsenes <- lmer(log10(Nsenes) ~ 1 + (1|tpl.Species) + (1|tpl.Genus) + (1|tpl.Family), data = d)
Nroots <- lmer(log10(Nroots) ~ 1 + (1|tpl.Species) + (1|tpl.Genus) + (1|tpl.Family), data = d)
Pgreen <- lmer(log10(Pgreen) ~ 1 + (1|tpl.Species) + (1|tpl.Genus) + (1|tpl.Family), data = d)
Psenes <- lmer(log10(Psenes) ~ 1 + (1|tpl.Species) + (1|tpl.Genus) + (1|tpl.Family), data = d)
Proots <- lmer(log10(Proots) ~ 1 + (1|tpl.Species) + (1|tpl.Genus) + (1|tpl.Family), data = d)
#get variances.----
# Column 4 of data.frame(VarCorr(fit)) is the 'vcov' (variance) column.
# NOTE(review): the rownames assigned below assume the component order is
# Species, Genus, Family, then Residual -- TODO confirm for these fits.
Ngreen_var <- data.frame(VarCorr(Ngreen))[,4]
Nsenes_var <- data.frame(VarCorr(Nsenes))[,4]
Nroots_var <- data.frame(VarCorr(Nroots))[,4]
Pgreen_var <- data.frame(VarCorr(Pgreen))[,4]
Psenes_var <- data.frame(VarCorr(Psenes))[,4]
Proots_var <- data.frame(VarCorr(Proots))[,4]
all <- data.frame(Ngreen_var,Nsenes_var,Nroots_var,Pgreen_var,Psenes_var,Proots_var)
rownames(all) <- c('inter_species','inter_genus','inter_family','intra_species')
#Normalize variances to proportions, set order.----
for(i in 1:ncol(all)){
  all[,i] <- all[,i] / sum(all[,i])
}
my_order <- c('intra_species','inter_species','inter_genus','inter_family')
all <- all[match(my_order, rownames(all)),]
#Save output.----
saveRDS(all, output.path)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analysis_functions.R
\name{countCustomers}
\alias{countCustomers}
\title{Count customers by group}
\usage{
countCustomers(df, groupVars = NULL)
}
\arguments{
\item{df}{A data frame with a column named customerUID}
\item{groupVars}{A character vector of variable names to group by}
}
\value{
A data frame with columns for the grouping variables and a column named
\code{customers} giving the number of customers. The data frame is passed
through the \code{\link{prettyData}} function.
}
\description{
Count number of customers by group. This function will collect data from
the database if using SQL backend.
}
\examples{
# Demo data: Count number of customers each year purchasing a fishing
# license between 2010 and 2017
filterData(
dataSource = "csv",
activeFilters = list(itemType = "Fish", itemYear = c(2010, 2017))
) \%>\%
countCustomers(c("itemYear", "itemType"))
\dontrun{
# Database connection. Suggest using keyring package to avoid hardcoding
# passwords
myConn <- DBI::dbConnect(odbc::odbc(),
dsn = "HuntFishApp", # Your datasource name
uid = keyring::key_get("HuntFishAppUID"), # Your username
pwd = keyring::key_get("HuntFishAppPWD")
) # Your password
# SQL Backend: Count number of customers each year purchasing a fishing
# license between 2010 and 2017
filterData(
dataSource = "sql",
conn = myConn,
activeFilters = list(itemType = "Fish", itemYear = c(2010, 2017))
) \%>\%
countCustomers(c("itemYear", "itemType"))
}
}
\seealso{
Other analysis functions: \code{\link{calcChurn}},
\code{\link{calcGenderProportion}},
\code{\link{calcParticipation}},
\code{\link{calcRecruitment}}, \code{\link{countItems}},
\code{\link{itemGroupCount}}, \code{\link{sumRevenue}}
}
\concept{analysis functions}
|
/man/countCustomers.Rd
|
permissive
|
chrischizinski/huntfishapp
|
R
| false
| true
| 1,799
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analysis_functions.R
\name{countCustomers}
\alias{countCustomers}
\title{Count customers by group}
\usage{
countCustomers(df, groupVars = NULL)
}
\arguments{
\item{df}{A data frame with a column named customerUID}
\item{groupVars}{A character vector of variable names to group by}
}
\value{
A data frame with columns for the grouping variables and a column named
\code{customers} giving the number of customers. The data frame is passed
through the \code{\link{prettyData}} function.
}
\description{
Count number of customers by group. This function will collect data from
the database if using SQL backend.
}
\examples{
# Demo data: Count number of customers each year purchasing a fishing
# license between 2010 and 2017
filterData(
dataSource = "csv",
activeFilters = list(itemType = "Fish", itemYear = c(2010, 2017))
) \%>\%
countCustomers(c("itemYear", "itemType"))
\dontrun{
# Database connection. Suggest using keyring package to avoid hardcoding
# passwords
myConn <- DBI::dbConnect(odbc::odbc(),
dsn = "HuntFishApp", # Your datasource name
uid = keyring::key_get("HuntFishAppUID"), # Your username
pwd = keyring::key_get("HuntFishAppPWD")
) # Your password
# SQL Backend: Count number of customers each year purchasing a fishing
# license between 2010 and 2017
filterData(
dataSource = "sql",
conn = myConn,
activeFilters = list(itemType = "Fish", itemYear = c(2010, 2017))
) \%>\%
countCustomers(c("itemYear", "itemType"))
}
}
\seealso{
Other analysis functions: \code{\link{calcChurn}},
\code{\link{calcGenderProportion}},
\code{\link{calcParticipation}},
\code{\link{calcRecruitment}}, \code{\link{countItems}},
\code{\link{itemGroupCount}}, \code{\link{sumRevenue}}
}
\concept{analysis functions}
|
#' Produces the possible permutations of a set of nodes
#'
#' @param max A vector of integers. The maximum value of an integer value starting at 0. Defaults to 1. The number of permutations is defined by \code{max}'s length
#' @keywords internal
#' @return A \code{data.frame} (with NULL column names) whose rows enumerate all combinations of 0:max[i] across positions
#' @examples
#
#' \donttest{
#' CausalQueries:::perm(3)
#' }
perm <- function(max = rep(1, 2)) {
    # Build one 0:m sequence per entry of `max` and take the Cartesian
    # product. The previous rlang::exprs(0:!!m) construction merely deferred
    # evaluation that do.call() performed anyway; plain base R produces the
    # identical result without the rlang dependency.
    grid <- lapply(max, function(m) seq.int(0, m))
    x <- do.call(expand.grid, grid)
    colnames(x) <- NULL
    x
}
#' Get string between two regular expression patterns
#'
#' Returns a substring enclosed by two regular expression patterns. By default returns the name of the arguments being indexed by squared brackets (\code{[]}) in a string containing an expression.
#'
#' @param x A character string.
#' @param left A character string. Regular expression to serve as look ahead.
#' @param right A character string. Regular expression to serve as a look behind.
#' @param rm_left An integer. Offset added to the left-side match position when extracting. Defaults to 0.
#' @param rm_right An integer. Offset added to the right-side match position when extracting. Defaults to -1 (stop just before the match).
#' @return A character vector. Boundaries with no enclosing pair yield NULL, in which case sapply may return a list rather than a vector.
#' @keywords internal
#' @examples
#' a <- '(XX[Y=0] == 1) > (XX[Y=1] == 0)'
#' CausalQueries:::st_within(a)
#' b <- '(XXX[[Y=0]] == 1 + XXX[[Y=1]] == 0)'
#' CausalQueries:::st_within(b)
st_within <- function(x, left = "[^_[:^punct:]]|\\b", right = "\\[", rm_left = 0, rm_right = -1) {
    if (!is.character(x))
        stop("`x` must be a string.")
    # Candidate left boundaries: punctuation other than '_' or a word boundary.
    puncts <- gregexpr(left, x, perl = TRUE)[[1]]
    # Right boundaries: by default, opening square brackets.
    stops <- gregexpr(right, x, perl = TRUE)[[1]]
    # only index the first of the same boundary when there are consecutive ones (eg. '[[')
    consec_brackets <- diff(stops)
    if (any(consec_brackets == 1)) {
        remov <- which(consec_brackets == 1) + 1
        stops <- stops[-remov]
    }
    # find the closest punctuation or space
    # For each right boundary, pick the nearest left boundary strictly before
    # it; NA when no left boundary precedes it.
    starts <- sapply(stops, function(s) {
        dif <- s - puncts
        dif <- dif[dif > 0]
        ifelse(length(dif) == 0, ret <- NA, ret <- puncts[which(dif == min(dif))])
        return(ret)
    })
    # Drop unmatched pairs; matched ones are extracted with the rm_* offsets
    # applied to each end (unmatched entries contribute NULL to the result).
    drop <- is.na(starts) | is.na(stops)
    sapply(1:length(starts), function(i) if (!drop[i])
        substr(x, starts[i] + rm_left, stops[i] + rm_right))
}
#' Recursive substitution
#'
#' Applies \code{gsub()} sequentially over paired pattern/replacement vectors
#' (1:1 mapping), so later patterns see the result of earlier substitutions.
#' @return The input with all substitutions applied, in order.
#' @keywords internal
#' @param x A character vector.
#' @param pattern_vector A character vector.
#' @param replacement_vector A character vector.
#' @param ... Options passed onto \code{gsub()} call.
#'
gsub_many <- function(x, pattern_vector, replacement_vector, ...) {
    if (length(pattern_vector) != length(replacement_vector))
        stop("pattern and replacement vectors must be the same length")
    # Fold the substitutions over x in order; order matters because each
    # replacement may itself be matched by a later pattern.
    Reduce(function(txt, k) gsub(pattern_vector[k], replacement_vector[k], txt, ...),
           seq_along(pattern_vector), x)
}
#' Clean condition
#'
#' Takes a string specifying a condition and returns a canonically spaced
#' string: all original whitespace is removed and every remaining character
#' is separated by a single space (e.g. "X=1" becomes "X = 1").
#' @keywords internal
#' @return A properly spaced string.
#' @param condition A character string. Condition that refers to a unique position (possible outcome) in a nodal type.
clean_condition <- function(condition) {
    # Strip all whitespace first, then rejoin the remaining characters one
    # space apart.
    stripped <- gsub("[[:space:]]", "", condition, perl = TRUE)
    chars <- strsplit(stripped, split = "")[[1]]
    paste(chars, collapse = " ")
}
#' Interpret or find position in nodal type
#'
#' Interprets the position of one or more digits (specified by \code{position}) in a nodal type. Alternatively returns nodal type digit positions that correspond to one or more given \code{condition}.
#' @inheritParams CausalQueries_internal_inherit_params
#' @param condition A vector of characters. Strings specifying the child node, followed by '|' (given) and the values of its parent nodes in \code{model}.
#' @param position A named list of integers. The name is the name of the child node in \code{model}, and its value a vector of digit positions in that node's nodal type to be interpreted. See `Details`.
#' @return A named \code{list} with interpretation of positions of the digits in a nodal type
#' @details A node for a child node X with \code{k} parents has a nodal type represented by X followed by \code{2^k} digits. Argument \code{position} allows user to interpret the meaning of one or more digit positions in any nodal type. For example \code{position = list(X = 1:3)} will return the interpretation of the first three digits in causal types for X. Argument \code{condition} allows users to query the digit position in the nodal type by providing instead the values of the parent nodes of a given child. For example, \code{condition = 'X | Z=0 & R=1'} returns the digit position that corresponds to values X takes when Z = 0 and R = 1.
#' @examples
#' model <- make_model('R -> X; Z -> X; X -> Y')
#' #Example using digit position
#' interpret_type(model, position = list(X = c(3,4), Y = 1))
#' #Example using condition
#' interpret_type(model, condition = c('X | Z=0 & R=1', 'X | Z=0 & R=0'))
#' #Return interpretation of all digit positions of all nodes
#' interpret_type(model)
#' @export
interpret_type <- function(model, condition = NULL, position = NULL) {
    if (!is.null(condition) & !is.null(position))
        stop("Must specify either `query` or `nodal_position`, but not both.")
    parents <- get_parents(model)
    # One candidate-outcome table per node: all 0/1 combinations of its parents.
    types <- lapply(lapply(parents, length), function(l) perm(rep(1, l)))
    if (is.null(position)) {
        # Default: interpret every digit position of every node. Root nodes
        # (no parents, hence an empty table) are flagged with NA and handled
        # in the NA branch below. (`if/else` replaces a previous
        # ifelse(test, return(...), return(...)) anti-pattern.)
        position <- lapply(types, function(i) if (length(i) == 0) NA else seq_len(nrow(i)))
    } else {
        if (!all(names(position) %in% names(types)))
            stop("One or more names in `position` not found in model.")
    }
    interpret <- lapply(seq_along(position), function(i) {
        positions <- position[[i]]
        type <- types[[names(position)[i]]]
        pos_elements <- type[positions, ]
        if (!all(is.na(positions))) {
            # Describe each requested digit position as a parent-value
            # combination, e.g. "X | Z = 0 & R = 1".
            interpret <- sapply(seq_len(nrow(pos_elements)), function(row) paste0(parents[[names(position)[i]]],
                " = ", pos_elements[row, ], collapse = " & "))
            interpret <- paste0(paste0(c(names(position)[i], " | "), collapse = ""), interpret)
            # Create 'Y*[*]**'-type representations
            asterisks <- rep("*", nrow(type))
            asterisks_ <- sapply(positions, function(s) {
                if (s < length(asterisks)) {
                  if (s == 1)
                    paste0(c("[*]", asterisks[(s + 1):length(asterisks)]), collapse = "") else paste0(c(asterisks[1:(s - 1)], "[*]", asterisks[(s + 1):length(asterisks)]),
                    collapse = "")
                } else {
                  paste0(c(asterisks[1:(s - 1)], "[*]"), collapse = "")
                }
            })
            display <- paste0(names(position)[i], asterisks_)
        } else {
            # Root node: the nodal type is just the node's own value, 0 or 1.
            interpret <- paste0(paste0(c(names(position)[i], " = "), collapse = ""), c(0, 1))
            display <- paste0(names(position)[i], c(0, 1))
        }
        data.frame(node = names(position)[i], position = position[[i]], display = display, interpretation = interpret,
            stringsAsFactors = FALSE)
    })
    names(interpret) <- names(position)
    if (!is.null(condition)) {
        # Keep only rows whose interpretation contains every clause of at
        # least one requested condition (clauses split on '&' and '|').
        conditions <- sapply(condition, clean_condition)
        interpret_ <- lapply(interpret, function(i) {
            slct <- sapply(conditions, function(cond) {
                a <- trimws(strsplit(cond, "&|\\|")[[1]])
                sapply(i$interpretation, function(bi) {
                  b <- trimws(strsplit(bi, "&|\\|")[[1]])
                  all(a %in% b)
                })
            })
            i <- i[rowSums(slct) > 0, ]
            if (nrow(i) == 0)
                i <- NULL
            i
        })
        interpret <- interpret_[!sapply(interpret_, is.null)]
    }
    return(interpret)
}
#' Expand wildcard
#'
#' Expand statement containing wildcard
#'
#' @inheritParams CausalQueries_internal_inherit_params
#' @param to_expand A character vector of length 1L.
#' @param join_by A character string or NULL. Operator used to join the expanded versions of each parenthesized segment (defaults to "|"); if NULL, the individual expansions are returned instead of a joined string.
#' @param verbose Logical. Whether to print expanded query on the console.
#' @return A character string with the expanded expression. Wildcard '.' is replaced by 0 and 1.
#' @importFrom rlang expr
#' @export
#' @examples
#'
#' # Position of parentheses matters for type of expansion
#' # In the "global expansion" versions of the entire statement are joined
#' expand_wildcard('(Y[X=1, M=.] > Y[X=1, M=.])')
#' # In the "local expansion" versions of indicated parts are joined
#' expand_wildcard('(Y[X=1, M=.]) > (Y[X=1, M=.])')
#'
#' # If parentheses are missing global expansion used.
#' expand_wildcard('Y[X=1, M=.] > Y[X=1, M=.]')
#'
#' # Expressions not requiring expansion are allowed
#' expand_wildcard('(Y[X=1])')
#'
expand_wildcard <- function(to_expand, join_by = "|", verbose = TRUE) {
    # Extract the parenthesized segments; each is expanded independently.
    orig <- st_within(to_expand, left = "\\(", right = "\\)", rm_left = 1)
    if (is.list(orig)) {
        if (is.null(orig[[1]])){
            message("No parentheses indicated. Global expansion assumed. See expand_wildcard.")
            orig <- to_expand}
    }
    # Replace each segment by a %expand%k placeholder; expansions are spliced
    # back into this skeleton at the end.
    skeleton <- gsub_many(to_expand, orig, paste0("%expand%", 1:length(orig)), fixed = TRUE)
    # Only segments containing a '.' wildcard need expansion.
    expand_it <- grepl("\\.", orig)
    expanded_types <- lapply(1:length(orig), function(i) {
        if (!expand_it[i])
            return(orig[i]) else {
            # Split the segment at literal dots; the pieces are re-joined
            # after each wildcard variable is given a concrete value.
            exp_types <- strsplit(orig[i], ".", fixed = TRUE)[[1]]
            # Lookahead regex: capture the variable name immediately before
            # an '= .' wildcard assignment.
            a <- gregexpr("\\w{1}\\s*(?=(=\\s*\\.){1})", orig[i], perl = TRUE)
            matcha <- trimws(unlist(regmatches(orig[i], a)))
            rep_n <- sapply(unique(matcha), function(e) sum(matcha == e))
            n_types <- length(unique(matcha))
            # All 0/1 combinations over the distinct wildcard variables.
            grid <- replicate(n_types, expr(c(0, 1)))
            type_values <- do.call(expand.grid, grid)
            colnames(type_values) <- unique(matcha)
            # Substitute each combination into the split segment and rejoin.
            apply(type_values, 1, function(s) {
                to_sub <- paste0(colnames(type_values), "(\\s)*=(\\s)*$")
                subbed <- gsub_many(exp_types, to_sub, paste0(colnames(type_values), "=", s), perl = TRUE)
                paste0(subbed, collapse = "")
            })
        }
    })
    if (!is.null(join_by)) {
        # Join each segment's expansions with the requested operator, then
        # splice into the skeleton.
        oper <- sapply(expanded_types, function(l) {
            paste0(l, collapse = paste0(" ", join_by, " "))
        })
        oper_return <- gsub_many(skeleton, paste0("%expand%", 1:length(orig)), oper)
    } else {
        # No joining: return one expanded statement per wildcard combination.
        oper <- do.call(cbind, expanded_types)
        oper_return <- apply(oper, 1, function(i) gsub_many(skeleton, paste0("%expand%", 1:length(orig)),
            i))
    }
    if (verbose) {
        cat("Generated expanded expression:\n")
        cat(unlist(oper_return), sep = "\n")
    }
    oper_return
}
#' Get parameter names
#'
#' Parameter names taken from \code{P} matrix or model if no \code{P} matrix provided
#'
#' @inheritParams CausalQueries_internal_inherit_params
#' @param include_paramset Logical. Whether to include the param set prefix as part of the name.
#' @return A character vector with the names of the parameters in the model
#' @export
#' @examples
#'
#' get_parameter_names(make_model('X->Y'))
#'
get_parameter_names <- function(model, include_paramset = TRUE) {
    # Full names carry the param-set prefix; otherwise return bare nodal types.
    if (include_paramset) {
        model$parameters_df$param_names
    } else {
        model$parameters_df$nodal_type
    }
}
#' Whether a query contains an exact string
#'
#' Used in map_query_to_nodal_types.
#' @param var Variable name
#' @param query An expression in string format.
#' @return A logical expression indicating whether a variable is included in a query
#' @keywords internal
includes_var <- function(var, query) {
    # \< and \> anchor at word boundaries, so e.g. "X" does not match "XX".
    any(grepl(paste0("\\<", var, "\\>"), query))
}
#' List of nodes contained in query
#'
#' Returns the subset of model nodes whose names appear (as whole words) in
#' the query string, in the order they appear in \code{model$nodes}.
#' @inheritParams CausalQueries_internal_inherit_params
#' @return A vector indicating which variables are included in a query
#' @keywords internal
var_in_query <- function(model, query) {
    v <- model$nodes
    # vapply instead of sapply: guarantees a logical vector even when the
    # model has no nodes (sapply would return an empty list and the
    # subsetting below would then error).
    v[vapply(v, includes_var, logical(1), query = query)]
}
|
/R/helpers.R
|
no_license
|
yadmasu1/CausalQueries
|
R
| false
| false
| 12,183
|
r
|
#' Produces the possible permutations of a set of nodes
#'
#' @param max A vector of integers. The maximum value of an integer value starting at 0. Defaults to 1. The number of permutations is defined by \code{max}'s length
#' @keywords internal
#' @return A \code{data.frame} (with NULL column names) whose rows enumerate all combinations of 0:max[i] across positions
#' @examples
#
#' \donttest{
#' CausalQueries:::perm(3)
#' }
perm <- function(max = rep(1, 2)) {
    # Build one 0:m sequence per entry of `max` and take the Cartesian
    # product. The previous rlang::exprs(0:!!m) construction merely deferred
    # evaluation that do.call() performed anyway; plain base R produces the
    # identical result without the rlang dependency.
    grid <- lapply(max, function(m) seq.int(0, m))
    x <- do.call(expand.grid, grid)
    colnames(x) <- NULL
    x
}
#' Get string between two regular expression patterns
#'
#' Returns a substring enclosed by two regular expression patterns. By default returns the name of the arguments being indexed by squared brackets (\code{[]}) in a string containing an expression.
#'
#' @param x A character string.
#' @param left A character string. Regular expression to serve as look ahead.
#' @param right A character string. Regular expression to serve as a look behind.
#' @param rm_left An integer. Offset added to the left-side match position when extracting. Defaults to 0.
#' @param rm_right An integer. Offset added to the right-side match position when extracting. Defaults to -1 (stop just before the match).
#' @return A character vector. Boundaries with no enclosing pair yield NULL, in which case sapply may return a list rather than a vector.
#' @keywords internal
#' @examples
#' a <- '(XX[Y=0] == 1) > (XX[Y=1] == 0)'
#' CausalQueries:::st_within(a)
#' b <- '(XXX[[Y=0]] == 1 + XXX[[Y=1]] == 0)'
#' CausalQueries:::st_within(b)
st_within <- function(x, left = "[^_[:^punct:]]|\\b", right = "\\[", rm_left = 0, rm_right = -1) {
    if (!is.character(x))
        stop("`x` must be a string.")
    # Candidate left boundaries: punctuation other than '_' or a word boundary.
    puncts <- gregexpr(left, x, perl = TRUE)[[1]]
    # Right boundaries: by default, opening square brackets.
    stops <- gregexpr(right, x, perl = TRUE)[[1]]
    # only index the first of the same boundary when there are consecutive ones (eg. '[[')
    consec_brackets <- diff(stops)
    if (any(consec_brackets == 1)) {
        remov <- which(consec_brackets == 1) + 1
        stops <- stops[-remov]
    }
    # find the closest punctuation or space
    # For each right boundary, pick the nearest left boundary strictly before
    # it; NA when no left boundary precedes it.
    starts <- sapply(stops, function(s) {
        dif <- s - puncts
        dif <- dif[dif > 0]
        ifelse(length(dif) == 0, ret <- NA, ret <- puncts[which(dif == min(dif))])
        return(ret)
    })
    # Drop unmatched pairs; matched ones are extracted with the rm_* offsets
    # applied to each end (unmatched entries contribute NULL to the result).
    drop <- is.na(starts) | is.na(stops)
    sapply(1:length(starts), function(i) if (!drop[i])
        substr(x, starts[i] + rm_left, stops[i] + rm_right))
}
#' Recursive substitution
#'
#' Applies \code{gsub()} sequentially over paired pattern/replacement vectors
#' (1:1 mapping), so later patterns see the result of earlier substitutions.
#' @return The input with all substitutions applied, in order.
#' @keywords internal
#' @param x A character vector.
#' @param pattern_vector A character vector.
#' @param replacement_vector A character vector.
#' @param ... Options passed onto \code{gsub()} call.
#'
gsub_many <- function(x, pattern_vector, replacement_vector, ...) {
    if (length(pattern_vector) != length(replacement_vector))
        stop("pattern and replacement vectors must be the same length")
    # Fold the substitutions over x in order; order matters because each
    # replacement may itself be matched by a later pattern.
    Reduce(function(txt, k) gsub(pattern_vector[k], replacement_vector[k], txt, ...),
           seq_along(pattern_vector), x)
}
#' Clean condition
#'
#' Takes a string specifying a condition and returns a canonically spaced
#' string: all original whitespace is removed and every remaining character
#' is separated by a single space (e.g. "X=1" becomes "X = 1").
#' @keywords internal
#' @return A properly spaced string.
#' @param condition A character string. Condition that refers to a unique position (possible outcome) in a nodal type.
clean_condition <- function(condition) {
    # Strip all whitespace first, then rejoin the remaining characters one
    # space apart.
    stripped <- gsub("[[:space:]]", "", condition, perl = TRUE)
    chars <- strsplit(stripped, split = "")[[1]]
    paste(chars, collapse = " ")
}
#' Interpret or find position in nodal type
#'
#' Interprets the position of one or more digits (specified by \code{position}) in a nodal type. Alternatively returns nodal type digit positions that correspond to one or more given \code{condition}.
#' @inheritParams CausalQueries_internal_inherit_params
#' @param condition A vector of characters. Strings specifying the child node, followed by '|' (given) and the values of its parent nodes in \code{model}.
#' @param position A named list of integers. The name is the name of the child node in \code{model}, and its value a vector of digit positions in that node's nodal type to be interpreted. See `Details`.
#' @return A named \code{list} with interpretation of positions of the digits in a nodal type
#' @details A node for a child node X with \code{k} parents has a nodal type represented by X followed by \code{2^k} digits. Argument \code{position} allows user to interpret the meaning of one or more digit positions in any nodal type. For example \code{position = list(X = 1:3)} will return the interpretation of the first three digits in causal types for X. Argument \code{condition} allows users to query the digit position in the nodal type by providing instead the values of the parent nodes of a given child. For example, \code{condition = 'X | Z=0 & R=1'} returns the digit position that corresponds to values X takes when Z = 0 and R = 1.
#' @examples
#' model <- make_model('R -> X; Z -> X; X -> Y')
#' #Example using digit position
#' interpret_type(model, position = list(X = c(3,4), Y = 1))
#' #Example using condition
#' interpret_type(model, condition = c('X | Z=0 & R=1', 'X | Z=0 & R=0'))
#' #Return interpretation of all digit positions of all nodes
#' interpret_type(model)
#' @export
interpret_type <- function(model, condition = NULL, position = NULL) {
    if (!is.null(condition) & !is.null(position))
        stop("Must specify either `query` or `nodal_position`, but not both.")
    parents <- get_parents(model)
    # One candidate-outcome table per node: all 0/1 combinations of its parents.
    types <- lapply(lapply(parents, length), function(l) perm(rep(1, l)))
    if (is.null(position)) {
        # Default: interpret every digit position of every node. Root nodes
        # (no parents, hence an empty table) are flagged with NA and handled
        # in the NA branch below. (`if/else` replaces a previous
        # ifelse(test, return(...), return(...)) anti-pattern.)
        position <- lapply(types, function(i) if (length(i) == 0) NA else seq_len(nrow(i)))
    } else {
        if (!all(names(position) %in% names(types)))
            stop("One or more names in `position` not found in model.")
    }
    interpret <- lapply(seq_along(position), function(i) {
        positions <- position[[i]]
        type <- types[[names(position)[i]]]
        pos_elements <- type[positions, ]
        if (!all(is.na(positions))) {
            # Describe each requested digit position as a parent-value
            # combination, e.g. "X | Z = 0 & R = 1".
            interpret <- sapply(seq_len(nrow(pos_elements)), function(row) paste0(parents[[names(position)[i]]],
                " = ", pos_elements[row, ], collapse = " & "))
            interpret <- paste0(paste0(c(names(position)[i], " | "), collapse = ""), interpret)
            # Create 'Y*[*]**'-type representations
            asterisks <- rep("*", nrow(type))
            asterisks_ <- sapply(positions, function(s) {
                if (s < length(asterisks)) {
                  if (s == 1)
                    paste0(c("[*]", asterisks[(s + 1):length(asterisks)]), collapse = "") else paste0(c(asterisks[1:(s - 1)], "[*]", asterisks[(s + 1):length(asterisks)]),
                    collapse = "")
                } else {
                  paste0(c(asterisks[1:(s - 1)], "[*]"), collapse = "")
                }
            })
            display <- paste0(names(position)[i], asterisks_)
        } else {
            # Root node: the nodal type is just the node's own value, 0 or 1.
            interpret <- paste0(paste0(c(names(position)[i], " = "), collapse = ""), c(0, 1))
            display <- paste0(names(position)[i], c(0, 1))
        }
        data.frame(node = names(position)[i], position = position[[i]], display = display, interpretation = interpret,
            stringsAsFactors = FALSE)
    })
    names(interpret) <- names(position)
    if (!is.null(condition)) {
        # Keep only rows whose interpretation contains every clause of at
        # least one requested condition (clauses split on '&' and '|').
        conditions <- sapply(condition, clean_condition)
        interpret_ <- lapply(interpret, function(i) {
            slct <- sapply(conditions, function(cond) {
                a <- trimws(strsplit(cond, "&|\\|")[[1]])
                sapply(i$interpretation, function(bi) {
                  b <- trimws(strsplit(bi, "&|\\|")[[1]])
                  all(a %in% b)
                })
            })
            i <- i[rowSums(slct) > 0, ]
            if (nrow(i) == 0)
                i <- NULL
            i
        })
        interpret <- interpret_[!sapply(interpret_, is.null)]
    }
    return(interpret)
}
#' Expand wildcard
#'
#' Expand statement containing wildcard
#'
#' @inheritParams CausalQueries_internal_inherit_params
#' @param to_expand A character vector of length 1L.
#' @param verbose Logical. Whether to print expanded query on the console.
#' @return A character string with the expanded expression. Wildcard '.' is replaced by 0 and 1.
#' @importFrom rlang expr
#' @export
#' @examples
#'
#' # Position of parentheses matters for type of expansion
#' # In the "global expansion" versions of the entire statement are joined
#' expand_wildcard('(Y[X=1, M=.] > Y[X=1, M=.])')
#' # In the "local expansion" versions of indicated parts are joined
#' expand_wildcard('(Y[X=1, M=.]) > (Y[X=1, M=.])')
#'
#' # If parentheses are missing global expansion used.
#' expand_wildcard('Y[X=1, M=.] > Y[X=1, M=.]')
#'
#' # Expressions not requiring expansion are allowed
#' expand_wildcard('(Y[X=1])')
#'
expand_wildcard <- function(to_expand, join_by = "|", verbose = TRUE) {
    # Pull out each parenthesised sub-statement; '.' wildcards are expanded
    # within (and joined across versions of) these units.
    orig <- st_within(to_expand, left = "\\(", right = "\\)", rm_left = 1)
    if (is.list(orig)) {
        if (is.null(orig[[1]])){
            message("No parentheses indicated. Global expansion assumed. See expand_wildcard.")
            orig <- to_expand}
    }
    # Replace each sub-statement with a %expand%i placeholder so the expanded
    # versions can be substituted back into the surrounding expression later.
    skeleton <- gsub_many(to_expand, orig, paste0("%expand%", 1:length(orig)), fixed = TRUE)
    # Only sub-statements that actually contain a '.' wildcard need expanding.
    expand_it <- grepl("\\.", orig)
    expanded_types <- lapply(1:length(orig), function(i) {
        if (!expand_it[i])
            return(orig[i]) else {
            exp_types <- strsplit(orig[i], ".", fixed = TRUE)[[1]]
            # Find the variable name immediately preceding each '= .' wildcard;
            # the lookahead keeps the match on the variable itself.
            a <- gregexpr("\\w{1}\\s*(?=(=\\s*\\.){1})", orig[i], perl = TRUE)
            matcha <- trimws(unlist(regmatches(orig[i], a)))
            rep_n <- sapply(unique(matcha), function(e) sum(matcha == e))
            n_types <- length(unique(matcha))
            # All 0/1 value combinations for the distinct wildcarded variables.
            grid <- replicate(n_types, expr(c(0, 1)))
            type_values <- do.call(expand.grid, grid)
            colnames(type_values) <- unique(matcha)
            # Substitute each combination of values back into the statement.
            apply(type_values, 1, function(s) {
                to_sub <- paste0(colnames(type_values), "(\\s)*=(\\s)*$")
                subbed <- gsub_many(exp_types, to_sub, paste0(colnames(type_values), "=", s), perl = TRUE)
                paste0(subbed, collapse = "")
            })
        }
    })
    if (!is.null(join_by)) {
        # Join the expanded versions of each unit with `join_by`, then restore
        # them into the skeleton expression.
        oper <- sapply(expanded_types, function(l) {
            paste0(l, collapse = paste0(" ", join_by, " "))
        })
        oper_return <- gsub_many(skeleton, paste0("%expand%", 1:length(orig)), oper)
    } else {
        # join_by = NULL: return one complete expression per value combination
        # instead of a single joined expression.
        oper <- do.call(cbind, expanded_types)
        oper_return <- apply(oper, 1, function(i) gsub_many(skeleton, paste0("%expand%", 1:length(orig)),
            i))
    }
    if (verbose) {
        cat("Generated expanded expression:\n")
        cat(unlist(oper_return), sep = "\n")
    }
    oper_return
}
#' Get parameter names
#'
#' Parameter names taken from \code{P} matrix or model if no \code{P} matrix provided
#'
#' @inheritParams CausalQueries_internal_inherit_params
#' @param include_paramset Logical. Whether to include the param set prefix as part of the name.
#' @return A character vector with the names of the parameters in the model
#' @export
#' @examples
#'
#' get_parameter_names(make_model('X->Y'))
#'
get_parameter_names <- function(model, include_paramset = TRUE) {
    # Either the fully prefixed parameter names or the bare nodal types,
    # both taken straight from the model's parameters_df.
    if (include_paramset) {
        model$parameters_df$param_names
    } else {
        model$parameters_df$nodal_type
    }
}
#' Whether a query contains an exact string
#' @param var Variable name
#' @param query An expression in string format.
#' @return A logical expression indicating whether a variable is included in a query
#' @keywords internal
#' Used in map_query_to_nodal_types
#'
includes_var <- function(var, query) {
    # TRUE when `var` appears as a whole word (\< \> word boundaries) anywhere
    # in `query`; partial matches such as 'X' inside 'XX' do not count.
    any(grepl(paste0("\\<", var, "\\>"), query))
}
#' List of nodes contained in query
#' @inheritParams CausalQueries_internal_inherit_params
#' @return A vector indicating which variables are included in a query
#' @keywords internal
var_in_query <- function(model, query) {
    # Keep only those model nodes that appear as whole words in the query.
    model$nodes[sapply(model$nodes, includes_var, query = query)]
}
|
# Plot 1 for exploratory data analysis course
# Script assumes that source data is present in the parent folder of script

# Read the power consumption data; "?" marks missing values in the source file.
power <- read.csv('../household_power_consumption.txt', sep = ';',
                  stringsAsFactors = FALSE, na.strings = "?")

# Convert the Date column from character to Date class.
power$Date <- as.Date(power$Date, format = '%d/%m/%Y')

# Keep only the two days of interest (1-2 Feb 2007).
power_subset <- subset(power,
                       subset = Date %in% as.Date(c('2007-02-01', '2007-02-02')))
unique(power_subset$Date)  # sanity check on the subset

# Global_active_power was read as character; make it numeric for plotting.
power_subset$Global_active_power <- as.numeric(power_subset$Global_active_power)

# Histogram of Global Active Power with titled axes, drawn in red.
hist(power_subset$Global_active_power, main="Global Active Power",
     xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")

# Copy the on-screen plot to a 480x480 PNG file and close the device.
dev.copy(png, file="plot1.png", height=480, width=480)
dev.off()
|
/plot1.R
|
no_license
|
bolero/ExData_Plotting1
|
R
| false
| false
| 1,041
|
r
|
# Plot 1 for exploratory data analysis couse
# Script assumes that source data is present in the parent folder of script
# Read the power consumption data
power = read.csv('../household_power_consumption.txt', sep=';', stringsAsFactors=F,
na.strings="?")
# Convert date to 'date'
power$Date = as.Date(power$Date, format = '%d/%m/%Y')
# Subset the data for the 2 days
power_subset <- subset(power, subset=((Date == '2007-02-01') |
(Date == '2007-02-02')))
unique(power_subset$Date) #to check correct subset
# Convert active power to numeric
power_subset$Global_active_power <- as.numeric(power_subset$Global_active_power)
# Plot histogram for Global active power, name the plot and axis, and set plot
# color to Red.
hist(power_subset$Global_active_power, main="Global Active Power",
xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
# Save the plot as png with given height and width.
dev.copy(png, file="plot1.png", height=480, width=480)
dev.off()
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fixed seed so the resampling splits (and thus the audit measures) are reproducible.
set.seed(1)
#:# data
# Download the analcatdata_apnea2 dataset from OpenML.
dataset <- getOMLDataSet(data.name = "analcatdata_apnea2")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
# Binary classification task with a regularized discriminant analysis learner,
# predicting class probabilities.
task <- makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn <- makeLearner("classif.rda", par.vals = list(), predict.type = "prob")
#:# hash
#:# 6011f400d7eaa7b495ddac3f1b4760cb
# Hash of (task, learner) identifies this model configuration.
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation with a battery of classification measures.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
# Record the session info for reproducibility.
sink("sessionInfo.txt")
sessionInfo()
sink()
|
/models/openml_analcatdata_apnea2/classification_binaryClass/6011f400d7eaa7b495ddac3f1b4760cb/code.R
|
no_license
|
pysiakk/CaseStudies2019S
|
R
| false
| false
| 695
|
r
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "analcatdata_apnea2")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn = makeLearner("classif.rda", par.vals = list(), predict.type = "prob")
#:# hash
#:# 6011f400d7eaa7b495ddac3f1b4760cb
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/successions.R
\name{Successions}
\alias{Successions}
\alias{successions}
\title{Count successions}
\usage{
successions(x)
}
\arguments{
\item{x}{An atomic vector.}
}
\value{
A list containing the indices of the successions (\code{index}), the total number of successions (\code{successions}), the unique value of each succession (\code{value}) and the lengths of the successions (\code{length})
}
\description{
Counts the lengths of successions of identical values in a vector.
}
\examples{
set.seed(7)
x <- sample(LETTERS[1:3], 10, replace=TRUE, prob = c(0.2, 0.2, 0.6))
x
successions(x)
}
|
/man/successions.Rd
|
no_license
|
kaldhusdal/temporal
|
R
| false
| true
| 670
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/successions.R
\name{Successions}
\alias{Successions}
\alias{successions}
\title{Count successions}
\usage{
successions(x)
}
\arguments{
\item{x}{An atomic vector.}
}
\value{
A list containing the indices of the successions (\code{index}), the total number of successions (\code{successions}), the unique value of each succession (\code{value}) and the lengths of the successions (\code{length})
}
\description{
Counts the lengths of successions of identical values in a vector.
}
\examples{
set.seed(7)
x <- sample(LETTERS[1:3], 10, replace=TRUE, prob = c(0.2, 0.2, 0.6))
x
successions(x)
}
|
# Based on the model implemented below, this `for` loop implements
# mini-batch SGD. The batch size is specified through a variable
# named batch_size.

# Data generation ----------------------------------------------
n <- 1000
x <- runif(n)
W <- 0.9
B <- 0.1
y <- W * x + B

# Model definition ---------------------------------------------
# Linear model: y_hat = w * x + b
model <- function(w, b, x) {
  w * x + b
}

# Mean squared error loss.
loss <- function(y, y_hat) {
  mean((y - y_hat)^2)
}

# Estimating via SGD ------------------------------------------------------
# dL/dy_hat for the MSE loss (elementwise; reads `y` from the enclosing scope).
dl_dyhat <- function(y_hat) {
  2 * (y - y_hat) * (-1)
}

# dy_hat/dw = x (elementwise; reads `x` from the enclosing scope).
dyhat_dw <- function(w) {
  x
}

# dy_hat/db = 1.
dyhat_db <- function(b) {
  1
}

# Initializing the weights --------------------------------------------------
# The weights start swapped relative to the true values, to check convergence.
w <- 0.1
b <- 0.9

lr <- 0.1
batch_size <- 64

for (step in 1:10000) {
  y_hat <- model(w, b, x)
  # Sample a mini-batch of indices (without replacement) from the n observations.
  # seq_len(n) instead of a hard-coded 1:1000 keeps the loop correct if n changes.
  amostrados <- sort(sample(x = seq_len(n), size = batch_size, replace = FALSE))
  w <- w - lr * mean(dl_dyhat(y_hat)[amostrados] * dyhat_dw(w)[amostrados])
  b <- b - lr * mean(dl_dyhat(y_hat)[amostrados] * dyhat_db(b))
  # Log progress: every 10 steps up to step 1000, then every 100 steps,
  # while the loss is still above numerical noise.
  if (((step %% 10 == 0 & step <= 1000) | step %% 100 == 0) & loss(y, y_hat) > 1E-30) {
    cat("Passo: ", step, "; w: ", w, "; b: ", b, "; Funcao Perda: ", loss(y, y_hat), "\n", sep = "")
  }
}
w
b
|
/exercicios/02-mini-batch-sgd.R
|
no_license
|
brunocp76/CursoDeepLearning
|
R
| false
| false
| 1,373
|
r
|
# Com base no modelo implementado abaixo, escreva o `for` loop
# necessário p/ implementar o Mini-batch SGD.
# O tamanho do batch deve ser especificado por meio de uma variável
# chamada batch_size.
# Data generation ----------------------------------------------
n <- 1000
x <- runif(n)
W <- 0.9
B <- 0.1
y <- W * x + B
# Model definition ---------------------------------------------
model <- function(w, b, x) {
w * x + b
}
loss <- function(y, y_hat) {
mean((y - y_hat)^2)
}
# Estimating via SGD ------------------------------------------------------
dl_dyhat <- function(y_hat) {
2 * (y - y_hat) * (-1)
}
dyhat_dw <- function(w) {
x
}
dyhat_db <- function(b) {
1
}
# Inicializando os pesos --------------------------------------------------
# Estou invertendo os pesos, quero ver se há convergência neste método...
w <- 0.1
b <- 0.9
lr <- 0.1
batch_size <- 64
for (step in 1:10000) {
y_hat <- model(w, b, x)
amostrados <- sort(sample(x = 1:1000, size = batch_size, replace = FALSE))
w <- w - lr * mean(dl_dyhat(y_hat)[amostrados] * dyhat_dw(w)[amostrados])
b <- b - lr * mean(dl_dyhat(y_hat)[amostrados] * dyhat_db(b))
if (((step %% 10 == 0 & step <= 1000) | step %% 100 == 0) & loss(y, y_hat) > 1E-30) {
cat("Passo: ", step, "; w: ", w, "; b: ", b, "; Funcao Perda: ", loss(y, y_hat), "\n", sep = "")
}
}
w
b
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ddr.R
\name{ddr}
\alias{ddr}
\title{Get DDR data}
\usage{
ddr(date, asset_class, show_col_types = FALSE)
}
\arguments{
\item{date}{the date for which data is required as Date or DateTime object.
Only the year, month and day elements of the object are used and it must of
be length one.}
\item{asset_class}{the asset class for which you would like to download trade
data. Valid inputs are \code{"CR"} (credit), \code{"IR"} (rates),
\code{"EQ"} (equities), \code{"FX"} (foreign exchange), \code{"CO"}
(commodities). This must be a string.}
\item{show_col_types}{if \code{FALSE} (default), do not show the guessed column
types. If \code{TRUE} always show the column types, even if they are supplied.
If \code{NULL} only show the column types if they are not explicitly supplied by
the col_types argument.}
}
\value{
a tibble that contains the requested data. If no data exists
on that date, an empty tibble is returned.
}
\description{
The DTCC Data Repository is a registered U.S. swap data repository that
allows market participants to fulfil their public disclosure obligations
under U.S. legislation. This function will give you the ability to download
trade-level data that is reported by market participants. Column specs are
inferred from all records in the file (i.e. \code{guess_max} is set to \code{Inf}
when calling \link[readr:read_delim]{readr::read_csv}).
}
\examples{
\dontrun{
ddr(as.Date("2017-05-25"), "IR") # Not empty
ddr(as.Date("2020-12-01"), "CR") # Not empty
}
}
\references{
\href{https://pddata.dtcc.com/gtr/}{DDR Real Time Dissemination Platform}
}
|
/man/ddr.Rd
|
no_license
|
imanuelcostigan/dataonderivatives
|
R
| false
| true
| 1,653
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ddr.R
\name{ddr}
\alias{ddr}
\title{Get DDR data}
\usage{
ddr(date, asset_class, show_col_types = FALSE)
}
\arguments{
\item{date}{the date for which data is required as Date or DateTime object.
Only the year, month and day elements of the object are used and it must of
be length one.}
\item{asset_class}{the asset class for which you would like to download trade
data. Valid inputs are \code{"CR"} (credit), \code{"IR"} (rates),
\code{"EQ"} (equities), \code{"FX"} (foreign exchange), \code{"CO"}
(commodities). This must be a string.}
\item{show_col_types}{if \code{FALSE} (default), do not show the guessed column
types. If \code{TRUE} always show the column types, even if they are supplied.
If \code{NULL} only show the column types if they are not explicitly supplied by
the col_types argument.}
}
\value{
a tibble that contains the requested data. If no data exists
on that date, an empty tibble is returned.
}
\description{
The DTCC Data Repository is a registered U.S. swap data repository that
allows market participants to fulfil their public disclosure obligations
under U.S. legislation. This function will give you the ability to download
trade-level data that is reported by market participants. Column specs are
inferred from all records in the file (i.e. \code{guess_max} is set to \code{Inf}
when calling \link[readr:read_delim]{readr::read_csv}).
}
\examples{
\dontrun{
ddr(as.Date("2017-05-25"), "IR") # Not empty
ddr(as.Date("2020-12-01"), "CR") # Not empty
}
}
\references{
\href{https://pddata.dtcc.com/gtr/}{DDR Real Time Dissemination Platform}
}
|
# NOTE(review): setwd() hard-codes a user-specific path; prefer running the
# script from the project root (or here::here()) so it works on other machines.
setwd("~/Desktop/Data Science/p8105_maternity_leave_nyc/map")
library(tidyverse)
library(spdep)
library(maptools)
library(rgdal)
library(spatialreg)
library(sf)
# Read the NYC zip-code polygons shapefile.
unpaid_poly <- readOGR(dsn = "nyc_only_zips.shp", layer = "nyc_only_zips")
names(unpaid_poly)
###Creating a queen's neighborhood weight matrix using the poly2nb command.
unpaid_nbq <- poly2nb(unpaid_poly)
###extracting coordinates to plot the connectivity matrix for potential visualization.
coords <- coordinates(unpaid_poly)
plot(unpaid_poly)
plot(unpaid_nbq, coords, add=T)
###converting the neighborhood matrix into a list so that the connections between counties can be used in
###Moran's I test.
summary(unpaid_nbq)
unpaid_nbq_w <- nb2listw(unpaid_nbq, zero.policy=TRUE)
###Converting Exposure variable to z-form and then create the lag of that variable.
unpaid_poly$swksunpaid <- scale(as.numeric(unpaid_poly$wksunpaid))
unpaid_poly$lag_sWU <- lag.listw(unpaid_nbq_w, unpaid_poly$swksunpaid, zero.policy=TRUE, NAOK=TRUE)
summary(unpaid_poly$swksunpaid)
summary(unpaid_poly$lag_sWU)
names(unpaid_poly)
head(unpaid_poly)
unpaid_data <- as.data.frame(unpaid_poly)
head(unpaid_data)
###Running the morans I test.
moran.test(unpaid_poly$swksunpaid, listw=unpaid_nbq_w, na.action = na.omit, zero.policy = TRUE)
###moran's I statistic: 0.055, p-value = 0.2131
#******REGRESSION ***********************
###Test baseline linear model.
unpaid.lm <- lm(wksunpaid~jobtypefix + parenttype + as.numeric(leaveweeks) + edtype + race + X.family_in + X.borough, data=unpaid_poly)
summary(unpaid.lm)
###how to make the reference category different?
unpaid.lm %>%
  broom::glance()
# Relabel the model terms for display. Note the replacements are chained, so
# the later "Job type: 2" etc. patterns operate on already-relabelled strings.
lm_output <-
  unpaid.lm %>%
  broom::tidy() %>%
  select(term, estimate, p.value) %>%
  mutate(
    term = str_replace(term, "^jobtypefix", "Job type: "),
    term = str_replace(term, "^parenttype", "Partner: "),
    # lm() names this coefficient with the literal string "as.numeric(leaveweeks)".
    # "." and "()" are regex metacharacters, so they must be escaped — the
    # previous unescaped pattern could never match this term.
    term = str_replace(term, "^as\\.numeric\\(leaveweeks\\)", "Number of weeks on leave"),
    term = str_replace(term, "^edtype", "Education: "),
    term = str_replace(term, "^race", "Race: "),
    term = str_replace(term, "^X.family_in", "Family Income: "),
    term = str_replace(term, "^X.borough", "Borough: "),
    term = str_replace(term, "^Job type: 2", "Job type: Private"),
    term = str_replace(term, "^Job type: 3", "Job type: Non-profit"),
    term = str_replace(term, "^Job type: 4", "Job type: Self-employed"),
    term = str_replace(term, "^Partner: 2", "Partner: Single parent"),
    term = str_replace(term, "^Education: 3", "Education: No high school degree"),
    term = str_replace(term, "^Education: 4", "Education: High school degree/GED"),
    term = str_replace(term, "^Education: 5", "Education: Some college or technical school"),
    term = str_replace(term, "^Education: 6", "Education: Four-year college or higher"),
    term = str_replace(term, "^Race: 2", "Race: Black/African American"),
    term = str_replace(term, "^Race: 3", "Race: Asian"),
    term = str_replace(term, "^Race: 8", "Race: Other")) %>%
  knitr::kable(digits = 3)
###redoing regression with just csv to run residuals (can't with shapefile)
library(readr)
linear_df <-
  read_csv("~/Desktop/Data Science/p8105_maternity_leave_nyc/data/merged_wfls.csv")
# Inspect the freshly loaded data frame. (The original called View(merged_wfls),
# an object that is never created in this script — the data lives in linear_df.)
View(linear_df)
merged.lm <- lm(unpaid_leave_weeks~job_type + partner + leave_weeks + education + race + family_income + borough, data=linear_df)
summary(merged.lm)
modelr::add_residuals(linear_df, merged.lm)
modelr::add_predictions(linear_df, merged.lm)
# Violin plots of residuals against each covariate, to eyeball any
# systematic patterns left unexplained by the model.
linear_df %>%
  modelr::add_residuals(merged.lm) %>%
  ggplot(aes(x = job_type, y = resid)) + geom_violin()
linear_df %>%
  modelr::add_residuals(merged.lm) %>%
  ggplot(aes(x = partner, y = resid)) + geom_violin()
linear_df %>%
  modelr::add_residuals(merged.lm) %>%
  ggplot(aes(x = leave_weeks, y = resid)) + geom_violin()
linear_df %>%
  modelr::add_residuals(merged.lm) %>%
  ggplot(aes(x = education, y = resid)) + geom_violin()
linear_df %>%
  modelr::add_residuals(merged.lm) %>%
  ggplot(aes(x = race, y = resid)) + geom_violin()
linear_df %>%
  modelr::add_residuals(merged.lm) %>%
  ggplot(aes(x = family_income, y = resid)) + geom_violin()
linear_df %>%
  modelr::add_residuals(merged.lm) %>%
  ggplot(aes(x = borough, y = resid)) + geom_violin()
###EXPLANATION
###Because unpaid leave is likely related to income, and income is related to
###where a person lives, it is likely that there are spatial effects impacting
###rates of unpaid leave. To understand this,
###we ran a Univariate Local Moran's I test, which was looking for spatial clusters
###of high and low unpaid leave weeks. This test seeks clusters that have a high
###number of unpaid leave weeks within the zipcode, but also in the zipcodes surrounding it.
### It also looks for clusters that have low local unpaid leave weeks and low unpaid leave
###weeks in the zip codes surrounding it. Finally, it looks for places where there is low leave
###locally but high leave amongst the zip code's neighbors, and vice versa. This resulted in
###a Moran's I statistic of 0.055 and a p-value = 0.2131, signifying that there were not
###significant spatial clusters. We could not move ahead with spatial analysis due to
###this finding, but it is likely that spatial clustering would exist with a more
###representative sample of NYC.
###After conducting this spatial analysis, we also ran a linear regression including a number
###of important covariates identified in our exploratory analysis. These covariates included job
###type, whether the parent had a partner, the number of weeks of leave they took total, the level
###of education they attained, and their race, income, and borough. The outcome was the number of
###weeks of unpaid leave they took. We found an F-statistic of 6.907 with a p-value of < 0.01 on the
###overall regression. More specifically, we found significance among the number of weeks of total
###leave (p < 0.001). As weeks of total leave increases, so does the number of unpaid leave weeks.
###No other covariate was significant. This accounts for 64.35% of the relationship.
###This was a complex model and it should be noted that we did not conduct cross-validation on the
###relationships here. However, we did look at the residuals for each covariate, which can be seen
###below.
|
/gwr_unpaid.R
|
no_license
|
meb2308/p8105_maternity_leave_nyc
|
R
| false
| false
| 6,300
|
r
|
setwd("~/Desktop/Data Science/p8105_maternity_leave_nyc/map")
library(tidyverse)
library(spdep)
library(maptools)
library(rgdal)
library(spatialreg)
library(sf)
unpaid_poly <- readOGR(dsn = "nyc_only_zips.shp", layer = "nyc_only_zips")
names(unpaid_poly)
###Creating a queen's neighborhood weight matrix using the poly2nb command.
unpaid_nbq <- poly2nb(unpaid_poly)
###extracting coordinates to plot the connectivity matrix for potential visualization.
coords <- coordinates(unpaid_poly)
plot(unpaid_poly)
plot(unpaid_nbq, coords, add=T)
###converting the neighborhood matrix into a list so that the connections between counties can be used in
###Moran's I test.
summary(unpaid_nbq)
unpaid_nbq_w <- nb2listw(unpaid_nbq, zero.policy=TRUE)
###Converting Exposure variable to z-form and then create the lag of that variable.
unpaid_poly$swksunpaid <- scale(as.numeric(unpaid_poly$wksunpaid))
unpaid_poly$lag_sWU <- lag.listw(unpaid_nbq_w, unpaid_poly$swksunpaid, zero.policy=TRUE, NAOK=TRUE)
summary(unpaid_poly$swksunpaid)
summary(unpaid_poly$lag_sWU)
names(unpaid_poly)
head(unpaid_poly)
unpaid_data <- as.data.frame(unpaid_poly)
head(unpaid_data)
###Running the morans I test.
moran.test(unpaid_poly$swksunpaid, listw=unpaid_nbq_w, na.action = na.omit, zero.policy = TRUE)
###moran's I statistic: 0.055, p-value = 0.2131
#******REGRESSION ***********************
###Test baseline linear model.
unpaid.lm <- lm(wksunpaid~jobtypefix + parenttype + as.numeric(leaveweeks) + edtype + race + X.family_in + X.borough, data=unpaid_poly)
summary(unpaid.lm)
###how to make the reference category different?
unpaid.lm %>%
broom::glance()
lm_output =
unpaid.lm %>%
broom::tidy() %>%
select(term, estimate, p.value) %>%
mutate(
term = str_replace(term, "^jobtypefix", "Job type: "),
term = str_replace(term, "^parenttype", "Partner: "),
term = str_replace(term, "^as.numeric(leaveweeks)", "Number of weeks on leave"),
term = str_replace(term, "^edtype", "Education: "),
term = str_replace(term, "^race", "Race: "),
term = str_replace(term, "^X.family_in", "Family Income: "),
term = str_replace(term, "^X.borough", "Borough: "),
term = str_replace(term, "^Job type: 2", "Job type: Private"),
term = str_replace(term, "^Job type: 3", "Job type: Non-profit"),
term = str_replace(term, "^Job type: 4", "Job type: Self-employed"),
term = str_replace(term, "^Partner: 2", "Partner: Single parent"),
term = str_replace(term, "^Education: 3", "Education: No high school degree"),
term = str_replace(term, "^Education: 4", "Education: High school degree/GED"),
term = str_replace(term, "^Education: 5", "Education: Some college or technical school"),
term = str_replace(term, "^Education: 6", "Education: Four-year college or higher"),
term = str_replace(term, "^Race: 2", "Race: Black/African American"),
term = str_replace(term, "^Race: 3", "Race: Asian"),
term = str_replace(term, "^Race: 8", "Race: Other")) %>%
knitr::kable(digits = 3)
###redoing regression with just csv to run residuals (can't with shapefile)
library(readr)
linear_df =
read_csv("~/Desktop/Data Science/p8105_maternity_leave_nyc/data/merged_wfls.csv")
View(merged_wfls)
merged.lm <- lm(unpaid_leave_weeks~job_type + partner + leave_weeks + education + race + family_income + borough, data=linear_df)
summary(merged.lm)
modelr::add_residuals(linear_df, merged.lm)
modelr::add_predictions(linear_df, merged.lm)
linear_df %>%
modelr::add_residuals(merged.lm) %>%
ggplot(aes(x = job_type, y = resid)) + geom_violin()
linear_df %>%
modelr::add_residuals(merged.lm) %>%
ggplot(aes(x = partner, y = resid)) + geom_violin()
linear_df %>%
modelr::add_residuals(merged.lm) %>%
ggplot(aes(x = leave_weeks, y = resid)) + geom_violin()
linear_df %>%
modelr::add_residuals(merged.lm) %>%
ggplot(aes(x = education, y = resid)) + geom_violin()
linear_df %>%
modelr::add_residuals(merged.lm) %>%
ggplot(aes(x = race, y = resid)) + geom_violin()
linear_df %>%
modelr::add_residuals(merged.lm) %>%
ggplot(aes(x = family_income, y = resid)) + geom_violin()
linear_df %>%
modelr::add_residuals(merged.lm) %>%
ggplot(aes(x = borough, y = resid)) + geom_violin()
###EXPLANATION
###Because unpaid leave is likely related to income, and income is related to
###where a person lives, it is likely that there are spatial effects impacting
###rates of unpaid leave. To understand this,
###we ran a Univariate Local Moran's I test, which was looking for spatial clusters
###of high and low unpaid leave weeks. This test seeks clusters that have a high
###number of unpaid leave weeks within the zipcode, but also in the zipcodes surrounding it.
### It also looks for clusters that have low local unpaid leave weeks and low unpaid leave
###weeks in the zip codes surrounding it. Finally, it looks for places where there is low leave
###locally but high leave amongst the zip code's neighbors, and vice versa. This resulted in
###a Moran's I statistic of 0.055 and a p-value = 0.2131, signifying that there were not
###significant spatial clusters. We could not move ahead with spatial analysis due to
###this finding, but it is likely that spatial clustering would exist with a more
###representative sample of NYC.
###After conducting this spatial analysis, we also ran a linear regression including a number
###of important covariates identified in our exploratory analysis. These covariates included job
###type, whether the parent had a partner, the number of weeks of leave they took total, the level
###of education they attained, and their race, income, and borough. The outcome was the number of
###weeks of unpaid leave they took. We found an F-statistic of 6.907 with a p-value of < 0.01 on the
###overall regression. More specifically, we found sigificance among the number of weeks of total
###leave (p < 0.001). As weeks of total leave increases, so does the number of unpaid leave weeks.
###No other covariate was significant. This accounts for 64.35% of the relationship.
###This was complex model and it should be noted that we did not conduct cross-validation on the
###relationships here. However, we did look at the residuals for each covariate, which can be seen
###below.
|
# display numbers with up to 2 decimal places (penalise scientific notation)
options("scipen" = 2)
# Read the csv file (semicolon-separated, first column used as row names)
Vinhos <- read.csv2("BaseWine_Red_e_White2018.csv", row.names=1)
# Inspect the base data
View(Vinhos)
# show the variables
str(Vinhos)
# first assessment of the variables
summary(Vinhos)
# NOTE(review): attach() puts columns on the search path and is error-prone;
# prefer with() or explicit Vinhos$... references.
attach(Vinhos)
## Creating a label attribute for each column of the dataset ##
attr(Vinhos$fixedacidity, 'label') <- 'acidez fixa'
attr(Vinhos$volatileacidity, 'label') <- 'acidez volatil'
attr(Vinhos$citricacid, 'label') <- 'acido citrico'
attr(Vinhos$residualsugar, 'label') <- 'acucar residual'
attr(Vinhos$chlorides, 'label') <- 'cloretos'
attr(Vinhos$freesulfurdioxide, 'label') <- 'dioxido de enxofre livre'
attr(Vinhos$totalsulfurdioxide, 'label') <- 'dioxido de enxofre total'
attr(Vinhos$density, 'label') <- 'densidade'
attr(Vinhos$pH, 'label') <- 'pH'
attr(Vinhos$sulphates, 'label') <- 'sulfatos'
attr(Vinhos$alcohol, 'label') <- 'alcool'
attr(Vinhos$quality, 'label') <- 'qualidade'
attr(Vinhos$Vinho, 'label') <- 'vinho'
##
# check whether any column is composed entirely of NAs
sapply(Vinhos, function(x)all(is.na(x)))
# Absolute frequency of quality by wine type
table(as.factor(Vinhos$quality), Vinhos$Vinho)
# Relative frequency (column-wise, i.e. within each wine type)
prop.table(table(as.factor(Vinhos$quality), Vinhos$Vinho),2)
attach(Vinhos)
# mean of every variable per wine type
aggregate(Vinhos,
          by = list(Vinho),
          FUN = mean)
# command to lay out the histograms in 3 rows and 4 columns
par (mfrow=c(3,4))
hist(fixedacidity)
hist(volatileacidity)
hist(citricacid )
hist(residualsugar)
hist(chlorides)
hist(freesulfurdioxide)
hist(totalsulfurdioxide)
hist(density)
hist(pH)
hist(sulphates)
hist(alcohol)
hist(quality)
par (mfrow=c(1,1))
hist(quality, col=c("pink"), col.main="darkgray", prob=T)
# NOTE(review): install.packages() inside a script reinstalls on every run;
# install interactively once instead.
install.packages("plotly")
library(plotly)
plot_ly(x = Vinhos$volatileacidity, type = 'histogram')
attach(Vinhos)
# command to lay out the boxplots in 3 rows and 4 columns
par (mfrow=c(3,4))
boxplot(fixedacidity, main='fixedacidity')
boxplot(volatileacidity , main='volatileacidity')
boxplot(citricacid , main='citricacid')
boxplot(residualsugar, main='residualsugar')
boxplot(chlorides, main='chlorides')
boxplot(freesulfurdioxide, main='freesulfurdioxide')
boxplot(totalsulfurdioxide, main='totalsulfurdioxide')
boxplot(density, main='density')
boxplot(pH, main='pH')
boxplot(sulphates, main='sulphates')
boxplot(alcohol, main='alcohol')
boxplot(Vinhos$quality, main='quality')
par (mfrow=c(1,1))
# boxplots split by wine type (red vs white)
boxplot(quality ~ Vinho, main='quality')
boxplot(fixedacidity ~ Vinho, main='fixedacidity',col=c('red','blue'))
boxplot(volatileacidity ~ Vinho , main='volatileacidity',col=c('red','blue'))
boxplot(citricacid ~ Vinho, main='citricacid',col=c('red','blue'))
boxplot(residualsugar ~ Vinho, main='residualsugar',col=c('red','blue'))
boxplot(chlorides ~ Vinho, main='chlorides',col=c('red','blue'))
boxplot(freesulfurdioxide ~ Vinho, main='freesulfurdioxide' ,col=c('red','blue'))
boxplot(totalsulfurdioxide ~ Vinho, main='totalsulfurdioxide',col=c('red','blue'))
boxplot(density ~ Vinho, main='density',col=c('red','blue'))
boxplot(pH ~ Vinho, main='pH',col=c('red','blue'))
boxplot(sulphates ~ Vinho, main='sulphates',col=c('red','blue'))
boxplot(alcohol ~ Vinho, main='alcohol',col=c('red','blue'))
par (mfrow=c(1,1))
# split the data by wine type, keeping the physico-chemical variables
white <- subset(Vinhos, Vinho=="WHITE", select=c(quality,fixedacidity,volatileacidity,citricacid,residualsugar,
                                                 chlorides,freesulfurdioxide,totalsulfurdioxide,density,pH,
                                                 sulphates,alcohol))
red<- subset(Vinhos, Vinho=="RED", select=c(quality,fixedacidity,volatileacidity,citricacid,residualsugar,
                                            chlorides,freesulfurdioxide,totalsulfurdioxide,density,pH,
                                            sulphates,alcohol))
# overlayed histograms of volatile acidity for red vs white wines
comparing_hist <- plot_ly(alpha = 0.6) %>%
  add_histogram(x = ~red$volatileacidity, type = 'histogram', name='Vinho Tinto' ) %>%
  add_histogram(x = ~white$volatileacidity, name='Vinho Branco') %>%
  layout(barmode = 'overlay')
comparing_hist
# Scatter plot (pch = plotting character, lwd = line width)
plot(freesulfurdioxide~totalsulfurdioxide)
plot(freesulfurdioxide~totalsulfurdioxide, pch=1, lwd=3)
plot(freesulfurdioxide~totalsulfurdioxide)
abline(v=mean(freesulfurdioxide), col="red")
abline(h=mean(totalsulfurdioxide), col="green")
# Based on the initial exploratory analysis, what should be done?
# specific analysis - for wine = "WHITE"
white <- subset(Vinhos, Vinho=="WHITE", select=c(quality,fixedacidity,volatileacidity,citricacid,residualsugar,
                                                 chlorides,freesulfurdioxide,totalsulfurdioxide,density,pH,
                                                 sulphates,alcohol))
hist(white$quality)
# Descriptive statistics
summary(white)
attach(white)
# correlation matrix
matcor <- cor(white)
print(matcor, digits = 2)
install.packages("corrgram")
library(corrgram)
corrgram(matcor, type = "cor", lower.panel = panel.shade, upper.panel = panel.pie)
# panel function printing pairwise correlations, scaled by their magnitude
panel.cor <- function(x, y, digits=2, prefix ="", cex.cor,
                      ...) {
    usr <- par("usr")
    on.exit(par(usr))
    par(usr = c(0, 1, 0, 1))
    r <- cor(x, y , use = "pairwise.complete.obs")
    txt <- format(c(r, 0.123456789), digits = digits) [1]
    txt <- paste(prefix, txt, sep = "")
    if (missing(cex.cor))
        cex <- 0.8/strwidth(txt)
    # abs(r) makes the printed correlations proportional in size
    text(0.5, 0.5, txt, cex = cex * abs(r))
}
#pdf(file = "grafico.pdf")
pairs(white, lower.panel=panel.smooth, upper.panel=panel.cor)
# end
# are there outliers? Any selection? Explain.
# Is there a need for principal components? Explain.
|
/Estatistica/analise_anexo1.R
|
no_license
|
pd2f/modelos_basicos
|
R
| false
| false
| 5,664
|
r
|
# Show numbers with up to 2 decimal places (bias against scientific notation).
options("scipen" = 2)
# Read the semicolon-separated CSV file; column 1 supplies the row names.
Vinhos <- read.csv2("BaseWine_Red_e_White2018.csv", row.names=1)
# Inspect the base table.
View(Vinhos)
# Show the variables and their types.
str(Vinhos)
# First assessment of the variables.
summary(Vinhos)
attach(Vinhos)
## Attach a human-readable label attribute to each column of the dataset ##
attr(Vinhos$fixedacidity, 'label') <- 'acidez fixa'
attr(Vinhos$volatileacidity, 'label') <- 'acidez volatil'
attr(Vinhos$citricacid, 'label') <- 'acido citrico'
attr(Vinhos$residualsugar, 'label') <- 'acucar residual'
attr(Vinhos$chlorides, 'label') <- 'cloretos'
attr(Vinhos$freesulfurdioxide, 'label') <- 'dioxido de enxofre livre'
attr(Vinhos$totalsulfurdioxide, 'label') <- 'dioxido de enxofre total'
attr(Vinhos$density, 'label') <- 'densidade'
attr(Vinhos$pH, 'label') <- 'pH'
attr(Vinhos$sulphates, 'label') <- 'sulfatos'
attr(Vinhos$alcohol, 'label') <- 'alcool'
attr(Vinhos$quality, 'label') <- 'qualidade'
attr(Vinhos$Vinho, 'label') <- 'vinho'
##
# TRUE for any column that is entirely NA.
sapply(Vinhos, function(x)all(is.na(x)))
# Absolute frequency of quality by wine type.
table(as.factor(Vinhos$quality), Vinhos$Vinho)
# Relative frequency (proportions within each wine type).
prop.table(table(as.factor(Vinhos$quality), Vinhos$Vinho),2)
attach(Vinhos)
# Column means grouped by wine type.
aggregate(Vinhos,
by = list(Vinho),
FUN = mean)
# Arrange the histograms in a 3-row by 4-column grid.
par (mfrow=c(3,4))
hist(fixedacidity)
hist(volatileacidity)
hist(citricacid )
hist(residualsugar)
hist(chlorides)
hist(freesulfurdioxide)
hist(totalsulfurdioxide)
hist(density)
hist(pH)
hist(sulphates)
hist(alcohol)
hist(quality)
par (mfrow=c(1,1))
hist(quality, col=c("pink"), col.main="darkgray", prob=T)
install.packages("plotly")
library(plotly)
plot_ly(x = Vinhos$volatileacidity, type = 'histogram')
attach(Vinhos)
# Arrange the boxplots in a 3-row by 4-column grid.
par (mfrow=c(3,4))
boxplot(fixedacidity, main='fixedacidity')
boxplot(volatileacidity , main='volatileacidity')
boxplot(citricacid , main='citricacid')
boxplot(residualsugar, main='residualsugar')
boxplot(chlorides, main='chlorides')
boxplot(freesulfurdioxide, main='freesulfurdioxide')
boxplot(totalsulfurdioxide, main='totalsulfurdioxide')
boxplot(density, main='density')
boxplot(pH, main='pH')
boxplot(sulphates, main='sulphates')
boxplot(alcohol, main='alcohol')
boxplot(Vinhos$quality, main='quality')
par (mfrow=c(1,1))
# Boxplots of each variable split by wine type (RED vs WHITE).
boxplot(quality ~ Vinho, main='quality')
boxplot(fixedacidity ~ Vinho, main='fixedacidity',col=c('red','blue'))
boxplot(volatileacidity ~ Vinho , main='volatileacidity',col=c('red','blue'))
boxplot(citricacid ~ Vinho, main='citricacid',col=c('red','blue'))
boxplot(residualsugar ~ Vinho, main='residualsugar',col=c('red','blue'))
boxplot(chlorides ~ Vinho, main='chlorides',col=c('red','blue'))
boxplot(freesulfurdioxide ~ Vinho, main='freesulfurdioxide' ,col=c('red','blue'))
boxplot(totalsulfurdioxide ~ Vinho, main='totalsulfurdioxide',col=c('red','blue'))
boxplot(density ~ Vinho, main='density',col=c('red','blue'))
boxplot(pH ~ Vinho, main='pH',col=c('red','blue'))
boxplot(sulphates ~ Vinho, main='sulphates',col=c('red','blue'))
boxplot(alcohol ~ Vinho, main='alcohol',col=c('red','blue'))
par (mfrow=c(1,1))
# Split the data by wine type, keeping only the numeric measurement columns.
white <- subset(Vinhos, Vinho=="WHITE", select=c(quality,fixedacidity,volatileacidity,citricacid,residualsugar,
chlorides,freesulfurdioxide,totalsulfurdioxide,density,pH,
sulphates,alcohol))
red<- subset(Vinhos, Vinho=="RED", select=c(quality,fixedacidity,volatileacidity,citricacid,residualsugar,
chlorides,freesulfurdioxide,totalsulfurdioxide,density,pH,
sulphates,alcohol))
# Overlaid plotly histograms comparing volatile acidity of red vs white wines.
comparing_hist <- plot_ly(alpha = 0.6) %>%
add_histogram(x = ~red$volatileacidity, type = 'histogram', name='Vinho Tinto' ) %>%
add_histogram(x = ~white$volatileacidity, name='Vinho Branco') %>%
layout(barmode = 'overlay')
comparing_hist
# Scatter plot (pch = plotting character, lwd = line width)
plot(freesulfurdioxide~totalsulfurdioxide)
plot(freesulfurdioxide~totalsulfurdioxide, pch=1, lwd=3)
plot(freesulfurdioxide~totalsulfurdioxide)
abline(v=mean(freesulfurdioxide), col="red")
abline(h=mean(totalsulfurdioxide), col="green")
# Based on this initial exploratory analysis, what should be done next?
# Focused analysis for white wines only (Vinho == "WHITE").
white <- subset(Vinhos, Vinho=="WHITE", select=c(quality,fixedacidity,volatileacidity,citricacid,residualsugar,
chlorides,freesulfurdioxide,totalsulfurdioxide,density,pH,
sulphates,alcohol))
hist(white$quality)
# Descriptive statistics
summary(white)
attach(white)
# Correlation matrix of the white-wine variables.
matcor <- cor(white)
print(matcor, digits = 2)
install.packages("corrgram")
library(corrgram)
corrgram(matcor, type = "cor", lower.panel = panel.shade, upper.panel = panel.pie)
# pairs() panel function: prints the pairwise correlation in the upper panel,
# with the text size proportional to the correlation magnitude.
panel.cor <- function(x, y, digits=2, prefix ="", cex.cor,
...) {
usr <- par("usr")
on.exit(par(usr))  # restore the plot coordinates when the panel finishes
par(usr = c(0, 1, 0, 1))
r <- cor(x, y , use = "pairwise.complete.obs")
txt <- format(c(r, 0.123456789), digits = digits) [1]
txt <- paste(prefix, txt, sep = "")
if (missing(cex.cor))
cex <- 0.8/strwidth(txt)
# abs(r) makes the printed correlations proportional to their strength
text(0.5, 0.5, txt, cex = cex * abs(r))
}
#pdf(file = "grafico.pdf")
pairs(white, lower.panel=panel.smooth, upper.panel=panel.cor)
# end
# Are there outliers? Any variable selection? Explain.
# Is a principal component analysis needed? Explain.
|
# Compare approximate leave-one-out (ALO) risk estimates for lasso and
# elastic-net fits against exact leave-one-out cross-validation, and benchmark
# the vanilla vs Woodbury-update implementations.
library(glmnet)
library(rbenchmark)
library(Rcpp)
sourceCpp("lassoALO.cpp")
sourceCpp("matUpdate.cpp")
source("aloWrappers.R")
#########
# setup
n = 500   # number of observations
p = 200   # number of predictors
k = 20    # number of non-zero coefficients
true_beta = rnorm(p, 0, 1)
true_beta[-(1:k)] = 0
# misspecification example: a signed square-root transform of y breaks the
# linear model assumption
X = matrix(rnorm(n * p, 0, sqrt(1 / k)), n, p)
y = X %*% true_beta + rnorm(n, 0, 0.5)
y[y >= 0] = sqrt(y[y >= 0])
y[y < 0] = -sqrt(-y[y < 0])
# rescale y by the population-style (divide-by-n) standard deviation
sd = c(sd(y) * sqrt(n - 1) / sqrt(n))
y = y / sd
#tune_param = 10^seq(-3, -1.5, length.out = 25)
fit = glmnet(X, y, nlambda = 25, standardize = F, intercept = F)
ptm = proc.time()
#mse0 = cv.glmnet(X, y, nfolds = n, nlambda = 25, standardize = F, grouped = F)$cvm
proc.time() - ptm
ptm = proc.time()
mse1 = lassoALO.Vanilla(X, y, fit)
proc.time() - ptm
ptm = proc.time()
mse2 = lassoALO.Woodbury(X, y, fit)
proc.time() - ptm
# the two ALO implementations should trace the same curve
plot(mse1, type = "l", col = "orange", lwd = 2)
# lines(mse2, type = "b", col = 6, lwd = 2)
lines(mse2, type = "b", col = 4, pch = 3, lwd = 2)
benchmark(lassoALO_vanilla(X, y, fit), lassoALO_woodbury(X, y, fit), replications = 50)
###########################
# Elastic net with mixing parameter alpha = 0.5.
a = 0.5
fit = glmnet(X, y, nlambda = 25, standardize = F, intercept = F, alpha = a)
fit = cv.glmnet(X, y, nfolds = n, nlambda = 25, standardize = F, grouped = F, intercept = F, alpha = a)
mse0 = fit$cvm  # exact leave-one-out CV errors (nfolds = n)
ptm = proc.time()
mse1 = elnetALO.Vanilla(X, y, a, fit$glmnet.fit)
proc.time() - ptm
ptm = proc.time()
mse2 = elnetALO.Approx(X, y, a, fit$glmnet.fit)
proc.time() - ptm
plot(mse0, type = "l", col = "orange", lwd = 2)
lines(mse1, type = "b", col = 6, lwd = 2)
lines(mse2, type = "b", col = 4, pch = 3, lwd = 2)
|
/R/Update_Test.R
|
permissive
|
Francis-Hsu/Summer-ALO
|
R
| false
| false
| 1,595
|
r
|
# Compare approximate leave-one-out (ALO) risk estimates for lasso and
# elastic-net fits against exact leave-one-out cross-validation, and benchmark
# the vanilla vs Woodbury-update implementations.
library(glmnet)
library(rbenchmark)
library(Rcpp)
sourceCpp("lassoALO.cpp")
sourceCpp("matUpdate.cpp")
source("aloWrappers.R")
#########
# setup
n = 500   # number of observations
p = 200   # number of predictors
k = 20    # number of non-zero coefficients
true_beta = rnorm(p, 0, 1)
true_beta[-(1:k)] = 0
# misspecification example: a signed square-root transform of y breaks the
# linear model assumption
X = matrix(rnorm(n * p, 0, sqrt(1 / k)), n, p)
y = X %*% true_beta + rnorm(n, 0, 0.5)
y[y >= 0] = sqrt(y[y >= 0])
y[y < 0] = -sqrt(-y[y < 0])
# rescale y by the population-style (divide-by-n) standard deviation
sd = c(sd(y) * sqrt(n - 1) / sqrt(n))
y = y / sd
#tune_param = 10^seq(-3, -1.5, length.out = 25)
fit = glmnet(X, y, nlambda = 25, standardize = F, intercept = F)
ptm = proc.time()
#mse0 = cv.glmnet(X, y, nfolds = n, nlambda = 25, standardize = F, grouped = F)$cvm
proc.time() - ptm
ptm = proc.time()
mse1 = lassoALO.Vanilla(X, y, fit)
proc.time() - ptm
ptm = proc.time()
mse2 = lassoALO.Woodbury(X, y, fit)
proc.time() - ptm
# the two ALO implementations should trace the same curve
plot(mse1, type = "l", col = "orange", lwd = 2)
# lines(mse2, type = "b", col = 6, lwd = 2)
lines(mse2, type = "b", col = 4, pch = 3, lwd = 2)
benchmark(lassoALO_vanilla(X, y, fit), lassoALO_woodbury(X, y, fit), replications = 50)
###########################
# Elastic net with mixing parameter alpha = 0.5.
a = 0.5
fit = glmnet(X, y, nlambda = 25, standardize = F, intercept = F, alpha = a)
fit = cv.glmnet(X, y, nfolds = n, nlambda = 25, standardize = F, grouped = F, intercept = F, alpha = a)
mse0 = fit$cvm  # exact leave-one-out CV errors (nfolds = n)
ptm = proc.time()
mse1 = elnetALO.Vanilla(X, y, a, fit$glmnet.fit)
proc.time() - ptm
ptm = proc.time()
mse2 = elnetALO.Approx(X, y, a, fit$glmnet.fit)
proc.time() - ptm
plot(mse0, type = "l", col = "orange", lwd = 2)
lines(mse1, type = "b", col = 6, lwd = 2)
lines(mse2, type = "b", col = 4, pch = 3, lwd = 2)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glue_operations.R
\name{glue_stop_trigger}
\alias{glue_stop_trigger}
\title{Stops a specified trigger}
\usage{
glue_stop_trigger(Name)
}
\arguments{
\item{Name}{[required] The name of the trigger to stop.}
}
\description{
Stops a specified trigger.
See \url{https://www.paws-r-sdk.com/docs/glue_stop_trigger/} for full documentation.
}
\keyword{internal}
|
/cran/paws.analytics/man/glue_stop_trigger.Rd
|
permissive
|
paws-r/paws
|
R
| false
| true
| 434
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glue_operations.R
\name{glue_stop_trigger}
\alias{glue_stop_trigger}
\title{Stops a specified trigger}
\usage{
glue_stop_trigger(Name)
}
\arguments{
\item{Name}{[required] The name of the trigger to stop.}
}
\description{
Stops a specified trigger.
See \url{https://www.paws-r-sdk.com/docs/glue_stop_trigger/} for full documentation.
}
\keyword{internal}
|
# Example usage of epiGWAS::robust_outcome on small simulated SNP data.
library(epiGWAS)
### Name: robust_outcome
### Title: Implements the robust modified outcome approach
### Aliases: robust_outcome
### ** Examples
n <- 30   # number of samples
p <- 10   # number of SNPs
X <- matrix((runif(n * p) < 0.4) + (runif(n * p) < 0.4),
ncol = p, nrow = n) # SNP matrix
A <- rbinom(n, 1, 0.3)                        # binary exposure indicator
propensity <- runif(n, min = 0.4, max = 0.8)  # per-sample propensity scores
Y <- runif(n) < 0.4                           # binary outcome
robust_scores <- robust_outcome(A, X, Y, propensity,
lambda_min_ratio = 0.01 , n_subsample = 1)
|
/data/genthat_extracted_code/epiGWAS/examples/robust_outcome.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 487
|
r
|
# Example usage of epiGWAS::robust_outcome on small simulated SNP data.
library(epiGWAS)
### Name: robust_outcome
### Title: Implements the robust modified outcome approach
### Aliases: robust_outcome
### ** Examples
n <- 30   # number of samples
p <- 10   # number of SNPs
X <- matrix((runif(n * p) < 0.4) + (runif(n * p) < 0.4),
ncol = p, nrow = n) # SNP matrix
A <- rbinom(n, 1, 0.3)                        # binary exposure indicator
propensity <- runif(n, min = 0.4, max = 0.8)  # per-sample propensity scores
Y <- runif(n) < 0.4                           # binary outcome
robust_scores <- robust_outcome(A, X, Y, propensity,
lambda_min_ratio = 0.01 , n_subsample = 1)
|
# Overlay kernel-density estimates of similarity scores (> .3) between topics
# for one day, across three article categories.
# NOTE(review): the CSV column is named "cosign_similarity"; presumably
# "cosine similarity" is meant -- confirm against the data pipeline.
sports <- read.csv("~/Desktop/UCSD/Project-Release/evaluation/threshold2/sports.csv")
sports = sports$cosign_similarity
news <- read.csv("~/Desktop/UCSD/Project-Release/evaluation/threshold2/news.csv")
news = news$cosign_similarity
business <- read.csv("~/Desktop/UCSD/Project-Release/evaluation/threshold2/business.csv")
business = business$cosign_similarity
# overlay the three density curves on one plot
plot(density(sports),
col='blue',
main="Distribution of Cosign Similarity >.3 between Topics (one day)", xlab="Cosign Similarity")
lines(density(business), col='green')
lines(density(news), col='red')
legend('topright', c('News', 'Business', 'Sports'), text.col=c('red','green','blue'))
|
/evaluation/threshold2/plot.R
|
no_license
|
paulcnichols/EventTracker-V2
|
R
| false
| false
| 659
|
r
|
# Overlay kernel-density estimates of similarity scores (> .3) between topics
# for one day, across three article categories.
# NOTE(review): the CSV column is named "cosign_similarity"; presumably
# "cosine similarity" is meant -- confirm against the data pipeline.
sports <- read.csv("~/Desktop/UCSD/Project-Release/evaluation/threshold2/sports.csv")
sports = sports$cosign_similarity
news <- read.csv("~/Desktop/UCSD/Project-Release/evaluation/threshold2/news.csv")
news = news$cosign_similarity
business <- read.csv("~/Desktop/UCSD/Project-Release/evaluation/threshold2/business.csv")
business = business$cosign_similarity
# overlay the three density curves on one plot
plot(density(sports),
col='blue',
main="Distribution of Cosign Similarity >.3 between Topics (one day)", xlab="Cosign Similarity")
lines(density(business), col='green')
lines(density(news), col='red')
legend('topright', c('News', 'Business', 'Sports'), text.col=c('red','green','blue'))
|
# Faceted boxplots of urinary measurements, split by infection test result.
# NOTE(review): assumes a data frame `dat` with these URX* columns is already
# loaded in the calling environment -- confirm upstream.
library(ggplot2)
library(reshape2)
# measurements vs chlamydia urine test result (URXUCL)
box_dat<-dat[,c("URXUAS3", "URXUAS5","URXUAB", "URXUAC", "URXUDMA","URXUMMA","URXUCL")]
box_dat<-melt(box_dat,id.vars = 'URXUCL')
ggplot(data = na.omit(box_dat),aes(x = factor(URXUCL), y=value,fill=factor(URXUCL)))+
geom_boxplot()+
facet_wrap(~variable,scales = 'free')+
labs(x='Chlamydia - Urine',y = expression(mu * g/L))+
theme(legend.position = "none")
# measurements vs trichomonas urine test result (URXUTRI)
box_dat<-dat[,c("URXUAS3", "URXUAS5","URXUAB", "URXUAC", "URXUDMA","URXUMMA","URXUTRI")]
box_dat<-melt(box_dat,id.vars = 'URXUTRI')
ggplot(data = na.omit(box_dat),aes(x = factor(URXUTRI), y=value,fill=factor(URXUTRI)))+
geom_boxplot()+
facet_wrap(~variable,scales = 'free')+
labs(x='Trichomonas - Urine',y = expression(mu * g/L))+
theme(legend.position = "none")
|
/plots.R
|
no_license
|
spaul-genetics/arsenic
|
R
| false
| false
| 775
|
r
|
# Faceted boxplots of urinary measurements, split by infection test result.
# NOTE(review): assumes a data frame `dat` with these URX* columns is already
# loaded in the calling environment -- confirm upstream.
library(ggplot2)
library(reshape2)
# measurements vs chlamydia urine test result (URXUCL)
box_dat<-dat[,c("URXUAS3", "URXUAS5","URXUAB", "URXUAC", "URXUDMA","URXUMMA","URXUCL")]
box_dat<-melt(box_dat,id.vars = 'URXUCL')
ggplot(data = na.omit(box_dat),aes(x = factor(URXUCL), y=value,fill=factor(URXUCL)))+
geom_boxplot()+
facet_wrap(~variable,scales = 'free')+
labs(x='Chlamydia - Urine',y = expression(mu * g/L))+
theme(legend.position = "none")
# measurements vs trichomonas urine test result (URXUTRI)
box_dat<-dat[,c("URXUAS3", "URXUAS5","URXUAB", "URXUAC", "URXUDMA","URXUMMA","URXUTRI")]
box_dat<-melt(box_dat,id.vars = 'URXUTRI')
ggplot(data = na.omit(box_dat),aes(x = factor(URXUTRI), y=value,fill=factor(URXUTRI)))+
geom_boxplot()+
facet_wrap(~variable,scales = 'free')+
labs(x='Trichomonas - Urine',y = expression(mu * g/L))+
theme(legend.position = "none")
|
\docType{package}
\name{mlgt-package}
\alias{mlgt-package}
\title{mlgt: Multi-locus geno-typing}
\description{
\tabular{ll}{ Package: \tab mlgt\cr Type: \tab Package\cr
Version: \tab 0.16\cr Date: \tab 2012-03-27\cr Author:
\tab Dave T. Gerrard <david.gerrard@manchester.ac.uk>\cr
License: \tab GPL (>= 2)\cr LazyLoad: \tab yes\cr }
}
\details{
mlgt sorts a batch of sequence by barcode and identity to
templates. It makes use of external applications BLAST
and MUSCLE. Genotypes are called and alleles can be
compared to a reference list of sequences. More
information about each function can be found in its help
documentation.
The main functions are: \code{\link{prepareMlgtRun}},
\code{\link{mlgt}}, \code{\link{callGenotypes}},
\code{\link{createKnownAlleleList}},
...
}
\references{
BLAST - Altschul, S. F., W. Gish, W. Miller, E. W. Myers,
and D. J. Lipman (1990). Basic local alignment search
tool. Journal of molecular biology 215 (3), 403-410.
MUSCLE - Robert C. Edgar (2004) MUSCLE: multiple sequence
alignment with high accuracy and high throughput. Nucleic
Acids Research 32(5), 1792-97.
IMGT/HLA database - Robinson J, Mistry K, McWilliam H,
Lopez R, Parham P, Marsh SGE (2011) The IMGT/HLA
Database. Nucleic Acids Research 39 Suppl 1:D1171-6
}
|
/man/mlgt-package.Rd
|
no_license
|
cran/mlgt
|
R
| false
| false
| 1,363
|
rd
|
\docType{package}
\name{mlgt-package}
\alias{mlgt-package}
\title{mlgt: Multi-locus geno-typing}
\description{
\tabular{ll}{ Package: \tab mlgt\cr Type: \tab Package\cr
Version: \tab 0.16\cr Date: \tab 2012-03-27\cr Author:
\tab Dave T. Gerrard <david.gerrard@manchester.ac.uk>\cr
License: \tab GPL (>= 2)\cr LazyLoad: \tab yes\cr }
}
\details{
mlgt sorts a batch of sequence by barcode and identity to
templates. It makes use of external applications BLAST
and MUSCLE. Genotypes are called and alleles can be
compared to a reference list of sequences. More
information about each function can be found in its help
documentation.
The main functions are: \code{\link{prepareMlgtRun}},
\code{\link{mlgt}}, \code{\link{callGenotypes}},
\code{\link{createKnownAlleleList}},
...
}
\references{
BLAST - Altschul, S. F., W. Gish, W. Miller, E. W. Myers,
and D. J. Lipman (1990). Basic local alignment search
tool. Journal of molecular biology 215 (3), 403-410.
MUSCLE - Robert C. Edgar (2004) MUSCLE: multiple sequence
alignment with high accuracy and high throughput. Nucleic
Acids Research 32(5), 1792-97.
IMGT/HLA database - Robinson J, Mistry K, McWilliam H,
Lopez R, Parham P, Marsh SGE (2011) The IMGT/HLA
Database. Nucleic Acids Research 39 Suppl 1:D1171-6
}
|
# loading library
library(pROC)
library(e1071)
# Naive Bayes classification with random k-fold cross-validation.
#
# X_train : training predictors (data.frame)
# y       : training target (coerced to numeric and stored as `result`)
# X_test  : optional test predictors; if non-empty, per-fold predictions on it
#           are averaged across folds
# cv      : number of cross-validation folds
# seed    : RNG seed for the random fold assignment
# metric  : evaluation metric name understood by score() below
#
# Returns a list: (CV predictions restored to the original row order,
# X_test augmented with the averaged `pred_nb` column).
NaiveBayesClassification <- function(X_train,y,X_test=data.frame(),cv=5,seed=123,metric="auc")
{
# defining evaluation metric
score <- function(a,b,metric)
{
switch(metric,
auc = auc(a,b),
mae = sum(abs(a-b))/length(a),
rmse = sqrt(sum((a-b)^2)/length(a)),
rmspe = sqrt(sum(((a-b)/a)^2)/length(a)),
logloss = -(sum(log(1-b[a==0])) + sum(log(b[a==1])))/length(a),
precision = length(a[a==b])/length(a))
}
cat("Preparing Data\n")
X_train$order <- seq(1, nrow(X_train))  # remember the original row order
X_train$result <- as.numeric(y)
set.seed(seed)
# assign each row a random fold label in 1..cv
X_train$randomCV <- floor(runif(nrow(X_train), 1, (cv+1)))
# cross validation
cat(cv, "-fold Cross Validation\n", sep = "")
for (i in 1:cv)
{
X_build <- subset(X_train, randomCV != i, select = -c(order, randomCV))
X_val <- subset(X_train, randomCV == i)
# building model
model_nb <- naiveBayes(result ~., data=X_build)
# predicting on validation data (posterior probability of the second class)
pred_nb <- predict(model_nb, X_val, type="raw")[,2]
X_val <- cbind(X_val, pred_nb)
# predicting on test data
if (nrow(X_test) > 0)
{
pred_nb <- predict(model_nb, X_test, type = "raw")[,2]
}
cat("CV Fold-", i, " ", metric, ": ", score(X_val$result, X_val$pred_nb, metric), "\n", sep = "")
# initializing outputs on the first fold
if (i == 1)
{
output <- X_val
if (nrow(X_test) > 0)
{
X_test <- cbind(X_test, pred_nb)
}
}
# appending to outputs; test predictions kept as a running mean across folds
if (i > 1)
{
output <- rbind(output, X_val)
if (nrow(X_test) > 0)
{
X_test$pred_nb <- (X_test$pred_nb * (i-1) + pred_nb)/i
}
}
gc()
}
# final evaluation score over all out-of-fold predictions
output <- output[order(output$order),]
cat("\nnaiveBayes ", cv, "-Fold CV ", metric, ": ", score(output$result, output$pred_nb, metric), "\n", sep = "")
output <- subset(output, select = c("order", "pred_nb"))
# returning CV predictions and test data with predictions
return(list(output, X_test))
}
|
/NaiveBayes.R
|
permissive
|
vasanthgx/Models_CV
|
R
| false
| false
| 2,136
|
r
|
# loading library
library(pROC)
library(e1071)
# Naive Bayes classification with random k-fold cross-validation.
#
# X_train : training predictors (data.frame)
# y       : training target (coerced to numeric and stored as `result`)
# X_test  : optional test predictors; if non-empty, per-fold predictions on it
#           are averaged across folds
# cv      : number of cross-validation folds
# seed    : RNG seed for the random fold assignment
# metric  : evaluation metric name understood by score() below
#
# Returns a list: (CV predictions restored to the original row order,
# X_test augmented with the averaged `pred_nb` column).
NaiveBayesClassification <- function(X_train,y,X_test=data.frame(),cv=5,seed=123,metric="auc")
{
# defining evaluation metric
score <- function(a,b,metric)
{
switch(metric,
auc = auc(a,b),
mae = sum(abs(a-b))/length(a),
rmse = sqrt(sum((a-b)^2)/length(a)),
rmspe = sqrt(sum(((a-b)/a)^2)/length(a)),
logloss = -(sum(log(1-b[a==0])) + sum(log(b[a==1])))/length(a),
precision = length(a[a==b])/length(a))
}
cat("Preparing Data\n")
X_train$order <- seq(1, nrow(X_train))  # remember the original row order
X_train$result <- as.numeric(y)
set.seed(seed)
# assign each row a random fold label in 1..cv
X_train$randomCV <- floor(runif(nrow(X_train), 1, (cv+1)))
# cross validation
cat(cv, "-fold Cross Validation\n", sep = "")
for (i in 1:cv)
{
X_build <- subset(X_train, randomCV != i, select = -c(order, randomCV))
X_val <- subset(X_train, randomCV == i)
# building model
model_nb <- naiveBayes(result ~., data=X_build)
# predicting on validation data (posterior probability of the second class)
pred_nb <- predict(model_nb, X_val, type="raw")[,2]
X_val <- cbind(X_val, pred_nb)
# predicting on test data
if (nrow(X_test) > 0)
{
pred_nb <- predict(model_nb, X_test, type = "raw")[,2]
}
cat("CV Fold-", i, " ", metric, ": ", score(X_val$result, X_val$pred_nb, metric), "\n", sep = "")
# initializing outputs on the first fold
if (i == 1)
{
output <- X_val
if (nrow(X_test) > 0)
{
X_test <- cbind(X_test, pred_nb)
}
}
# appending to outputs; test predictions kept as a running mean across folds
if (i > 1)
{
output <- rbind(output, X_val)
if (nrow(X_test) > 0)
{
X_test$pred_nb <- (X_test$pred_nb * (i-1) + pred_nb)/i
}
}
gc()
}
# final evaluation score over all out-of-fold predictions
output <- output[order(output$order),]
cat("\nnaiveBayes ", cv, "-Fold CV ", metric, ": ", score(output$result, output$pred_nb, metric), "\n", sep = "")
output <- subset(output, select = c("order", "pred_nb"))
# returning CV predictions and test data with predictions
return(list(output, X_test))
}
|
library('vtreat')
context("Test Score Stability")
test_that("testStability: Stability of estimates", {
# Rebuild a long data frame (columns x, y) from a level-by-outcome
# contingency table: rownames(tab) are the x levels, colnames(tab) the
# logical y values, cells the row counts.
expandTab <- function(tab) {
# expand out into data
d <- c()
for(vLevelI in seq_len(nrow(tab))) {
for(yLevelI in seq_len(ncol(tab))) {
count <- tab[vLevelI,yLevelI]
if(count>0) {
di <- data.frame(x=character(count),
y=logical(count))
di$x <- rownames(tab)[vLevelI]
di$y <- as.logical(colnames(tab)[yLevelI])
d <- rbind(d,di)
}
}
}
d
}
# # table describing data
# tab <- matrix(data=c(1131,583,6538,2969,136,78),
# byrow=TRUE,ncol=2)
# rownames(tab) <- c('1','2','unknown')
# colnames(tab) <- c(FALSE,TRUE)
# #print(tab)
# d <- expandTab(tab)
# #print(table(d)) # should match tab
# tP <- vtreat::designTreatmentsC(d,'x','y',TRUE,rareSig=1,verbose=FALSE)
# print(tp$scoreFrame) # why did "unknown" not show up?
# Counts of (color level) x (outcome) used to build the test data.
tab <- matrix(
data = c(
202,89,913,419,498,214,8,0,3,0,
1260,651,70,31,24,4,225,107,1900,
921,1810,853,10,1,778,282,104,58
),
byrow = TRUE,ncol = 2
)
rownames(tab) <-
c(
'Beige', 'Blau', 'Braun', 'Gelb', 'Gold', 'Grau', 'Grün', 'Orange',
'Rot', 'Schwarz', 'Silber', 'Violett', 'Weiß', 'unknown'
)
colnames(tab) <- c(FALSE,TRUE)
d <- expandTab(tab)
# collapse every level except 'Weiß' into 'unknown'
d$x[d$x!='Weiß'] <- 'unknown'
nRun <- 5
set.seed(235235)
# vtreat run: max arount 0.5 min ~ 5e-5
csig <- numeric(nRun)
for(i in seq_len(nRun)) {
tP <- vtreat::designTreatmentsC(d,'x','y',TRUE,rareSig=1,verbose=FALSE)
# looking at instability in csig of WeiB level
csig[[i]] <- tP$scoreFrame$csig[tP$scoreFrame$varName=='x_lev_x.Weiß']
}
# the significance estimate must be stable across repeated runs
expect_true((max(csig)-min(csig))<1.0e-5)
# # direct run same instability max ~ 0.5, min ~ 0.007
# dsig <- numeric(nRun)
# for(i in seq_len(nRun)) {
# dsub <- d[sample(nrow(d),2859),]
# model <- stats::glm(stats::as.formula('y~x=="Weiß"'),
# data=dsub,
# family=stats::binomial(link='logit'))
# if(model$converged) {
# delta_deviance = model$null.deviance - model$deviance
# delta_df = model$df.null - model$df.residual
# sig <- 1.0
# pRsq <- 1.0 - model$deviance/model$null.deviance
# if(pRsq>0) {
# dsig[[i]] <- stats::pchisq(delta_deviance, delta_df, lower.tail=FALSE)
# }
# }
# }
})
|
/vtreat/tests/testthat/testStability.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 2,465
|
r
|
library('vtreat')
context("Test Score Stability")
test_that("testStability: Stability of estimates", {
# Rebuild a long data frame (columns x, y) from a level-by-outcome
# contingency table: rownames(tab) are the x levels, colnames(tab) the
# logical y values, cells the row counts.
expandTab <- function(tab) {
# expand out into data
d <- c()
for(vLevelI in seq_len(nrow(tab))) {
for(yLevelI in seq_len(ncol(tab))) {
count <- tab[vLevelI,yLevelI]
if(count>0) {
di <- data.frame(x=character(count),
y=logical(count))
di$x <- rownames(tab)[vLevelI]
di$y <- as.logical(colnames(tab)[yLevelI])
d <- rbind(d,di)
}
}
}
d
}
# # table describing data
# tab <- matrix(data=c(1131,583,6538,2969,136,78),
# byrow=TRUE,ncol=2)
# rownames(tab) <- c('1','2','unknown')
# colnames(tab) <- c(FALSE,TRUE)
# #print(tab)
# d <- expandTab(tab)
# #print(table(d)) # should match tab
# tP <- vtreat::designTreatmentsC(d,'x','y',TRUE,rareSig=1,verbose=FALSE)
# print(tp$scoreFrame) # why did "unknown" not show up?
# Counts of (color level) x (outcome) used to build the test data.
tab <- matrix(
data = c(
202,89,913,419,498,214,8,0,3,0,
1260,651,70,31,24,4,225,107,1900,
921,1810,853,10,1,778,282,104,58
),
byrow = TRUE,ncol = 2
)
rownames(tab) <-
c(
'Beige', 'Blau', 'Braun', 'Gelb', 'Gold', 'Grau', 'Grün', 'Orange',
'Rot', 'Schwarz', 'Silber', 'Violett', 'Weiß', 'unknown'
)
colnames(tab) <- c(FALSE,TRUE)
d <- expandTab(tab)
# collapse every level except 'Weiß' into 'unknown'
d$x[d$x!='Weiß'] <- 'unknown'
nRun <- 5
set.seed(235235)
# vtreat run: max arount 0.5 min ~ 5e-5
csig <- numeric(nRun)
for(i in seq_len(nRun)) {
tP <- vtreat::designTreatmentsC(d,'x','y',TRUE,rareSig=1,verbose=FALSE)
# looking at instability in csig of WeiB level
csig[[i]] <- tP$scoreFrame$csig[tP$scoreFrame$varName=='x_lev_x.Weiß']
}
# the significance estimate must be stable across repeated runs
expect_true((max(csig)-min(csig))<1.0e-5)
# # direct run same instability max ~ 0.5, min ~ 0.007
# dsig <- numeric(nRun)
# for(i in seq_len(nRun)) {
# dsub <- d[sample(nrow(d),2859),]
# model <- stats::glm(stats::as.formula('y~x=="Weiß"'),
# data=dsub,
# family=stats::binomial(link='logit'))
# if(model$converged) {
# delta_deviance = model$null.deviance - model$deviance
# delta_df = model$df.null - model$df.residual
# sig <- 1.0
# pRsq <- 1.0 - model$deviance/model$null.deviance
# if(pRsq>0) {
# dsig[[i]] <- stats::pchisq(delta_deviance, delta_df, lower.tail=FALSE)
# }
# }
# }
})
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/reverse.R
\name{reverse}
\alias{reverse}
\title{Reverse}
\usage{
reverse(x)
}
\arguments{
\item{x}{A vector whose elements are to be reversed.}
}
\value{
a vector
}
\description{
Reverse returns a copy of a vector whose elements are in
the reverse order. The end.
}
\details{
See also \code{\link{rev}}
}
\examples{
reverse(1:10)
}
|
/man/reverse.Rd
|
no_license
|
garrettgman/centering
|
R
| false
| false
| 376
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/reverse.R
\name{reverse}
\alias{reverse}
\title{Reverse}
\usage{
reverse(x)
}
\arguments{
\item{x}{A vector whose elements are to be reversed.}
}
\value{
a vector
}
\description{
Reverse returns a copy of a vector whose elements are in
the reverse order. The end.
}
\details{
See also \code{\link{rev}}
}
\examples{
reverse(1:10)
}
|
## Load required packages https://rstudio-pubs-static.s3.amazonaws.com/55939_3a149c3034c4469ca938b3d9ce964546.html
library(dplyr)
library(data.table)
library(tidyr)
# Path to the extracted UCI HAR dataset.
# Fix: the string literal was missing its opening quote, which made the whole
# script un-parseable.
filesPath <- "C:/Project/UCI HAR Dataset/UCI HAR Dataset"
setwd(filesPath)
if(!file.exists("./data")) {dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="./data/Dataset.zip",method="curl")
## Unzip DataSet to /data directory
unzip(zipfile="./data/Dataset.zip",exdir="./data")
## Read data from the files into the variables
# NOTE(review): in the standard UCI HAR layout these files live in train/ and
# test/ subfolders; confirm they were copied up into filesPath.
dataSubjectTrain <- tbl_df(read.table(file.path(filesPath, "subject_train.txt")))
dataSubjectTest <- tbl_df(read.table(file.path(filesPath, "subject_test.txt" )))
dataActivityTrain <- tbl_df(read.table(file.path(filesPath, "Y_train.txt")))
dataActivityTest <- tbl_df(read.table(file.path(filesPath, "Y_test.txt" )))
dataTrain <- tbl_df(read.table(file.path(filesPath, "X_train.txt" )))
dataTest <- tbl_df(read.table(file.path(filesPath, "X_test.txt" )))
## 1_Merges the training and the test sets to create one data set
# For both the Activity and Subject files, the training and test sets are
# merged by row binding, and the single columns are renamed to "subject" and
# "activityNum". (Fix: this explanation was previously a bare line of prose,
# which was a syntax error.)
alldataSubject <- rbind(dataSubjectTrain, dataSubjectTest)
setnames(alldataSubject, "V1", "subject")
alldataActivity<- rbind(dataActivityTrain, dataActivityTest)
setnames(alldataActivity, "V1", "activityNum")
#DATA training and test files combination
dataTable <- rbind(dataTrain, dataTest)
# name variables according to feature
dataFeatures <- tbl_df(read.table(file.path(filesPath, "features.txt")))
setnames(dataFeatures, names(dataFeatures), c("featureNum", "featureName"))
colnames(dataTable) <- dataFeatures$featureName
#column names for activity labels
activityLabels<- tbl_df(read.table(file.path(filesPath, "activity_labels.txt")))
setnames(activityLabels, names(activityLabels), c("activityNum","activityName"))
# Merge columns
alldataSubjAct<- cbind(alldataSubject, alldataActivity)
dataTable <- cbind(alldataSubjAct, dataTable)
## 2_Extracts only the measurements on the mean and standard deviation for each measurement
# Reading "features.txt"
dataFeaturesMeanStd <- grep("mean\\(\\)|std\\(\\)",dataFeatures$featureName,value=TRUE)
# Taking only measurements for the mean and standard deviation and add "subject","activityNum"
dataFeaturesMeanStd <- union(c("subject","activityNum"), dataFeaturesMeanStd)
dataTable<- subset(dataTable,select=dataFeaturesMeanStd)
## 3_Uses descriptive activity names to name the activities in the data set
# put name of activity into dataTable
dataTable <- merge(activityLabels, dataTable , by="activityNum", all.x=TRUE)
# Fix: this conversion was duplicated; keep a single occurrence.
dataTable$activityName <- as.character(dataTable$activityName)
# create dataTable with variable means sorted by subject and Activity
dataAggr<- aggregate(. ~ subject - activityName, data = dataTable, mean)
dataTable<- tbl_df(arrange(dataAggr,subject,activityName))
## 4_Appropriately labels the data set with descriptive variable names
names(dataTable)<-gsub("std()", "SD", names(dataTable))
names(dataTable)<-gsub("mean()", "MEAN", names(dataTable))
names(dataTable)<-gsub("^t", "time", names(dataTable))
names(dataTable)<-gsub("^f", "frequency", names(dataTable))
names(dataTable)<-gsub("Acc", "Accelerometer", names(dataTable))
names(dataTable)<-gsub("Gyro", "Gyroscope", names(dataTable))
names(dataTable)<-gsub("Mag", "Magnitude", names(dataTable))
names(dataTable)<-gsub("BodyBody", "Body", names(dataTable))
head(str(dataTable),6)
## 5_From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# Fix: write.table's argument is `row.names`; `row.name` only worked through
# partial argument matching.
write.table(dataTable, "TidyData.txt", row.names=FALSE)
|
/run_analysis.R
|
no_license
|
Marta9/Getting-and-Cleaning-Data-Course-Project
|
R
| false
| false
| 3,928
|
r
|
## Load required packages
## Reference: https://rstudio-pubs-static.s3.amazonaws.com/55939_3a149c3034c4469ca938b3d9ce964546.html
library(dplyr)
library(data.table)
library(tidyr)
## Project root containing the extracted UCI HAR data set.
## FIX: the original line was missing the opening quote (syntax error).
filesPath <- "C:/Project/UCI HAR Dataset/UCI HAR Dataset"
setwd(filesPath)
## Download and unzip the raw archive into ./data if not already present.
if(!file.exists("./data")) {dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="./data/Dataset.zip",method="curl")
## Unzip DataSet to /data directory
unzip(zipfile="./data/Dataset.zip",exdir="./data")
## Read data from the files into the variables
dataSubjectTrain <- tbl_df(read.table(file.path(filesPath, "subject_train.txt")))
dataSubjectTest <- tbl_df(read.table(file.path(filesPath, "subject_test.txt" )))
dataActivityTrain <- tbl_df(read.table(file.path(filesPath, "Y_train.txt")))
dataActivityTest <- tbl_df(read.table(file.path(filesPath, "Y_test.txt" )))
dataTrain <- tbl_df(read.table(file.path(filesPath, "X_train.txt" )))
dataTest <- tbl_df(read.table(file.path(filesPath, "X_test.txt" )))
## 1_Merges the training and the test sets to create one data set
# for both Activity and Subject files.
# FIX: the next sentence was a bare prose line in the original script (a
# syntax error); it is preserved here as a comment.
# In both Activity and Subject files, the training and the test sets are
# merged by row binding, and the variables are renamed to "subject" and
# "activityNum".
alldataSubject <- rbind(dataSubjectTrain, dataSubjectTest)
setnames(alldataSubject, "V1", "subject")
alldataActivity<- rbind(dataActivityTrain, dataActivityTest)
setnames(alldataActivity, "V1", "activityNum")
#DATA training and test files combination
dataTable <- rbind(dataTrain, dataTest)
# name variables according to feature
dataFeatures <- tbl_df(read.table(file.path(filesPath, "features.txt")))
setnames(dataFeatures, names(dataFeatures), c("featureNum", "featureName"))
colnames(dataTable) <- dataFeatures$featureName
#column names for activity labels
activityLabels<- tbl_df(read.table(file.path(filesPath, "activity_labels.txt")))
setnames(activityLabels, names(activityLabels), c("activityNum","activityName"))
# Merge columns: subject + activity identifiers first, then the measurements
alldataSubjAct<- cbind(alldataSubject, alldataActivity)
dataTable <- cbind(alldataSubjAct, dataTable)
## 2_Extracts only the measurements on the mean and standard deviation for each measurement
# Keep only features whose names contain literal "mean()" or "std()"
dataFeaturesMeanStd <- grep("mean\\(\\)|std\\(\\)",dataFeatures$featureName,value=TRUE)
# Taking only measurements for the mean and standard deviation and add "subject","activityNum"
dataFeaturesMeanStd <- union(c("subject","activityNum"), dataFeaturesMeanStd)
dataTable<- subset(dataTable,select=dataFeaturesMeanStd)
## 3_Uses descriptive activity names to name the activities in the data set
# put name of activity into dataTable
dataTable <- merge(activityLabels, dataTable , by="activityNum", all.x=TRUE)
dataTable$activityName <- as.character(dataTable$activityName)
# create dataTable with variable means sorted by subject and Activity
dataAggr<- aggregate(. ~ subject - activityName, data = dataTable, mean)
dataTable<- tbl_df(arrange(dataAggr,subject,activityName))
## 4_Appropriately labels the data set with descriptive variable names
names(dataTable)<-gsub("std()", "SD", names(dataTable))
names(dataTable)<-gsub("mean()", "MEAN", names(dataTable))
names(dataTable)<-gsub("^t", "time", names(dataTable))
names(dataTable)<-gsub("^f", "frequency", names(dataTable))
names(dataTable)<-gsub("Acc", "Accelerometer", names(dataTable))
names(dataTable)<-gsub("Gyro", "Gyroscope", names(dataTable))
names(dataTable)<-gsub("Mag", "Magnitude", names(dataTable))
names(dataTable)<-gsub("BodyBody", "Body", names(dataTable))
head(str(dataTable),6)
## 5_From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# FIX: spell out row.names (the original "row.name" relied on partial
# argument matching).
write.table(dataTable, "TidyData.txt", row.names=FALSE)
|
#Library install and setup
# install.packages("ggplot2")
# install.packages("ggbeeswarm")
library(ggplot2)
library(ggbeeswarm)
library("RODBC")
library("beeswarm")
#GLOBAL VARIABLES#
filesource = FALSE
dummydata = TRUE
dbhandle <- odbcDriverConnect('driver={SQL Server};server=10.134.13.36;database=WICMASTER;uid=saVoc;pwd=saVoc45')
#Create datasets from DB with list of consult id's, consult and consult response details.
if (filesource == FALSE){
write.csv(ds_users <- sqlQuery(dbhandle, "SELECT * FROM [WICMASTER].[dbo].[Users] WHERE ID > 0 AND Login NOT LIKE '%Android%' AND login NOT LIKE 'iOS%' AND Active = 'Y'"), 'dataset/ds_users.csv')
write.csv(ds_consult_ids <- sqlQuery(dbhandle, "SELECT [ID] FROM [WICMASTER].[dbo].[TextConversations] WHERE Subject IN ('! Urgent Clinical Consultation !', '* Clinical Consultation *')"), 'dataset/ds_consult_ids.csv')
# ds_consult_ids <- paste(ds_consult_ids$ID, collapse=",")
write.csv(ds_consult_details <- sqlQuery(dbhandle, paste("SELECT * FROM [WICMASTER].[dbo].[TextMessages] WHERE ConversationID IN (",paste(ds_consult_ids$ID, collapse=","),") AND SeqNo =0", sep="")), 'dataset/ds_consult_details.csv')
write.csv(ds_consult_response_details <- sqlQuery(dbhandle, paste("SELECT * FROM [WICMASTER].[dbo].[TextMessages] WHERE ConversationID IN (",paste(ds_consult_ids$ID, collapse=","),") AND SeqNo =1", sep="")), 'dataset/ds_consult_response_details.csv')
odbcCloseAll()
} else{
ds_users <- read.csv('dataset/ds_users.csv', fileEncoding="UTF-8-BOM")
ds_consult_ids <- read.csv('dataset/ds_consult_ids.csv', fileEncoding="UTF-8-BOM")
ds_consult_details <- read.csv('dataset/ds_consult_details.csv', fileEncoding="UTF-8-BOM")
ds_consult_response_details <- read.csv('dataset/ds_consult_response_details.csv', fileEncoding="UTF-8-BOM")
}
#create message response data set
ds_response_times <- data.frame(conversationID= numeric(0),
severity= numeric(0),
consult_requestor_id= numeric(0),
consult_requestor_title= character(0),
consult_responder_id = numeric(0),
consult_responder_title= character(0),
request_datetime = character(0),
response_datetime = character(0),
response_time = character(0),
stringsAsFactors=FALSE)
#Cycle through the ds_consult_details data set, and populate the ds_response_times with consult and response details
for (i in 1:nrow(ds_consult_details)){
if (ds_consult_details$ConversationID[i] %in% ds_consult_response_details$ConversationID)
ds_response_times[nrow(ds_response_times)+1,] <- c(ds_consult_details$ConversationID[i], ds_consult_details$Severity[i], ds_consult_details$CreatorUserID[i], toString(ds_users$Title[match(ds_consult_details$CreatorUserID[i], ds_users$ID)]), ds_consult_response_details$CreatorUserID[i], toString(ds_users$Title[match(ds_consult_response_details$CreatorUserID[i], ds_users$ID)]), as.Date(ds_consult_details$InTime[i], "%Y-%M-%D %H:%M:%S"), as.Date(ds_consult_response_details$InTime[i], "%Y-%M-%D %H:%M:%S"), difftime(ds_consult_response_details$InTime[i],ds_consult_details$InTime[i], units="min"))
}
ds_response_times$severity[ds_response_times$severity ==0] <-"Regular Consult"
ds_response_times$severity[ds_response_times$severity ==2] <-"Urgent Consult"
ds_response_times$response_time <- round(as.numeric(ds_response_times$response_time), digits = 1)
write.csv(ds_response_times, 'dataset/ds_response_times.csv')
#
if (dummydata == TRUE){
ds_response_times <- read.csv("dataset/ds_dummy_response_times.csv")
}
#Plot the results using ggplot using BeeSwarm
# ggplot(ds_response_times, aes(severity, response_time)) + geom_beeswarm(dodge.width=0, show.legend = TRUE)
#Plot the resuklts using the Beeswarm box plot
beeswarm(response_time ~ severity, data= ds_response_times,
method = 'swarm',
pch=16, pwcol = as.numeric(consult_responder_id),
xlim = c(0, 4), ylim = NULL,
xlab = '', ylab = 'Consult Response Time (Mins)')
legend("bottomright", legend = unique(ds_response_times$consult_responder_title),
title = 'Consult Responders', pch = 16, col=unique(as.numeric(ds_response_times$consult_responder_id)))
boxplot(response_time ~ consult_responder_title, data= ds_response_times, add = T, names = c("",""), col="#0000ff22",
main = "Vocera Messaging Consult Response Times", range =0)
#Plot results with plotly
# install.packages("plotly")
library(plotly)
p <- plot_ly(ds_response_times, y = ~response_time, x=~consult_responder_title, color = ~severity, type = "box", split = ~severity)
p
p <- plot_ly(ds_response_times, y = ~response_time, x=~consult_responder_title, color = ~consult_responder_title, type = "box")
p
|
/Vocera Message Response Rate-NON-PROD-TEST.R
|
no_license
|
mo-g/vocera-messaging
|
R
| false
| false
| 4,920
|
r
|
#Library install and setup
# install.packages("ggplot2")
# install.packages("ggbeeswarm")
library(ggplot2)
library(ggbeeswarm)
library("RODBC")
library("beeswarm")
#GLOBAL VARIABLES#
filesource = FALSE
dummydata = TRUE
dbhandle <- odbcDriverConnect('driver={SQL Server};server=10.134.13.36;database=WICMASTER;uid=saVoc;pwd=saVoc45')
#Create datasets from DB with list of consult id's, consult and consult response details.
if (filesource == FALSE){
write.csv(ds_users <- sqlQuery(dbhandle, "SELECT * FROM [WICMASTER].[dbo].[Users] WHERE ID > 0 AND Login NOT LIKE '%Android%' AND login NOT LIKE 'iOS%' AND Active = 'Y'"), 'dataset/ds_users.csv')
write.csv(ds_consult_ids <- sqlQuery(dbhandle, "SELECT [ID] FROM [WICMASTER].[dbo].[TextConversations] WHERE Subject IN ('! Urgent Clinical Consultation !', '* Clinical Consultation *')"), 'dataset/ds_consult_ids.csv')
# ds_consult_ids <- paste(ds_consult_ids$ID, collapse=",")
write.csv(ds_consult_details <- sqlQuery(dbhandle, paste("SELECT * FROM [WICMASTER].[dbo].[TextMessages] WHERE ConversationID IN (",paste(ds_consult_ids$ID, collapse=","),") AND SeqNo =0", sep="")), 'dataset/ds_consult_details.csv')
write.csv(ds_consult_response_details <- sqlQuery(dbhandle, paste("SELECT * FROM [WICMASTER].[dbo].[TextMessages] WHERE ConversationID IN (",paste(ds_consult_ids$ID, collapse=","),") AND SeqNo =1", sep="")), 'dataset/ds_consult_response_details.csv')
odbcCloseAll()
} else{
ds_users <- read.csv('dataset/ds_users.csv', fileEncoding="UTF-8-BOM")
ds_consult_ids <- read.csv('dataset/ds_consult_ids.csv', fileEncoding="UTF-8-BOM")
ds_consult_details <- read.csv('dataset/ds_consult_details.csv', fileEncoding="UTF-8-BOM")
ds_consult_response_details <- read.csv('dataset/ds_consult_response_details.csv', fileEncoding="UTF-8-BOM")
}
#create message response data set
ds_response_times <- data.frame(conversationID= numeric(0),
severity= numeric(0),
consult_requestor_id= numeric(0),
consult_requestor_title= character(0),
consult_responder_id = numeric(0),
consult_responder_title= character(0),
request_datetime = character(0),
response_datetime = character(0),
response_time = character(0),
stringsAsFactors=FALSE)
#Cycle through the ds_consult_details data set, and populate the ds_response_times with consult and response details
for (i in 1:nrow(ds_consult_details)){
if (ds_consult_details$ConversationID[i] %in% ds_consult_response_details$ConversationID)
ds_response_times[nrow(ds_response_times)+1,] <- c(ds_consult_details$ConversationID[i], ds_consult_details$Severity[i], ds_consult_details$CreatorUserID[i], toString(ds_users$Title[match(ds_consult_details$CreatorUserID[i], ds_users$ID)]), ds_consult_response_details$CreatorUserID[i], toString(ds_users$Title[match(ds_consult_response_details$CreatorUserID[i], ds_users$ID)]), as.Date(ds_consult_details$InTime[i], "%Y-%M-%D %H:%M:%S"), as.Date(ds_consult_response_details$InTime[i], "%Y-%M-%D %H:%M:%S"), difftime(ds_consult_response_details$InTime[i],ds_consult_details$InTime[i], units="min"))
}
ds_response_times$severity[ds_response_times$severity ==0] <-"Regular Consult"
ds_response_times$severity[ds_response_times$severity ==2] <-"Urgent Consult"
ds_response_times$response_time <- round(as.numeric(ds_response_times$response_time), digits = 1)
write.csv(ds_response_times, 'dataset/ds_response_times.csv')
#
if (dummydata == TRUE){
ds_response_times <- read.csv("dataset/ds_dummy_response_times.csv")
}
#Plot the results using ggplot using BeeSwarm
# ggplot(ds_response_times, aes(severity, response_time)) + geom_beeswarm(dodge.width=0, show.legend = TRUE)
# Plot the results using the beeswarm box plot
beeswarm(response_time ~ severity, data= ds_response_times,
method = 'swarm',
pch=16, pwcol = as.numeric(consult_responder_id),
xlim = c(0, 4), ylim = NULL,
xlab = '', ylab = 'Consult Response Time (Mins)')
legend("bottomright", legend = unique(ds_response_times$consult_responder_title),
title = 'Consult Responders', pch = 16, col=unique(as.numeric(ds_response_times$consult_responder_id)))
boxplot(response_time ~ consult_responder_title, data= ds_response_times, add = T, names = c("",""), col="#0000ff22",
main = "Vocera Messaging Consult Response Times", range =0)
#Plot results with plotly
# install.packages("plotly")
library(plotly)
p <- plot_ly(ds_response_times, y = ~response_time, x=~consult_responder_title, color = ~severity, type = "box", split = ~severity)
p
p <- plot_ly(ds_response_times, y = ~response_time, x=~consult_responder_title, color = ~consult_responder_title, type = "box")
p
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ParseDW.R
\name{ParseDW}
\alias{ParseDW}
\title{ParseDW}
\usage{
ParseDW(report.data)
}
\arguments{
\item{report.data}{jsonlite formatted data frame of report data returned from the API}
}
\value{
Formatted data frame
}
\description{
Internal Function - Parses a ranked report returned from the API
}
\seealso{
Other internal: \code{\link{GetEndpoint}},
\code{\link{GetUsageLog}}, \code{\link{ParseFallout}},
\code{\link{ParseOvertime}}, \code{\link{ParsePathing}},
\code{\link{ParseRanked}}, \code{\link{ParseSummary}},
\code{\link{ParseTrended}}
}
\keyword{internal}
|
/man/ParseDW.Rd
|
no_license
|
framingeinstein/RSiteCatalyst
|
R
| false
| true
| 656
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ParseDW.R
\name{ParseDW}
\alias{ParseDW}
\title{ParseDW}
\usage{
ParseDW(report.data)
}
\arguments{
\item{report.data}{jsonlite formatted data frame of report data returned from the API}
}
\value{
Formatted data frame
}
\description{
Internal Function - Parses a ranked report returned from the API
}
\seealso{
Other internal: \code{\link{GetEndpoint}},
\code{\link{GetUsageLog}}, \code{\link{ParseFallout}},
\code{\link{ParseOvertime}}, \code{\link{ParsePathing}},
\code{\link{ParseRanked}}, \code{\link{ParseSummary}},
\code{\link{ParseTrended}}
}
\keyword{internal}
|
\name{inferHyperparam}
\alias{inferHyperparam}
\title{
Function to infer the hyperparameters for Bayesian inference from an a priori matrix or a data set
}
\description{
Since the Bayesian inference approach implemented in the package is based on conjugate priors, hyperparameters must be provided to model the prior probability distribution of the chain parameters. The hyperparameters are inferred from a given a priori matrix under the assumption that the matrix provided corresponds to the mean (expected) values of the chain parameters. A scaling factor vector must be provided too. Alternatively, the hyperparameters can be inferred from a data set.
}
\usage{
inferHyperparam(transMatr = matrix(), scale = numeric(), data = character())
}
\arguments{
\item{transMatr}{
A valid transition matrix, with dimension names.
}
\item{scale}{
A vector of scaling factors, each element corresponds to the row names of the provided transition matrix transMatr, in the same order.
}
\item{data}{
A data set from which the hyperparameters are inferred.
}
}
\details{
transMatr and scale need not be provided if data is provided.
}
\value{
Returns the hyperparameter matrix in a list.
}
\note{
The hyperparameter matrix returned is such that the row and column names are sorted alphanumerically, and the elements in the matrix are correspondingly permuted.
}
\references{
Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. R
package version 0.2.5
}
\author{
Sai Bhargav Yalamanchi, Giorgio Spedicato
}
\seealso{
\code{\link{markovchainFit}}, \code{\link{predictiveDistribution}}
}
\examples{
data(rain, package = "markovchain")
inferHyperparam(data = rain$rain)
weatherStates <- c("sunny", "cloudy", "rain")
weatherMatrix <- matrix(data = c(0.7, 0.2, 0.1,
0.3, 0.4, 0.3,
0.2, 0.4, 0.4),
byrow = TRUE, nrow = 3,
dimnames = list(weatherStates, weatherStates))
inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10))
}
|
/man/inferHyperparam.Rd
|
no_license
|
cryptomanic/markovchain
|
R
| false
| false
| 2,087
|
rd
|
\name{inferHyperparam}
\alias{inferHyperparam}
\title{
Function to infer the hyperparameters for Bayesian inference from an a priori matrix or a data set
}
\description{
Since the Bayesian inference approach implemented in the package is based on conjugate priors, hyperparameters must be provided to model the prior probability distribution of the chain parameters. The hyperparameters are inferred from a given a priori matrix under the assumption that the matrix provided corresponds to the mean (expected) values of the chain parameters. A scaling factor vector must be provided too. Alternatively, the hyperparameters can be inferred from a data set.
}
\usage{
inferHyperparam(transMatr = matrix(), scale = numeric(), data = character())
}
\arguments{
\item{transMatr}{
A valid transition matrix, with dimension names.
}
\item{scale}{
A vector of scaling factors, each element corresponds to the row names of the provided transition matrix transMatr, in the same order.
}
\item{data}{
A data set from which the hyperparameters are inferred.
}
}
\details{
transMatr and scale need not be provided if data is provided.
}
\value{
Returns the hyperparameter matrix in a list.
}
\note{
The hyperparameter matrix returned is such that the row and column names are sorted alphanumerically, and the elements in the matrix are correspondingly permuted.
}
\references{
Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. R
package version 0.2.5
}
\author{
Sai Bhargav Yalamanchi, Giorgio Spedicato
}
\seealso{
\code{\link{markovchainFit}}, \code{\link{predictiveDistribution}}
}
\examples{
data(rain, package = "markovchain")
inferHyperparam(data = rain$rain)
weatherStates <- c("sunny", "cloudy", "rain")
weatherMatrix <- matrix(data = c(0.7, 0.2, 0.1,
0.3, 0.4, 0.3,
0.2, 0.4, 0.4),
byrow = TRUE, nrow = 3,
dimnames = list(weatherStates, weatherStates))
inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{castle.htseq.fpkm}
\alias{castle.htseq.fpkm}
\title{Gene expression for Castle et al. (ArrayExpress E-MTAB-305) mapped to Ensembl 76 (GRCh38)
and quantified with HTSeq-count and normalised with the FPKM method.
See for more details chapters 1 and 2 of Barzine, M.P PhD thesis:
Investigating Normal Human Gene Expression in Tissues
with High-throughput Transcriptomic and Proteomic data.}
\format{A data frame with 43921 transcritps (ie rows/observations) for 11 tissues (ie columns/variables)
\describe{
\item{Adipose}{numeric, FPKM}
\item{Colon}{numeric, FPKM}
\item{Heart}{numeric, FPKM}
\item{Hypothalamus}{numeric, FPKM}
\item{Kidney}{numeric, FPKM}
\item{Liver}{numeric, FPKM}
\item{Lung}{numeric, FPKM}
\item{Ovary}{numeric, FPKM}
\item{Skeletal muscle}{numeric, FPKM}
\item{Spleen}{numeric, FPKM}
\item{Testis}{numeric, FPKM}
}}
\usage{
castle.htseq.fpkm
}
\description{
Gene expression for Castle et al. (ArrayExpress E-MTAB-305) mapped to Ensembl 76 (GRCh38)
and quantified with HTSeq-count and normalised with the FPKM method.
See for more details chapters 1 and 2 of Barzine, M.P PhD thesis:
Investigating Normal Human Gene Expression in Tissues
with High-throughput Transcriptomic and Proteomic data.
}
\keyword{datasets}
|
/man/castle.htseq.fpkm.Rd
|
permissive
|
barzine/barzinePhdData
|
R
| false
| true
| 1,340
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{castle.htseq.fpkm}
\alias{castle.htseq.fpkm}
\title{Gene expression for Castle et al. (ArrayExpress E-MTAB-305) mapped to Ensembl 76 (GRCh38)
and quantified with HTSeq-count and normalised with the FPKM method.
See for more details chapters 1 and 2 of Barzine, M.P PhD thesis:
Investigating Normal Human Gene Expression in Tissues
with High-throughput Transcriptomic and Proteomic data.}
\format{A data frame with 43921 transcripts (i.e. rows/observations) for 11 tissues (i.e. columns/variables)
\describe{
\item{Adipose}{numeric, FPKM}
\item{Colon}{numeric, FPKM}
\item{Heart}{numeric, FPKM}
\item{Hypothalamus}{numeric, FPKM}
\item{Kidney}{numeric, FPKM}
\item{Liver}{numeric, FPKM}
\item{Lung}{numeric, FPKM}
\item{Ovary}{numeric, FPKM}
\item{Skeletal muscle}{numeric, FPKM}
\item{Spleen}{numeric, FPKM}
\item{Testis}{numeric, FPKM}
}}
\usage{
castle.htseq.fpkm
}
\description{
Gene expression for Castle et al. (ArrayExpress E-MTAB-305) mapped to Ensembl 76 (GRCh38)
and quantified with HTSeq-count and normalised with the FPKM method.
See for more details chapters 1 and 2 of Barzine, M.P PhD thesis:
Investigating Normal Human Gene Expression in Tissues
with High-throughput Transcriptomic and Proteomic data.
}
\keyword{datasets}
|
library(dplyr)
library(lubridate)
#Read in data and extract pertinent dates. Also convert dates from text to date format
mData = tbl_df(read.csv("household_power_consumption.txt", sep=';', na.strings="?", stringsAsFactors=F, comment.char="", quote='\"'))
mData = filter(mData, Date == "1/2/2007" | Date == "2/2/2007")
nData = select(mutate(mData, DateTime = dmy_hms(paste(Date, Time))), -(Date:Time))
#plot
mPlot = hist(nData$Global_active_power, main="Global Active Power", ylab="Frequency", xlab="Global Active Power (kilowatts)", col="Red")
#save
dev.copy(png, file="plot1.png", height=480, width=480)
dev.off()
|
/plot1.R
|
no_license
|
zornosaur/ExData_Plotting1
|
R
| false
| false
| 619
|
r
|
library(dplyr)
library(lubridate)
# Read the full power-consumption file, keep only the two target days,
# and collapse the separate Date/Time text columns into one POSIXct stamp.
power <- tbl_df(read.csv("household_power_consumption.txt", sep = ';',
                         na.strings = "?", stringsAsFactors = F,
                         comment.char = "", quote = '\"'))
power <- power %>%
  filter(Date == "1/2/2007" | Date == "2/2/2007") %>%
  mutate(DateTime = dmy_hms(paste(Date, Time))) %>%
  select(-(Date:Time))
# Histogram of global active power over the two selected days
hist_obj <- hist(power$Global_active_power, main = "Global Active Power",
                 ylab = "Frequency", xlab = "Global Active Power (kilowatts)",
                 col = "Red")
# Snapshot the screen device to a 480x480 PNG and close it
dev.copy(png, file = "plot1.png", height = 480, width = 480)
dev.off()
|
\name{price.ani}
\alias{price.ani}
\title{Demonstrate stock prices in animations}
\usage{
price.ani(price, time, time.begin = min(time), span = 15 * 60, ..., xlab = "price",
ylab = "frequency", xlim, ylim, main)
}
\arguments{
\item{price}{stock prices}
\item{time}{time corresponding to prices}
\item{time.begin}{the time for the animation to begin (default to be the
minimum \code{time})}
\item{span}{time span (unit in seconds; default to be 15 minutes)}
\item{\dots}{other arguments passed to \code{\link{plot}}}
\item{xlab,ylab,xlim,ylim,main}{they are passed to \code{\link{plot}} with
reasonable default values}
}
\value{
invisible \code{NULL}
}
\description{
This function can display the frequencies of stock prices in a certain time
span with the span changing.
}
\examples{
## see more examples in ?vanke1127
saveHTML({
price.ani(vanke1127$price, vanke1127$time, lwd = 2)
}, img.name = "vanke1127", htmlfile = "vanke1127.html", title = "Stock prices of Vanke",
description = c("Barplots", "of the stock prices of Vanke Co. Ltd", "on 2009/11/27"))
}
\author{
Yihui Xie
}
|
/man/price.ani.Rd
|
no_license
|
chmue/animation
|
R
| false
| false
| 1,099
|
rd
|
\name{price.ani}
\alias{price.ani}
\title{Demonstrate stock prices in animations}
\usage{
price.ani(price, time, time.begin = min(time), span = 15 * 60, ..., xlab = "price",
ylab = "frequency", xlim, ylim, main)
}
\arguments{
\item{price}{stock prices}
\item{time}{time corresponding to prices}
\item{time.begin}{the time for the animation to begin (default to be the
minimum \code{time})}
\item{span}{time span (unit in seconds; default to be 15 minutes)}
\item{\dots}{other arguments passed to \code{\link{plot}}}
\item{xlab,ylab,xlim,ylim,main}{they are passed to \code{\link{plot}} with
reasonable default values}
}
\value{
invisible \code{NULL}
}
\description{
This function can display the frequencies of stock prices in a certain time
span with the span changing.
}
\examples{
## see more examples in ?vanke1127
saveHTML({
price.ani(vanke1127$price, vanke1127$time, lwd = 2)
}, img.name = "vanke1127", htmlfile = "vanke1127.html", title = "Stock prices of Vanke",
description = c("Barplots", "of the stock prices of Vanke Co. Ltd", "on 2009/11/27"))
}
\author{
Yihui Xie
}
|
testlist <- list(type = 0L, z = 9.83578205428059e-312)
result <- do.call(esreg::G1_fun,testlist)
str(result)
|
/esreg/inst/testfiles/G1_fun/libFuzzer_G1_fun/G1_fun_valgrind_files/1609894068-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 108
|
r
|
testlist <- list(type = 0L, z = 9.83578205428059e-312)
result <- do.call(esreg::G1_fun,testlist)
str(result)
|
library(CatDyn)
### Name: M.Hoenig
### Title: Estimate natural mortality rate from longevity data
### Aliases: M.Hoenig
### Keywords: ~models
### ** Examples
max.age <- 5.8
time.step <- "day"
M.Hoenig(max.age,time.step)
|
/data/genthat_extracted_code/CatDyn/examples/m.hoenig_1.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 227
|
r
|
library(CatDyn)
### Name: M.Hoenig
### Title: Estimate natural mortality rate from longevity data
### Aliases: M.Hoenig
### Keywords: ~models
### ** Examples
max.age <- 5.8
time.step <- "day"
M.Hoenig(max.age,time.step)
|
# Exercise 1: creating and accessing lists
# Create a vector `my_breakfast` of everything you ate for breakfast
my_breakfast <- c("congee")
# Create a vector `my_lunch` of everything you ate (or will eat) for lunch
my_lunch <- c()
# Create a list `meals` that has contains your breakfast and lunch
meals <- list(breakfast = my_breakfast, lunch = my_lunch)
# Add a "dinner" element to your `meals` list that has what you plan to eat
# for dinner
meals[["dinner"]] = c("shirmp", "celery", "rice")
# Use dollar notation to extract your `dinner` element from your list
# and save it in a vector called 'dinner'
dinner <- meals$dinner
# Use double-bracket notation to extract your `lunch` element from your list
# and save it in your list as the element at index 5 (no reason beyond practice)
meals[[5]] = meals[["lunch"]]
# Use single-bracket notation to extract your breakfast and lunch from your list
# and save them to a list called `early_meals`
early_meals <- meals[c("breakfast", "lunch")]
### Challenge ###
# Create a list that has the number of items you ate for each meal
# Hint: use the `lappy()` function to apply the `length()` function to each item
# Write a function `add_pizza` that adds pizza to a given meal vector, and
# returns the pizza-fied vector
# Create a vector `better_meals` that is all your meals, but with pizza!
|
/chapter-08-exercises/exercise-1/exercise.R
|
permissive
|
keyyyyyx/book-exercises
|
R
| false
| false
| 1,353
|
r
|
# Exercise 1: creating and accessing lists
# Create a vector `my_breakfast` of everything you ate for breakfast
my_breakfast <- c("congee")
# Create a vector `my_lunch` of everything you ate (or will eat) for lunch
# FIX: c() returns NULL, not an empty vector; a NULL element later breaks
# `meals[[5]] <- meals[["lunch"]]`, because assigning NULL via [[<- means
# "delete this element" (and index 5 is out of bounds). Use a zero-length
# character vector instead.
my_lunch <- character(0)
# Create a list `meals` that contains your breakfast and lunch
meals <- list(breakfast = my_breakfast, lunch = my_lunch)
# Add a "dinner" element to your `meals` list that has what you plan to eat
# for dinner (style: use `<-` for assignment, not `=`)
meals[["dinner"]] <- c("shirmp", "celery", "rice")
# Use dollar notation to extract your `dinner` element from your list
# and save it in a vector called 'dinner'
dinner <- meals$dinner
# Use double-bracket notation to extract your `lunch` element from your list
# and save it in your list as the element at index 5 (no reason beyond practice)
meals[[5]] <- meals[["lunch"]]
# Use single-bracket notation to extract your breakfast and lunch from your list
# and save them to a list called `early_meals`
early_meals <- meals[c("breakfast", "lunch")]
### Challenge ###
# Create a list that has the number of items you ate for each meal
# Hint: use the `lapply()` function to apply the `length()` function to each item
# Write a function `add_pizza` that adds pizza to a given meal vector, and
# returns the pizza-fied vector
# Create a vector `better_meals` that is all your meals, but with pizza!
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudsearchdomain_operations.R
\name{cloudsearchdomain_upload_documents}
\alias{cloudsearchdomain_upload_documents}
\title{Posts a batch of documents to a search domain for indexing}
\usage{
cloudsearchdomain_upload_documents(documents, contentType)
}
\arguments{
\item{documents}{[required] A batch of documents formatted in JSON or HTML.}
\item{contentType}{[required] The format of the batch you are uploading. Amazon CloudSearch supports
two document batch formats:
\itemize{
\item application/json
\item application/xml
}}
}
\description{
Posts a batch of documents to a search domain for indexing. A document
batch is a collection of add and delete operations that represent the
documents you want to add, update, or delete from your domain. Batches
can be described in either JSON or XML. Each item that you want Amazon
CloudSearch to return as a search result (such as a product) is
represented as a document. Every document has a unique ID and one or
more fields that contain the data that you want to search and return in
results. Individual documents cannot contain more than 1 MB of data. The
entire batch cannot exceed 5 MB. To get the best possible upload
performance, group add and delete operations in batches that are close
the 5 MB limit. Submitting a large volume of single-document batches can
overload a domain's document service.
}
\details{
The endpoint for submitting \code{UploadDocuments} requests is
domain-specific. To get the document endpoint for your domain, use the
Amazon CloudSearch configuration service \code{DescribeDomains} action. A
domain's endpoints are also displayed on the domain dashboard in the
Amazon CloudSearch console.
For more information about formatting your data for Amazon CloudSearch,
see \href{http://docs.aws.amazon.com/cloudsearch/latest/developerguide/preparing-data.html}{Preparing Your Data}
in the \emph{Amazon CloudSearch Developer Guide}. For more information about
uploading data for indexing, see \href{http://docs.aws.amazon.com/cloudsearch/latest/developerguide/uploading-data.html}{Uploading Data}
in the \emph{Amazon CloudSearch Developer Guide}.
}
\section{Request syntax}{
\preformatted{svc$upload_documents(
documents = raw,
contentType = "application/json"|"application/xml"
)
}
}
\keyword{internal}
|
/cran/paws.analytics/man/cloudsearchdomain_upload_documents.Rd
|
permissive
|
peoplecure/paws
|
R
| false
| true
| 2,360
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudsearchdomain_operations.R
\name{cloudsearchdomain_upload_documents}
\alias{cloudsearchdomain_upload_documents}
\title{Posts a batch of documents to a search domain for indexing}
\usage{
cloudsearchdomain_upload_documents(documents, contentType)
}
\arguments{
\item{documents}{[required] A batch of documents formatted in JSON or HTML.}
\item{contentType}{[required] The format of the batch you are uploading. Amazon CloudSearch supports
two document batch formats:
\itemize{
\item application/json
\item application/xml
}}
}
\description{
Posts a batch of documents to a search domain for indexing. A document
batch is a collection of add and delete operations that represent the
documents you want to add, update, or delete from your domain. Batches
can be described in either JSON or XML. Each item that you want Amazon
CloudSearch to return as a search result (such as a product) is
represented as a document. Every document has a unique ID and one or
more fields that contain the data that you want to search and return in
results. Individual documents cannot contain more than 1 MB of data. The
entire batch cannot exceed 5 MB. To get the best possible upload
performance, group add and delete operations in batches that are close
the 5 MB limit. Submitting a large volume of single-document batches can
overload a domain's document service.
}
\details{
The endpoint for submitting \code{UploadDocuments} requests is
domain-specific. To get the document endpoint for your domain, use the
Amazon CloudSearch configuration service \code{DescribeDomains} action. A
domain's endpoints are also displayed on the domain dashboard in the
Amazon CloudSearch console.
For more information about formatting your data for Amazon CloudSearch,
see \href{http://docs.aws.amazon.com/cloudsearch/latest/developerguide/preparing-data.html}{Preparing Your Data}
in the \emph{Amazon CloudSearch Developer Guide}. For more information about
uploading data for indexing, see \href{http://docs.aws.amazon.com/cloudsearch/latest/developerguide/uploading-data.html}{Uploading Data}
in the \emph{Amazon CloudSearch Developer Guide}.
}
\section{Request syntax}{
\preformatted{svc$upload_documents(
documents = raw,
contentType = "application/json"|"application/xml"
)
}
}
\keyword{internal}
|
#### TESTS FOR strr helpers ####################################################
### Setup ######################################################################
library(dplyr)
context("strr_helpers tests")
data <-
dplyr::tibble(
a = c(rep("A", 200), rep("B", 50), rep("C", 50), rep("D", 50), rep("E", 25),
rep("F", 25), rep("G", 10), rep("H", 10), rep("I", 8), rep("J", 5),
rep("K", 79), rep("L", 39), rep("M", 10), rep("N", 99), rep("O", 39),
rep("P", 211), rep("Q", 2), rep("R", 4), rep("S", 16), rep("T", 29),
rep("U", 11), rep("V", 2), rep("W", 17), rep("X", 3), rep("Y", 5),
rep("Z", 1)),
b = 1:1000
)
data_list <-
data %>%
dplyr::group_split(a)
data_sf <-
dplyr::tibble(
a = c(rep("A", 200), rep("B", 50), rep("C", 50), rep("D", 50), rep("E", 25),
rep("F", 25), rep("G", 10), rep("H", 10), rep("I", 8), rep("J", 5),
rep("K", 79), rep("L", 39), rep("M", 10), rep("N", 99), rep("O", 39),
rep("P", 211), rep("Q", 2), rep("R", 4), rep("S", 16), rep("T", 29),
rep("U", 11), rep("V", 2), rep("W", 17), rep("X", 3), rep("Y", 5),
rep("Z", 1)),
b = 1:1000,
lon = 1:1000,
lat = 1001:2000
)
data_list_sf <-
data_sf %>%
dplyr::group_split(a) %>%
lapply(sf::st_as_sf, coords = c("lon", "lat"))
### Tests ######################################################################
test_that("helper_table_split produces the right number of elements", {
expect_equal(length(helper_table_split(data_list)), 10)
expect_equal(length(helper_table_split(data_list, 6)), 6)
})
test_that("helper_table_split works with sf tables", {
expect_s3_class(helper_table_split(data_list_sf)[[1]], "sf")
})
# test_that("helper_table_split correctly exits its while-loop", {
# # Initial multiplier is ok
# map(1:1000, ~{tibble(id = .x, value = 1)}) %>%
# helper_table_split() %>%
# length() %>%
# expect_equal(16)
# # Initial multiplier is too high
# map(1:25, ~{tibble(id = .x, value = 1)}) %>%
# helper_table_split(10) %>%
# length() %>%
# expect_equal(24)
# # expect_equal(nrow(strr_compress(multi)), 7)
# # All multipliers are too high
# })
|
/tests/testthat/test-strr_helpers.R
|
no_license
|
UPGo-McGill/strr
|
R
| false
| false
| 2,214
|
r
|
#### TESTS FOR strr helpers ####################################################
### Setup ######################################################################
library(dplyr)
context("strr_helpers tests")
data <-
dplyr::tibble(
a = c(rep("A", 200), rep("B", 50), rep("C", 50), rep("D", 50), rep("E", 25),
rep("F", 25), rep("G", 10), rep("H", 10), rep("I", 8), rep("J", 5),
rep("K", 79), rep("L", 39), rep("M", 10), rep("N", 99), rep("O", 39),
rep("P", 211), rep("Q", 2), rep("R", 4), rep("S", 16), rep("T", 29),
rep("U", 11), rep("V", 2), rep("W", 17), rep("X", 3), rep("Y", 5),
rep("Z", 1)),
b = 1:1000
)
data_list <-
data %>%
dplyr::group_split(a)
data_sf <-
dplyr::tibble(
a = c(rep("A", 200), rep("B", 50), rep("C", 50), rep("D", 50), rep("E", 25),
rep("F", 25), rep("G", 10), rep("H", 10), rep("I", 8), rep("J", 5),
rep("K", 79), rep("L", 39), rep("M", 10), rep("N", 99), rep("O", 39),
rep("P", 211), rep("Q", 2), rep("R", 4), rep("S", 16), rep("T", 29),
rep("U", 11), rep("V", 2), rep("W", 17), rep("X", 3), rep("Y", 5),
rep("Z", 1)),
b = 1:1000,
lon = 1:1000,
lat = 1001:2000
)
data_list_sf <-
data_sf %>%
dplyr::group_split(a) %>%
lapply(sf::st_as_sf, coords = c("lon", "lat"))
### Tests ######################################################################
test_that("helper_table_split produces the right number of elements", {
expect_equal(length(helper_table_split(data_list)), 10)
expect_equal(length(helper_table_split(data_list, 6)), 6)
})
test_that("helper_table_split works with sf tables", {
expect_s3_class(helper_table_split(data_list_sf)[[1]], "sf")
})
# test_that("helper_table_split correctly exits its while-loop", {
# # Initial multiplier is ok
# map(1:1000, ~{tibble(id = .x, value = 1)}) %>%
# helper_table_split() %>%
# length() %>%
# expect_equal(16)
# # Initial multiplier is too high
# map(1:25, ~{tibble(id = .x, value = 1)}) %>%
# helper_table_split(10) %>%
# length() %>%
# expect_equal(24)
# # expect_equal(nrow(strr_compress(multi)), 7)
# # All multipliers are too high
# })
|
# Clear the R workspace and release memory.
# (rm(list = ls()) is discouraged in shared code; kept because this file is a
# self-contained demo script that is run top to bottom.)
rm(list = ls())
gc()

library(randomForest)

# Randomly assign each iris row to set 1 (train, ~80%) or set 2 (test, ~20%).
# Spell out TRUE -- the shorthand T is a reassignable variable, not a keyword.
ind <- sample(2, nrow(iris), replace = TRUE, prob = c(0.8, 0.2))
train_data <- iris[ind == 1, ]
test_data <- iris[ind == 2, ]

# Fit a 100-tree random forest predicting Species from all other columns.
rf_model <- randomForest(Species ~ ., data = train_data, ntree = 100)

# Evaluate on the held-out rows: confusion matrix of predicted vs actual.
pred_test <- predict(rf_model, test_data)
confusion_matrix <- table(pred_test, test_data$Species)
confusion_matrix

# Overall accuracy: correctly classified / total classified.
correct_rate <- sum(diag(confusion_matrix)) / sum(confusion_matrix)
correct_rate
|
/random forest.R
|
no_license
|
JackPeng1st/Supervised-learning-Iris-data
|
R
| false
| false
| 432
|
r
|
# Clear the R workspace and release memory.
# (rm(list = ls()) is discouraged in shared code; kept because this file is a
# self-contained demo script that is run top to bottom.)
rm(list = ls())
gc()

library(randomForest)

# Randomly assign each iris row to set 1 (train, ~80%) or set 2 (test, ~20%).
# Spell out TRUE -- the shorthand T is a reassignable variable, not a keyword.
ind <- sample(2, nrow(iris), replace = TRUE, prob = c(0.8, 0.2))
train_data <- iris[ind == 1, ]
test_data <- iris[ind == 2, ]

# Fit a 100-tree random forest predicting Species from all other columns.
rf_model <- randomForest(Species ~ ., data = train_data, ntree = 100)

# Evaluate on the held-out rows: confusion matrix of predicted vs actual.
pred_test <- predict(rf_model, test_data)
confusion_matrix <- table(pred_test, test_data$Species)
confusion_matrix

# Overall accuracy: correctly classified / total classified.
correct_rate <- sum(diag(confusion_matrix)) / sum(confusion_matrix)
correct_rate
|
# Auto-generated fuzz/valgrind regression input for multivariance:::match_rows.
# A is a 4x7 all-zero matrix, B a 1x1 zero matrix; the list element names match
# the function's formal arguments, so do.call forwards them by name.
testlist <- list(A = structure(c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(4L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
# Only the structure of the result is inspected (shape/type smoke check);
# the exact values are not asserted.
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613112745-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 245
|
r
|
# Auto-generated fuzz/valgrind regression input for multivariance:::match_rows.
# A is a 4x7 all-zero matrix, B a 1x1 zero matrix; the list element names match
# the function's formal arguments, so do.call forwards them by name.
testlist <- list(A = structure(c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(4L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
# Only the structure of the result is inspected (shape/type smoke check);
# the exact values are not asserted.
str(result)
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)

## ----setup--------------------------------------------------------------------
library(xtreg2way)

## -----------------------------------------------------------------------------
# Panel dimensions: 1000 groups observed over 200 periods.
# FIX: the original stored the period count in `T`, shadowing the logical
# constant TRUE; renamed to n_periods.
numgroups <- 1000
n_periods <- 200

## -----------------------------------------------------------------------------
observations <- numgroups * n_periods
e <- 1:observations
## Create groups and weights
hhid <- floor((e - 1) / n_periods + 1)   # group id for each observation
tid <- e - (hhid - 1) * n_periods        # time id within each group
w <- pracma::rand(n = numgroups, m = 1)  # one random weight per group
w <- w[hhid]                             # expand weights to observation level

## -----------------------------------------------------------------------------
# Randomly create effects for groups and time periods
heffect <- pracma::randn(n = numgroups, m = 1)
teffect <- pracma::randn(n = n_periods, m = 1)
# Generate independent variables correlated with both fixed effects
x1 <- pracma::randn(n = observations, m = 1) +
  0.5 * heffect[hhid] + 0.25 * teffect[tid]
x2 <- pracma::randn(n = observations, m = 1) -
  0.25 * heffect[hhid] + 0.5 * teffect[tid]

## -----------------------------------------------------------------------------
# Generate an AR(1) error within each group: u_t = rho_g * u_{t-1} + eps_t,
# seeded from initialv at the first period of each group.
autoc <- pracma::rand(n = numgroups, m = 1)     # per-group AR coefficient
initialv <- pracma::randn(n = numgroups, m = 1) # per-group initial value
u <- pracma::randn(n = observations, m = 1)
for (o in seq_len(observations)) {
  if (tid[o] > 1) {
    u_1 <- u[o - 1]
  } else {
    u_1 <- initialv[hhid[o]]
  }
  u[o] <- autoc[hhid[o]] * u_1 + u[o]
}
# Generate dependent variable with both fixed effects and the AR error
y <- 1 + x1 - x2 + heffect[hhid] + teffect[tid] + u

## -----------------------------------------------------------------------------
# Two-way fixed-effects regression; noise="1" requests progress output
output <- xtreg2way(y, data.frame(x1, x2), hhid, tid, w, noise = "1")

## -----------------------------------------------------------------------------
# Second call reusing the projection structure computed above
output2 <- xtreg2way(y, x1, struc = output$struc)
|
/inst/doc/Two-Way-Fixed-Effect-Model.R
|
no_license
|
cran/xtreg2way
|
R
| false
| false
| 1,831
|
r
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)

## ----setup--------------------------------------------------------------------
library(xtreg2way)

## -----------------------------------------------------------------------------
# Panel dimensions: 1000 groups observed over 200 periods.
# FIX: the original stored the period count in `T`, shadowing the logical
# constant TRUE; renamed to n_periods.
numgroups <- 1000
n_periods <- 200

## -----------------------------------------------------------------------------
observations <- numgroups * n_periods
e <- 1:observations
## Create groups and weights
hhid <- floor((e - 1) / n_periods + 1)   # group id for each observation
tid <- e - (hhid - 1) * n_periods        # time id within each group
w <- pracma::rand(n = numgroups, m = 1)  # one random weight per group
w <- w[hhid]                             # expand weights to observation level

## -----------------------------------------------------------------------------
# Randomly create effects for groups and time periods
heffect <- pracma::randn(n = numgroups, m = 1)
teffect <- pracma::randn(n = n_periods, m = 1)
# Generate independent variables correlated with both fixed effects
x1 <- pracma::randn(n = observations, m = 1) +
  0.5 * heffect[hhid] + 0.25 * teffect[tid]
x2 <- pracma::randn(n = observations, m = 1) -
  0.25 * heffect[hhid] + 0.5 * teffect[tid]

## -----------------------------------------------------------------------------
# Generate an AR(1) error within each group: u_t = rho_g * u_{t-1} + eps_t,
# seeded from initialv at the first period of each group.
autoc <- pracma::rand(n = numgroups, m = 1)     # per-group AR coefficient
initialv <- pracma::randn(n = numgroups, m = 1) # per-group initial value
u <- pracma::randn(n = observations, m = 1)
for (o in seq_len(observations)) {
  if (tid[o] > 1) {
    u_1 <- u[o - 1]
  } else {
    u_1 <- initialv[hhid[o]]
  }
  u[o] <- autoc[hhid[o]] * u_1 + u[o]
}
# Generate dependent variable with both fixed effects and the AR error
y <- 1 + x1 - x2 + heffect[hhid] + teffect[tid] + u

## -----------------------------------------------------------------------------
# Two-way fixed-effects regression; noise="1" requests progress output
output <- xtreg2way(y, data.frame(x1, x2), hhid, tid, w, noise = "1")

## -----------------------------------------------------------------------------
# Second call reusing the projection structure computed above
output2 <- xtreg2way(y, x1, struc = output$struc)
|
library(glmnet)

# Read the training set. FIX: the original used `head=T`, which relies on
# partial argument matching (head -> header) and the reassignable shorthand T;
# spell both out in full.
mydata <- read.table("./TrainingSet/LassoBIC/central_nervous_system.csv",
                     header = TRUE, sep = ",")

# Predictors start at column 4; column 1 holds the response.
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

set.seed(123)
# 10-fold cross-validated elastic net; alpha = 0.01 is close to ridge.
# FIX: renamed the result from `glm` (which shadows stats::glm) to cv_fit.
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.01,
                    family = "gaussian", standardize = FALSE)

# Append the fitted coefficient path summary to the model log, then restore
# normal console output.
sink('./Model/EN/Lasso/central_nervous_system/central_nervous_system_006.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
|
/Model/EN/Lasso/central_nervous_system/central_nervous_system_006.R
|
no_license
|
leon1003/QSMART
|
R
| false
| false
| 400
|
r
|
library(glmnet)

# Read the training set. FIX: the original used `head=T`, which relies on
# partial argument matching (head -> header) and the reassignable shorthand T;
# spell both out in full.
mydata <- read.table("./TrainingSet/LassoBIC/central_nervous_system.csv",
                     header = TRUE, sep = ",")

# Predictors start at column 4; column 1 holds the response.
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

set.seed(123)
# 10-fold cross-validated elastic net; alpha = 0.01 is close to ridge.
# FIX: renamed the result from `glm` (which shadows stats::glm) to cv_fit.
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.01,
                    family = "gaussian", standardize = FALSE)

# Append the fitted coefficient path summary to the model log, then restore
# normal console output.
sink('./Model/EN/Lasso/central_nervous_system/central_nervous_system_006.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
|
library(Delta)
### Name: GetMx
### Title: Get matrix of the problem (Mx) function
### Aliases: GetMx
### Keywords: M1 Mx

### ** Examples

# Example call on a 3x3 matrix (filled column-major: rows are (1,3,0),
# (2,4,0), (0,0,1)). NOTE(review): the semantics of the returned Mx are not
# visible from this file -- see ?GetMx in the Delta package.
GetMx(matrix(c(1,2,0,3,4,0,0,0,1),3,3))
|
/data/genthat_extracted_code/Delta/examples/GetMx.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 184
|
r
|
library(Delta)
### Name: GetMx
### Title: Get matrix of the problem (Mx) function
### Aliases: GetMx
### Keywords: M1 Mx

### ** Examples

# Example call on a 3x3 matrix (filled column-major: rows are (1,3,0),
# (2,4,0), (0,0,1)). NOTE(review): the semantics of the returned Mx are not
# visible from this file -- see ?GetMx in the Delta package.
GetMx(matrix(c(1,2,0,3,4,0,0,0,1),3,3))
|
#Demonstrates the data structures in R
#Demonstrates the behaviour of data frames

#To Construct a data frame use the data.frame() function
df <- data.frame("SN" = 1:2, "Age" = c(21, 15), "Name" = c("Dave", "John"))
df

#To read data frame from a csv file from the current working directory.
#na.strings="" is passed so that empty values are read as "" empty strings.
titanic.data <- read.csv("Titanic.csv", header = TRUE, na.strings = "")
titanic.data

#To see the Structure of the data frame.
#we see PassengerId,Survived,Pclass,Name,Sex,Age,Parch,Ticket,Fare,Cabin and Embarked columns
str(titanic.data)
View(titanic.data)

#To get the data from any specific column we can use $ symbol appended to dataframe followed by column name
titanic.data$Name

#To get a subset of rows and columns use : operator to define the rows and columns
#The below statement displays rows 10-14 and columns 3-5
titanic.data[10:14, 3:5]

#To get the a subset of non consecutive rows and columns define vectors for rows and a vector for columns
titanic.data[c(5:8, 12, 14, 16), c(3:5, 7, 9, 10)]

#To get the data based on conditional result.
titanic.data[titanic.data$Pclass == 1, 2:6]
#This can also be done using the subset()
#BUG FIX: the original wrote subset(titanic.data[,2:6,cond]), which passes the
#condition as the third `[` index (consumed as drop=) instead of filtering;
#subset() takes the data first and the logical filter second.
subset(titanic.data[, 2:6], titanic.data$Pclass == 1)

titanic.data[titanic.data$Sex == "female", 1:6]
#This can also be done using the subset()
subset(titanic.data[, 1:6], titanic.data$Sex == "female")

#Number of rows can be calculated using nrow()
numberOfRows <- nrow(titanic.data)
numberOfRows

#To calculate the number of NA values in the dataset
#We can use is.na() along with sum() function to calculate the number of NA values
#is.na() returns true if the value is NA and false if it is not
sum(is.na(titanic.data))
#Dropping columns
#Column can be dropped by accessing the column using $ and assigning NULL
#BUG FIX: the original dropped Pclass here, but Pclass is selected and
#factor-encoded later in this script; demonstrate the technique on Ticket,
#which is not needed afterwards.
titanic.data$Ticket <- NULL
str(titanic.data)

#Dropping rows
#Rows can be dropped by passing the row numbers as a negative vector
#BUG FIX: the original used a single subscript (titanic.data[-c(...)]),
#which drops COLUMNS of a data frame; the trailing comma is required to
#drop rows.
titanic.data <- titanic.data[-c(1:5, 7, 9, 12, 18), ]

#To get the new number of rows.
numberOfRows <- nrow(titanic.data)

#In Titanic data, a lot of missing values are observed in Cabin and Age columns
#A missingness plot can be created to see the extent of data missing
#library Amelia is required to plot this graph
#The Amelia package is installed using install.packages("Amelia")
library("Amelia")
#To map the missing data
missmap(titanic.data, col = c("black", "grey"))

#As there are many NA values in Cabin, this column can be dropped.
#Since the PassengerID is nothing but a unique identifier for the records, it can also be dropped.
#Since, Name, Fare, Embarked, and ticket data does not impact survival, we can drop them as well.
#We will use select() from dplyr package to select only the required columns.
library(dplyr)
#NOTE: `data.frame` shadows the base constructor of the same name; the name is
#kept unchanged because the rest of this script refers to it.
data.frame <- select(titanic.data, Survived, Pclass, Age, Sex, SibSp, Parch)
# The data.frame now contains only the selected columns.
data.frame

#Dropping the rows with NA values in Age Column
data.frame <- na.omit(data.frame)
data.frame

#After cleansing all the data, check the structure of the data.frame
str(data.frame)

#From the structure of the data frame we can see that the Survived and Pclass are represented as integers
#However, Pclass is an ordinal categorical variable and Survived is a nominal Categorical variable
#Categorical variables can take on one of a limited and usually fixed number of variables.
#These integer variables need to be converted to factors.
#For converting nominal categorical variables use factor method
data.frame$Survived <- factor(data.frame$Survived)
#For converting ordinal categorical variables to factors, pass order=TRUE and pass the level arguments in decending order.
data.frame$Pclass <- factor(data.frame$Pclass, order = TRUE, levels = c(3, 2, 1))
#Test the changes in structure
str(data.frame)
#VISUALIZING THE DATA

#Correlation Plot of the numeric columns (requires the GGally package)
library(GGally)
ggcorr(data.frame,
       nbreaks = 6,
       label = TRUE,
       label_size = 3,
       color = "grey50")

#Survived Count
library(ggplot2)
#aes() is used to specify x and y axes values
#+ operator is used to add more details
#geom_bar() is used to specify bar chart, width is the bar width, fill is color of bars.
# geom_text() is to set the text in the plot.
#theme_classic() is a built in theme
ggplot(data.frame, aes(x = Survived)) +
  geom_bar(width=0.2, fill = "green") +
  geom_text(stat='count', aes(label=stat(count)), vjust=-0.5) +
  theme_classic()

#Survived count by sex.
#fill=Sex; Fill is provided with Sex attribute which must be a factor to fill in the data from Sex.
#To make the bars appear side by side position=position_dodge() is used.
#More females survived compared to males
ggplot(data.frame, aes(x = Survived, fill=Sex)) +
  geom_bar(position = position_dodge()) +
  geom_text(stat='count',
            aes(label=stat(count)),
            position = position_dodge(width=1), vjust=-0.5)+
  theme_classic()

#Survival by class.
#The ggplot() parameters are same
#There are more survivors in Class 1 than from other classes
ggplot(data.frame, aes(x = Survived, fill=Pclass)) +
  geom_bar(position = position_dodge()) +
  geom_text(stat='count',
            aes(label=stat(count)),
            position = position_dodge(width=1),
            vjust=-0.5)+
  theme_classic()

#Age density plot
#density plots can be created using geom_density().
ggplot(data.frame, aes(x = Age)) +
  geom_density(fill='coral')

#Survival By Age
# Discretize age into 10-year (roughly) bins to plot survival by age band
data.frame$Discretized.age = cut(data.frame$Age, c(0,10,20,30,40,50,60,70,80,100))
# Plot discretized age
ggplot(data.frame, aes(x = Discretized.age, fill=Survived)) +
  geom_bar(position = position_dodge()) +
  geom_text(stat='count', aes(label=stat(count)), position = position_dodge(width=1), vjust=-0.5)+
  theme_classic()
# Remove the temporary binned column so modelling below uses raw Age
data.frame$Discretized.age = NULL
#Create train and test data.
#Split a data frame into a leading training partition and a trailing test
#partition.
#  data     - data frame to split
#  fraction - proportion of leading rows used for training (default 0.8)
#  train    - TRUE returns the training rows, FALSE the remaining rows
train_test_split = function(data, fraction = 0.8, train = TRUE) {
  total_rows = nrow(data)
  # floor() guards against a non-integer row count; the original relied on
  # `:` silently truncating, and produced c(1, 0) when the count was zero.
  train_rows = floor(fraction * total_rows)
  train_idx = seq_len(train_rows)
  if (train == TRUE) {
    return (data[train_idx, , drop = FALSE])
  } else {
    # setdiff handles the empty-training-set edge case correctly, where
    # data[-integer(0), ] would wrongly return zero rows.
    return (data[setdiff(seq_len(total_rows), train_idx), , drop = FALSE])
  }
}
#Create train and test sets (80/20 split of the cleaned data).
train <- train_test_split(data.frame, 0.8, train = TRUE)
train
test <- train_test_split(data.frame, 0.8, train = FALSE)
test

#Decision Tree Model
#Built with rpart() from the rpart library. The attribute left of '~' is the
#target label; '.' uses all remaining columns as features. method='class'
#requests a classification tree.
library(rpart)
library(rpart.plot)
fit <- rpart(Survived ~ ., data = train, method = 'class')
rpart.plot(fit, extra = 106)

#Accuracy
#Predict on the test set and cross-tabulate actual vs predicted labels
#(the confusion matrix).
#FIX: renamed the original variable `table` to conf_matrix so it no longer
#shadows base::table.
predicted <- predict(fit, test, type = 'class')
conf_matrix <- table(test$Survived, predicted)
#The accuracy is calculated using (TP + TN)/(TP + TN + FP + FN).
dt_accuracy <- sum(diag(conf_matrix)) / sum(conf_matrix)
paste("The accuracy is : ", dt_accuracy)

#Fine tune the decision tree via rpart.control: minsplit (min samples to
#attempt a split), minbucket (min samples in a leaf), maxdepth (max tree
#depth), cp = 0 (no complexity-based pruning).
control <- rpart.control(minsplit = 8,
                         minbucket = 2,
                         maxdepth = 6,
                         cp = 0)
tuned_fit <- rpart(Survived ~ ., data = train, method = 'class', control = control)
dt_predict <- predict(tuned_fit, test, type = 'class')
table_mat <- table(test$Survived, dt_predict)
dt_accuracy_2 <- sum(diag(table_mat)) / sum(table_mat)
paste("The accuracy is : ", dt_accuracy_2)
|
/data_structures_data_frames.R
|
no_license
|
kondapallishashi/R
|
R
| false
| false
| 8,121
|
r
|
#Demonstrates the data structures in R
#Demonstrates the behaviour of data frames

#To Construct a data frame use the data.frame() function
df <- data.frame("SN" = 1:2, "Age" = c(21, 15), "Name" = c("Dave", "John"))
df

#To read data frame from a csv file from the current working directory.
#na.strings="" is passed so that empty values are read as "" empty strings.
titanic.data <- read.csv("Titanic.csv", header = TRUE, na.strings = "")
titanic.data

#To see the Structure of the data frame.
#we see PassengerId,Survived,Pclass,Name,Sex,Age,Parch,Ticket,Fare,Cabin and Embarked columns
str(titanic.data)
View(titanic.data)

#To get the data from any specific column we can use $ symbol appended to dataframe followed by column name
titanic.data$Name

#To get a subset of rows and columns use : operator to define the rows and columns
#The below statement displays rows 10-14 and columns 3-5
titanic.data[10:14, 3:5]

#To get the a subset of non consecutive rows and columns define vectors for rows and a vector for columns
titanic.data[c(5:8, 12, 14, 16), c(3:5, 7, 9, 10)]

#To get the data based on conditional result.
titanic.data[titanic.data$Pclass == 1, 2:6]
#This can also be done using the subset()
#BUG FIX: the original wrote subset(titanic.data[,2:6,cond]), which passes the
#condition as the third `[` index (consumed as drop=) instead of filtering;
#subset() takes the data first and the logical filter second.
subset(titanic.data[, 2:6], titanic.data$Pclass == 1)

titanic.data[titanic.data$Sex == "female", 1:6]
#This can also be done using the subset()
subset(titanic.data[, 1:6], titanic.data$Sex == "female")

#Number of rows can be calculated using nrow()
numberOfRows <- nrow(titanic.data)
numberOfRows

#To calculate the number of NA values in the dataset
#We can use is.na() along with sum() function to calculate the number of NA values
#is.na() returns true if the value is NA and false if it is not
sum(is.na(titanic.data))
#Dropping columns
#Column can be dropped by accessing the column using $ and assigning NULL
#BUG FIX: the original dropped Pclass here, but Pclass is selected and
#factor-encoded later in this script; demonstrate the technique on Ticket,
#which is not needed afterwards.
titanic.data$Ticket <- NULL
str(titanic.data)

#Dropping rows
#Rows can be dropped by passing the row numbers as a negative vector
#BUG FIX: the original used a single subscript (titanic.data[-c(...)]),
#which drops COLUMNS of a data frame; the trailing comma is required to
#drop rows.
titanic.data <- titanic.data[-c(1:5, 7, 9, 12, 18), ]

#To get the new number of rows.
numberOfRows <- nrow(titanic.data)

#In Titanic data, a lot of missing values are observed in Cabin and Age columns
#A missingness plot can be created to see the extent of data missing
#library Amelia is required to plot this graph
#The Amelia package is installed using install.packages("Amelia")
library("Amelia")
#To map the missing data
missmap(titanic.data, col = c("black", "grey"))

#As there are many NA values in Cabin, this column can be dropped.
#Since the PassengerID is nothing but a unique identifier for the records, it can also be dropped.
#Since, Name, Fare, Embarked, and ticket data does not impact survival, we can drop them as well.
#We will use select() from dplyr package to select only the required columns.
library(dplyr)
#NOTE: `data.frame` shadows the base constructor of the same name; the name is
#kept unchanged because the rest of this script refers to it.
data.frame <- select(titanic.data, Survived, Pclass, Age, Sex, SibSp, Parch)
# The data.frame now contains only the selected columns.
data.frame

#Dropping the rows with NA values in Age Column
data.frame <- na.omit(data.frame)
data.frame

#After cleansing all the data, check the structure of the data.frame
str(data.frame)

#From the structure of the data frame we can see that the Survived and Pclass are represented as integers
#However, Pclass is an ordinal categorical variable and Survived is a nominal Categorical variable
#Categorical variables can take on one of a limited and usually fixed number of variables.
#These integer variables need to be converted to factors.
#For converting nominal categorical variables use factor method
data.frame$Survived <- factor(data.frame$Survived)
#For converting ordinal categorical variables to factors, pass order=TRUE and pass the level arguments in decending order.
data.frame$Pclass <- factor(data.frame$Pclass, order = TRUE, levels = c(3, 2, 1))
#Test the changes in structure
str(data.frame)
#VISUALIZING THE DATA

#Correlation Plot of the numeric columns (requires the GGally package)
library(GGally)
ggcorr(data.frame,
       nbreaks = 6,
       label = TRUE,
       label_size = 3,
       color = "grey50")

#Survived Count
library(ggplot2)
#aes() is used to specify x and y axes values
#+ operator is used to add more details
#geom_bar() is used to specify bar chart, width is the bar width, fill is color of bars.
# geom_text() is to set the text in the plot.
#theme_classic() is a built in theme
ggplot(data.frame, aes(x = Survived)) +
  geom_bar(width=0.2, fill = "green") +
  geom_text(stat='count', aes(label=stat(count)), vjust=-0.5) +
  theme_classic()

#Survived count by sex.
#fill=Sex; Fill is provided with Sex attribute which must be a factor to fill in the data from Sex.
#To make the bars appear side by side position=position_dodge() is used.
#More females survived compared to males
ggplot(data.frame, aes(x = Survived, fill=Sex)) +
  geom_bar(position = position_dodge()) +
  geom_text(stat='count',
            aes(label=stat(count)),
            position = position_dodge(width=1), vjust=-0.5)+
  theme_classic()

#Survival by class.
#The ggplot() parameters are same
#There are more survivors in Class 1 than from other classes
ggplot(data.frame, aes(x = Survived, fill=Pclass)) +
  geom_bar(position = position_dodge()) +
  geom_text(stat='count',
            aes(label=stat(count)),
            position = position_dodge(width=1),
            vjust=-0.5)+
  theme_classic()

#Age density plot
#density plots can be created using geom_density().
ggplot(data.frame, aes(x = Age)) +
  geom_density(fill='coral')

#Survival By Age
# Discretize age into 10-year (roughly) bins to plot survival by age band
data.frame$Discretized.age = cut(data.frame$Age, c(0,10,20,30,40,50,60,70,80,100))
# Plot discretized age
ggplot(data.frame, aes(x = Discretized.age, fill=Survived)) +
  geom_bar(position = position_dodge()) +
  geom_text(stat='count', aes(label=stat(count)), position = position_dodge(width=1), vjust=-0.5)+
  theme_classic()
# Remove the temporary binned column so modelling below uses raw Age
data.frame$Discretized.age = NULL
#Create train and test data.
#Split a data frame into a leading training partition and a trailing test
#partition.
#  data     - data frame to split
#  fraction - proportion of leading rows used for training (default 0.8)
#  train    - TRUE returns the training rows, FALSE the remaining rows
train_test_split = function(data, fraction = 0.8, train = TRUE) {
  total_rows = nrow(data)
  # floor() guards against a non-integer row count; the original relied on
  # `:` silently truncating, and produced c(1, 0) when the count was zero.
  train_rows = floor(fraction * total_rows)
  train_idx = seq_len(train_rows)
  if (train == TRUE) {
    return (data[train_idx, , drop = FALSE])
  } else {
    # setdiff handles the empty-training-set edge case correctly, where
    # data[-integer(0), ] would wrongly return zero rows.
    return (data[setdiff(seq_len(total_rows), train_idx), , drop = FALSE])
  }
}
#Create train and test sets (80/20 split of the cleaned data).
train <- train_test_split(data.frame, 0.8, train = TRUE)
train
test <- train_test_split(data.frame, 0.8, train = FALSE)
test

#Decision Tree Model
#Built with rpart() from the rpart library. The attribute left of '~' is the
#target label; '.' uses all remaining columns as features. method='class'
#requests a classification tree.
library(rpart)
library(rpart.plot)
fit <- rpart(Survived ~ ., data = train, method = 'class')
rpart.plot(fit, extra = 106)

#Accuracy
#Predict on the test set and cross-tabulate actual vs predicted labels
#(the confusion matrix).
#FIX: renamed the original variable `table` to conf_matrix so it no longer
#shadows base::table.
predicted <- predict(fit, test, type = 'class')
conf_matrix <- table(test$Survived, predicted)
#The accuracy is calculated using (TP + TN)/(TP + TN + FP + FN).
dt_accuracy <- sum(diag(conf_matrix)) / sum(conf_matrix)
paste("The accuracy is : ", dt_accuracy)

#Fine tune the decision tree via rpart.control: minsplit (min samples to
#attempt a split), minbucket (min samples in a leaf), maxdepth (max tree
#depth), cp = 0 (no complexity-based pruning).
control <- rpart.control(minsplit = 8,
                         minbucket = 2,
                         maxdepth = 6,
                         cp = 0)
tuned_fit <- rpart(Survived ~ ., data = train, method = 'class', control = control)
dt_predict <- predict(tuned_fit, test, type = 'class')
table_mat <- table(test$Survived, dt_predict)
dt_accuracy_2 <- sum(diag(table_mat)) / sum(table_mat)
paste("The accuracy is : ", dt_accuracy_2)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exportToJson.R
\name{exportToJson}
\alias{exportToJson}
\title{exportToJson}
\usage{
exportToJson(connectionDetails, cdmDatabaseSchema, resultsDatabaseSchema,
outputPath = getwd(), reports = allReports,
vocabDatabaseSchema = cdmDatabaseSchema, compressIntoOneFile = FALSE)
}
\arguments{
\item{connectionDetails}{An R object of type ConnectionDetail (details for the function that contains server info, database type, optionally username/password, port)}
\item{cdmDatabaseSchema}{Name of the database schema that contains the OMOP CDM.}
\item{resultsDatabaseSchema}{Name of the database schema that contains the Achilles analysis files. Default is cdmDatabaseSchema}
\item{outputPath}{A folder location to save the JSON files. Default is current working folder}
\item{reports}{A character vector listing the set of reports to generate. Default is all reports.}
\item{vocabDatabaseSchema}{string name of database schema that contains OMOP Vocabulary. Default is cdmDatabaseSchema. On SQL Server, this should specify both the database and the schema, so for example 'results.dbo'.}
\item{compressIntoOneFile}{Boolean indicating if the JSON files should be compressed into one zip file
See \code{data(allReports)} for a list of all report types}
}
\value{
none
}
\description{
\code{exportToJson} Exports Achilles statistics into a JSON form for reports.
}
\details{
Creates individual files for each report found in Achilles.Web
}
\examples{
\dontrun{
connectionDetails <- DatabaseConnector::createConnectionDetails(dbms="sql server", server="yourserver")
exportToJson(connectionDetails, cdmDatabaseSchema="cdm4_sim", outputPath="your/output/path")
}
}
|
/man/exportToJson.Rd
|
permissive
|
mustafaascha/Achilles
|
R
| false
| true
| 1,744
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exportToJson.R
\name{exportToJson}
\alias{exportToJson}
\title{exportToJson}
\usage{
exportToJson(connectionDetails, cdmDatabaseSchema, resultsDatabaseSchema,
outputPath = getwd(), reports = allReports,
vocabDatabaseSchema = cdmDatabaseSchema, compressIntoOneFile = FALSE)
}
\arguments{
\item{connectionDetails}{An R object of type ConnectionDetail (details for the function that contains server info, database type, optionally username/password, port)}
\item{cdmDatabaseSchema}{Name of the database schema that contains the OMOP CDM.}
\item{resultsDatabaseSchema}{Name of the database schema that contains the Achilles analysis files. Default is cdmDatabaseSchema}
\item{outputPath}{A folder location to save the JSON files. Default is current working folder}
\item{reports}{A character vector listing the set of reports to generate. Default is all reports.}
\item{vocabDatabaseSchema}{string name of database schema that contains OMOP Vocabulary. Default is cdmDatabaseSchema. On SQL Server, this should specify both the database and the schema, so for example 'results.dbo'.}
\item{compressIntoOneFile}{Boolean indicating if the JSON files should be compressed into one zip file
See \code{data(allReports)} for a list of all report types}
}
\value{
none
}
\description{
\code{exportToJson} Exports Achilles statistics into a JSON form for reports.
}
\details{
Creates individual files for each report found in Achilles.Web
}
\examples{
\dontrun{
connectionDetails <- DatabaseConnector::createConnectionDetails(dbms="sql server", server="yourserver")
exportToJson(connectionDetails, cdmDatabaseSchema="cdm4_sim", outputPath="your/output/path")
}
}
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
library(DT)
source("helper.R")
# Define UI: a two-tab app with an interactive vote map and a data-table explorer
# UI: navbar with a full-screen interactive Leaflet map tab (plus a floating,
# draggable office/candidate control panel) and a Data Explorer table tab.
ui <- navbarPage("2017 Philly Primary Vote Explorer", id = "nav",
  tabPanel("Interactive Map",
    div(class = "outer",
      leafletOutput('voteMap', width = "100%", height = "100%"),

      # Floating control panel: pick the office, then a candidate within it.
      absolutePanel(id = "controls", class = "panel panel-default", fixed = TRUE,
        draggable = TRUE, top = 60, left = "auto", right = 20, bottom = "auto",
        width = 330, height = "auto",
        selectInput("office", label = h4("Office"),
          choices = c("District Attorney" = "DA",
                      "Controller" = "Controller",
                      "CCP Judge (D)" = "CCP",
                      "Commonwealth Ct (D)" = "CWCD",
                      "Commonwealth Ct (R)" = "CWCR"),
          selected = "DA"),
        selectInput("candidate", label = h4("Candidate"),
          choices = names(DApercents[, c(-1, -2, -3)]),
          selected = "KRASNER..L")
      ),

      tags$div(id = "cite",
        # BUG FIX: the original linked to the placeholder "asdf"; use the same
        # repository URL as the Data Explorer tab's citation.
        'Created by ', tags$a(href = "mailto:hollander@gmail.com", "Michael Hollander"), "Available on ", tags$a(href = "https://github.com/mhollander/2017PhillyDAPrimary", "GitHub")
      )
    )
  ),
  tabPanel("Data Explorer",
    htmlOutput("DTTitle", container = tags$h2),
    dataTableOutput("voteTable"),
    tags$div(id = "cite",
      'Created by ', tags$a(href = "mailto:hollander@gmail.com", "Michael Hollander"), "Available on ", tags$a(href = "https://github.com/mhollander/2017PhillyDAPrimary", "GitHub")
    )
  ),
  # Inline CSS: full-bleed map container, translucent draggable control panel,
  # and citation placement.
  tags$head(tags$style(HTML("
    div.outer {
      position: fixed;
      top: 41px;
      left: 0;
      right: 0;
      bottom: 0;
      overflow: hidden;
      padding: 0;
    }

    #controls {
      /* Appearance */
      background-color: white;
      padding: 0 20px 20px 20px;
      cursor: move;
      /* Fade out while not hovering */
      opacity: 0.65;
      zoom: 0.9;
      transition: opacity 500ms 1s;
    }
    #controls:hover {
      /* Fade in while hovering */
      opacity: 0.95;
      transition-delay: 0;
    }

    /* Position and style citation */
    #cite {
      position: absolute;
      bottom: 10px;
      left: 10px;
      font-size: 14px;
    }
  "))
  )
)
# Server: remembers each office's last-selected candidate in session$userData,
# renders the base map and results table, and wires two observers
# (office -> candidate dropdown; candidate -> choropleth layer).
# Map objects, *percents tables, precincts* shapes, pal* palettes and
# votePopup* strings all come from helper.R.
server <- function(input, output, session) {

  # Per-session default candidate for each office (restored when the user
  # switches back to an office they already viewed).
  session$userData$DA <- "KRASNER..L"
  session$userData$CCP <- "KRISTIANSSON..V"
  session$userData$Controller <- "RHYNHART..R"
  session$userData$CWCD <- "CEISLER..E"
  session$userData$CWCR <- "LALLEY..P"
  # NOTE(review): this capitalized "Office" key appears to be written but
  # never read; the observers below use session$userData$office (lowercase),
  # which is first set inside the office observer -- confirm intent.
  session$userData$Office <- "DA"

  # Base leaflet map built in helper.R.
  output$voteMap <- renderLeaflet({
    return(daVoteMap)
  })

  # Heading for the Data Explorer tab, e.g. "DA Primary Results, ...".
  output$DTTitle <- renderUI({
    HTML(paste(input$office,"Primary Results, by Ward and Division"))
  })

  # Results table for the currently selected office; the first column of the
  # percents table is dropped from display.
  output$voteTable <- renderDataTable({
    outputTable <- switch(input$office,
      "DA" = DApercents,
      "CCP" = CCPpercents,
      "Controller" = Contpercents,
      "CWCD" = CWDpercents,
      "CWCR" = CWRpercents)

    aVoteTable <- DT::datatable(outputTable[,-1],
      options=list(
        pageLength = 10,
        lengthMenu = list(c(10, 30, 60, -1),c("10", "30", "60", 'All')),
        order = list(0,'asc'),
        searching=TRUE
      ),
      class="stripe",
      rownames=FALSE
    )
    return(aVoteTable)
  })

  # When the office changes: record it, repopulate the candidate dropdown
  # (the first three columns of each percents table are not candidates),
  # and restore that office's last-selected candidate.
  observe({
    #if (is.null(session$userData$CCP) || is.na(session$userData$CCP) || session$userData$CCP=="")
    #  session$userData$CCP <- "KRISTIANSSON..V"

    office <- input$office
    session$userData$office <- office
    cNames <- switch(office,
      "DA" = names(DApercents[,c(-1,-2,-3)]),
      "CCP" = names(CCPpercents[,c(-1,-2,-3)]),
      "Controller" = names(Contpercents[,c(-1,-2,-3)]),
      "CWCD" = names(CWDpercents[,c(-1,-2,-3)]),
      "CWCR" = names(CWRpercents[,c(-1,-2,-3)]))
    cDefault <- switch(office,
      "DA" = session$userData$DA,
      "CCP" = session$userData$CCP,
      "Controller" = session$userData$Controller,
      "CWCD" = session$userData$CWCD,
      "CWCR" = session$userData$CWCR)

    updateSelectInput(session, "candidate",
      choices = cNames,
      selected = cDefault)
  })

  # When the candidate changes: remember it for the current office and redraw
  # the precinct polygons, colored by that candidate's vote share. Each office
  # uses its own shapefile, palette and popup text from helper.R.
  observe({
    if (session$userData$office == "DA") {
      columnData = precincts@data[[input$candidate]]
      session$userData$DA <- input$candidate
      # otherLayer = ifelse(input$vulnerable=="vulnerableLowStateMargin","vulnerableHighPresMargin","vulnerableLowStateMargin")
      leafletProxy("voteMap", data=precincts) %>%
        clearShapes() %>%
        addPolygons(fillColor = ~pal(columnData),
          fillOpacity = 0.8,
          color="#BDBDC3",
          weight = 1,
          popup = votePopup)
    }
    else if (session$userData$office == "CCP") {
      columnData = precinctsCCP@data[[input$candidate]]
      session$userData$CCP <- input$candidate
      # otherLayer = ifelse(input$vulnerable=="vulnerableLowStateMargin","vulnerableHighPresMargin","vulnerableLowStateMargin")
      leafletProxy("voteMap", data=precinctsCCP) %>%
        clearShapes() %>%
        addPolygons(fillColor = ~ccppal(columnData),
          fillOpacity = 0.8,
          color="#BDBDC3",
          weight = 1,
          popup = votePopupCCP)
    }
    else if (session$userData$office == "Controller") {
      columnData = precinctsCont@data[[input$candidate]]
      session$userData$Controller <- input$candidate
      # otherLayer = ifelse(input$vulnerable=="vulnerableLowStateMargin","vulnerableHighPresMargin","vulnerableLowStateMargin")
      leafletProxy("voteMap", data=precinctsCont) %>%
        clearShapes() %>%
        addPolygons(fillColor = ~pal(columnData),
          fillOpacity = 0.8,
          color="#BDBDC3",
          weight = 1,
          popup = votePopupCont)
    }
    else if (session$userData$office == "CWCD") {
      columnData = precinctsCWD@data[[input$candidate]]
      session$userData$CWCD <- input$candidate
      leafletProxy("voteMap", data=precinctsCWD) %>%
        clearShapes() %>%
        addPolygons(fillColor = ~cwdpal(columnData),
          fillOpacity = 0.8,
          color="#BDBDC3",
          weight = 1,
          popup = votePopupCWD)
    }
    else if (session$userData$office == "CWCR") {
      columnData = precinctsCWR@data[[input$candidate]]
      session$userData$CWCR <- input$candidate
      leafletProxy("voteMap", data=precinctsCWR) %>%
        clearShapes() %>%
        addPolygons(fillColor = ~pal(columnData),
          fillOpacity = 0.8,
          color="#BDBDC3",
          weight = 1,
          popup = votePopupCWR)
    }
  })
}

# Run the application
shinyApp(ui = ui, server = server)
|
/app.R
|
no_license
|
mhollander/2017PhillyDAPrimary
|
R
| false
| false
| 8,882
|
r
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
library(DT)
source("helper.R")
# Define UI: a two-tab app with an interactive vote map and a data-table explorer
# UI: navbar with a full-screen interactive Leaflet map tab (plus a floating,
# draggable office/candidate control panel) and a Data Explorer table tab.
ui <- navbarPage("2017 Philly Primary Vote Explorer", id = "nav",
  tabPanel("Interactive Map",
    div(class = "outer",
      leafletOutput('voteMap', width = "100%", height = "100%"),

      # Floating control panel: pick the office, then a candidate within it.
      absolutePanel(id = "controls", class = "panel panel-default", fixed = TRUE,
        draggable = TRUE, top = 60, left = "auto", right = 20, bottom = "auto",
        width = 330, height = "auto",
        selectInput("office", label = h4("Office"),
          choices = c("District Attorney" = "DA",
                      "Controller" = "Controller",
                      "CCP Judge (D)" = "CCP",
                      "Commonwealth Ct (D)" = "CWCD",
                      "Commonwealth Ct (R)" = "CWCR"),
          selected = "DA"),
        selectInput("candidate", label = h4("Candidate"),
          choices = names(DApercents[, c(-1, -2, -3)]),
          selected = "KRASNER..L")
      ),

      tags$div(id = "cite",
        # BUG FIX: the original linked to the placeholder "asdf"; use the same
        # repository URL as the Data Explorer tab's citation.
        'Created by ', tags$a(href = "mailto:hollander@gmail.com", "Michael Hollander"), "Available on ", tags$a(href = "https://github.com/mhollander/2017PhillyDAPrimary", "GitHub")
      )
    )
  ),
  tabPanel("Data Explorer",
    htmlOutput("DTTitle", container = tags$h2),
    dataTableOutput("voteTable"),
    tags$div(id = "cite",
      'Created by ', tags$a(href = "mailto:hollander@gmail.com", "Michael Hollander"), "Available on ", tags$a(href = "https://github.com/mhollander/2017PhillyDAPrimary", "GitHub")
    )
  ),
  # Inline CSS: full-bleed map container, translucent draggable control panel,
  # and citation placement.
  tags$head(tags$style(HTML("
    div.outer {
      position: fixed;
      top: 41px;
      left: 0;
      right: 0;
      bottom: 0;
      overflow: hidden;
      padding: 0;
    }

    #controls {
      /* Appearance */
      background-color: white;
      padding: 0 20px 20px 20px;
      cursor: move;
      /* Fade out while not hovering */
      opacity: 0.65;
      zoom: 0.9;
      transition: opacity 500ms 1s;
    }
    #controls:hover {
      /* Fade in while hovering */
      opacity: 0.95;
      transition-delay: 0;
    }

    /* Position and style citation */
    #cite {
      position: absolute;
      bottom: 10px;
      left: 10px;
      font-size: 14px;
    }
  "))
  )
)
# Server logic for the vote explorer.
# - session$userData caches the last-selected candidate for each office so
#   switching offices restores the previous candidate choice.
# - The map is rendered once via renderLeaflet and then mutated in place
#   with leafletProxy when the office/candidate selection changes.
server <- function(input, output, session) {
# Per-office default candidates (restored when the user switches offices).
session$userData$DA <- "KRASNER..L"
session$userData$CCP <- "KRISTIANSSON..V"
session$userData$Controller <- "RHYNHART..R"
session$userData$CWCD <- "CEISLER..E"
session$userData$CWCR <- "LALLEY..P"
# NOTE(review): capital-O "Office" is never read below (both observers use
# lower-case "office") -- looks like a typo, but it is harmless because the
# first observer sets "office" when it fires at startup.
session$userData$Office <- "DA"
# Base map; daVoteMap is built in helper.R (sourced at the top of the file).
output$voteMap <- renderLeaflet({
return(daVoteMap)
})
# Dynamic heading shown above the Data Explorer table.
output$DTTitle <- renderUI({
HTML(paste(input$office,"Primary Results, by Ward and Division"))
})
# Ward/division results table for the selected office; the first column of
# the percents table is dropped before display.
output$voteTable <- renderDataTable({
outputTable <- switch(input$office,
"DA" = DApercents,
"CCP" = CCPpercents,
"Controller" = Contpercents,
"CWCD" = CWDpercents,
"CWCR" = CWRpercents)
aVoteTable <- DT::datatable(outputTable[,-1],
options=list(
pageLength = 10,
lengthMenu = list(c(10, 30, 60, -1),c("10", "30", "60", 'All')),
order = list(0,'asc'),
searching=TRUE
),
class="stripe",
rownames=FALSE
)
return(aVoteTable)
})
# When the office changes: record it, repopulate the candidate dropdown for
# that office, and preselect the candidate last viewed for that office.
observe({
#if (is.null(session$userData$CCP) || is.na(session$userData$CCP) || session$userData$CCP=="")
# session$userData$CCP <- "KRISTIANSSON..V"
office <- input$office
session$userData$office <- office
cNames <- switch(office,
"DA" = names(DApercents[,c(-1,-2,-3)]),
"CCP" = names(CCPpercents[,c(-1,-2,-3)]),
"Controller" = names(Contpercents[,c(-1,-2,-3)]),
"CWCD" = names(CWDpercents[,c(-1,-2,-3)]),
"CWCR" = names(CWRpercents[,c(-1,-2,-3)]))
cDefault <- switch(office,
"DA" = session$userData$DA,
"CCP" = session$userData$CCP,
"Controller" = session$userData$Controller,
"CWCD" = session$userData$CWCD,
"CWCR" = session$userData$CWCR)
updateSelectInput(session, "candidate",
choices = cNames,
selected = cDefault)
})
# When the candidate changes: remember it for the current office and redraw
# the division polygons using that office's shapefile, palette and popup.
observe({
if (session$userData$office == "DA") {
columnData = precincts@data[[input$candidate]]
session$userData$DA <- input$candidate
# otherLayer = ifelse(input$vulnerable=="vulnerableLowStateMargin","vulnerableHighPresMargin","vulnerableLowStateMargin")
leafletProxy("voteMap", data=precincts) %>%
clearShapes() %>%
addPolygons(fillColor = ~pal(columnData),
fillOpacity = 0.8,
color="#BDBDC3",
weight = 1,
popup = votePopup)
}
else if (session$userData$office == "CCP") {
columnData = precinctsCCP@data[[input$candidate]]
session$userData$CCP <- input$candidate
# otherLayer = ifelse(input$vulnerable=="vulnerableLowStateMargin","vulnerableHighPresMargin","vulnerableLowStateMargin")
leafletProxy("voteMap", data=precinctsCCP) %>%
clearShapes() %>%
addPolygons(fillColor = ~ccppal(columnData),
fillOpacity = 0.8,
color="#BDBDC3",
weight = 1,
popup = votePopupCCP)
}
else if (session$userData$office == "Controller") {
columnData = precinctsCont@data[[input$candidate]]
session$userData$Controller <- input$candidate
# otherLayer = ifelse(input$vulnerable=="vulnerableLowStateMargin","vulnerableHighPresMargin","vulnerableLowStateMargin")
# NOTE(review): this branch reuses `pal` (the DA palette) rather than a
# Controller-specific palette -- confirm this is intentional.
leafletProxy("voteMap", data=precinctsCont) %>%
clearShapes() %>%
addPolygons(fillColor = ~pal(columnData),
fillOpacity = 0.8,
color="#BDBDC3",
weight = 1,
popup = votePopupCont)
}
else if (session$userData$office == "CWCD") {
columnData = precinctsCWD@data[[input$candidate]]
session$userData$CWCD <- input$candidate
leafletProxy("voteMap", data=precinctsCWD) %>%
clearShapes() %>%
addPolygons(fillColor = ~cwdpal(columnData),
fillOpacity = 0.8,
color="#BDBDC3",
weight = 1,
popup = votePopupCWD)
}
else if (session$userData$office == "CWCR") {
columnData = precinctsCWR@data[[input$candidate]]
session$userData$CWCR <- input$candidate
# NOTE(review): this branch also reuses the DA palette `pal` (cf. cwdpal
# above) -- confirm a CWCR-specific palette was not intended.
leafletProxy("voteMap", data=precinctsCWR) %>%
clearShapes() %>%
addPolygons(fillColor = ~pal(columnData),
fillOpacity = 0.8,
color="#BDBDC3",
weight = 1,
popup = votePopupCWR)
}
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
#****************************************************************************************************************************************************2. fisher
# UI fragment (no server logic) for the 2x2 contingency table / Fisher exact
# test page.  All input ids carry the suffix "4" (cn4, rn4, x4, yt4) to
# namespace this tab; the referenced outputs (dt4, dt4.0-dt4.3, makeplot4,
# makeplot4.1, c.test4) are rendered by the server elsewhere.
sidebarLayout(
# Left column: data entry (Step 1) and hypothesis choice (Step 2).
sidebarPanel(
h4(tags$b("Step 1. Data Preparation")),
p(tags$b("1. Give 2 names to each category of factor shown as column names")),
tags$textarea(id="cn4", rows=2, "High salt\nLow salt"),
p(tags$b("2. Give 2 names to case-control shown as row names")),
tags$textarea(id="rn4", rows=2, "CVD\nNon CVD"), p(br()),
p(tags$b("3. Input 4 values in row-order")),
p("Data points can be separated by , ; /Enter /Tab"),
tags$textarea(id="x4", rows=4,
"5\n30\n2\n23"),
p("Note: No Missing Value"),
# Worked-example text, shown only when the global "explain" toggle is on.
conditionalPanel(
condition = "input.explain_on_off",
p(tags$i("The case-control was CVD patients or not. Factor categories were a high salt diet or not.")),
p(tags$i("Of 35 people who died from CVD, 5 were on a high-salt diet before they die; of 25 people who died from other causes, 2 were on a high-salt diet."))
),
hr(),
h4(tags$b("Step 2. Choose Hypothesis")),
p(tags$b("Null hypothesis")),
p("Case-Control (Row) do not significantly associate with Grouped Factors (Column)"),
# choiceValues map directly onto the `alternative` argument of the test.
radioButtons("yt4", label = "Alternative hypothesis",
choiceNames = list(
HTML("Case-Control (Row) has a significant association with Grouped Factors (Column); odds ratio of Group 1 is significantly different from Group 2"),
HTML("The odds ratio of Group 1 is higher than Group 2"),
HTML("The odds ratio of Group 2 is higher than Group 1")
),
choiceValues = list("two.sided", "greater", "less")
),
conditionalPanel(
condition = "input.explain_on_off",
p(tags$i("In this example, we wanted to determine if there was an association between the cause of death and a high-salt diet."))
)
),
# Right column: contingency/percentage tables, percentage plots, and the
# test-result table with interpretation guidance.
mainPanel(
h4(tags$b("Output 1. Contingency Table")), p(br()),
tabsetPanel(
tabPanel("Table Preview", p(br()),
p(tags$b("2 x 2 Contingency Table with Total Number")),
DT::DTOutput("dt4"),
p(tags$b("Expected Value")),
DT::DTOutput("dt4.0")
),
tabPanel("Percentage Table", p(br()),
p(tags$b("Cell/Total %")),
DT::DTOutput("dt4.3"),
p(tags$b("Cell/Row-Total %")),
DT::DTOutput("dt4.1"),
p(tags$b("Cell/Column-Total %")),
DT::DTOutput("dt4.2")
),
tabPanel("Percentage Plot", p(br()),
p(tags$b("Percentages in the rows")),
plotly::plotlyOutput("makeplot4"),
p(tags$b("Percentages in the columns")),
plotly::plotlyOutput("makeplot4.1")
)
),
hr(),
h4(tags$b("Output 2. Test Results")), p(br()),
DT::DTOutput("c.test4"),
HTML(
"<b> Explanations </b>
<ul>
<li> P Value < 0.05, then Case-Control (Row) is significantly associated with Grouped Factors (Column) (Accept the alternative hypothesis)</li>
<li> P Value >= 0.05, then Case-Control (Row) is not associated with Grouped Factors (Column). (Accept the null hypothesis)</li>
</ul>"
),
conditionalPanel(
condition = "input.explain_on_off",
p(tags$i("In this default setting, two expected values < 5, so we used the Fisher exact test. From the test result, we concluded that no significant association was found between the cause of death and high salt diet" ))
)
)
)
|
/5MFSrctabtest/ui_2_fisher.R
|
permissive
|
mephas/mephas_web
|
R
| false
| false
| 3,432
|
r
|
#****************************************************************************************************************************************************2. fisher
# UI fragment (no server logic) for the 2x2 contingency table / Fisher exact
# test page.  All input ids carry the suffix "4" (cn4, rn4, x4, yt4) to
# namespace this tab; the referenced outputs (dt4, dt4.0-dt4.3, makeplot4,
# makeplot4.1, c.test4) are rendered by the server elsewhere.
sidebarLayout(
# Left column: data entry (Step 1) and hypothesis choice (Step 2).
sidebarPanel(
h4(tags$b("Step 1. Data Preparation")),
p(tags$b("1. Give 2 names to each category of factor shown as column names")),
tags$textarea(id="cn4", rows=2, "High salt\nLow salt"),
p(tags$b("2. Give 2 names to case-control shown as row names")),
tags$textarea(id="rn4", rows=2, "CVD\nNon CVD"), p(br()),
p(tags$b("3. Input 4 values in row-order")),
p("Data points can be separated by , ; /Enter /Tab"),
tags$textarea(id="x4", rows=4,
"5\n30\n2\n23"),
p("Note: No Missing Value"),
# Worked-example text, shown only when the global "explain" toggle is on.
conditionalPanel(
condition = "input.explain_on_off",
p(tags$i("The case-control was CVD patients or not. Factor categories were a high salt diet or not.")),
p(tags$i("Of 35 people who died from CVD, 5 were on a high-salt diet before they die; of 25 people who died from other causes, 2 were on a high-salt diet."))
),
hr(),
h4(tags$b("Step 2. Choose Hypothesis")),
p(tags$b("Null hypothesis")),
p("Case-Control (Row) do not significantly associate with Grouped Factors (Column)"),
# choiceValues map directly onto the `alternative` argument of the test.
radioButtons("yt4", label = "Alternative hypothesis",
choiceNames = list(
HTML("Case-Control (Row) has a significant association with Grouped Factors (Column); odds ratio of Group 1 is significantly different from Group 2"),
HTML("The odds ratio of Group 1 is higher than Group 2"),
HTML("The odds ratio of Group 2 is higher than Group 1")
),
choiceValues = list("two.sided", "greater", "less")
),
conditionalPanel(
condition = "input.explain_on_off",
p(tags$i("In this example, we wanted to determine if there was an association between the cause of death and a high-salt diet."))
)
),
# Right column: contingency/percentage tables, percentage plots, and the
# test-result table with interpretation guidance.
mainPanel(
h4(tags$b("Output 1. Contingency Table")), p(br()),
tabsetPanel(
tabPanel("Table Preview", p(br()),
p(tags$b("2 x 2 Contingency Table with Total Number")),
DT::DTOutput("dt4"),
p(tags$b("Expected Value")),
DT::DTOutput("dt4.0")
),
tabPanel("Percentage Table", p(br()),
p(tags$b("Cell/Total %")),
DT::DTOutput("dt4.3"),
p(tags$b("Cell/Row-Total %")),
DT::DTOutput("dt4.1"),
p(tags$b("Cell/Column-Total %")),
DT::DTOutput("dt4.2")
),
tabPanel("Percentage Plot", p(br()),
p(tags$b("Percentages in the rows")),
plotly::plotlyOutput("makeplot4"),
p(tags$b("Percentages in the columns")),
plotly::plotlyOutput("makeplot4.1")
)
),
hr(),
h4(tags$b("Output 2. Test Results")), p(br()),
DT::DTOutput("c.test4"),
HTML(
"<b> Explanations </b>
<ul>
<li> P Value < 0.05, then Case-Control (Row) is significantly associated with Grouped Factors (Column) (Accept the alternative hypothesis)</li>
<li> P Value >= 0.05, then Case-Control (Row) is not associated with Grouped Factors (Column). (Accept the null hypothesis)</li>
</ul>"
),
conditionalPanel(
condition = "input.explain_on_off",
p(tags$i("In this default setting, two expected values < 5, so we used the Fisher exact test. From the test result, we concluded that no significant association was found between the cause of death and high salt diet" ))
)
)
)
|
# Precision-weighted meta-analysis of regression coefficients.
#
# Reads one coefficient table per .xlsx file (expected columns: coef, B
# (estimate), SD), aligns all tables on the coefficient names of the largest
# table, and writes a combined workbook "Meta.xlsx" containing, per input
# file, the estimates (B_<name>) and precisions (Prez_<name> = 1/SD), plus
# the precision-weighted mean B_Meta and the total precision Prez_Meta.
#
# Args:
#   list_of_files: character vector of "<name>.xlsx" file names.
#   sheet: sheet index or name passed to openxlsx::read.xlsx().
#
# Side effects: writes "Meta.xlsx" to the working directory and sets two
# openxlsx display options.
metaReg <- function(list_of_files, sheet) {
  # Read every workbook once; key each table by its file name minus ".xlsx".
  # (Replaces the original assign()/get() pattern, which could collide with
  # local variable names and grew its bookkeeping vectors inside the loop.)
  tables <- lapply(list_of_files, function(f) read.xlsx(f, sheet = sheet))
  names(tables) <- substring(list_of_files, 1, nchar(list_of_files) - 5)
  # The table with the most coefficients fixes the row order of the output
  # (ties broken by the first occurrence, as in the original).
  start <- names(tables)[which.max(vapply(tables, nrow, integer(1)))]
  ref <- tables[[start]]
  Table <- data.frame(ref$coef, ref$B, 1 / ref$SD)
  names(Table) <- c("Coef", paste0("B_", start), paste0("Prez_", start))
  # Running sums for the weighted mean: Meta accumulates B * (1/SD),
  # Prez accumulates the precisions 1/SD.
  Meta <- Table[, 2] * Table[, 3]
  Prez <- Table[, 3]
  for (nm in names(tables)) {
    if (nm != start) {
      other <- tables[[nm]]
      # Align the other table's rows with the reference coefficient order;
      # coefficients absent from `other` become NA and propagate into the
      # meta columns (unchanged from the original behaviour).
      M <- match(Table[, 1], other[, 1])
      Table[[paste0("B_", nm)]] <- other$B[M]
      Table[[paste0("Prez_", nm)]] <- 1 / other$SD[M]
      # BUG FIX: the original accumulated `Table2$B / Table2$SD[M]`, pairing
      # estimates in the *other* table's own row order with re-ordered SDs.
      # Both vectors must be re-indexed by M.
      Meta <- Meta + other$B[M] / other$SD[M]
      Prez <- Prez + 1 / other$SD[M]
    }
  }
  Table$B_Meta <- Meta / Prez
  Table$Prez_Meta <- Prez
  # Write the combined table to "Meta.xlsx".
  wb <- createWorkbook()
  options("openxlsx.borderStyle" = "thin")  # table styling
  options("openxlsx.borderColour" = "#4F81BD")
  addWorksheet(wb, "Meta_Reg")
  writeData(wb, "Meta_Reg", Table, rowNames = TRUE)
  saveWorkbook(wb, "Meta.xlsx", overwrite = TRUE)
}
# Build a model-fit object from the meta-analysis workbook written by
# metaReg() ("Meta.xlsx": sheet 1 with columns Coef, B_Meta, Prez_Meta).
#
# Args:
#   file: path to the meta-analysis .xlsx file.
#   Modell: value stored in the returned object's @Modell slot.
#
# Returns: a fitMorbiRSA object (project-defined S4 class) with its
# coefficients set via setCoef().
meta_to_fit<-function(file,Modell){
# Empty fit object; fitMorbiRSA() and setCoef() are project-defined.
fit<-fitMorbiRSA()
meta<-read.xlsx(file,sheet=1)
# Precision-weighted mean coefficients, named by coefficient.
B<-meta$B_Meta
names(B)<-meta$Coef
# NOTE(review): Prez_Meta is the summed precision (sum of 1/SD in metaReg),
# so 1/Prez_Meta is treated here as the pooled SD and squared into a
# variance for setCoef() -- confirm this matches setCoef()'s expected scale.
SD<-1/meta$Prez_Meta
fit<-setCoef(fit,SD^2,B)
fit@Modell<-Modell
return(fit)
}
|
/R/meta_Reg.R
|
no_license
|
AaarrrRookie/GWR
|
R
| false
| false
| 1,443
|
r
|
# Precision-weighted meta-analysis of regression coefficients.
#
# Reads one coefficient table per .xlsx file (expected columns: coef, B
# (estimate), SD), aligns all tables on the coefficient names of the largest
# table, and writes a combined workbook "Meta.xlsx" containing, per input
# file, the estimates (B_<name>) and precisions (Prez_<name> = 1/SD), plus
# the precision-weighted mean B_Meta and the total precision Prez_Meta.
#
# Args:
#   list_of_files: character vector of "<name>.xlsx" file names.
#   sheet: sheet index or name passed to openxlsx::read.xlsx().
#
# Side effects: writes "Meta.xlsx" to the working directory and sets two
# openxlsx display options.
metaReg <- function(list_of_files, sheet) {
  # Read every workbook once; key each table by its file name minus ".xlsx".
  # (Replaces the original assign()/get() pattern, which could collide with
  # local variable names and grew its bookkeeping vectors inside the loop.)
  tables <- lapply(list_of_files, function(f) read.xlsx(f, sheet = sheet))
  names(tables) <- substring(list_of_files, 1, nchar(list_of_files) - 5)
  # The table with the most coefficients fixes the row order of the output
  # (ties broken by the first occurrence, as in the original).
  start <- names(tables)[which.max(vapply(tables, nrow, integer(1)))]
  ref <- tables[[start]]
  Table <- data.frame(ref$coef, ref$B, 1 / ref$SD)
  names(Table) <- c("Coef", paste0("B_", start), paste0("Prez_", start))
  # Running sums for the weighted mean: Meta accumulates B * (1/SD),
  # Prez accumulates the precisions 1/SD.
  Meta <- Table[, 2] * Table[, 3]
  Prez <- Table[, 3]
  for (nm in names(tables)) {
    if (nm != start) {
      other <- tables[[nm]]
      # Align the other table's rows with the reference coefficient order;
      # coefficients absent from `other` become NA and propagate into the
      # meta columns (unchanged from the original behaviour).
      M <- match(Table[, 1], other[, 1])
      Table[[paste0("B_", nm)]] <- other$B[M]
      Table[[paste0("Prez_", nm)]] <- 1 / other$SD[M]
      # BUG FIX: the original accumulated `Table2$B / Table2$SD[M]`, pairing
      # estimates in the *other* table's own row order with re-ordered SDs.
      # Both vectors must be re-indexed by M.
      Meta <- Meta + other$B[M] / other$SD[M]
      Prez <- Prez + 1 / other$SD[M]
    }
  }
  Table$B_Meta <- Meta / Prez
  Table$Prez_Meta <- Prez
  # Write the combined table to "Meta.xlsx".
  wb <- createWorkbook()
  options("openxlsx.borderStyle" = "thin")  # table styling
  options("openxlsx.borderColour" = "#4F81BD")
  addWorksheet(wb, "Meta_Reg")
  writeData(wb, "Meta_Reg", Table, rowNames = TRUE)
  saveWorkbook(wb, "Meta.xlsx", overwrite = TRUE)
}
# Build a model-fit object from the meta-analysis workbook written by
# metaReg() ("Meta.xlsx": sheet 1 with columns Coef, B_Meta, Prez_Meta).
#
# Args:
#   file: path to the meta-analysis .xlsx file.
#   Modell: value stored in the returned object's @Modell slot.
#
# Returns: a fitMorbiRSA object (project-defined S4 class) with its
# coefficients set via setCoef().
meta_to_fit<-function(file,Modell){
# Empty fit object; fitMorbiRSA() and setCoef() are project-defined.
fit<-fitMorbiRSA()
meta<-read.xlsx(file,sheet=1)
# Precision-weighted mean coefficients, named by coefficient.
B<-meta$B_Meta
names(B)<-meta$Coef
# NOTE(review): Prez_Meta is the summed precision (sum of 1/SD in metaReg),
# so 1/Prez_Meta is treated here as the pooled SD and squared into a
# variance for setCoef() -- confirm this matches setCoef()'s expected scale.
SD<-1/meta$Prez_Meta
fit<-setCoef(fit,SD^2,B)
fit@Modell<-Modell
return(fit)
}
|
library(dplyr)

# Data sources ----
# (replaces the bare string 'Datasources', which as a top-level expression
# only printed noise when the script was run with Rscript)
race_by_city    <- read.csv(file = "Data/ShareRaceByCity.csv")
police_killings <- read.csv(file = "Data/PoliceKillingsUS.csv")
education_stats <- read.csv(file = "Data/PercentOVer25CompletedHighSchool.csv")
poverty_stats   <- read.csv(file = "Data/PercentagePeopleBelowPovertyLevel.csv")
income_stats    <- read.csv(file = "Data/MedianHouseholdIncome2015.csv")

# Tally of killings where the victim was armed with a gun AND recorded as
# race "B".  BUG FIX: the original used the scalar `&&`, which compares only
# the first elements (and is an error for length > 1 operands since R 4.3);
# the element-wise `&` is required to build a per-row logical vector.
summary(police_killings$armed == "gun" & police_killings$race == "B")
|
/script.R
|
no_license
|
bmetsker/pv_project
|
R
| false
| false
| 456
|
r
|
library(dplyr)

# Data sources ----
# (replaces the bare string 'Datasources', which as a top-level expression
# only printed noise when the script was run with Rscript)
race_by_city    <- read.csv(file = "Data/ShareRaceByCity.csv")
police_killings <- read.csv(file = "Data/PoliceKillingsUS.csv")
education_stats <- read.csv(file = "Data/PercentOVer25CompletedHighSchool.csv")
poverty_stats   <- read.csv(file = "Data/PercentagePeopleBelowPovertyLevel.csv")
income_stats    <- read.csv(file = "Data/MedianHouseholdIncome2015.csv")

# Tally of killings where the victim was armed with a gun AND recorded as
# race "B".  BUG FIX: the original used the scalar `&&`, which compares only
# the first elements (and is an error for length > 1 operands since R 4.3);
# the element-wise `&` is required to build a per-row logical vector.
summary(police_killings$armed == "gun" & police_killings$race == "B")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{get_unique_vars}
\alias{get_unique_vars}
\title{Returns information of the dataframe - The variables which have a constant value}
\usage{
get_unique_vars(df)
}
\arguments{
\item{df}{The dataframe to generate the EDA report}
}
\value{
list(unique_count, values) A list containing two elements - a dataframe with the column name and the unique_count and another named list of the variables and their constant values.
}
\description{
Returns information of the dataframe - The variables which have a constant value
}
\examples{
get_unique_vars(df)
}
|
/man/get_unique_vars.Rd
|
permissive
|
vivekkalyanarangan30/dfprofile
|
R
| false
| true
| 641
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{get_unique_vars}
\alias{get_unique_vars}
\title{Returns information of the dataframe - The variables which have a constant value}
\usage{
get_unique_vars(df)
}
\arguments{
\item{df}{The dataframe to generate the EDA report}
}
\value{
list(unique_count, values) A list containing two elements - a dataframe with the column name and the unique_count and another named list of the variables and their constant values.
}
\description{
Returns information of the dataframe - The variables which have a constant value
}
\examples{
get_unique_vars(df)
}
|
\name{ExpQQ}
\alias{ExpQQ}
\title{
Exponential quantile plot
}
\description{
Computes the empirical quantiles of a data vector and the theoretical quantiles of the standard exponential distribution. These quantiles are then plotted in an exponential QQ-plot with the theoretical quantiles on the \eqn{x}-axis and the empirical quantiles on the \eqn{y}-axis.
}
\usage{
ExpQQ(data, plot = TRUE, main = "Exponential QQ-plot", ...)
}
\arguments{
\item{data}{
Vector of \eqn{n} observations.
}
\item{plot}{
Logical indicating if the quantiles should be plotted in an Exponential QQ-plot, default is \code{TRUE}.
}
\item{main}{
Title for the plot, default is \code{"Exponential QQ-plot"}.
}
\item{\dots}{
Additional arguments for the \code{plot} function, see \code{\link[graphics]{plot}} for more details.
}
}
\details{
The exponential QQ-plot is defined as
\deqn{( -\log(1-i/(n+1)), X_{i,n} )}
for \eqn{i=1,...,n,}
with \eqn{X_{i,n}} the \eqn{i}-th order statistic of the data.
Note that the mean excess plot is the derivative plot of the Exponential QQ-plot.
See Section 4.1 of Albrecher et al. (2017) for more details.
}
\value{
A list with following components:
\item{eqq.the}{Vector of the theoretical quantiles from a standard exponential distribution.}
\item{eqq.emp}{Vector of the empirical quantiles from the data.}
}
\references{
Albrecher, H., Beirlant, J. and Teugels, J. (2017). \emph{Reinsurance: Actuarial and Statistical Aspects}, Wiley, Chichester.
Beirlant J., Goegebeur Y., Segers, J. and Teugels, J. (2004). \emph{Statistics of Extremes: Theory and Applications}, Wiley Series in Probability, Wiley, Chichester.
}
\author{
Tom Reynkens based on \code{S-Plus} code from Yuri Goegebeur.
}
\seealso{
\code{\link{MeanExcess}}, \code{\link{LognormalQQ}}, \code{\link{ParetoQQ}}, \code{\link{WeibullQQ}}
}
\examples{
data(norwegianfire)
# Exponential QQ-plot for Norwegian Fire Insurance data for claims in 1976.
ExpQQ(norwegianfire$size[norwegianfire$year==76])
# Pareto QQ-plot for Norwegian Fire Insurance data for claims in 1976.
ParetoQQ(norwegianfire$size[norwegianfire$year==76])
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
|
/man/ExpQQ.Rd
|
no_license
|
kevinykuo/ReIns
|
R
| false
| false
| 2,211
|
rd
|
\name{ExpQQ}
\alias{ExpQQ}
\title{
Exponential quantile plot
}
\description{
Computes the empirical quantiles of a data vector and the theoretical quantiles of the standard exponential distribution. These quantiles are then plotted in an exponential QQ-plot with the theoretical quantiles on the \eqn{x}-axis and the empirical quantiles on the \eqn{y}-axis.
}
\usage{
ExpQQ(data, plot = TRUE, main = "Exponential QQ-plot", ...)
}
\arguments{
\item{data}{
Vector of \eqn{n} observations.
}
\item{plot}{
Logical indicating if the quantiles should be plotted in an Exponential QQ-plot, default is \code{TRUE}.
}
\item{main}{
Title for the plot, default is \code{"Exponential QQ-plot"}.
}
\item{\dots}{
Additional arguments for the \code{plot} function, see \code{\link[graphics]{plot}} for more details.
}
}
\details{
The exponential QQ-plot is defined as
\deqn{( -\log(1-i/(n+1)), X_{i,n} )}
for \eqn{i=1,...,n,}
with \eqn{X_{i,n}} the \eqn{i}-th order statistic of the data.
Note that the mean excess plot is the derivative plot of the Exponential QQ-plot.
See Section 4.1 of Albrecher et al. (2017) for more details.
}
\value{
A list with following components:
\item{eqq.the}{Vector of the theoretical quantiles from a standard exponential distribution.}
\item{eqq.emp}{Vector of the empirical quantiles from the data.}
}
\references{
Albrecher, H., Beirlant, J. and Teugels, J. (2017). \emph{Reinsurance: Actuarial and Statistical Aspects}, Wiley, Chichester.
Beirlant J., Goegebeur Y., Segers, J. and Teugels, J. (2004). \emph{Statistics of Extremes: Theory and Applications}, Wiley Series in Probability, Wiley, Chichester.
}
\author{
Tom Reynkens based on \code{S-Plus} code from Yuri Goegebeur.
}
\seealso{
\code{\link{MeanExcess}}, \code{\link{LognormalQQ}}, \code{\link{ParetoQQ}}, \code{\link{WeibullQQ}}
}
\examples{
data(norwegianfire)
# Exponential QQ-plot for Norwegian Fire Insurance data for claims in 1976.
ExpQQ(norwegianfire$size[norwegianfire$year==76])
# Pareto QQ-plot for Norwegian Fire Insurance data for claims in 1976.
ParetoQQ(norwegianfire$size[norwegianfire$year==76])
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 2383
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2383
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/eequery_query57_1344n.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 993
c no.of clauses 2383
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 2383
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/eequery_query57_1344n.qdimacs 993 2383 E1 [] 0 70 923 2383 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/eequery_query57_1344n/eequery_query57_1344n.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 710
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 2383
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2383
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/eequery_query57_1344n.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 993
c no.of clauses 2383
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 2383
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/eequery_query57_1344n.qdimacs 993 2383 E1 [] 0 70 923 2383 NONE
|
\encoding{UTF-8}
\name{genpop class}
\alias{genpop-class}
\alias{dist,genpop,ANY,ANY,ANY,missing-method}
\alias{names,genpop-method}
\alias{show,genpop-method}
\alias{summary,genpop-method}
\alias{print,genpopSummary-method}
\alias{print.genpopSummary}
\alias{is.genpop}
\title{adegenet formal class (S4) for allele counts in populations}
\description{An object of class \code{genpop} contains allele counts
for several loci.\cr
It contains several components (see 'slots' section).\cr
Such object is obtained using \code{genind2genpop} which converts
individuals genotypes of known population into a \code{genpop} object.
Note that the function \code{summary} of a \code{genpop} object
returns a list of components.
Note that as in other S4 classes, slots are accessed using @ instead
of \$.
}
\section{Slots}{
\describe{
\item{\code{tab}:}{matrix of allele counts for each combination of population
(in rows) and alleles (in columns).}
\item{\code{loc.fac}:}{locus factor for the columns of \code{tab}}
\item{\code{loc.n.all}:}{integer vector giving the number of alleles per locus}
\item{\code{all.names}:}{list having one component per locus, each containing a character vector of alleles names}
\item{\code{call}:}{the matched call}
\item{\code{ploidy}:}{ an integer indicating the degree of ploidy of
the genotypes. Beware: 2 is not an integer, but as.integer(2) is.}
\item{\code{type}:}{ a character string indicating the type of
marker: 'codom' stands for 'codominant' (e.g. microsatellites,
allozymes); 'PA' stands for 'presence/absence' (e.g. AFLP).}
\item{\code{other}:}{(optional) a list containing other information}
}
}
\section{Extends}{
Class \code{"\linkS4class{gen}"}, directly.
Class \code{"\linkS4class{popInfo}"}, directly.
}
\section{Methods}{
\describe{
\item{names}{\code{signature(x = "genpop")}: give the names of the
components of a genpop object}
\item{print}{\code{signature(x = "genpop")}: prints a genpop object}
\item{show}{\code{signature(object = "genpop")}: shows a genpop
object (same as print)}
\item{summary}{\code{signature(object = "genpop")}: summarizes a
genpop object, invisibly returning its content or suppress printing of auxiliary information by specifying \code{verbose = FALSE}}
}
}
\seealso{\code{\link{as.genpop}}, \code{\link{is.genpop}},\code{\link{makefreq}}, \code{\link{genind}}, \code{\link{import2genind}}, \code{\link{read.genetix}}, \code{\link{read.genepop}}, \code{\link{read.fstat}}
}
\author{ Thibaut Jombart \email{t.jombart@imperial.ac.uk} }
\examples{
obj1 <- import2genind(system.file("files/nancycats.gen",
package="adegenet"))
obj1
obj2 <- genind2genpop(obj1)
obj2
\dontrun{
data(microsatt)
# use as.genpop to convert convenient count tab to genpop
obj3 <- as.genpop(microsatt$tab)
obj3
all(obj3@tab==microsatt$tab)
# perform a correspondance analysis
obj4 <- genind2genpop(obj1,missing="chi2")
ca1 <- dudi.coa(as.data.frame(obj4@tab),scannf=FALSE)
s.label(ca1$li,sub="Correspondance Analysis",csub=2)
add.scatter.eig(ca1$eig,2,xax=1,yax=2,posi="top")
}
}
\keyword{classes}
\keyword{manip}
\keyword{multivariate}
|
/man/genpop.Rd
|
no_license
|
gtonkinhill/adegenet
|
R
| false
| false
| 3,209
|
rd
|
\encoding{UTF-8}
\name{genpop class}
\alias{genpop-class}
\alias{dist,genpop,ANY,ANY,ANY,missing-method}
\alias{names,genpop-method}
\alias{show,genpop-method}
\alias{summary,genpop-method}
\alias{print,genpopSummary-method}
\alias{print.genpopSummary}
\alias{is.genpop}
\title{adegenet formal class (S4) for allele counts in populations}
\description{An object of class \code{genpop} contains allele counts
for several loci.\cr
It contains several components (see 'slots' section).\cr
Such object is obtained using \code{genind2genpop} which converts
individuals genotypes of known population into a \code{genpop} object.
Note that the function \code{summary} of a \code{genpop} object
returns a list of components.
Note that as in other S4 classes, slots are accessed using @ instead
of \$.
}
\section{Slots}{
\describe{
\item{\code{tab}:}{matrix of allele counts for each combination of population
(in rows) and alleles (in columns).}
\item{\code{loc.fac}:}{locus factor for the columns of \code{tab}}
\item{\code{loc.n.all}:}{integer vector giving the number of alleles per locus}
\item{\code{all.names}:}{list having one component per locus, each containing a character vector of alleles names}
\item{\code{call}:}{the matched call}
\item{\code{ploidy}:}{ an integer indicating the degree of ploidy of
the genotypes. Beware: 2 is not an integer, but as.integer(2) is.}
\item{\code{type}:}{ a character string indicating the type of
marker: 'codom' stands for 'codominant' (e.g. microsatellites,
allozymes); 'PA' stands for 'presence/absence' (e.g. AFLP).}
\item{\code{other}:}{(optional) a list containing other information}
}
}
\section{Extends}{
Class \code{"\linkS4class{gen}"}, directly.
Class \code{"\linkS4class{popInfo}"}, directly.
}
\section{Methods}{
\describe{
\item{names}{\code{signature(x = "genpop")}: give the names of the
components of a genpop object}
\item{print}{\code{signature(x = "genpop")}: prints a genpop object}
\item{show}{\code{signature(object = "genpop")}: shows a genpop
object (same as print)}
\item{summary}{\code{signature(object = "genpop")}: summarizes a
genpop object, invisibly returning its content or suppress printing of auxiliary information by specifying \code{verbose = FALSE}}
}
}
\seealso{\code{\link{as.genpop}}, \code{\link{is.genpop}},\code{\link{makefreq}}, \code{\link{genind}}, \code{\link{import2genind}}, \code{\link{read.genetix}}, \code{\link{read.genepop}}, \code{\link{read.fstat}}
}
\author{ Thibaut Jombart \email{t.jombart@imperial.ac.uk} }
\examples{
obj1 <- import2genind(system.file("files/nancycats.gen",
package="adegenet"))
obj1
obj2 <- genind2genpop(obj1)
obj2
\dontrun{
data(microsatt)
# use as.genpop to convert convenient count tab to genpop
obj3 <- as.genpop(microsatt$tab)
obj3
all(obj3@tab==microsatt$tab)
# perform a correspondance analysis
obj4 <- genind2genpop(obj1,missing="chi2")
ca1 <- dudi.coa(as.data.frame(obj4@tab),scannf=FALSE)
s.label(ca1$li,sub="Correspondance Analysis",csub=2)
add.scatter.eig(ca1$eig,2,xax=1,yax=2,posi="top")
}
}
\keyword{classes}
\keyword{manip}
\keyword{multivariate}
|
# UI for the Chicago Crime Analysis 2020 dashboard.
# Assumes a data frame `crime` (columns: month, date_, "Primary Type") is
# available in the app environment; the referenced outputs (BarGraph, mymap,
# HeatMap, tab4) are rendered by the server.
ui <- fluidPage(
  titlePanel("Chicago Crime Analysis 2020 "),
  mainPanel(tabsetPanel(
    tabPanel("Crime Types by Month Frequency",
             h3("Frequency of Crimes Types by Month"), fluid = TRUE,
             # Sidebar panel for Month: selector driving the bar graph.
             sidebarPanel(
               selectInput(
                 inputId = "month",
                 label = "Month : ",
                 choices = unique(crime["month"]),
                 multiple = FALSE,
                 selected = "1"
               )
             ),
             # BUG FIX: removed the stray trailing comma that followed
             # sidebarPanel(...) and produced an empty argument in the
             # enclosing tabPanel() call.
             mainPanel(plotOutput("BarGraph"))
    ),
    tabPanel("Location Map", h3("Crime Locations"), fluid = TRUE,
             sidebarPanel(
               selectInput(
                 inputId = "date",
                 label = "Date : ",
                 choices = unique(crime["date_"]),
                 multiple = FALSE,
                 selected = "2020-01-01 CST"
               ),
               selectInput(
                 inputId = "CrimeType",
                 label = "Crime Type ",
                 choices = unique(crime["Primary Type"]),
                 multiple = FALSE,
                 # BUG FIX: the original preset "2020-01-01 CST" (a date),
                 # which is not among the crime-type choices; NULL lets
                 # Shiny default to the first available choice.
                 selected = NULL
               )
             ),
             # NOTE(review): mainPanel()'s width is normally a Bootstrap
             # column count (1-12); "600"/"1000" pass through as raw tag
             # attributes -- confirm the intended layout.
             mainPanel(leafletOutput("mymap"), height = "600", width = "1000")
    ),
    tabPanel("Heat Map", h3("Crime Locations"), fluid = TRUE,
             mainPanel(plotOutput("HeatMap"))),
    tabPanel("Chicago Crime Analysis", fluid = TRUE,
             mainPanel(plotOutput("tab4")))
  ))
)
|
/ui.R
|
no_license
|
GarimaTuteja/RShinyApplication2
|
R
| false
| false
| 2,243
|
r
|
# UI for the Chicago Crime Analysis 2020 dashboard.
# Assumes a data frame `crime` (columns: month, date_, "Primary Type") is
# available in the app environment; the referenced outputs (BarGraph, mymap,
# HeatMap, tab4) are rendered by the server.
ui <- fluidPage(
  titlePanel("Chicago Crime Analysis 2020 "),
  mainPanel(tabsetPanel(
    tabPanel("Crime Types by Month Frequency",
             h3("Frequency of Crimes Types by Month"), fluid = TRUE,
             # Sidebar panel for Month: selector driving the bar graph.
             sidebarPanel(
               selectInput(
                 inputId = "month",
                 label = "Month : ",
                 choices = unique(crime["month"]),
                 multiple = FALSE,
                 selected = "1"
               )
             ),
             # BUG FIX: removed the stray trailing comma that followed
             # sidebarPanel(...) and produced an empty argument in the
             # enclosing tabPanel() call.
             mainPanel(plotOutput("BarGraph"))
    ),
    tabPanel("Location Map", h3("Crime Locations"), fluid = TRUE,
             sidebarPanel(
               selectInput(
                 inputId = "date",
                 label = "Date : ",
                 choices = unique(crime["date_"]),
                 multiple = FALSE,
                 selected = "2020-01-01 CST"
               ),
               selectInput(
                 inputId = "CrimeType",
                 label = "Crime Type ",
                 choices = unique(crime["Primary Type"]),
                 multiple = FALSE,
                 # BUG FIX: the original preset "2020-01-01 CST" (a date),
                 # which is not among the crime-type choices; NULL lets
                 # Shiny default to the first available choice.
                 selected = NULL
               )
             ),
             # NOTE(review): mainPanel()'s width is normally a Bootstrap
             # column count (1-12); "600"/"1000" pass through as raw tag
             # attributes -- confirm the intended layout.
             mainPanel(leafletOutput("mymap"), height = "600", width = "1000")
    ),
    tabPanel("Heat Map", h3("Crime Locations"), fluid = TRUE,
             mainPanel(plotOutput("HeatMap"))),
    tabPanel("Chicago Crime Analysis", fluid = TRUE,
             mainPanel(plotOutput("tab4")))
  ))
)
|
# Code extracted (via knitr::purl) from the conStruct "run-conStruct"
# vignette.  Chunks whose lines begin with "# " come from eval=FALSE chunks
# and are display-only examples; only the library/data calls and the two
# echo=FALSE figure chunks below actually execute.
## ---- echo = FALSE------------------------------------------------------------
knitr::opts_chunk$set(collapse = TRUE, comment = "#>")
## -----------------------------------------------------------------------------
library(conStruct)
data(conStruct.data)
## ----eval=FALSE---------------------------------------------------------------
# # load the example dataset
# data(conStruct.data)
#
# # run a conStruct analysis
#
# # you have to specify:
# # the number of layers (K)
# # the allele frequency data (freqs)
# # the geographic distance matrix (geoDist)
# # the sampling coordinates (coords)
#
# my.run <- conStruct(spatial = TRUE,
# K = 3,
# freqs = conStruct.data$allele.frequencies,
# geoDist = conStruct.data$geoDist,
# coords = conStruct.data$coords,
# prefix = "spK3")
## ----eval=FALSE---------------------------------------------------------------
# # load the example dataset
# data(conStruct.data)
#
# # run a conStruct analysis
#
# # you have to specify:
# # the number of layers (K)
# # the allele frequency data (freqs)
# # the sampling coordinates (coords)
# #
# # if you're running the nonspatial model,
# # you do not have to specify
# # the geographic distance matrix (geoDist)
#
# my.run <- conStruct(spatial = FALSE,
# K = 2,
# freqs = conStruct.data$allele.frequencies,
# geoDist = NULL,
# coords = conStruct.data$coords,
# prefix = "nspK2")
## ----eval=FALSE---------------------------------------------------------------
# my.run <- conStruct(spatial = TRUE,
# K = 3,
# freqs = conStruct.data$allele.frequencies,
# geoDist = conStruct.data$geoDist,
# coords = conStruct.data$coords,
# prefix = "spK3",
# n.chains = 1,
# n.iter = 1000,
# make.figs = TRUE,
# save.files = TRUE)
# Three simulated MCMC traces illustrating convergence diagnostics:
# (a) stationary around a value, (b) still trending (not converged),
# (c) jumping between modes (multi-modal).
## ----echo=FALSE,fig.width=7,fig.height=2.7------------------------------------
par(mfrow=c(1,3),mar=c(4,3,1.5,1))
plot(c(0,rnorm(500,1,0.2)),type='l',
xlab="",yaxt='n',ylab="")
mtext(side=2,text="parameter estimate",padj=-1)
mtext(side=3,text="(a) looks good",padj=-0.1)
plot(c(0,rnorm(500,c(log(seq(0,1,length.out=500))),0.2)),type='l',
xlab="",yaxt='n',ylab="")
mtext(side=1,text="mcmc iterations",padj=2.6)
mtext(side=3,text="(b) hasn't converged",padj=-0.1)
plot(c(0,rnorm(150,1,0.2),rnorm(200,3,0.2),rnorm(150,1,0.2)),type='l',
xlab="",yaxt='n',ylab="")
mtext(side=3,text="(c) multi-modal",padj=-0.1)
# Random admixture-proportion matrix for an example structure plot: two main
# layers plus a small prepended third layer; rows are renormalized to sum to 1.
## ----echo=FALSE,fig.width=7,fig.height=3--------------------------------------
w <- matrix(rnorm(40,sample(2:10,40,replace=TRUE),1),
nrow=20,ncol=2)
w <- w/rowSums(w)
w <- cbind(pmax(rnorm(20,0.15,0.005),0),w)
w <- w/rowSums(w)
conStruct::make.structure.plot(w)
|
/inst/doc/run-conStruct.R
|
no_license
|
cran/conStruct
|
R
| false
| false
| 2,788
|
r
|
## ---- echo = FALSE------------------------------------------------------------
# knitr chunk options: collapse source and output, prefix output lines with #>
knitr::opts_chunk$set(collapse = TRUE, comment = "#>")
## -----------------------------------------------------------------------------
# load the conStruct package and its bundled example dataset
library(conStruct)
data(conStruct.data)
## ----eval=FALSE---------------------------------------------------------------
# # load the example dataset
# data(conStruct.data)
#
# # run a conStruct analysis
#
# # you have to specify:
# # the number of layers (K)
# # the allele frequency data (freqs)
# # the geographic distance matrix (geoDist)
# # the sampling coordinates (coords)
#
# my.run <- conStruct(spatial = TRUE,
# K = 3,
# freqs = conStruct.data$allele.frequencies,
# geoDist = conStruct.data$geoDist,
# coords = conStruct.data$coords,
# prefix = "spK3")
## ----eval=FALSE---------------------------------------------------------------
# # load the example dataset
# data(conStruct.data)
#
# # run a conStruct analysis
#
# # you have to specify:
# # the number of layers (K)
# # the allele frequency data (freqs)
# # the sampling coordinates (coords)
# #
# # if you're running the nonspatial model,
# # you do not have to specify
# # the geographic distance matrix (geoDist)
#
# my.run <- conStruct(spatial = FALSE,
# K = 2,
# freqs = conStruct.data$allele.frequencies,
# geoDist = NULL,
# coords = conStruct.data$coords,
# prefix = "nspK2")
## ----eval=FALSE---------------------------------------------------------------
# my.run <- conStruct(spatial = TRUE,
# K = 3,
# freqs = conStruct.data$allele.frequencies,
# geoDist = conStruct.data$geoDist,
# coords = conStruct.data$coords,
# prefix = "spK3",
# n.chains = 1,
# n.iter = 1000,
# make.figs = TRUE,
# save.files = TRUE)
## ----echo=FALSE,fig.width=7,fig.height=2.7------------------------------------
# Simulated MCMC trace plots illustrating chain diagnostics (not real output):
# (a) a stationary, well-mixed trace; (b) a trace still drifting upward, i.e.
# not converged; (c) a trace jumping between two modes.
par(mfrow=c(1,3),mar=c(4,3,1.5,1))
plot(c(0,rnorm(500,1,0.2)),type='l',
     xlab="",yaxt='n',ylab="")
mtext(side=2,text="parameter estimate",padj=-1)
mtext(side=3,text="(a) looks good",padj=-0.1)
# log-curve drift plus noise mimics a chain that has not reached stationarity
plot(c(0,rnorm(500,c(log(seq(0,1,length.out=500))),0.2)),type='l',
     xlab="",yaxt='n',ylab="")
mtext(side=1,text="mcmc iterations",padj=2.6)
mtext(side=3,text="(b) hasn't converged",padj=-0.1)
# piecewise means (1 -> 3 -> 1) mimic a multi-modal posterior
plot(c(0,rnorm(150,1,0.2),rnorm(200,3,0.2),rnorm(150,1,0.2)),type='l',
     xlab="",yaxt='n',ylab="")
mtext(side=3,text="(c) multi-modal",padj=-0.1)
## ----echo=FALSE,fig.width=7,fig.height=3--------------------------------------
# Simulate admixture proportions for 20 samples: two layers with random
# weights, plus a small near-constant third layer prepended; rows are
# normalized to sum to 1, then drawn as a STRUCTURE-style bar plot.
w <- matrix(rnorm(40,sample(2:10,40,replace=TRUE),1),
            nrow=20,ncol=2)
w <- w/rowSums(w)
w <- cbind(pmax(rnorm(20,0.15,0.005),0),w)
w <- w/rowSums(w)
conStruct::make.structure.plot(w)
|
## This is is one of several files containing scripts and functions used in processing and analysis of data for Matthew Dufort's Ph.D. dissertation at the University of Minnesota, titled "Coexistence, Ecomorphology, and Diversification in the Avian Family Picidae (Woodpeckers and Allies)."
## this file contains scripts and functions to calculate variables at the subclade level, and to test for relationships between subclade variables
### load packages and data
## load necessary packages
library(ape)
library(geiger)
library(phytools)
library(nlme)
library(laser)
library(DDD)
load(file="Picidae_data_for_distribution_morphology_evolution.RData") # load data needed from morphology and distribution analyses (from Morphology_data_processing.R)
load(file="Picidae_BAMM_data_for_automated_subclade_analyses.RData") # load data objects needed from BAMM analyses (from BAMM_data_prep_and_processing.R)
### generate necessary functions for automated subclade analyses
## the function extractSubclades.all() extracts all subclades with at least min.taxa and at most max.taxa tips from the tree
# as input, it takes phy (a phylogenetic tree of class phylo), min.taxa (the minimum number of taxa for a subclade to be included), and max.taxa (the maximum number of taxa for a subclade to be include)
# it returns a list of phy objects, one for each subclade, with the list elements named with the node numbers from the original tree
## Extract every subclade of `phy` whose tip count lies in [min.taxa, max.taxa].
## Returns a list of phylo objects, named with the node number (as a character
## string) of each subclade's crown node in the original tree.
extractSubclades.all <- function(phy, min.taxa=4, max.taxa=Inf) {
  require(ape)
  n.tips <- length(phy$tip.label)
  internal.nodes <- (n.tips + 1):(n.tips + phy$Nnode)
  # pull out the clade descending from every internal node
  all.clades <- lapply(internal.nodes, function(node) extract.clade(phy, node=node))
  names(all.clades) <- as.character(internal.nodes)
  # keep only clades whose tip count is within the requested bounds
  size.ok <- vapply(all.clades,
                    function(cl) {
                      n <- length(cl$tip.label)
                      n >= min.taxa && n <= max.taxa
                    },
                    logical(1))
  kept <- all.clades[size.ok]
  if (length(kept) == 0) {
    kept <- list()
  }
  kept
}
## the function getSubclades.withData() extracts subclades from a tree, including only subclades that have sufficient taxa with data in a vector or matrix of trait data
# as input, it takes phylist (a list of subclades, each a phylo object), taxondata (the vector of matrix of trait data, with names or rownames corresponding to taxon names), inc.data (boolean to return a treedata object for each subclade; if FALSE, returns a phylo object for each subclade), and min.taxa (the minimum number of taxa for a subclade to be included)
# it returns a list of subclades, either as phylo objects or treedata objects, with the list elements named by the node numbers in the original tree
## Filter a list of subclades down to those with at least `min.taxa` tips that
## have entries in the trait data.
##
## Arguments:
##   phylist   - list of subclades, each a phylo object
##   taxondata - trait data; a matrix or data.frame with taxon names as
##               rownames, or a named vector
##   inc.data  - if TRUE, return geiger::treedata objects (tree + matched
##               data); if FALSE, return the phylo objects unchanged
##   min.taxa  - minimum number of tips with data for a subclade to be kept
##   quiet     - if FALSE, print per-subclade matching diagnostics
## Returns a list (possibly empty) of subclades, named as in `phylist`.
getSubclades.withData <- function(phylist, taxondata, inc.data=TRUE, min.taxa=4, quiet=TRUE) {
  require(geiger)
  # get taxon names from the trait data; fail fast on unsupported input
  # (the original left taxon.names undefined for e.g. data.frame input,
  # producing a confusing "object not found" error inside the loop)
  if (is.matrix(taxondata) || is.data.frame(taxondata)) {
    taxon.names <- rownames(taxondata)
  } else if (is.vector(taxondata)) {
    taxon.names <- names(taxondata)
  } else {
    stop("taxondata must be a matrix/data.frame with taxon rownames or a named vector")
  }
  subclades.new <- list()
  # loop over subclades, testing if each has at least min.taxa tips with data
  j <- 0
  for (i in seq_along(phylist)) {
    if (!quiet) print(i)
    if (!quiet) print(phylist[[i]]$tip.label %in% taxon.names)
    if (sum(phylist[[i]]$tip.label %in% taxon.names) >= min.taxa) {
      j <- j + 1
      if (inc.data) {
        subclades.new[[j]] <- treedata(phy = phylist[[i]], data=taxondata, warnings=FALSE)
      } else {
        subclades.new[[j]] <- phylist[[i]]
      }
      names(subclades.new)[j] <- names(phylist)[i]
    }
  }
  return(subclades.new)
}
## the function areOverlappingSubclades() tests a list of two or more subclades (or trees) to determine if there is any overlap in the tips included
# as input, it takes phylist (a list of subclades, each a phylo object), and getoverlaps (boolean to return overlapping taxa)
# it returns TRUE if any of the subclades in phylist share taxa, and FALSE if there are no shared taxa among them; if getoverlaps=TRUE, it returns a list containing the test value (TRUE or FALSE), and a vector of the taxa shard among subclades
## Test whether any taxa are shared among the subclades/trees in `phylist`.
## Handles lists of phylo objects or treedata objects (detected by whether the
## first item carries a $data element).
## Returns FALSE when the subclades are disjoint, TRUE when they overlap, and
## (when getoverlaps=TRUE) a list with the test result plus the shared taxa.
areOverlappingSubclades <- function(phylist, getoverlaps=FALSE) {
  # collect the tip labels of every subclade into one vector
  if (!is.null(phylist[[1]]$data)) { # treedata objects keep the tree in $phy
    all.tips <- unlist(lapply(phylist, FUN = function(x) x$phy$tip.label))
  } else {
    all.tips <- unlist(lapply(phylist, FUN = function(x) x$tip.label))
  }
  # any repeated tip label means at least two subclades share a taxon
  is.dup <- duplicated(all.tips)
  if (!any(is.dup)) {
    FALSE
  } else if (getoverlaps) {
    list(test=TRUE, overlaps=all.tips[is.dup])
  } else {
    TRUE
  }
}
## the function subcladeCombinations.all() determines all sets of reciprocally monophyletic subclades meeting a set of criteria, and returns them as a list of lists of phylo objects
# this sped-up version generates a pairwise matrix of overlapping clades, then checks if any of the subclades in the combination are TRUE in the matrix (and therefore takes advantage of speed-ups with vectorization)
# for large trees, there is a VERY large number of possible combinations, and using this function is not advisable
# as input, it takes phylist (a list of subclades, each a phylo object), min.clades (the minimum number of clades to include in a combination), and max.clades (the maximum number of clades to include in a combination)
# it returns a list of subclade combinations, each a list of phylo objects
subcladeCombinations.all <- function(phylist, min.clades=3, max.clades=Inf) {
  # cannot ask for more clades per combination than there are subclades
  if (max.clades > length(phylist)) max.clades <- length(phylist)
  # generate matrix of pairwise subclade overlaps
  # (precomputed once so each candidate combination can be screened with a
  # single vectorized matrix lookup rather than repeated tip-label comparisons)
  subclade.overlap.pairwise <- matrix(nrow=length(phylist), ncol=length(phylist))
  for (i in 1:nrow(subclade.overlap.pairwise)) {
    for (j in 1:ncol(subclade.overlap.pairwise)) {
      subclade.overlap.pairwise[i,j] <- areOverlappingSubclades(list(phylist[[i]], phylist[[j]]))
    }
  }
  subclade.names <- names(phylist) # get the subclade names
  combinations <- list() # initialize list to store subclade combinations
  complete <- FALSE # boolean to end search
  k <- 0
  for (nclades in (min.clades:max.clades)) { # loop over number of subclades to include in set
    if (!complete) {
      length.last <- length(combinations)
      combinations.to.test <- combn(x=(1:length(subclade.names)), m=nclades, simplify=TRUE) # generate a matrix of combinations to test
      print(paste("Testing ", ncol(combinations.to.test), " combinations for ", nclades, " clades.", sep=""))
      # test each proposed combination for reciprocal monophyly; if they are reciprocally monophyletic, add to list
      for (i in 1:ncol(combinations.to.test)) {
        if ((i %% 10000) == 0) print(paste("Testing combination ",i, sep="")) # progress report every 10000 combinations
        # a combination is valid only if no pair within it overlaps
        pairwise.combinations.temp <- combn(x=combinations.to.test[,i], m=2, simplify=TRUE)
        if (!any(subclade.overlap.pairwise[cbind(pairwise.combinations.temp[1,],pairwise.combinations.temp[2,])])) {
          k <- k+1
          combinations[[k]] <- subclade.names[combinations.to.test[,i]]
        }
      }
      # test if any combinations were added for this number of subclades, and terminate if none were
      # (if no set of n clades works, no set of n+1 clades can work either)
      if (length(combinations)==length.last) {
        complete <- TRUE
        print(paste("No successful combinations for ", nclades, " clades; stopping search.", sep=""))
      }
    }
  }
  return(combinations)
}
## the function subcladeCombinations.random() generates a random sample of combinations of reciprocally monophyletic subclades meeting a set of criteria
# this samples by selecting a subclade at random, then selecting another from all the possible subclades that don't overlap the first, and continuing doing that until there aren't any more possibilities; this approach probably leads to the same subclades being selected repeatedly, as certain isolated subclades are almost always going to be suitable
# as input, it takes phylist (a list of subclades, each a phylo object), ncombs (the maximum number of combinations to return), min.clades (the minimum number of subclades to include in a combination), max.clades (the maximum number of subclades to include in a combination), min.taxa (the minimum number of taxa for a subclade to be considered for inclusion), max.fails (the maximum number of failures before halting the search), and report (boolean to output status updates to console)
# it returns a list of subclade combinations, each a list of phylo objects
## Draw up to `ncombs` random combinations of reciprocally monophyletic
## subclades. Each combination is seeded with a random subclade and grown by
## repeatedly adding a subclade chosen at random from those overlapping none
## of the subclades already selected.
##
## Arguments:
##   phylist    - list of subclades (phylo or treedata objects), named by node
##   ncombs     - maximum number of distinct combinations to return
##   min.clades - minimum subclades per combination for it to be kept
##   max.clades - maximum subclades per combination
##   min.taxa   - subclades with fewer tips than this are dropped up front
##   max.fails  - stop after this many rejected or duplicate combinations
##   report     - currently unused in the body; retained for interface
##                compatibility with existing callers
## Returns a list of combinations, each a sorted character vector of node names.
subcladeCombinations.random <- function(phylist, ncombs=1000, min.clades=5, max.clades=Inf, min.taxa=4, max.fails=1e6, report=TRUE) {
  # coerce treedata objects to their phylo component; drop undersized subclades
  for (i in names(phylist)) {
    # inherits() is robust to objects with multiple classes, where the
    # original class(x) != "phylo" comparison yields a length > 1 condition
    # (an error in R >= 4.2)
    if (!inherits(phylist[[i]], "phylo")) {
      if (inherits(phylist[[i]]$phy, "phylo")) {
        phylist[[i]] <- phylist[[i]]$phy
      } else {
        cat("\nError: item ", i, " in phylist is not a phylo or treedata object.\n", sep="")
        return()
      }
    }
    if (length(phylist[[i]]$tip.label) < min.taxa) phylist[[i]] <- NULL # drop subclades with too few taxa
  }
  if (max.clades > length(phylist)) max.clades <- length(phylist)
  subclade.names <- names(phylist) # extract the subclade names
  # generate matrix of pairwise subclade overlaps (precomputed once for speed)
  subclade.overlap.pairwise <- matrix(nrow=length(phylist), ncol=length(phylist), dimnames=list(subclade.names, subclade.names))
  for (i in seq_len(nrow(subclade.overlap.pairwise))) {
    for (j in seq_len(ncol(subclade.overlap.pairwise))) {
      subclade.overlap.pairwise[i,j] <- areOverlappingSubclades(list(phylist[[i]], phylist[[j]]))
    }
  }
  combinations <- list() # the combinations that will be returned
  all.done <- FALSE
  z <- 1
  fails <- 0
  while ((length(combinations) < ncombs) & (!all.done)) {
    combination.done <- FALSE
    combination.temp <- sample(x=subclade.names, size=1) # seed the combination with one random subclade
    q <- 1
    while ((length(combination.temp) < max.clades) & (!combination.done)) {
      # subclades that do not overlap any subclade already in the combination
      subclades.possible.additions <- colnames(subclade.overlap.pairwise)[which(rowSums(as.matrix(subclade.overlap.pairwise[,combination.temp]))==0)]
      if (length(subclades.possible.additions) == 0) {
        combination.done <- TRUE
      } else {
        q <- q + 1
        combination.temp[q] <- sample(x=subclades.possible.additions, size=1)
      }
    }
    combination.temp <- sort(combination.temp)
    # keep the combination only if it is large enough and not already recorded
    if ((length(combination.temp) >= min.clades) & (length(which(sapply(combinations, identical, combination.temp, simplify=TRUE)==TRUE)) < 1)) {
      combinations[[z]] <- combination.temp
      cat("Found combination ", z, "\n", sep="")
      z <- z + 1
    } else {
      fails <- fails+1
    }
    if (fails >= max.fails) { # >= (rather than ==) guards against ever overshooting the cap
      all.done <- TRUE
      print(paste("Reached maximum failures. Returning", length(combinations), "combinations"))
    }
  }
  return(combinations)
}
## the function subcladeCombinations.sequential() determines a set of combinations of reciprocally monophyletic subclades by working its way down the tree; it slices the tree at each node and determines all valid subclades below that slice
# as input, it takes phy (a tree as a phylo object), min.taxa (the minimum number of taxa for a subclade to be included), min.clades (the minimum number of subclades to include in a combination), and max.clades (the maximum number of subclades to include in a combination)
# it returns a list of subclade combinations, each a list of phylo objects
subcladeCombinations.sequential <- function(phy, min.taxa=4, min.clades=5, max.clades=Inf) {
  require(ape)
  combinations <- list()
  # node depths measured from the root (0) toward the tips, sorted so the tree
  # is sliced progressively closer to the present
  phy.nodedepth.sorted <- sort((max(branching.times(phy)) - branching.times(phy)), decreasing=FALSE)
  l <- 0
  for (i in 1:length(phy.nodedepth.sorted)) {
    candidate.nodes <- phy$edge[,2][(node.depth.edgelength(phy)[phy$edge[,1]] <= phy.nodedepth.sorted[i]) & (node.depth.edgelength(phy)[phy$edge[,2]] > phy.nodedepth.sorted[i]) & (phy$edge[,2] > length(phy$tip.label))] # find all the descendant nodes from edges cut at current step in phy.nodedepth.sorted
    # identify nodes just below the branching point I'm examining
    # (internal nodes have numbers greater than the number of tips)
    candidate.nodes <- candidate.nodes[candidate.nodes > length(phy$tip.label)]
    # extract combination (if possible) from list of descendant subclades
    if (length(candidate.nodes) >= min.clades) {
      candidate.combination <- character()
      for (j in 1:length(candidate.nodes)) {
        # include only subclades with at least min.taxa tips
        if (length(extract.clade(phy, node=candidate.nodes[j], root.edge=0)$tip.label) >= min.taxa) {
          candidate.combination <- c(candidate.combination, candidate.nodes[j])
        }
      }
      if ((length(candidate.combination) >= min.clades) & (length(candidate.combination) <= max.clades)) {
        l <- l + 1
        combinations[[l]] <- candidate.combination
      }
    }
  }
  # different slice depths can yield the same set of subclades; keep one copy
  combinations <- combinations[!duplicated(combinations)]
  return(combinations)
}
## this function determines all members of each subclade, including those not in the tree; it uses a list of taxon proxies, and checks these proxies against the actual taxa in the subclade; it has several options for returning these taxa
# as input, it takes phylist (a list of subclades, each a phylo object or treedata object), taxon.proxies (a list containing a vector of proxies for each taxon), and to_return (a switching variable, which allows the user to select whether to return the missing taxa ("missing"), all taxa ("full"), or a list of the included and missing taxa ("split"))
# it returns a list containing vectors with the set of taxa specified by to_return
## For each subclade, find the taxa absent from the tree whose proxy taxa are
## all present, and return the subclade membership in the requested format.
##
## Arguments:
##   phylist       - list of subclades, each a phylo or treedata object
##   taxon.proxies - named list; each element is the vector of proxy taxa that
##                   must all be in the subclade for the focal taxon to count
##   to_return     - "missing" (proxied taxa only), "full" (tips plus proxied
##                   taxa), or "split" (list of included and missing taxa)
## Returns a list (named as phylist) with one element per subclade.
subclades.fulltaxlist <- function(phylist, taxon.proxies, to_return="full") {
  subclades.taxa_to_include <- list()
  for (i in seq_along(phylist)) {
    # support both phylo objects ($tip.label) and treedata objects
    # ($phy$tip.label); the original checked both places in the proxy test but
    # used only $tip.label when assembling the "full"/"split" output, silently
    # dropping the tips of treedata subclades
    if (!is.null(phylist[[i]]$phy)) {
      tips <- phylist[[i]]$phy$tip.label
    } else {
      tips <- phylist[[i]]$tip.label
    }
    subclades.taxa_to_include.temp <- character()
    for (j in seq_along(taxon.proxies)) { # loop over list of taxa
      # if all proxies are present in the subclade, count the focal taxon
      if (all(taxon.proxies[[j]] %in% tips)) {
        subclades.taxa_to_include.temp <- c(subclades.taxa_to_include.temp, names(taxon.proxies)[j])
      }
    }
    subclades.taxa_to_include[[i]] <- switch(to_return,
                                             missing = subclades.taxa_to_include.temp,
                                             full = c(tips, subclades.taxa_to_include.temp),
                                             split = list(included=tips, missing=subclades.taxa_to_include.temp))
  }
  names(subclades.taxa_to_include) <- names(phylist)
  return(subclades.taxa_to_include)
}
# the function subclade.fulltaxlist() is the same as subclades.fulltaxlist(), but it acts only on a single subclade; this allows looping or applying over a list of treedata objects and adding the full membership to the treedata object
# as input, it takes phy (a subclades, either a phylo object or treedata object), taxon.proxies (a list containing a vector of proxies for each taxon), and to_return (a switching variable, which allows the user to select whether to return the missing taxa ("missing"), all taxa ("full"), or a list of the included and missing taxa ("split"))
# it returns a vector with the set of taxa specified by to_return
## Determine the full membership of a single subclade `phy`, adding any taxa
## from `taxon.proxies` whose proxy taxa are all present among the tips.
## `to_return` selects the output: "missing" (proxied taxa only), "full"
## (tips plus proxied taxa), or "split" (list of included and missing taxa).
subclade.fulltaxlist <- function(phy, taxon.proxies, to_return="full") {
  # a focal taxon counts as present when every one of its proxies is a tip
  proxied <- vapply(seq_along(taxon.proxies),
                    function(j) all(taxon.proxies[[j]] %in% phy$tip.label),
                    logical(1))
  extra.taxa <- as.character(names(taxon.proxies)[proxied])
  switch(to_return,
         missing = extra.taxa,
         full = c(phy$tip.label, extra.taxa),
         split = list(included=phy$tip.label, missing=extra.taxa))
}
## the function getTreedata.subclades() extracts the backbone tree with subclades, and builds a treedata object including the subclade data
# as input, it takes phy (the full tree as a phylo object), subclade.combination (a vector containing the node numbers of the subclades), and subclade.data (the data for the subclades, as a matrix with node numbers as the rownames)
# it returns a treedata object, where the returned tree has only the subclades as tips, with the backbone of those nodes retained
getTreedata.subclades <- function(phy, subclade.combination, subclade.data) {
  # keep only the data rows for the requested subclades
  subclade.data.selected <- subset(subclade.data, row.names(subclade.data) %in% subclade.combination)
  subclades.temp <- list()
  subclades.edge.length.temp <- numeric()
  # get the stem edge length for each subclade, and rename one tip in each subclade with the subclade name
  for (i in 1:length(subclade.combination)) {
    subclades.temp[[i]] <- extract.clade(phy, node=as.numeric(subclade.combination[i]))
    subclades.edge.length.temp[i] <- phy$edge.length[which(phy$edge[,2]==as.numeric(subclade.combination[i]))] # find the stem edge length for the subclade
    phy$tip.label[phy$tip.label==subclades.temp[[i]]$tip.label[1]] <- subclade.combination[i] # rename one tip with the name of the subclade
  }
  # loop over subclades, dropping all tips but the one set to the subclade name above; this is done separately, as dropping tips could change the node numbers and make the step above not work properly
  for (i in 1:length(subclade.combination)) {
    phy <- drop.tip(phy, tip=subclades.temp[[i]]$tip.label[-1]) # drop the remaining tips from the subclade
    phy$edge.length[which(phy$edge[,2]==which(phy$tip.label==subclade.combination[i]))] <- subclades.edge.length.temp[i] # finds the edge that has the subclade name as its descendant node, and changes the length
  }
  phy.treedata <- treedata(phy, data=subclade.data.selected, warnings=FALSE) # generate treedata object with backbone tree and subclade data
  return(phy.treedata)
}
### generate full taxon lists (to match taxa not on tree with subclades)
## the function read.taxon.proxy.list() reads a file of taxon proxies and formats them for later use
# as input, it takes filename (the location of the file containing the taxon proxies, with each taxon name followed by all the proxy taxa that must be present for the focal taxon to be included)
# it returns a list of character vectors, where each list element is named with the focal taxon name, and the vector contains all the proxy taxa that must be present for the focal taxon to be included
## Read a taxon-proxy file: one comma-separated line per focal taxon, with the
## focal taxon name first and its proxy taxa after it. Taxa with no proxies
## listed are dropped. Returns a named list of character vectors of proxies.
read.taxon.proxy.list <- function(filename) {
  raw.lines <- scan(file=filename, what="character", sep="\n")
  fields <- strsplit(raw.lines, split=",")
  # the first field on each line is the focal taxon; the rest are its proxies
  proxies <- lapply(fields, function(f) f[-1])
  names(proxies) <- vapply(fields, function(f) f[1], character(1))
  # keep only taxa that actually have proxies listed
  Filter(function(p) length(p) > 0, proxies)
}
## read in files of taxon proxies
# proxies map taxa absent from a tree onto sets of sampled taxa that stand in
# for them; separate files exist for the full tree and for each
# individual-inclusion variant of the morphology tree
picidae.RAxML.taxon.subclade.proxies <- list()
picidae.RAxML.taxon.subclade.proxies[["full_tree"]] <- read.taxon.proxy.list(filename="picidae_taxon_proxies_for_automated_subclade_analyses_full_tree.csv")
picidae.RAxML.taxon.subclade.proxies[["morph_tree"]][["all_inds"]] <- read.taxon.proxy.list(filename="picidae_taxon_proxies_for_automated_subclade_analyses_morph_tree_all_inds.csv")
picidae.RAxML.taxon.subclade.proxies[["morph_tree"]][["complete_ind_only"]] <- read.taxon.proxy.list(filename="picidae_taxon_proxies_for_automated_subclade_analyses_morph_tree_complete_ind_only.csv")
### extract subclades from full tree and morph trees
## extract subclades from full trees
picidae.RAxML.all.BEAST_calibrated.with_proxies.subclades <- extractSubclades.all(picidae.RAxML.all.BEAST_calibrated.with_proxies)
# extract subclades from morph trees
# NOTE(review): assumes picidae.morph.log.fully_reduced.treedata (loaded from
# the .RData file above) has elements "all_inds" and "complete_ind_only",
# each with a $phy tree -- confirm against the saved workspace
picidae.morph.log.fully_reduced.treedata.subclades <- list()
for (i in c("all_inds", "complete_ind_only")) {
  picidae.morph.log.fully_reduced.treedata.subclades[[i]] <- extractSubclades.all(picidae.morph.log.fully_reduced.treedata[[i]]$phy)
}
rm(i)
### generate treedata-like objects for each subclade, for the data variants I'm using
## combine all the data into a treedata-like object that has a bunch of different sets of data for each subclade, for picidae
# For every morph-tree subclade this bundles: the subclade tree, its
# proxy-derived missing taxa, the matching clade on the full tree (plus that
# clade's missing taxa), and the morphology and range-overlap data restricted
# to the subclade's tips (and, for overlaps, its missing taxa).
picidae.morph.log.fully_reduced.subclades.treedata <- list()
for (i in names(picidae.morph.log.fully_reduced.treedata.subclades)) { # loop over individual inclusion
  for (j in names(picidae.morph.log.fully_reduced.treedata.subclades[[i]])) { # loop over subclades
    picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]] <- list()
    picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["phy"]] <- picidae.morph.log.fully_reduced.treedata.subclades[[i]][[j]]
    # taxa absent from the morph tree whose proxies all fall inside this subclade
    picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["missing_taxa"]] <- subclade.fulltaxlist(phy=picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["phy"]], taxon.proxies=picidae.RAxML.taxon.subclade.proxies[["morph_tree"]][[i]], to_return="missing")
    picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["missing_count"]] <- length(picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["missing_taxa"]])
    # locate the corresponding clade on the full (BEAST-calibrated) tree via the MRCA of the subclade's tips
    picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["node.full_tree"]] <- getMRCA(phy=picidae.RAxML.all.BEAST_calibrated.with_proxies, tip=picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["phy"]]$tip.label)
    picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["phy.full_tree"]] <- extract.clade(phy=picidae.RAxML.all.BEAST_calibrated.with_proxies, node=picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["node.full_tree"]])
    picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["missing_taxa.full_tree"]] <- subclade.fulltaxlist(phy=picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["phy.full_tree"]], taxon.proxies=picidae.RAxML.taxon.subclade.proxies[["full_tree"]], to_return="missing")
    picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["missing_count.full_tree"]] <- length(picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["missing_taxa.full_tree"]])
    picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["geomean"]] <- picidae.morph.log.fully_reduced.geomean[[i]][[j]][picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["phy"]]$tip.label] # pull in the geomean data
    picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["phyl_pca"]] <- picidae.morph.log.fully_reduced.phyl_pca[[i]][[j]]$pca$S[picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["phy"]]$tip.label,] # pull in the unscaled PCA-rotated data
    picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["geomean_scaled.phyl_pca"]] <- picidae.morph.log.fully_reduced.geomean_scaled.phyl_pca[[i]][[j]]$pca$S[picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["phy"]]$tip.label,] # pull in the geomean-scaled PCA-rotated data
    # range overlaps for the subclade's tips plus its proxy-inferred missing taxa
    picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["overlaps.scaled"]] <- picidae.summed_overlaps.shp.BirdLife.UnaryUnion.buffer0[["mytax"]][["migratory"]][["overlaps.scaled"]][c(picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["phy"]]$tip.label, picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["missing_taxa"]])]
    # get overlaps scaled by focal taxon range and similarity in geomean, unscaled PCA, and geomean-scaled PCA
    picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["overlaps.euclidean_scaled"]] <- list()
    for (q in c("geomean", "phyl_pca", "geomean_scaled.phyl_pca")) {
      picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["overlaps.euclidean_scaled"]][[q]] <- picidae.summed_overlaps.shp.BirdLife.UnaryUnion.buffer0.euclidean_scaled[["migratory"]][[q]][[i]][[j]][["inc_no_phylo"]][c(picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["phy"]]$tip.label, picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["missing_taxa"]])]
    }
  }
}
rm(i,j,q)
### fit models of diversification and morphological evolution to the entire data set
## fit diversification models to full tree
# three DDD-package models per tree: constant-rate birth-death (tdmodel=0),
# time-dependent birth-death (tdmodel=1), and diversity-dependent (ddmodel=1);
# missnumspec accounts for species (out of 237) not sampled in the tree;
# try() keeps the script running if an optimization fails
picidae.divrate.models <- list()
picidae.divrate.models[["full_tree"]] <- list()
picidae.divrate.models[["full_tree"]][["constant"]] <- try(bd_ML(branching.times(picidae.RAxML.all.BEAST_calibrated.with_proxies), missnumspec=237-length(picidae.RAxML.all.BEAST_calibrated.with_proxies$tip.label), tdmodel=0))
picidae.divrate.models[["full_tree"]][["time_dependent"]] <- try(bd_ML(branching.times(picidae.RAxML.all.BEAST_calibrated.with_proxies), missnumspec=237-length(picidae.RAxML.all.BEAST_calibrated.with_proxies$tip.label), tdmodel=1, idparsopt=1:3, initparsopt=c(0.1, 0.05, 0.1)))
picidae.divrate.models[["full_tree"]][["diversity_dependent"]] <- try(dd_ML(branching.times(picidae.RAxML.all.BEAST_calibrated.with_proxies), missnumspec=237-length(picidae.RAxML.all.BEAST_calibrated.with_proxies$tip.label), ddmodel=1))
## calculate AICc for divrate models of full tree
# AICc = -2*logLik + 2*df + 2*df*(df+1)/(n - df - 1), with n = number of nodes
for (i in names(picidae.divrate.models[["full_tree"]])) {
  picidae.divrate.models[["full_tree"]][[i]][["AICc"]] <- (-2 * picidae.divrate.models[["full_tree"]][[i]]$loglik) + (2 * picidae.divrate.models[["full_tree"]][[i]]$df) + (((2 * picidae.divrate.models[["full_tree"]][[i]]$df) * (picidae.divrate.models[["full_tree"]][[i]]$df + 1)) / (picidae.RAxML.all.BEAST_calibrated.with_proxies$Nnode - picidae.divrate.models[["full_tree"]][[i]]$df - 1))
}
## fit diversification models to morph tree
# same three models, fit to the "all_inds" variant of the morphology tree
picidae.divrate.models[["morph_tree"]] <- list()
picidae.divrate.models[["morph_tree"]][["constant"]] <- try(bd_ML(branching.times(picidae.morph.log.fully_reduced.treedata[["all_inds"]]$phy), missnumspec=237-length(picidae.morph.log.fully_reduced.treedata[["all_inds"]]$phy$tip.label), tdmodel=0))
picidae.divrate.models[["morph_tree"]][["time_dependent"]] <- try(bd_ML(branching.times(picidae.morph.log.fully_reduced.treedata[["all_inds"]]$phy), missnumspec=237-length(picidae.morph.log.fully_reduced.treedata[["all_inds"]]$phy$tip.label), tdmodel=1, idparsopt=1:3, initparsopt=c(0.1, 0.05, 0.1)))
picidae.divrate.models[["morph_tree"]][["diversity_dependent"]] <- try(dd_ML(branching.times(picidae.morph.log.fully_reduced.treedata[["all_inds"]]$phy), missnumspec=237-length(picidae.morph.log.fully_reduced.treedata[["all_inds"]]$phy$tip.label), ddmodel=1))
## calculate AICc for divrate models of morph tree
for (i in names(picidae.divrate.models[["morph_tree"]])) {
  picidae.divrate.models[["morph_tree"]][[i]][["AICc"]] <- (-2 * picidae.divrate.models[["morph_tree"]][[i]]$loglik) + (2 * picidae.divrate.models[["morph_tree"]][[i]]$df) + (((2 * picidae.divrate.models[["morph_tree"]][[i]]$df) * (picidae.divrate.models[["morph_tree"]][[i]]$df + 1)) / (picidae.morph.log.fully_reduced.treedata[["all_inds"]]$phy$Nnode - picidae.divrate.models[["morph_tree"]][[i]]$df - 1))
}
## summarize divrate model results with aicc
# one console line per tree/model combination for quick AICc comparison
for (i in names(picidae.divrate.models)) {
  for (q in names(picidae.divrate.models[[i]])) {
    cat(i, q, picidae.divrate.models[[i]][[q]]$AICc, "\n", sep=" ")
  }
}
rm(i,q)
## fit morphological evolution models to morph tree with geomean, phyl_pca, and geomean_scaled.phyl_pca (with the same models I used below)
# fit four fitContinuous models (BM, OU, trend, EB) to each of three trait
# representations, for each individual-inclusion variant of the morph tree
picidae.morphrate.models <- list()
for (i in c("all_inds", "complete_ind_only")) {
  cat("\nStarting model fitting for", i, "\n", sep=" ")
  # for geomean (a single trait: size, indexed to the tree's tip labels)
  cat("\nStarting geomean models.\n")
  for (q in c("BM","OU","trend","EB")) {
    cat("Starting ", q, " model\n", sep="")
    picidae.morphrate.models[[i]][["geomean"]][[q]] <- fitContinuous(phy=picidae.morph.log.fully_reduced.treedata[[i]]$phy, dat=picidae.morph.log.fully_reduced.geomean[[i]][picidae.morph.log.fully_reduced.treedata[[i]]$phy$tip.label], model=q)
  }
  # for phyl_pca (the PCA score matrix $pca$S)
  cat("\nStarting phyl_pca models.\n")
  for (q in c("BM","OU","trend","EB")) {
    cat("Starting ", q, " model\n", sep="")
    picidae.morphrate.models[[i]][["phyl_pca"]][[q]] <- fitContinuous(phy=picidae.morph.log.fully_reduced.treedata[[i]]$phy, dat=picidae.morph.log.fully_reduced.phyl_pca[[i]]$pca$S, model=q)
  }
  # for geomean_scaled.phyl_pca
  cat("\nStarting geomean_scaled phyl_pca models.\n")
  for (q in c("BM","OU","trend","EB")) {
    cat("Starting ", q, " model\n", sep="")
    picidae.morphrate.models[[i]][["geomean_scaled.phyl_pca"]][[q]] <- fitContinuous(phy=picidae.morph.log.fully_reduced.treedata[[i]]$phy, dat=picidae.morph.log.fully_reduced.geomean_scaled.phyl_pca[[i]]$pca$S, model=q)
  }
}
# summarize morphological evolution model fits (with AICc)
# "gfit" objects expose $opt$aicc directly; for "gfits" objects the first
# element's AICc is reported
for (i in names(picidae.morphrate.models)) {
  for (q in names(picidae.morphrate.models[[i]])) {
    for (r in names(picidae.morphrate.models[[i]][[q]])) {
      if ("gfit" %in% class(picidae.morphrate.models[[i]][[q]][[r]])) {
        cat(i, q, r, picidae.morphrate.models[[i]][[q]][[r]]$opt$aicc, "\n", sep=" ")
      } else if ("gfits" %in% class(picidae.morphrate.models[[i]][[q]][[r]])) {
        cat(i, q, r, picidae.morphrate.models[[i]][[q]][[r]][[1]]$opt$aicc, "\n", sep=" ")
      }
    }
  }
}
rm(i,q,r)
### calculate subclade metrics
## the function calcMetrics.subclades() calculates a huge range of metrics for a list of subclades, including fitting models of diversification and trait evolution to the subclade
# as input, it takes subclades.treedata (a list of treedata-like objects, each containing a phy and other data objects for a single subclade), BAMM_divrates (the subclade average diversification rates from BAMM), BAMM_morphrates (the subclade average trait evolution rates from BAMM), metrics (a character vector containing the metrics to calculate), return_format (format of object to be returned; can be "matrix" or "list"), and quiet (boolean to output status to console)
# it returns either a matrix or list of metrics by subclade
calcMetrics.subclades <- function(subclades.treedata, BAMM_divrates=NULL, BAMM_morphrates=NULL, metrics=c("ntaxa", "ntaxa.on_morph_tree", "total_div", "crown_age", "divrate.ms.e10", "divrate.ms.e50", "divrate.ms.e90", "divrate.ML.constant.rate", "divrate.ML.constant.AICc", "divrate.ML.constant.AIC", "divrate.ML.time_dependent.rate", "divrate.ML.time_dependent.lambda1", "divrate.ML.time_dependent.mu1", "divrate.ML.time_dependent.AICc", "divrate.ML.time_dependent.AIC", "divrate.ML.diversity_dependent.rate", "divrate.ML.diversity_dependent.K", "divrate.ML.diversity_dependent.AICc", "divrate.ML.diversity_dependent.AIC", "divrate.BAMM", "divrate.BAMM.morph_tree", "gamma", "morphrate.geomean.BM.sigsq", "morphrate.geomean.BM.AICc", "morphrate.geomean.BM.AIC", "morphrate.geomean.OU.sigsq", "morphrate.geomean.OU.alpha", "morphrate.geomean.OU.AICc", "morphrate.geomean.OU.AIC", "morphrate.geomean.trend.slope", "morphrate.geomean.trend.sigsq", "morphrate.geomean.trend.AICc", "morphrate.geomean.trend.AIC", "morphrate.geomean.EB.alpha", "morphrate.geomean.EB.sigsq", "morphrate.geomean.EB.AICc", "morphrate.geomean.EB.AIC", "morphrate.geomean.delta.delta", "morphrate.geomean.delta.sigsq", "morphrate.geomean.delta.AICc", "morphrate.geomean.delta.AIC", "morphrate.geomean.BAMM", "morphrate.phyl_pca.BM.sigsq", "morphrate.phyl_pca.PC1.BM.AICc", "morphrate.phyl_pca.PC1.BM.AIC", "morphrate.phyl_pca.PC1.OU.sigsq", "morphrate.phyl_pca.PC1.OU.alpha", "morphrate.phyl_pca.PC1.OU.AICc", "morphrate.phyl_pca.PC1.OU.AIC", "morphrate.phyl_pca.PC1.trend.slope", "morphrate.phyl_pca.PC1.trend.sigsq", "morphrate.phyl_pca.PC1.trend.AICc", "morphrate.phyl_pca.PC1.trend.AIC", "morphrate.phyl_pca.PC1.EB.alpha", "morphrate.phyl_pca.PC1.EB.sigsq", "morphrate.phyl_pca.PC1.EB.AICc", "morphrate.phyl_pca.PC1.EB.AIC", "morphrate.phyl_pca.PC1.delta.delta", "morphrate.phyl_pca.PC1.delta.sigsq", "morphrate.phyl_pca.PC1.delta.AICc", "morphrate.phyl_pca.PC1.delta.AIC", "morphrate.phyl_pca.PC13.BAMM", 
"morphrate.geomean_scaled.phyl_pca.BM.sigsq", "morphrate.geomean_scaled.phyl_pca.PC1.BM.AICc", "morphrate.geomean_scaled.phyl_pca.PC1.BM.AIC", "morphrate.geomean_scaled.phyl_pca.PC1.OU.sigsq", "morphrate.geomean_scaled.phyl_pca.PC1.OU.alpha", "morphrate.geomean_scaled.phyl_pca.PC1.OU.AICc", "morphrate.geomean_scaled.phyl_pca.PC1.OU.AIC", "morphrate.geomean_scaled.phyl_pca.PC1.trend.slope", "morphrate.geomean_scaled.phyl_pca.PC1.trend.sigsq", "morphrate.geomean_scaled.phyl_pca.PC1.trend.AICc", "morphrate.geomean_scaled.phyl_pca.PC1.trend.AIC", "morphrate.geomean_scaled.phyl_pca.PC1.EB.alpha", "morphrate.geomean_scaled.phyl_pca.PC1.EB.sigsq", "morphrate.geomean_scaled.phyl_pca.PC1.EB.AICc", "morphrate.geomean_scaled.phyl_pca.PC1.EB.AIC", "morphrate.geomean_scaled.phyl_pca.PC1.delta.delta", "morphrate.geomean_scaled.phyl_pca.PC1.delta.sigsq", "morphrate.geomean_scaled.phyl_pca.PC1.delta.AICc", "morphrate.geomean_scaled.phyl_pca.PC1.delta.AIC", "morphrate.geomean_scaled.phyl_pca.PC13.BAMM", "avg_overlaps.rangesize_scaled", "avg_overlaps.euclidean_scaled.geomean", "avg_overlaps.euclidean_scaled.phyl_pca", "avg_overlaps.euclidean_scaled.geomean_scaled.phyl_pca"), return_format="matrix", quiet=TRUE) {
# build a list of vectors to store the various metrics (list above is NOT complete)
# loop over subclades.treedata, which is a list of treedata-like objects, each with a phy, and a bunch of different data
# calculate metrics for each subclade, and store them in the vectors
# reformat the list of vectors if necessary (e.g. to matrix)
# return the reformatted subclade metrics
require(geiger)
require(laser)
for (metric in metrics) { # set up vectors to store subclade data in
assign(metric, value=numeric())
if (!quiet) print(metric)
}
for (i in names(subclades.treedata)) { # loop over subclades, calculating metrics
cat("\nStarting clade ", i, ", ", which(names(subclades.treedata)==i), " of ", length(subclades.treedata), " total subclades.\n\n", sep="")
## diversification and tree-shape stuff
cat("Starting diversification analyses.\n")
# calculate total number of taxa
if ("ntaxa" %in% metrics) ntaxa[[i]] <- length(subclades.treedata[[i]]$phy$tip.label) + subclades.treedata[[i]]$missing_count
if ("ntaxa.on_morph_tree" %in% metrics) ntaxa.on_morph_tree[[i]] <- length(subclades.treedata[[i]]$phy$tip.label)
# calculate total diversification
if ("total_div" %in% metrics) total_div[[i]] <- log(length(subclades.treedata[[i]]$phy$tip.label) + subclades.treedata[[i]]$missing_count)
# calculate clade age
if ("crown_age" %in% metrics) crown_age[[i]] <- max(node.depth.edgelength(subclades.treedata[[i]]$phy))
# calculate Magallon-Sanderson diversification rates
if (length(intersect(c("divrate.ms.e10","divrate.ms.e50","divrate.ms.e90"), metrics)) > 0) cat("Calculating Magallon-Sanderson diversification rates.\n")
if ("divrate.ms.e10" %in% metrics) divrate.ms.e10[[i]] <- geiger::bd.ms(phy=subclades.treedata[[i]]$phy.full_tree, missing=subclades.treedata[[i]]$missing_count.full_tree, crown=TRUE, epsilon=0.10) # calculate Magallon-Sanderson diversification rate with extinction fraction 0.10
if ("divrate.ms.e50" %in% metrics) divrate.ms.e50[[i]] <- geiger::bd.ms(phy=subclades.treedata[[i]]$phy.full_tree, missing=subclades.treedata[[i]]$missing_count.full_tree, crown=TRUE, epsilon=0.50) # calculate Magallon-Sanderson diversification rate with extinction fraction 0.50
if ("divrate.ms.e90" %in% metrics) divrate.ms.e90[[i]] <- geiger::bd.ms(phy=subclades.treedata[[i]]$phy.full_tree, missing=subclades.treedata[[i]]$missing_count.full_tree, crown=TRUE, epsilon=0.90) # calculate Magallon-Sanderson diversification rate with extinction fraction 0.90
# calculate diversification rate using laser
if ("divrate.laser" %in% metrics) {
cat("Calculating laser diversification rate.\n")
divrate.laser[[i]] <- laser::bd(subclades.treedata[[i]]$phy.full_tree)$r # get diversification rate from laser model fitting
}
# fit constant-rate model, and return diversification rate (lambda-mu) and/or AICc
if (length(intersect(c("divrate.ML.constant.rate","divrate.ML.constant.AICc", "divrate.ML.constant.AIC"), metrics)) > 1) {
cat("Fitting constant-rate diversification model.\n")
sink("/dev/null")
divmodel.tmp <- try(bd_ML(branching.times(subclades.treedata[[i]]$phy.full_tree), missnumspec=subclades.treedata[[i]]$missing_count.full_tree, tdmodel=0)) # fit a constant-rate model
sink()
if (class(divmodel.tmp) == "try-error") {
if ("divrate.ML.constant.rate" %in% metrics) divrate.ML.constant.rate[[i]] <- NA
if ("divrate.ML.constant.AICc" %in% metrics) divrate.ML.constant.AICc[[i]] <- NA
if ("divrate.ML.constant.AIC" %in% metrics) divrate.ML.constant.AIC[[i]] <- NA
} else {
if ("divrate.ML.constant.rate" %in% metrics) divrate.ML.constant.rate[[i]] <- with(divmodel.tmp, lambda0-mu0) # extract diversification rate (lambda - mu) from the constant-rate model
if ("divrate.ML.constant.AICc" %in% metrics) divrate.ML.constant.AICc[[i]] <- (-2 * divmodel.tmp$loglik) + (2 * divmodel.tmp$df) + (((2 * divmodel.tmp$df) * (divmodel.tmp$df + 1)) / (subclades.treedata[[i]]$phy$Nnode - divmodel.tmp$df - 1)) # calculate AICc for the constant-rate model
if ("divrate.ML.constant.AIC" %in% metrics) divrate.ML.constant.AIC[[i]] <- (-2 * divmodel.tmp$loglik) + (2 * divmodel.tmp$df) # calculate AIC for the constant-rate model
}
rm(divmodel.tmp) # remove the temporary model
}
# fit time-dependent-rate model, and return diversification rate (lambda-mu), lambda1, mu1, and/or AICc
if (length(intersect(c("divrate.ML.time_dependent.rate", "divrate.ML.time_dependent.lambda1", "divrate.ML.time_dependent.mu1", "divrate.ML.time_dependent.AICc", "divrate.ML.time_dependent.AIC"), metrics)) > 1) {
cat("Fitting time-dependent diversification model.\n")
sink("/dev/null")
divmodel.tmp <- try(bd_ML(branching.times(subclades.treedata[[i]]$phy.full_tree), missnumspec=subclades.treedata[[i]]$missing_count.full_tree, tdmodel=1, idparsopt=1:3, initparsopt=c(0.1, 0.05, 0.1))) # fit a time-dependent-rate model
sink()
if (class(divmodel.tmp) == "try-error") {
if ("divrate.ML.time_dependent.rate" %in% metrics) divrate.ML.time_dependent.rate[[i]] <- NA
if ("divrate.ML.time_dependent.lambda1" %in% metrics) divrate.ML.time_dependent.lambda1[[i]] <- NA
if ("divrate.ML.time_dependent.mu1" %in% metrics) divrate.ML.time_dependent.mu1[[i]] <- NA
if ("divrate.ML.time_dependent.AICc" %in% metrics) divrate.ML.time_dependent.AICc[[i]] <- NA
if ("divrate.ML.time_dependent.AIC" %in% metrics) divrate.ML.time_dependent.AIC[[i]] <- NA
} else {
if ("divrate.ML.time_dependent.rate" %in% metrics) divrate.ML.time_dependent.rate[[i]] <- with(divmodel.tmp, lambda0-mu0) # extract diversification rate (lambda - mu) from the time-dependent-rate model
			if ("divrate.ML.time_dependent.lambda1" %in% metrics) divrate.ML.time_dependent.lambda1[[i]] <- with(divmodel.tmp, lambda1) # extract lambda1 (time-dependence parameter of the speciation rate) from the time-dependent-rate model
			if ("divrate.ML.time_dependent.mu1" %in% metrics) divrate.ML.time_dependent.mu1[[i]] <- with(divmodel.tmp, mu1) # extract mu1 (time-dependence parameter of the extinction rate) from the time-dependent-rate model
if ("divrate.ML.time_dependent.AICc" %in% metrics) divrate.ML.time_dependent.AICc[[i]] <- (-2 * divmodel.tmp$loglik) + (2 * divmodel.tmp$df) + (((2 * divmodel.tmp$df) * (divmodel.tmp$df + 1)) / (subclades.treedata[[i]]$phy$Nnode - divmodel.tmp$df - 1)) # calculate AICc for the time-dependent-rate model
if ("divrate.ML.time_dependent.AIC" %in% metrics) divrate.ML.time_dependent.AIC[[i]] <- (-2 * divmodel.tmp$loglik) + (2 * divmodel.tmp$df) # calculate AIC for the time-dependent-rate model
}
rm(divmodel.tmp) # remove the temporary model
}
# fit diversity-dependent-rate model, and return diversification rate (lambda-mu), K, and/or AICc
if (length(intersect(c("divrate.ML.diversity_dependent.rate", "divrate.ML.diversity_dependent.K", "divrate.ML.diversity_dependent.AICc", "divrate.ML.diversity_dependent.AIC"), metrics)) > 1) {
cat("Fitting diversity-dependent diversification model.\n")
sink("/dev/null")
divmodel.tmp <- try(dd_ML(branching.times(subclades.treedata[[i]]$phy.full_tree), missnumspec=subclades.treedata[[i]]$missing_count.full_tree, ddmodel=1)) # fit a diversity-dependent-rate model, with exponential dependence in speciation rate
sink()
if (class(divmodel.tmp) == "try-error") {
if ("divrate.ML.diversity_dependent.rate" %in% metrics) divrate.ML.diversity_dependent.rate[[i]] <- NA
if ("divrate.ML.diversity_dependent.K" %in% metrics) divrate.ML.diversity_dependent.K[[i]] <- NA
if ("divrate.ML.diversity_dependent.AICc" %in% metrics) divrate.ML.diversity_dependent.AICc[[i]] <- NA
if ("divrate.ML.diversity_dependent.AIC" %in% metrics) divrate.ML.diversity_dependent.AIC[[i]] <- NA
} else {
if ("divrate.ML.diversity_dependent.rate" %in% metrics) divrate.ML.diversity_dependent.rate[[i]] <- with(divmodel.tmp, lambda-mu) # extract diversification rate (lambda - mu) from the diversity-dependent-rate model
			if ("divrate.ML.diversity_dependent.K" %in% metrics) divrate.ML.diversity_dependent.K[[i]] <- with(divmodel.tmp, K) # extract the carrying capacity K from the diversity-dependent-rate model
			if ("divrate.ML.diversity_dependent.AICc" %in% metrics) divrate.ML.diversity_dependent.AICc[[i]] <- (-2 * divmodel.tmp$loglik) + (2 * divmodel.tmp$df) + (((2 * divmodel.tmp$df) * (divmodel.tmp$df + 1)) / (subclades.treedata[[i]]$phy$Nnode - divmodel.tmp$df - 1)) # calculate AICc for the diversity-dependent-rate model
			if ("divrate.ML.diversity_dependent.AIC" %in% metrics) divrate.ML.diversity_dependent.AIC[[i]] <- (-2 * divmodel.tmp$loglik) + (2 * divmodel.tmp$df) # calculate AIC for the diversity-dependent-rate model
}
rm(divmodel.tmp) # remove the temporary model
}
# extract average diversification rate from BAMM
if ("divrate.BAMM" %in% metrics) divrate.BAMM[[i]] <- BAMM_divrates$full_tree[as.character(subclades.treedata[[i]]$node.full_tree)] # get average subclade diversification rate from BAMM
# extract average diversification rate from BAMM
if ("divrate.BAMM.morph_tree" %in% metrics) divrate.BAMM.morph_tree[[i]] <- BAMM_divrates$morph_tree[i] # get average subclade diversification rate from BAMM
# calculate gamma
if ("gamma" %in% metrics) gamma[[i]] <- gammaStat(subclades.treedata[[i]]$phy.full_tree)
## morphological evolution stuff; I use fitContinuous because the functions in the mvMORPH package are really slow with more than a few variables
cat("Starting morphological evolution analyses.\n")
# fit BM model to geomean data, and extract sigma-squared and/or AICc
if (length(intersect(c("morphrate.geomean.BM.sigsq", "morphrate.geomean.BM.AICc", "morphrate.geomean.BM.AIC"), metrics)) > 0) {
cat("Fitting BM model to geomean data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$geomean, model="BM"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.geomean.BM.sigsq" %in% metrics) morphrate.geomean.BM.sigsq[[i]] <- NA
if ("morphrate.geomean.BM.AICc" %in% metrics) morphrate.geomean.BM.AICc[[i]] <- NA
if ("morphrate.geomean.BM.AIC" %in% metrics) morphrate.geomean.BM.AIC[[i]] <- NA
} else {
if ("morphrate.geomean.BM.sigsq" %in% metrics) morphrate.geomean.BM.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.geomean.BM.AICc" %in% metrics) morphrate.geomean.BM.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.geomean.BM.AIC" %in% metrics) morphrate.geomean.BM.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# fit Ornstein-Uhlenbeck (OU) model to geomean data, and and extract sigsq, alpha (the stable attractor parameter) and/or AICc
if (length(intersect(c("morphrate.geomean.OU.alpha", "morphrate.geomean.OU.sigsq", "morphrate.geomean.OU.AICc", "morphrate.geomean.OU.AIC"), metrics)) > 0) {
cat("Fitting OU model to geomean data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$geomean, model="OU"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.geomean.OU.alpha" %in% metrics) morphrate.geomean.OU.alpha[[i]] <- NA
if ("morphrate.geomean.OU.sigsq" %in% metrics) morphrate.geomean.OU.sigsq[[i]] <- NA
if ("morphrate.geomean.OU.AICc" %in% metrics) morphrate.geomean.OU.AICc[[i]] <- NA
if ("morphrate.geomean.OU.AIC" %in% metrics) morphrate.geomean.OU.AIC[[i]] <- NA
} else {
if ("morphrate.geomean.OU.alpha" %in% metrics) morphrate.geomean.OU.alpha[[i]] <- morphmodel.tmp$opt$alpha
if ("morphrate.geomean.OU.sigsq" %in% metrics) morphrate.geomean.OU.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.geomean.OU.AICc" %in% metrics) morphrate.geomean.OU.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.geomean.OU.AIC" %in% metrics) morphrate.geomean.OU.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# fit trend model to geomean data, and extract sigma-squared and/or AICc
if (length(intersect(c("morphrate.geomean.trend.slope", "morphrate.geomean.trend.sigsq", "morphrate.geomean.trend.AICc", "morphrate.geomean.trend.AIC"), metrics)) > 0) {
cat("Fitting trend model to geomean data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$geomean, model="trend"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.geomean.trend.slope" %in% metrics) morphrate.geomean.trend.slope[[i]] <- NA
if ("morphrate.geomean.trend.sigsq" %in% metrics) morphrate.geomean.trend.sigsq[[i]] <- NA
if ("morphrate.geomean.trend.AICc" %in% metrics) morphrate.geomean.trend.AICc[[i]] <- NA
if ("morphrate.geomean.trend.AIC" %in% metrics) morphrate.geomean.trend.AIC[[i]] <- NA
} else {
if ("morphrate.geomean.trend.slope" %in% metrics) morphrate.geomean.trend.slope[[i]] <- morphmodel.tmp$opt$slope
if ("morphrate.geomean.trend.sigsq" %in% metrics) morphrate.geomean.trend.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.geomean.trend.AICc" %in% metrics) morphrate.geomean.trend.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.geomean.trend.AIC" %in% metrics) morphrate.geomean.trend.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# fit early burst (EB) model to geomean data, and extract alpha and/or AICc
if (length(intersect(c("morphrate.geomean.EB.alpha", "morphrate.geomean.EB.sigsq", "morphrate.geomean.EB.AICc", "morphrate.geomean.EB.AIC"), metrics)) > 0) {
cat("Fitting EB model to geomean data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$geomean, model="EB"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.geomean.EB.alpha" %in% metrics) morphrate.geomean.EB.alpha[[i]] <- NA
if ("morphrate.geomean.EB.sigsq" %in% metrics) morphrate.geomean.EB.sigsq[[i]] <- NA
if ("morphrate.geomean.EB.AICc" %in% metrics) morphrate.geomean.EB.AICc[[i]] <- NA
if ("morphrate.geomean.EB.AIC" %in% metrics) morphrate.geomean.EB.AIC[[i]] <- NA
} else {
if ("morphrate.geomean.EB.alpha" %in% metrics) morphrate.geomean.EB.alpha[[i]] <- morphmodel.tmp$opt$a
if ("morphrate.geomean.EB.sigsq" %in% metrics) morphrate.geomean.EB.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.geomean.EB.AICc" %in% metrics) morphrate.geomean.EB.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.geomean.EB.AIC" %in% metrics) morphrate.geomean.EB.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# fit delta model to geomean data, and extract delta and/or AICc
if (length(intersect(c("morphrate.geomean.delta.delta", "morphrate.geomean.delta.sigsq", "morphrate.geomean.delta.AICc", "morphrate.geomean.delta.AIC"), metrics)) > 0) {
cat("Fitting delta model to geomean data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$geomean, model="delta"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.geomean.delta.delta" %in% metrics) morphrate.geomean.delta.delta[[i]] <- NA
if ("morphrate.geomean.delta.sigsq" %in% metrics) morphrate.geomean.delta.sigsq[[i]] <- NA
if ("morphrate.geomean.delta.AICc" %in% metrics) morphrate.geomean.delta.AICc[[i]] <- NA
if ("morphrate.geomean.delta.AIC" %in% metrics) morphrate.geomean.delta.AIC[[i]] <- NA
} else {
if ("morphrate.geomean.delta.delta" %in% metrics) morphrate.geomean.delta.delta[[i]] <- morphmodel.tmp$opt$delta
if ("morphrate.geomean.delta.sigsq" %in% metrics) morphrate.geomean.delta.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.geomean.delta.AICc" %in% metrics) morphrate.geomean.delta.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.geomean.delta.AIC" %in% metrics) morphrate.geomean.delta.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# extract average geomean morphological evolution rate from BAMM
if ("morphrate.geomean.BAMM" %in% metrics) morphrate.geomean.BAMM[[i]] <- BAMM_morphrates[[grep("geomean(?!_scaled)", names(BAMM_morphrates), value=TRUE, perl=TRUE)]][i]
# fit BM model to phyl_pca data, and extract sigma-squared and/or AICc
if (length(intersect(c("morphrate.phyl_pca.BM.sigsq", "morphrate.phyl_pca.PC1.BM.AICc", "morphrate.phyl_pca.PC1.BM.AIC"), metrics)) > 0) {
cat("Fitting BM model to phyl_pca data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$phyl_pca, model="BM"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp[["PC1"]]$res[,"convergence"])) > 0)) {
if ("morphrate.phyl_pca.BM.sigsq" %in% metrics) morphrate.phyl_pca.BM.sigsq[[i]] <- NA
if ("morphrate.phyl_pca.PC1.BM.AICc" %in% metrics) morphrate.phyl_pca.PC1.BM.AICc[[i]] <- NA
if ("morphrate.phyl_pca.PC1.BM.AIC" %in% metrics) morphrate.phyl_pca.PC1.BM.AIC[[i]] <- NA
} else {
if ("morphrate.phyl_pca.BM.sigsq" %in% metrics) morphrate.phyl_pca.BM.sigsq[[i]] <- sum(sapply(morphmodel.tmp, function(x) x$opt$sigsq))
if ("morphrate.phyl_pca.PC1.BM.AICc" %in% metrics) morphrate.phyl_pca.PC1.BM.AICc[[i]] <- morphmodel.tmp[["PC1"]]$opt$aicc
if ("morphrate.phyl_pca.PC1.BM.AIC" %in% metrics) morphrate.phyl_pca.PC1.BM.AIC[[i]] <- morphmodel.tmp[["PC1"]]$opt$aic
}
rm(morphmodel.tmp)
}
# fit Ornstein-Uhlenbeck (OU) model to phyl_pca data, and extract sigsq, alpha (the stable attractor parameter) and/or AICc
if (length(intersect(c("morphrate.phyl_pca.PC1.OU.alpha", "morphrate.phyl_pca.PC1.OU.sigsq", "morphrate.phyl_pca.PC1.OU.AICc"), metrics)) > 0) {
cat("Fitting OU model to phyl_pca data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$phyl_pca[,"PC1"], model="OU"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.phyl_pca.PC1.OU.alpha" %in% metrics) morphrate.phyl_pca.PC1.OU.alpha[[i]] <- NA
if ("morphrate.phyl_pca.PC1.OU.sigsq" %in% metrics) morphrate.phyl_pca.PC1.OU.sigsq[[i]] <- NA
if ("morphrate.phyl_pca.PC1.OU.AICc" %in% metrics) morphrate.phyl_pca.PC1.OU.AICc[[i]] <- NA
if ("morphrate.phyl_pca.PC1.OU.AIC" %in% metrics) morphrate.phyl_pca.PC1.OU.AIC[[i]] <- NA
} else {
if ("morphrate.phyl_pca.PC1.OU.alpha" %in% metrics) morphrate.phyl_pca.PC1.OU.alpha[[i]] <- morphmodel.tmp$opt$alpha
if ("morphrate.phyl_pca.PC1.OU.sigsq" %in% metrics) morphrate.phyl_pca.PC1.OU.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.phyl_pca.PC1.OU.AICc" %in% metrics) morphrate.phyl_pca.PC1.OU.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.phyl_pca.PC1.OU.AIC" %in% metrics) morphrate.phyl_pca.PC1.OU.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# fit trend model to phyl_pca data, and extract slope and/or AICc
if (length(intersect(c("morphrate.phyl_pca.PC1.trend.slope", "morphrate.phyl_pca.PC1.trend.sigsq", "morphrate.phyl_pca.PC1.trend.AICc", "morphrate.phyl_pca.PC1.trend.AIC"), metrics)) > 0) {
cat("Fitting trend model to phyl_pca data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$phyl_pca[,"PC1"], model="trend"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.phyl_pca.PC1.trend.slope" %in% metrics) morphrate.phyl_pca.PC1.trend.slope[[i]] <- NA
if ("morphrate.phyl_pca.PC1.trend.sigsq" %in% metrics) morphrate.phyl_pca.PC1.trend.sigsq[[i]] <- NA
if ("morphrate.phyl_pca.PC1.trend.AICc" %in% metrics) morphrate.phyl_pca.PC1.trend.AICc[[i]] <- NA
if ("morphrate.phyl_pca.PC1.trend.AIC" %in% metrics) morphrate.phyl_pca.PC1.trend.AIC[[i]] <- NA
} else {
if ("morphrate.phyl_pca.PC1.trend.slope" %in% metrics) morphrate.phyl_pca.PC1.trend.slope[[i]] <- morphmodel.tmp$opt$slope
if ("morphrate.phyl_pca.PC1.trend.sigsq" %in% metrics) morphrate.phyl_pca.PC1.trend.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.phyl_pca.PC1.trend.AICc" %in% metrics) morphrate.phyl_pca.PC1.trend.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.phyl_pca.PC1.trend.AIC" %in% metrics) morphrate.phyl_pca.PC1.trend.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# fit early burst (EB) model to phyl_pca data, and extract alpha (the rate decline parameter) and/or AICc
if (length(intersect(c("morphrate.phyl_pca.PC1.EB.alpha", "morphrate.phyl_pca.PC1.EB.sigsq", "morphrate.phyl_pca.PC1.EB.AICc", "morphrate.phyl_pca.PC1.EB.AIC"), metrics)) > 0) {
cat("Fitting EB model to phyl_pca data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$phyl_pca[,"PC1"], model="EB"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.phyl_pca.PC1.EB.alpha" %in% metrics) morphrate.phyl_pca.PC1.EB.alpha[[i]] <- NA
if ("morphrate.phyl_pca.PC1.EB.sigsq" %in% metrics) morphrate.phyl_pca.PC1.EB.sigsq[[i]] <- NA
if ("morphrate.phyl_pca.PC1.EB.AICc" %in% metrics) morphrate.phyl_pca.PC1.EB.AICc[[i]] <- NA
if ("morphrate.phyl_pca.PC1.EB.AIC" %in% metrics) morphrate.phyl_pca.PC1.EB.AIC[[i]] <- NA
} else {
if ("morphrate.phyl_pca.PC1.EB.alpha" %in% metrics) morphrate.phyl_pca.PC1.EB.alpha[[i]] <- morphmodel.tmp$opt$a
if ("morphrate.phyl_pca.PC1.EB.sigsq" %in% metrics) morphrate.phyl_pca.PC1.EB.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.phyl_pca.PC1.EB.AICc" %in% metrics) morphrate.phyl_pca.PC1.EB.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.phyl_pca.PC1.EB.AIC" %in% metrics) morphrate.phyl_pca.PC1.EB.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# fit delta model to phyl_pca data, and extract alpha (the rate decline parameter) and/or AICc
if (length(intersect(c("morphrate.phyl_pca.PC1.delta.delta", "morphrate.phyl_pca.PC1.delta.sigsq", "morphrate.phyl_pca.PC1.delta.AICc", "morphrate.phyl_pca.PC1.delta.AIC"), metrics)) > 0) {
cat("Fitting delta model to phyl_pca data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$phyl_pca[,"PC1"], model="delta"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.phyl_pca.PC1.delta.delta" %in% metrics) morphrate.phyl_pca.PC1.delta.delta[[i]] <- NA
if ("morphrate.phyl_pca.PC1.delta.sigsq" %in% metrics) morphrate.phyl_pca.PC1.delta.sigsq[[i]] <- NA
if ("morphrate.phyl_pca.PC1.delta.AICc" %in% metrics) morphrate.phyl_pca.PC1.delta.AICc[[i]] <- NA
if ("morphrate.phyl_pca.PC1.delta.AIC" %in% metrics) morphrate.phyl_pca.PC1.delta.AIC[[i]] <- NA
} else {
if ("morphrate.phyl_pca.PC1.delta.delta" %in% metrics) morphrate.phyl_pca.PC1.delta.delta[[i]] <- morphmodel.tmp$opt$delta
if ("morphrate.phyl_pca.PC1.delta.sigsq" %in% metrics) morphrate.phyl_pca.PC1.delta.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.phyl_pca.PC1.delta.AICc" %in% metrics) morphrate.phyl_pca.PC1.delta.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.phyl_pca.PC1.delta.AIC" %in% metrics) morphrate.phyl_pca.PC1.delta.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# extract average phyl_pca morphological evolution rate from BAMM
if ("morphrate.phyl_pca.PC13.BAMM" %in% metrics) morphrate.phyl_pca.PC13.BAMM[[i]] <- BAMM_morphrates[[grep("(?<!scaled_)phyl_pca_PC1", names(BAMM_morphrates), value=TRUE, perl=TRUE)]][[i]]
# fit BM model to geomean_scaled.phyl_pca data, and extract sigma-squared and/or AICc
if (length(intersect(c("morphrate.geomean_scaled.phyl_pca.BM.sigsq", "morphrate.geomean_scaled.phyl_pca.PC1.BM.AICc", "morphrate.geomean_scaled.phyl_pca.PC1.BM.AIC"), metrics)) > 0) {
cat("Fitting BM model to geomean-scaled phyl_pca data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$geomean_scaled.phyl_pca, model="BM"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp[["PC1"]]$res[,"convergence"])) > 0)) {
if ("morphrate.geomean_scaled.phyl_pca.BM.sigsq" %in% metrics) morphrate.geomean_scaled.phyl_pca.BM.sigsq[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.BM.AICc" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.BM.AICc[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.BM.AIC" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.BM.AIC[[i]] <- NA
} else {
if ("morphrate.geomean_scaled.phyl_pca.BM.sigsq" %in% metrics) morphrate.geomean_scaled.phyl_pca.BM.sigsq[[i]] <- sum(sapply(morphmodel.tmp, function(x) x$opt$sigsq))
if ("morphrate.geomean_scaled.phyl_pca.PC1.BM.AICc" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.BM.AICc[[i]] <- morphmodel.tmp[["PC1"]]$opt$aicc
if ("morphrate.geomean_scaled.phyl_pca.PC1.BM.AIC" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.BM.AIC[[i]] <- morphmodel.tmp[["PC1"]]$opt$aic
}
rm(morphmodel.tmp)
}
# fit Ornstein-Uhlenbeck (OU) model to geomean_scaled.phyl_pca data, and extract sigsq, alpha (the stable attractor parameter) and/or AICc
if (length(intersect(c("morphrate.geomean_scaled.phyl_pca.PC1.OU.alpha", "morphrate.geomean_scaled.phyl_pca.PC1.OU.sigsq", "morphrate.geomean_scaled.phyl_pca.PC1.OU.AICc", "morphrate.geomean_scaled.phyl_pca.PC1.OU.AIC"), metrics)) > 0) {
cat("Fitting OU model to geomean-scaled phyl_pca data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$geomean_scaled.phyl_pca[,"PC1"], model="OU"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.geomean_scaled.phyl_pca.PC1.OU.alpha" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.OU.alpha[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.OU.sigsq" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.OU.sigsq[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.OU.AICc" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.OU.AICc[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.OU.AIC" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.OU.AIC[[i]] <- NA
} else {
if ("morphrate.geomean_scaled.phyl_pca.PC1.OU.alpha" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.OU.alpha[[i]] <- morphmodel.tmp$opt$alpha
if ("morphrate.geomean_scaled.phyl_pca.PC1.OU.sigsq" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.OU.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.geomean_scaled.phyl_pca.PC1.OU.AICc" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.OU.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.geomean_scaled.phyl_pca.PC1.OU.AIC" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.OU.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# fit trend model to geomean_scaled.phyl_pca data, and extract slope and/or AICc
if (length(intersect(c("morphrate.geomean_scaled.phyl_pca.PC1.trend.slope", "morphrate.geomean_scaled.phyl_pca.PC1.trend.sigsq", "morphrate.geomean_scaled.phyl_pca.PC1.trend.AICc", "morphrate.geomean_scaled.phyl_pca.PC1.trend.AIC"), metrics)) > 0) {
cat("Fitting trend model to geomean-scaled phyl_pca data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$geomean_scaled.phyl_pca[,"PC1"], model="trend"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.geomean_scaled.phyl_pca.PC1.trend.slope" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.trend.slope[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.trend.sigsq" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.trend.sigsq[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.trend.AICc" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.trend.AICc[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.trend.AIC" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.trend.AIC[[i]] <- NA
} else {
if ("morphrate.geomean_scaled.phyl_pca.PC1.trend.slope" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.trend.slope[[i]] <- morphmodel.tmp$opt$slope
if ("morphrate.geomean_scaled.phyl_pca.PC1.trend.sigsq" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.trend.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.geomean_scaled.phyl_pca.PC1.trend.AICc" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.trend.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.geomean_scaled.phyl_pca.PC1.trend.AIC" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.trend.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# fit early burst (EB) model to geomean_scaled.phyl_pca data, and extract alpha (the rate decline parameter) and/or AICc
if (length(intersect(c("morphrate.geomean_scaled.phyl_pca.PC1.EB.alpha", "morphrate.geomean_scaled.phyl_pca.PC1.EB.sigsq", "morphrate.geomean_scaled.phyl_pca.PC1.EB.AICc", "morphrate.geomean_scaled.phyl_pca.PC1.EB.AIC"), metrics)) > 0) {
cat("Fitting EB model to geomean-scaled phyl_pca data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$geomean_scaled.phyl_pca[,"PC1"], model="EB"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.geomean_scaled.phyl_pca.PC1.EB.alpha" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.EB.alpha[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.EB.sigsq" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.EB.sigsq[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.EB.AICc" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.EB.AICc[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.EB.AIC" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.EB.AIC[[i]] <- NA
} else {
if ("morphrate.geomean_scaled.phyl_pca.PC1.EB.alpha" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.EB.alpha[[i]] <- morphmodel.tmp$opt$a
if ("morphrate.geomean_scaled.phyl_pca.PC1.EB.sigsq" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.EB.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.geomean_scaled.phyl_pca.PC1.EB.AICc" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.EB.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.geomean_scaled.phyl_pca.PC1.EB.AIC" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.EB.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# fit delta model to geomean_scaled.phyl_pca data, and extract delta (the time-scaling parameter) and/or AICc
if (length(intersect(c("morphrate.geomean_scaled.phyl_pca.PC1.delta.delta", "morphrate.geomean_scaled.phyl_pca.PC1.delta.sigsq", "morphrate.geomean_scaled.phyl_pca.PC1.delta.AICc", "morphrate.geomean_scaled.phyl_pca.PC1.delta.AIC"), metrics)) > 0) {
cat("Fitting delta model to geomean-scaled phyl_pca data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$geomean_scaled.phyl_pca[,"PC1"], model="delta"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.geomean_scaled.phyl_pca.PC1.delta.delta" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.delta.delta[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.delta.sigsq" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.delta.sigsq[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.delta.AICc" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.delta.AICc[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.delta.AIC" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.delta.AIC[[i]] <- NA
} else {
if ("morphrate.geomean_scaled.phyl_pca.PC1.delta.delta" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.delta.delta[[i]] <- morphmodel.tmp$opt$delta
if ("morphrate.geomean_scaled.phyl_pca.PC1.delta.sigsq" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.delta.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.geomean_scaled.phyl_pca.PC1.delta.AICc" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.delta.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.geomean_scaled.phyl_pca.PC1.delta.AIC" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.delta.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# extract average geomean_scaled.phyl_pca morphological evolution rate from BAMM
if ("morphrate.geomean_scaled.phyl_pca.PC13.BAMM" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC13.BAMM[[i]] <- BAMM_morphrates[[grep("geomean_scaled_phyl_pca_PC1", names(BAMM_morphrates), value=TRUE)]][[i]]
## overlap metrics
cat("Starting overlap metrics.\n")
# calculate average of summed overlaps scaled by focal taxon range
if ("avg_overlaps.rangesize_scaled" %in% metrics) avg_overlaps.rangesize_scaled[[i]] <- mean(subclades.treedata[[i]]$overlaps.scaled)
if ("avg_overlaps.euclidean_scaled.geomean" %in% metrics) avg_overlaps.euclidean_scaled.geomean[[i]] <- mean(subclades.treedata[[i]]$overlaps.euclidean_scaled$geomean)
if ("avg_overlaps.euclidean_scaled.phyl_pca" %in% metrics) avg_overlaps.euclidean_scaled.phyl_pca[[i]] <- mean(subclades.treedata[[i]]$overlaps.euclidean_scaled$phyl_pca)
if ("avg_overlaps.euclidean_scaled.geomean_scaled.phyl_pca" %in% metrics) avg_overlaps.euclidean_scaled.geomean_scaled.phyl_pca[[i]] <- mean(subclades.treedata[[i]]$overlaps.euclidean_scaled$geomean_scaled.phyl_pca)
}
if (return_format == "matrix") {
subclade_data <- matrix(nrow=length(subclades.treedata), ncol=length(metrics), dimnames=list(names(subclades.treedata), metrics))
for (i in 1:length(metrics)) {
if (!quiet) cat(metrics[i], ": ", get(metrics[i]), "\n", sep="")
subclade_data[,i] <- get(metrics[i])
}
}
if (return_format == "list") {
subclade_data <- list()
for (metric in metrics) subclade_data[[metric]] <- get(metric)
}
return(subclade_data)
}
## Compute the full set of subclade-level metrics for every individual-inclusion variant of the data.
picidae.subclade.data <- list()
for (inclusion in names(picidae.morph.log.fully_reduced.subclades.treedata)) {
  picidae.subclade.data[[inclusion]] <- calc.subclade.metrics(
    subclades.treedata = picidae.morph.log.fully_reduced.subclades.treedata[[inclusion]],
    BAMM_divrates = picidae.BAMM.divrates_by_node,
    BAMM_morphrates = picidae.BAMM.morphrates_by_node
  )
}
rm(inclusion)
## calculate delta_aicc for all models vs. basic models (e.g time-dependent and diversity-dependent vs. constant-rate diversification, OU and trend and EB vs. BM for all morph variables)
# For each AICc column of a non-baseline model, append a new "delta_AICc" column holding the
# difference from the corresponding baseline model's AICc (lower/negative values favor the richer model).
for (i in names(picidae.subclade.data)) { # loop over individual inclusion
# diversification models: baseline is the constant-rate model; the negative lookahead skips the baseline's own AICc column
for (m in grep("divrate(?!.ML.constant)[a-zA-Z0-9._]+AICc", colnames(picidae.subclade.data[[i]]), value=TRUE, perl=TRUE)) {
picidae.subclade.data[[i]] <- cbind(picidae.subclade.data[[i]], picidae.subclade.data[[i]][,m] - picidae.subclade.data[[i]][,"divrate.ML.constant.AICc"])
colnames(picidae.subclade.data[[i]])[ncol(picidae.subclade.data[[i]])] <- sub("AICc", "delta_AICc", m)
}
# morphological models: baseline is BM; the negative lookbehind skips BM's own AICc columns, and the
# inner sub() swaps the model name (e.g. OU/EB/trend) for "BM" to locate the matching baseline column
for (m in grep("morphrate[a-zA-Z0-9._]+(?<!BM.)AICc", colnames(picidae.subclade.data[[i]]), value=TRUE, perl=TRUE)) {
picidae.subclade.data[[i]] <- cbind(picidae.subclade.data[[i]], picidae.subclade.data[[i]][,m] - picidae.subclade.data[[i]][,sub("[a-zA-Z0-9]+(?=.AICc)", "BM", m, perl=TRUE)])
colnames(picidae.subclade.data[[i]])[ncol(picidae.subclade.data[[i]])] <- sub("AICc", "delta_AICc", m)
}
}
rm(i,m)
### generate subclade combinations for my subclade regressions
## Build two combination sets per inclusion variant: 100 random draws plus one deterministic
## sequential set. Both require at least 5 clades per combination and 6 taxa per clade.
picidae.subclade.combinations.6sp.random <- list()
picidae.subclade.combinations.6sp.sequential <- list()
for (inclusion in names(picidae.morph.log.fully_reduced.subclades.treedata)) {
  picidae.subclade.combinations.6sp.random[[inclusion]] <- subcladeCombinations.random(
    phylist = picidae.morph.log.fully_reduced.subclades.treedata[[inclusion]],
    ncombs = 100, min.clades = 5, min.taxa = 6
  )
  picidae.subclade.combinations.6sp.sequential[[inclusion]] <- subcladeCombinations.sequential(
    phy = picidae.morph.log.fully_reduced.treedata[[inclusion]]$phy,
    min.clades = 5, min.taxa = 6
  )
}
rm(inclusion)
### generate backbone trees, subclade treedata objects, and fit models to subclade combinations
## general process
## loop over subclade combinations
# generate backbone tree for each subclade combination
# generate treedata object for each subclade combination (using the backbone tree and the subclade data)
# fit models to subclade data using pgls, with the treedata$phy and treedata$data
# save off the relevant bits from the models (slopes and intercepts, R^2)
# examine distributions of r_squared, p_values, slopes, etc.
## the function fit.subcladeModels.bivariate() fits regression models to subclade data, iterating over various combinations of subclades
# as input, it takes phy (the phylogenetic tree of taxa, as phylo object), subclade.combinations (a list of subclade combinations, each containing a vector of node numbers in phy), subclade.data (a matrix of subclade metrics, with rownames as the subclade numbers (the node numbers)), models (an optional character vector containing models to test, formatted as "var1_vs_var2"), models_filename (the name of an optional text file with models to test, with one model on each line, formatted as "var1_vs_var2"), return_format (format to return results in; can be "matrix" or "list"), model_fitting (method for fitting models; either "pgls" or "lm"), quiet.subclade.combinations (boolean to output to console when starting the next subclade combination), quiet.models (boolean to output to console when starting fitting the next model)
# it returns a matrix or list, with the columns of the matrix or the elements of the list containing the parameter values from the specified models fit to each subclade combination
## fit.subcladeModels.bivariate() fits bivariate regression models (y ~ x) to subclade-level data,
## iterating over combinations of subclades.
##
## Arguments:
##   phy                   - phylogenetic tree of taxa (phylo object)
##   subclade.combinations - list of subclade combinations, each a vector of node numbers in phy
##   subclade.data         - matrix of subclade metrics; rownames are the subclade node numbers
##   models                - optional character vector of models, formatted "var1_vs_var2" (y vs. x)
##   models_filename       - text file with one "var1_vs_var2" model per line; read when models is NULL
##   return_format         - "matrix", "list", or "array"
##   model_fitting         - "pgls" (phylogenetic GLS via nlme::gls) or "lm" (ordinary least squares)
##   quiet.subclade.combinations, quiet.models - progress-reporting switches
## Returns: r_squared, p_value, slope, and intercept for every model fit to every subclade
##   combination, in the requested format. Failed fits are recorded as NA.
fit.subcladeModels.bivariate <- function(phy, subclade.combinations, subclade.data, models=NULL, models_filename="Picidae_subclade_models_bivariate.txt", return_format="matrix", model_fitting="pgls", quiet.subclade.combinations=TRUE, quiet.models=TRUE) {
  # if models not provided as argument, read them from file
  if (is.null(models)) {
    models <- read.table(file=models_filename, header=FALSE, stringsAsFactors=FALSE)[,1]
  }
  # create an empty accumulator vector for each model/measure pair
  for (model in models) {
    for (measure in c("r_squared","p_value","slope","intercept")) {
      assign(paste(model, measure, sep="."), value=numeric())
    }
  }
  for (i in seq_along(subclade.combinations)) { # loop over subclade combinations (seq_along is safe for empty input)
    # generate backbone tree and treedata object for this combination
    if (!quiet.subclade.combinations) cat("\nStarting model fitting for combination ", i, " of ", length(subclade.combinations), ".\n", sep="")
    subclade.treedata.tmp <- getTreedata.subclades(phy=phy, subclade.combination=subclade.combinations[[i]], subclade.data=subclade.data)
    for (model in models) { # loop over models
      if (!quiet.models) cat("Starting model ", model, "\n", sep="")
      model_split <- strsplit(model, "_vs_")[[1]] # split the model name into the two component variables
      y_var <- model_split[1] # response variable name
      x_var <- model_split[2] # predictor variable name
      # shared y/x data frame so that coefficient names are "x"/"(Intercept)" under BOTH fitting
      # methods (BUGFIX: the lm branch previously fitted on raw column expressions, so the
      # coefficient lookups by name returned NA for slope/intercept/p-value)
      model_data <- data.frame(y = subclade.treedata.tmp$data[,y_var], x = subclade.treedata.tmp$data[,x_var])
      if (model_fitting=="pgls") {
        # fit model using pgls: correlation structure from the tree, with Pagel's lambda estimated
        model.tmp <- try(gls(data = model_data, model = y ~ x, na.action=na.exclude, correlation=corPagel(value=1, phy=subclade.treedata.tmp$phy), method="REML"))
        if (inherits(model.tmp, "try-error")) {
          # fall back to a Brownian-Motion correlation structure if lambda estimation fails
          model.tmp <- try(gls(data = model_data, model = y ~ x, na.action=na.exclude, correlation=corBrownian(value=1, phy=subclade.treedata.tmp$phy), method="REML"))
        }
        # if model still fails, record NA for the gls-specific measures
        if (inherits(model.tmp, "try-error")) {
          assign(paste(model, "r_squared", sep="."), value=c(get(paste(model, "r_squared", sep=".")), NA))
          assign(paste(model, "p_value", sep="."), value=c(get(paste(model, "p_value", sep=".")), NA))
        } else {
          # pseudo-R^2 for gls: squared correlation of observed and fitted values
          assign(paste(model, "r_squared", sep="."), value=c(get(paste(model, "r_squared", sep=".")), cor(subclade.treedata.tmp$data[,y_var], model.tmp$fitted)^2))
          assign(paste(model, "p_value", sep="."), value=c(get(paste(model, "p_value", sep=".")), summary(model.tmp)$tTable["x","p-value"]))
        }
      } else if (model_fitting=="lm") {
        # fit model using ordinary least squares
        model.tmp <- try(lm(y ~ x, data = model_data))
        if (inherits(model.tmp, "try-error")) {
          assign(paste(model, "r_squared", sep="."), value=c(get(paste(model, "r_squared", sep=".")), NA))
          assign(paste(model, "p_value", sep="."), value=c(get(paste(model, "p_value", sep=".")), NA))
        } else {
          assign(paste(model, "r_squared", sep="."), value=c(get(paste(model, "r_squared", sep=".")), summary(model.tmp)$r.squared))
          # BUGFIX: this previously read summary(model.tmp.lm), an undefined object, which crashed the lm branch
          assign(paste(model, "p_value", sep="."), value=c(get(paste(model, "p_value", sep=".")), summary(model.tmp)$coefficients["x","Pr(>|t|)"]))
        }
      }
      # slope and intercept are extracted the same way for both fitting methods;
      # if model fitting failed, record NA instead
      if (inherits(model.tmp, "try-error")) {
        assign(paste(model, "slope", sep="."), value=c(get(paste(model, "slope", sep=".")), NA))
        assign(paste(model, "intercept", sep="."), value=c(get(paste(model, "intercept", sep=".")), NA))
      } else {
        assign(paste(model, "slope", sep="."), value=c(get(paste(model, "slope", sep=".")), model.tmp$coefficients["x"]))
        assign(paste(model, "intercept", sep="."), value=c(get(paste(model, "intercept", sep=".")), model.tmp$coefficients["(Intercept)"]))
      }
    }
  }
  if (return_format == "matrix") {
    # matrix with subclade combinations as rows and model.measure names as columns
    subclade_combination_model_results <- matrix(nrow=length(subclade.combinations), ncol=length(models)*4, dimnames=list(as.character(seq_along(subclade.combinations)), as.vector(sapply(models, function(y) paste(y, c("r_squared","p_value","slope","intercept"), sep=".")))))
    for (i in colnames(subclade_combination_model_results)) {
      subclade_combination_model_results[,i] <- get(i)
    }
  } else if (return_format == "list") {
    # list of vectors, one per model.measure, with subclade combinations as vector elements
    # (BUGFIX: sep="." was previously misplaced inside c(), producing names joined with spaces
    # plus a spurious "." element, so none of the accumulator objects could be found)
    subclade_combination_model_results <- list()
    for (i in as.vector(sapply(models, function(y) paste(y, c("r_squared","p_value","slope","intercept"), sep=".")))) subclade_combination_model_results[[i]] <- get(i)
  } else if (return_format == "array") {
    # array with models x measures x subclade combinations
    subclade_combination_model_results <- array(dim=c(length(models), 4, length(subclade.combinations)), dimnames=list(models, c("r_squared","p_value","slope","intercept"), as.character(seq_along(subclade.combinations))))
    for (i in models) {
      for (j in c("r_squared","p_value","slope","intercept")) {
        subclade_combination_model_results[i,j,] <- get(paste(i,j, sep="."))
      }
    }
  }
  return(subclade_combination_model_results)
}
## Fit the bivariate subclade models to every random and sequential combination set,
## once per individual-inclusion variant.
picidae.subclade.combinations.6sp.random.model_params <- list()
picidae.subclade.combinations.6sp.sequential.model_params <- list()
for (ind in names(picidae.subclade.combinations.6sp.random)) {
  cat("\nStarting subclade model fitting for random combinations of", ind, "\n", sep=" ")
  picidae.subclade.combinations.6sp.random.model_params[[ind]] <- fit.subcladeModels.bivariate(
    phy = picidae.morph.log.fully_reduced.treedata[[ind]]$phy,
    subclade.combinations = picidae.subclade.combinations.6sp.random[[ind]],
    subclade.data = picidae.subclade.data[[ind]],
    quiet.subclade.combinations = FALSE
  )
  cat("\nStarting subclade model fitting for sequential combinations of", ind, "\n", sep=" ")
  picidae.subclade.combinations.6sp.sequential.model_params[[ind]] <- fit.subcladeModels.bivariate(
    phy = picidae.morph.log.fully_reduced.treedata[[ind]]$phy,
    subclade.combinations = picidae.subclade.combinations.6sp.sequential[[ind]],
    subclade.data = picidae.subclade.data[[ind]],
    quiet.subclade.combinations = FALSE
  )
}
rm(ind)
### summarize results
## histograms of important models (across the different random subclade combinations)
## one PDF per (inclusion variant, model), with a 2x2 panel of r_squared, p_value, slope, intercept
for (i in names(picidae.subclade.combinations.6sp.random.model_params)) {
  # recover the model names by stripping the ".r_squared" suffix from the relevant columns
  # (BUGFIX: the character class previously used "A-z", which also matches "[", "]", "^", "`" and
  # backslash; "A-Z" is what was intended)
  for (j in sub(".r_squared", "", grep("[a-zA-Z0-9_.]+.r_squared", colnames(picidae.subclade.combinations.6sp.random.model_params[[i]]), perl=TRUE, value=TRUE))) {
    pdf(file=paste("picidae_6sp_random", i, j, "histogram.pdf", sep="_"), height=10, width=10)
    par(mfrow=c(2,2))
    for (k in c("r_squared","p_value","slope","intercept")) {
      if (k == "p_value") {
        # p-values get fixed 0-1 binning with a significance reference line at 0.05
        hist(picidae.subclade.combinations.6sp.random.model_params[[i]][,paste(j, k, sep=".")], xlab=NULL, main=k, col="gray", breaks=seq(0,1, by=0.05))
        abline(v=0.05, lwd=2, col="red")
      } else {
        hist(picidae.subclade.combinations.6sp.random.model_params[[i]][,paste(j, k, sep=".")], xlab=NULL, main=k, col="gray", breaks=10)
      }
      if (k == "slope") abline(v=0, lwd=2, col="red") # reference line at slope = 0 (no relationship)
    }
    dev.off()
  }
}
rm(i,j,k)
## histograms and line plots of important models (across the different sequential subclade combinations)
## one PDF per (inclusion variant, model): for each parameter, a histogram plus a line plot
## showing the parameter's value across successive tree slices (root-ward to tip-ward)
for (i in names(picidae.subclade.combinations.6sp.sequential.model_params)) {
  # model names recovered by stripping the ".r_squared" suffix
  # (BUGFIX: character class previously used "A-z", which matches stray punctuation; "A-Z" intended)
  for (j in sub(".r_squared", "", grep("[a-zA-Z0-9_.]+.r_squared", colnames(picidae.subclade.combinations.6sp.sequential.model_params[[i]]), perl=TRUE, value=TRUE))) {
    # BUGFIX: include the inclusion variant i in the filename (as the random-combination plots do);
    # previously all variants wrote to the same file and overwrote each other
    pdf(file=paste("picidae_6sp_sequential.", i, ".", j, ".histogram_lineplot.pdf", sep=""), height=10, width=20)
    par(mfcol=c(2,4))
    for (k in c("r_squared","p_value","slope","intercept")) {
      if (k == "p_value") {
        hist(picidae.subclade.combinations.6sp.sequential.model_params[[i]][,paste(j, k, sep=".")], xlab=NULL, main=k, col="gray", breaks=seq(0,1, by=0.05))
        abline(v=0.05, lwd=2, col="red")
      } else {
        hist(picidae.subclade.combinations.6sp.sequential.model_params[[i]][,paste(j, k, sep=".")], xlab=NULL, main=k, col="gray", breaks=10)
      }
      if (k == "slope") abline(v=0, lwd=2, col="red")
      # BUGFIX: coerce rownames to numeric for the x axis; plotting a formula against a character
      # vector coerces it to a factor and does not produce the intended line plot
      plot(picidae.subclade.combinations.6sp.sequential.model_params[[i]][,paste(j, k, sep=".")] ~ as.numeric(rownames(picidae.subclade.combinations.6sp.sequential.model_params[[i]])), ylab=k, xlab="Tree slice (starting at root)", main=k, type="l")
      if (k == "p_value") abline(h=0.05, lty="dashed")
      if (k == "slope") abline(h=0, lty="dashed")
    }
    dev.off()
  }
}
rm(i,j,k)
## capture median of parameter values across the random subclade combinations
picidae.subclade.combinations.6sp.random.model_params.median <- list()
for (i in names(picidae.subclade.combinations.6sp.random.model_params)) {
  # column-wise medians over all subclade combinations (NAs from failed fits are dropped)
  medians.tmp <- apply(picidae.subclade.combinations.6sp.random.model_params[[i]], MARGIN=2, median, na.rm=TRUE)
  # one row per model, one column per parameter
  # (BUGFIX: row names are now derived from the *random* model-parameter matrix being iterated over,
  # not the sequential one, and the character class uses "A-Z" instead of the erroneous "A-z")
  picidae.subclade.combinations.6sp.random.model_params.median[[i]] <- matrix(nrow=ncol(picidae.subclade.combinations.6sp.random.model_params[[i]])/4, ncol=4, dimnames=list(sub(".r_squared", "", grep("[a-zA-Z0-9_.]+.r_squared", colnames(picidae.subclade.combinations.6sp.random.model_params[[i]]), perl=TRUE, value=TRUE)), c("r_squared", "p_value", "slope", "intercept")))
  picidae.subclade.combinations.6sp.random.model_params.median[[i]][,1] <- medians.tmp[grep("r_squared", names(medians.tmp), perl=TRUE, value=TRUE)]
  picidae.subclade.combinations.6sp.random.model_params.median[[i]][,2] <- medians.tmp[grep("p_value", names(medians.tmp), perl=TRUE, value=TRUE)]
  # the lookbehind skips "trend.slope" occurrences embedded in model variable names, so only the
  # ".slope" parameter columns are collected here
  picidae.subclade.combinations.6sp.random.model_params.median[[i]][,3] <- medians.tmp[grep("(?<!trend.)slope", names(medians.tmp), perl=TRUE, value=TRUE)]
  picidae.subclade.combinations.6sp.random.model_params.median[[i]][,4] <- medians.tmp[grep("intercept", names(medians.tmp), perl=TRUE, value=TRUE)]
  ## output median values to a table
  write.csv(picidae.subclade.combinations.6sp.random.model_params.median[[i]], file=paste("picidae_subclade_combinations.6sp_random", i, "model_params.median.csv", sep="."))
}
rm(i,medians.tmp)
## capture median of parameter values without outliers (Picidae clades 234, 235; Picinae clades 208, 209)
# NOTE(review): the outlier node labels "234"/"235" are hard-coded here; confirm they match the
# analysis being run (a Picinae run would need "208"/"209")
picidae.subclade.combinations.6sp.random.model_params.no_outliers.median <- list()
for (i in names(picidae.subclade.combinations.6sp.random.model_params)) {
  # medians over only those combinations containing neither outlier subclade
  medians.tmp <- apply(picidae.subclade.combinations.6sp.random.model_params[[i]][!sapply(picidae.subclade.combinations.6sp.random[[i]], function(x) length(intersect(x, c("234","235"))) > 0),], MARGIN=2, median, na.rm=TRUE)
  # (BUGFIX: row names derived from the *random* model-parameter matrix rather than the sequential
  # one, and the character class uses "A-Z" instead of the erroneous "A-z")
  picidae.subclade.combinations.6sp.random.model_params.no_outliers.median[[i]] <- matrix(nrow=ncol(picidae.subclade.combinations.6sp.random.model_params[[i]])/4, ncol=4, dimnames=list(sub(".r_squared", "", grep("[a-zA-Z0-9_.]+.r_squared", colnames(picidae.subclade.combinations.6sp.random.model_params[[i]]), perl=TRUE, value=TRUE)), c("r_squared", "p_value", "slope", "intercept")))
  picidae.subclade.combinations.6sp.random.model_params.no_outliers.median[[i]][,1] <- medians.tmp[grep("r_squared", names(medians.tmp), perl=TRUE, value=TRUE)]
  picidae.subclade.combinations.6sp.random.model_params.no_outliers.median[[i]][,2] <- medians.tmp[grep("p_value", names(medians.tmp), perl=TRUE, value=TRUE)]
  # lookbehind keeps ".slope" parameter columns while skipping "trend.slope" variable names embedded in model names
  picidae.subclade.combinations.6sp.random.model_params.no_outliers.median[[i]][,3] <- medians.tmp[grep("(?<!trend.)slope", names(medians.tmp), perl=TRUE, value=TRUE)]
  picidae.subclade.combinations.6sp.random.model_params.no_outliers.median[[i]][,4] <- medians.tmp[grep("intercept", names(medians.tmp), perl=TRUE, value=TRUE)]
}
rm(i,medians.tmp)
### output scatterplots of clade-level metrics for all subclades
# NOTE: the remaining summary/plotting code operates on the "all individuals included" variant only
i <- "all_inds"
picidae.subclade.data.6sp.main_variant <- picidae.subclade.data[[i]][picidae.subclade.data[[i]][,"ntaxa.on_morph_tree"] >= 6,] # trim subclade data to only include clades with at least 6 taxa on the tree, as those were the ones used in model fitting
## plots of diversification rate vs. average range overlap and rate of shape evolution
# two side-by-side panels; regression lines use the median slope/intercept from the fits across the
# random subclade combinations, and each panel is annotated with the median pseudo-R^2
pdf(file="Picidae_diversification_rates_vs_overlap_and_shape_evolution_rate.pdf", width=10, height=5, useDingbats=FALSE)
par(mfrow=c(1,2))
# panel 1: diversification rate vs. average range overlap
plot(picidae.subclade.data.6sp.main_variant[,"divrate.ML.constant.rate"] ~ picidae.subclade.data.6sp.main_variant[,"avg_overlaps.rangesize_scaled"], xlab="Average Range Overlap", ylab="Diversification Rate", pch=19)
abline(a=picidae.subclade.combinations.6sp.random.model_params.median[[i]]["divrate.ML.constant.rate_vs_avg_overlaps.rangesize_scaled","intercept"], b=picidae.subclade.combinations.6sp.random.model_params.median[[i]]["divrate.ML.constant.rate_vs_avg_overlaps.rangesize_scaled","slope"])
# annotation coordinates (x=8.5, y=0.13) are hand-tuned for the expected data range
text(x=8.5,y=0.13,labels=bquote(paste("median ", R^2, " = ", .(format(round(picidae.subclade.combinations.6sp.random.model_params.median[[i]]["divrate.ML.constant.rate_vs_avg_overlaps.rangesize_scaled","r_squared"], 2), nsmall=2)), sep="")))
# panel 2: diversification rate vs. rate of size-scaled shape evolution (BM sigma^2)
plot(picidae.subclade.data.6sp.main_variant[,"divrate.ML.constant.rate"] ~ picidae.subclade.data.6sp.main_variant[,"morphrate.geomean_scaled.phyl_pca.BM.sigsq"], xlab="Rate of Size-scaled Shape Evolution", ylab="", pch=19)
abline(a=picidae.subclade.combinations.6sp.random.model_params.median[[i]]["divrate.ML.constant.rate_vs_morphrate.geomean_scaled.phyl_pca.BM.sigsq","intercept"], b=picidae.subclade.combinations.6sp.random.model_params.median[[i]]["divrate.ML.constant.rate_vs_morphrate.geomean_scaled.phyl_pca.BM.sigsq","slope"])
text(x=0.0105,y=0.13,labels=bquote(paste("median ", R^2, " = ", .(format(round(picidae.subclade.combinations.6sp.random.model_params.median[[i]]["divrate.ML.constant.rate_vs_morphrate.geomean_scaled.phyl_pca.BM.sigsq","r_squared"], 2), nsmall=2)), sep="")))
par(mfrow=c(1,1)) # restore single-panel layout
dev.off()
## plots of the three morphological evolution rates vs. overlaps
# three panels: size (geomean), overall morphology (phyl PCA), and size-scaled shape evolution,
# each against average range overlap, with median regression line and median pseudo-R^2 annotation
# NOTE(review): "Morpholopgical" in the output filename is a typo ("Morphological"); left unchanged
# here since downstream steps may expect this exact filename
pdf(file="Morpholopgical_evolution_rates_vs_overlap.pdf", width=10.5, height=3.5, useDingbats=FALSE)
par(mfrow=c(1,3), mar=c(5,5,4,2)+0.1)
# panel 1: rate of size evolution (geomean BM sigma^2) vs. overlap
plot(picidae.subclade.data.6sp.main_variant[,"morphrate.geomean.BM.sigsq"] ~ picidae.subclade.data.6sp.main_variant[,"avg_overlaps.rangesize_scaled"], xlab="Average Range Overlap", ylab=expression("Rate " ~ (sigma^2)), pch=19, main="Size Evolution")
abline(a=picidae.subclade.combinations.6sp.random.model_params.median[[i]]["morphrate.geomean.BM.sigsq_vs_avg_overlaps.rangesize_scaled","intercept"], b=picidae.subclade.combinations.6sp.random.model_params.median[[i]]["morphrate.geomean.BM.sigsq_vs_avg_overlaps.rangesize_scaled","slope"])
text(x=8.5,y=0.006,labels=bquote(paste("median ", R^2, " = ", .(format(round(picidae.subclade.combinations.6sp.random.model_params.median[[i]]["morphrate.geomean.BM.sigsq_vs_avg_overlaps.rangesize_scaled","r_squared"], 2), nsmall=2)), sep="")))
# panel 2: rate of overall morphological evolution (phyl PCA BM sigma^2) vs. overlap
plot(picidae.subclade.data.6sp.main_variant[,"morphrate.phyl_pca.BM.sigsq"] ~ picidae.subclade.data.6sp.main_variant[,"avg_overlaps.rangesize_scaled"], xlab="Average Range Overlap", ylab="", main="Overall Morphological Evolution", pch=19)
abline(a=picidae.subclade.combinations.6sp.random.model_params.median[[i]]["morphrate.phyl_pca.BM.sigsq_vs_avg_overlaps.rangesize_scaled","intercept"], b=picidae.subclade.combinations.6sp.random.model_params.median[[i]]["morphrate.phyl_pca.BM.sigsq_vs_avg_overlaps.rangesize_scaled","slope"])
text(x=8.5,y=0.07,labels=bquote(paste("median ", R^2, " = ", .(format(round(picidae.subclade.combinations.6sp.random.model_params.median[[i]]["morphrate.phyl_pca.BM.sigsq_vs_avg_overlaps.rangesize_scaled","r_squared"], 2), nsmall=2)), sep="")))
# panel 3: rate of size-scaled shape evolution vs. overlap
plot(picidae.subclade.data.6sp.main_variant[,"morphrate.geomean_scaled.phyl_pca.BM.sigsq"] ~ picidae.subclade.data.6sp.main_variant[,"avg_overlaps.rangesize_scaled"], xlab="Average Range Overlap", ylab="", main="Size-scaled Shape Evolution", pch=19)
abline(a=picidae.subclade.combinations.6sp.random.model_params.median[[i]]["morphrate.geomean_scaled.phyl_pca.BM.sigsq_vs_avg_overlaps.rangesize_scaled","intercept"], b=picidae.subclade.combinations.6sp.random.model_params.median[[i]]["morphrate.geomean_scaled.phyl_pca.BM.sigsq_vs_avg_overlaps.rangesize_scaled","slope"])
text(x=8.5,y=0.002,labels=bquote(paste("median ", R^2, " = ", .(format(round(picidae.subclade.combinations.6sp.random.model_params.median[[i]]["morphrate.geomean_scaled.phyl_pca.BM.sigsq_vs_avg_overlaps.rangesize_scaled","r_squared"], 2), nsmall=2)), sep="")))
par(mfrow=c(1,1)) # restore single-panel layout
dev.off()
### quantifying the inclusion of subclades in the random combinations and taxa in subclades
# These are interactive summaries: each expression's value prints to the console when the script is
# sourced with echo (or run line by line). They assume i is still "all_inds" from the block above.
mean(sapply(picidae.subclade.combinations.6sp.random[[i]], function(x) length(x))) # calculate the average number of subclades included in the random combinations
mean(sapply(picidae.subclade.combinations.6sp.random[[i]], function(x) sum(sapply(x, function(y) length(picidae.morph.log.fully_reduced.subclades.treedata[[i]][[y]]$phy$tip.label))))) # calculate the average number of taxa from the morph tree included in the subclade
mean(sapply(picidae.subclade.combinations.6sp.random[[i]], function(x) sum(sapply(x, function(y) length(picidae.morph.log.fully_reduced.subclades.treedata[[i]][[y]]$phy.full_tree$tip.label))))) # calculate the average number of taxa from the full tree included in the subclade
## checking median values of variables from subclade combinations with and without the two outlier clades (234 and 235 in picidae analyses, 208 and 209 in picinae analyses)
# NOTE(review): node labels "234"/"235" are hard-coded; confirm they match the analysis being run
sapply(picidae.subclade.combinations.6sp.random[[i]], function(x) length(intersect(x, c("234","235"))) > 0) # identify subclade combinations including one of those 2 outlier subclades
apply(picidae.subclade.combinations.6sp.random.model_params[[i]][sapply(picidae.subclade.combinations.6sp.random[[i]], function(x) length(intersect(x, c("234","235"))) > 0),], MARGIN=2, FUN = median) # median values for subclades combinations with the two outlier clades
apply(picidae.subclade.combinations.6sp.random.model_params[[i]][!sapply(picidae.subclade.combinations.6sp.random[[i]], function(x) length(intersect(x, c("234","235"))) > 0),], MARGIN=2, FUN = median) # median values for subclades combinations without the two outlier clades
# (median with clades - median without clades) / (max - min over combinations with the clades):
(apply(picidae.subclade.combinations.6sp.random.model_params[[i]][sapply(picidae.subclade.combinations.6sp.random[[i]], function(x) length(intersect(x, c("234","235"))) > 0),], MARGIN=2, FUN = median) - apply(picidae.subclade.combinations.6sp.random.model_params[[i]][!sapply(picidae.subclade.combinations.6sp.random[[i]], function(x) length(intersect(x, c("234","235"))) > 0),], MARGIN=2, FUN = median)) / (apply(picidae.subclade.combinations.6sp.random.model_params[[i]][sapply(picidae.subclade.combinations.6sp.random[[i]], function(x) length(intersect(x, c("234","235"))) > 0),], MARGIN=2, FUN = max) - apply(picidae.subclade.combinations.6sp.random.model_params[[i]][sapply(picidae.subclade.combinations.6sp.random[[i]], function(x) length(intersect(x, c("234","235"))) > 0),], MARGIN=2, FUN = min)) # quantify the difference between the parameters from the model fits to subclade combinations with and without the outlier clades, as a percentage of the range of values across all subclade combinations
### output table of median AICc and delta-AICc values from fitting diversification models and morphological evolution models by subclade
# only subclades with at least 6 taxa on the morphology tree are summarized, matching the model fitting
i <- "all_inds"
## output diversification models: AICc for constant-rate; delta AICc for time-dependent, diversity-dependent
for (m in c("divrate.ML.constant.AICc","divrate.ML.time_dependent.delta_AICc", "divrate.ML.diversity_dependent.delta_AICc")) {
  # BUGFIX: added trailing "\n" so each metric prints on its own line instead of running together
  cat(m, ": ", median(picidae.subclade.data[[i]][picidae.subclade.data[[i]][,"ntaxa.on_morph_tree"]>=6,m]), "\n", sep="")
}
## generate table of morph evolution models: AICc for BM; delta AICc for OU, EB, trend
morph_vars <- c("geomean", "phyl_pca.PC1", "geomean_scaled.phyl_pca.PC1")
models <- c("OU", "EB", "trend")
picidae.morph_models.delta_AICc <- matrix(nrow=length(morph_vars), ncol=length(models), dimnames=list(morph_vars, models))
for (m in morph_vars) {
  for (n in models) {
    # BUGFIX: this previously assigned into "picidae.morph_models.delta_AICc.table", an object that
    # was never created (the matrix above is "picidae.morph_models.delta_AICc"), causing an error
    picidae.morph_models.delta_AICc[m,n] <- median(picidae.subclade.data[[i]][picidae.subclade.data[[i]][,"ntaxa.on_morph_tree"]>=6,paste("morphrate.", m, ".", n, ".delta_AICc", sep="")])
  }
}
rm(i,m,n)
### output table of median parameter values (slope, pseudo-R^2, and p-value) from model fits to subclade combinations
i <- "all_inds"
# models to report, formatted "response_vs_predictor"; these vectors are reused by later blocks
subclade_models_to_output <- c(
  "total_div_vs_crown_age",
  "divrate.ML.constant.rate_vs_morphrate.geomean.BM.sigsq",
  "divrate.ML.constant.rate_vs_morphrate.phyl_pca.BM.sigsq",
  "divrate.ML.constant.rate_vs_morphrate.geomean_scaled.phyl_pca.BM.sigsq",
  "divrate.ML.constant.rate_vs_avg_overlaps.rangesize_scaled",
  "divrate.ML.constant.rate_vs_avg_overlaps.euclidean_scaled.geomean",
  "divrate.ML.constant.rate_vs_avg_overlaps.euclidean_scaled.phyl_pca",
  "divrate.ML.constant.rate_vs_avg_overlaps.euclidean_scaled.geomean_scaled.phyl_pca",
  "morphrate.geomean.BM.sigsq_vs_avg_overlaps.rangesize_scaled",
  "morphrate.geomean.BM.sigsq_vs_avg_overlaps.euclidean_scaled.geomean",
  "morphrate.phyl_pca.BM.sigsq_vs_avg_overlaps.rangesize_scaled",
  "morphrate.phyl_pca.BM.sigsq_vs_avg_overlaps.euclidean_scaled.phyl_pca",
  "morphrate.geomean_scaled.phyl_pca.BM.sigsq_vs_avg_overlaps.rangesize_scaled",
  "morphrate.geomean_scaled.phyl_pca.BM.sigsq_vs_avg_overlaps.euclidean_scaled.geomean_scaled.phyl_pca",
  "gamma_vs_avg_overlaps.rangesize_scaled",
  "gamma_vs_avg_overlaps.euclidean_scaled.geomean",
  "gamma_vs_avg_overlaps.euclidean_scaled.phyl_pca",
  "gamma_vs_avg_overlaps.euclidean_scaled.geomean_scaled.phyl_pca",
  "divrate.ML.time_dependent.delta_AICc_vs_avg_overlaps.rangesize_scaled",
  "divrate.ML.time_dependent.lambda1_vs_avg_overlaps.rangesize_scaled",
  "divrate.ML.diversity_dependent.delta_AICc_vs_avg_overlaps.rangesize_scaled",
  "morphrate.geomean.EB.delta_AICc_vs_avg_overlaps.rangesize_scaled",
  "morphrate.geomean.EB.alpha_vs_avg_overlaps.rangesize_scaled",
  "morphrate.phyl_pca.PC1.EB.delta_AICc_vs_avg_overlaps.rangesize_scaled",
  "morphrate.phyl_pca.PC1.EB.alpha_vs_avg_overlaps.rangesize_scaled",
  "morphrate.geomean_scaled.phyl_pca.PC1.EB.delta_AICc_vs_avg_overlaps.rangesize_scaled",
  "morphrate.geomean_scaled.phyl_pca.PC1.EB.alpha_vs_avg_overlaps.rangesize_scaled"
)
params_to_output <- c("r_squared", "p_value", "slope")
## pull the median value of each requested parameter for each requested model into a table
picidae.subclade_models.params.table <- matrix(nrow=length(subclade_models_to_output), ncol=length(params_to_output), dimnames=list(subclade_models_to_output, params_to_output))
for (model_name in subclade_models_to_output) {
  for (param_name in params_to_output) {
    picidae.subclade_models.params.table[model_name, param_name] <- picidae.subclade.combinations.6sp.random.model_params.median[[i]][model_name, param_name]
  }
}
rm(model_name, param_name)
write.csv(picidae.subclade_models.params.table, file="picidae.subclade_models.params.median.csv") # output table to file
## generate the same table of median parameter values, computed without the outlier subclades
picidae.subclade_models.params.no_outliers.table <- matrix(nrow=length(subclade_models_to_output), ncol=length(params_to_output), dimnames=list(subclade_models_to_output, params_to_output))
for (model_name in subclade_models_to_output) {
  for (param_name in params_to_output) {
    picidae.subclade_models.params.no_outliers.table[model_name, param_name] <- picidae.subclade.combinations.6sp.random.model_params.no_outliers.median[[i]][model_name, param_name]
  }
}
rm(model_name, param_name)
write.csv(picidae.subclade_models.params.no_outliers.table, file="picidae.subclade_models.params.no_outliers.median.csv") # output table to file
### output boxplots of slope, R^2, and p_value for the most important models
## generate vectors of names of models to output
# each vector groups the models for one comparison type; element order matters --
# it sets the plotting order of the boxes and must match the hard-coded axis labels
# passed to boxplot() below (e.g. "Overall", "Size", "Shape")
subclade_models_to_output.divrates_morphrates <-c("divrate.ML.constant.rate_vs_morphrate.phyl_pca.BM.sigsq", "divrate.ML.constant.rate_vs_morphrate.geomean.BM.sigsq", "divrate.ML.constant.rate_vs_morphrate.geomean_scaled.phyl_pca.BM.sigsq")
subclade_models_to_output.divrates_overlaps <- c("divrate.ML.constant.rate_vs_avg_overlaps.rangesize_scaled", "divrate.ML.constant.rate_vs_avg_overlaps.euclidean_scaled.phyl_pca", "divrate.ML.constant.rate_vs_avg_overlaps.euclidean_scaled.geomean", "divrate.ML.constant.rate_vs_avg_overlaps.euclidean_scaled.geomean_scaled.phyl_pca")
subclade_models_to_output.morphrates_overlaps <- c("morphrate.phyl_pca.BM.sigsq_vs_avg_overlaps.rangesize_scaled", "morphrate.geomean.BM.sigsq_vs_avg_overlaps.rangesize_scaled", "morphrate.geomean_scaled.phyl_pca.BM.sigsq_vs_avg_overlaps.rangesize_scaled")
subclade_models_to_output.divmodels_overlaps <- c("divrate.ML.time_dependent.delta_AICc_vs_avg_overlaps.rangesize_scaled", "divrate.ML.diversity_dependent.delta_AICc_vs_avg_overlaps.rangesize_scaled")
subclade_models_to_output.morphmodels_overlaps <- c("morphrate.geomean.EB.delta_AICc_vs_avg_overlaps.rangesize_scaled", "morphrate.phyl_pca.PC1.EB.delta_AICc_vs_avg_overlaps.rangesize_scaled", "morphrate.geomean_scaled.phyl_pca.PC1.EB.delta_AICc_vs_avg_overlaps.rangesize_scaled")
## output boxplots for divrates vs. morphrates
# one PDF per summary statistic (r_squared, p_value, slope), each with one box per model
for (m in params_to_output) {
  pdf(file=paste("picidae_subclade_combinations.6sp_random.model_params.boxplots.divrates_morphrates.", m, ".pdf", sep=""), height=6, width=4)
  data.tmp <- numeric()
  names.tmp <- character()
  # loop over models to output, storing data and the name of the data
  for (n in subclade_models_to_output.divrates_morphrates) {
    # FIX: column names are looked up in the *random*-combinations results matrix (the
    # matrix the data are drawn from); the original grepped the sequential matrix here,
    # a copy-paste slip that only worked because both matrices share column names
    data.tmp <- c(data.tmp, picidae.subclade.combinations.6sp.random.model_params[[i]][,grep(paste(n, m, sep="."), colnames(picidae.subclade.combinations.6sp.random.model_params[[i]]), value=TRUE)]) # get data for current model and parameter
    names.tmp <- c(names.tmp, rep(n, times=nrow(picidae.subclade.combinations.6sp.random.model_params[[i]])))
  }
  names.tmp <- factor(x=names.tmp, levels=subclade_models_to_output.divrates_morphrates) # set the names of the data to a factor, so that they plot in the correct order
  boxplot(data.tmp ~ names.tmp, names=c("Overall", "Size", "Shape"), ylim=switch(m, r_squared = c(0,1), p_value = c(0,1), slope = range(data.tmp))) # create boxplot
  # add horizontal lines to boxplots as needed (alpha = 0.05 for p-values, zero line for slopes)
  switch(m,
         r_squared = NULL,
         p_value = abline(h=0.05, lty=2),
         slope = abline(h=0, lty=2)
  )
  dev.off()
}
rm(m,n, data.tmp, names.tmp)
## output boxplots for divrates vs. overlaps
# one PDF per summary statistic (r_squared, p_value, slope), each with one box per model
for (m in params_to_output) {
  pdf(file=paste("picidae_subclade_combinations.6sp_random.model_params.boxplots.divrates_overlaps.", m, ".pdf", sep=""), height=6, width=4)
  data.tmp <- numeric()
  names.tmp <- character()
  # loop over models to output, storing data and the name of the data
  for (n in subclade_models_to_output.divrates_overlaps) {
    # FIX: grep the colnames of the random-combinations matrix (the matrix being indexed);
    # the original grepped the sequential matrix -- a copy-paste inconsistency
    data.tmp <- c(data.tmp, picidae.subclade.combinations.6sp.random.model_params[[i]][,grep(paste(n, m, sep="."), colnames(picidae.subclade.combinations.6sp.random.model_params[[i]]), value=TRUE)])
    names.tmp <- c(names.tmp, rep(n, times=nrow(picidae.subclade.combinations.6sp.random.model_params[[i]])))
  }
  names.tmp <- factor(x=names.tmp, levels=subclade_models_to_output.divrates_overlaps) # factor levels fix the plotting order
  boxplot(data.tmp ~ names.tmp, names=c("Unscaled", "Overall", "Size", "Shape"), ylim=switch(m, r_squared = c(0,1), p_value = c(0,1), slope = range(data.tmp))) # create boxplot
  # add horizontal lines to boxplots as needed (alpha = 0.05 for p-values, zero line for slopes)
  switch(m,
         r_squared = NULL,
         p_value = abline(h=0.05, lty=2),
         slope = abline(h=0, lty=2)
  )
  dev.off()
}
rm(m,n, data.tmp, names.tmp)
## output boxplots for morphrates vs. overlaps
# one PDF per summary statistic (r_squared, p_value, slope), each with one box per model
for (m in params_to_output) {
  pdf(file=paste("picidae_subclade_combinations.6sp_random.model_params.boxplots.morphrates_overlaps.", m, ".pdf", sep=""), height=6, width=4)
  data.tmp <- numeric()
  names.tmp <- character()
  # loop over models to output, storing data and the name of the data
  for (n in subclade_models_to_output.morphrates_overlaps) {
    # FIX: grep the colnames of the random-combinations matrix (the matrix being indexed);
    # the original grepped the sequential matrix -- a copy-paste inconsistency
    data.tmp <- c(data.tmp, picidae.subclade.combinations.6sp.random.model_params[[i]][,grep(paste(n, m, sep="."), colnames(picidae.subclade.combinations.6sp.random.model_params[[i]]), value=TRUE)])
    names.tmp <- c(names.tmp, rep(n, times=nrow(picidae.subclade.combinations.6sp.random.model_params[[i]])))
  }
  names.tmp <- factor(x=names.tmp, levels=subclade_models_to_output.morphrates_overlaps) # factor levels fix the plotting order
  boxplot(data.tmp ~ names.tmp, names=c("Overall", "Size", "Shape"), ylim=switch(m, r_squared = c(0,1), p_value = c(0,1), slope = range(data.tmp))) # create boxplot
  # add horizontal lines to boxplots as needed (alpha = 0.05 for p-values, zero line for slopes)
  switch(m,
         r_squared = NULL,
         p_value = abline(h=0.05, lty=2),
         slope = abline(h=0, lty=2)
  )
  dev.off()
}
rm(m,n, data.tmp, names.tmp)
###### run the analyses for Picinae ######
### extract subclades from full tree and morph trees
## extract subclades from full trees
picinae.RAxML.all.BEAST_calibrated.with_proxies.subclades <- extractSubclades.all(picinae.RAxML.all.BEAST_calibrated.with_proxies)
# extract subclades from morph trees
# (one set of subclades per individual-inclusion variant of the morphology tree)
picinae.morph.log.fully_reduced.treedata.subclades <- list()
for (i in c("all_inds", "complete_ind_only")) {
picinae.morph.log.fully_reduced.treedata.subclades[[i]] <- extractSubclades.all(picinae.morph.log.fully_reduced.treedata[[i]]$phy)
}
rm(i)
### to complete analyses for Picinae, use same code as above, replacing picidae with picinae for variable and file names
|
/Automated_subclade_analyses.R
|
no_license
|
mjdufort/dissertation_code
|
R
| false
| false
| 103,438
|
r
|
## This is is one of several files containing scripts and functions used in processing and analysis of data for Matthew Dufort's Ph.D. dissertation at the University of Minnesota, titled "Coexistence, Ecomorphology, and Diversification in the Avian Family Picidae (Woodpeckers and Allies)."
## this file contains scripts and functions to calculate variables at the subclade level, and to test for relationships between subclade variables
### load packages and data
## load necessary packages
library(ape)
library(geiger)
library(phytools)
library(nlme)
library(laser)
library(DDD)
load(file="Picidae_data_for_distribution_morphology_evolution.RData") # load data needed from morphology and distribution analyses (from Morphology_data_processing.R)
load(file="Picidae_BAMM_data_for_automated_subclade_analyses.RData") # load data objects needed from BAMM analyses (from BAMM_data_prep_and_processing.R)
### generate necessary functions for automated subclade analyses
## the function extractSubclades.all() extracts all subclades with at least min.taxa and at most max.taxa tips from the tree
# as input, it takes phy (a phylogenetic tree of class phylo), min.taxa (the minimum number of taxa for a subclade to be included), and max.taxa (the maximum number of taxa for a subclade to be include)
# it returns a list of phy objects, one for each subclade, with the list elements named with the node numbers from the original tree
extractSubclades.all <- function(phy, min.taxa=4, max.taxa=Inf) {
  require(ape)
  # internal nodes of a phylo object are numbered (ntips + 1) through (ntips + Nnode)
  ntaxa <- length(phy$tip.label)
  internal.nodes <- (ntaxa + 1):(ntaxa + phy$Nnode)
  # pull out the clade descending from every internal node, keyed by node number
  clades.all <- lapply(internal.nodes, function(node) extract.clade(phy, node=node))
  names(clades.all) <- as.character(internal.nodes)
  # keep only clades whose tip count lies within [min.taxa, max.taxa]
  clade.sizes <- vapply(clades.all, function(cl) length(cl$tip.label), integer(1))
  clades.all[clade.sizes >= min.taxa & clade.sizes <= max.taxa]
}
## the function getSubclades.withData() extracts subclades from a tree, including only subclades that have sufficient taxa with data in a vector or matrix of trait data
# as input, it takes phylist (a list of subclades, each a phylo object), taxondata (the vector or matrix of trait data, with names or rownames corresponding to taxon names), inc.data (boolean to return a treedata object for each subclade; if FALSE, returns a phylo object for each subclade), and min.taxa (the minimum number of taxa for a subclade to be included)
# it returns a list of subclades, either as phylo objects or treedata objects, with the list elements named by the node numbers in the original tree
getSubclades.withData <- function(phylist, taxondata, inc.data=TRUE, min.taxa=4, quiet=TRUE) {
  require(geiger)
  # taxon names come from rownames (matrix data) or names (vector data)
  taxon.names <- if (is.matrix(taxondata)) rownames(taxondata) else if (is.vector(taxondata)) names(taxondata)
  subclades.new <- list()
  kept <- 0
  for (idx in seq_along(phylist)) {
    tips.with.data <- phylist[[idx]]$tip.label %in% taxon.names
    if (!quiet) {
      print(idx)
      print(tips.with.data)
    }
    # retain the subclade only if enough of its tips have trait data
    if (sum(tips.with.data) < min.taxa) next
    kept <- kept + 1
    subclades.new[[kept]] <- if (inc.data) {
      # wrap tree + data together; treedata() prunes the data to the subclade's tips
      treedata(phy = phylist[[idx]], data=taxondata, warnings=FALSE)
    } else {
      phylist[[idx]]
    }
    names(subclades.new)[kept] <- names(phylist)[idx]
  }
  return(subclades.new)
}
## the function areOverlappingSubclades() tests a list of two or more subclades (or trees) to determine if there is any overlap in the tips included
# as input, it takes phylist (a list of subclades, each a phylo object), and getoverlaps (boolean to return overlapping taxa)
# it returns TRUE if any of the subclades in phylist share taxa, and FALSE if there are no shared taxa among them; if getoverlaps=TRUE, it returns a list containing the test value (TRUE or FALSE), and a vector of the taxa shared among subclades
areOverlappingSubclades <- function(phylist, getoverlaps=FALSE) {
  # Pool tip labels across all subclades. Treedata objects keep their tree in $phy
  # (detected via the $data slot of the first element); plain phylo objects carry
  # tip.label directly.
  extract.tips <- if (is.null(phylist[[1]]$data)) {
    function(x) x$tip.label
  } else {
    function(x) x$phy$tip.label
  }
  taxnames <- unlist(lapply(phylist, extract.tips))
  # any duplicated tip label means two (or more) subclades share a taxon
  dup.mask <- duplicated(taxnames)
  if (!any(dup.mask)) {
    FALSE
  } else if (getoverlaps) {
    list(test=TRUE, overlaps=taxnames[dup.mask])
  } else {
    TRUE
  }
}
## the function subcladeCombinations.all() determines all sets of reciprocally monophyletic subclades meeting a set of criteria, and returns them as a list of lists of phylo objects
# this sped-up version generates a pairwise matrix of overlapping clades, then checks if any of the subclades in the combination are TRUE in the matrix (and therefore takes advantage of speed-ups with vectorization)
# for large trees, there is a VERY large number of possible combinations, and using this function is not advisable
# as input, it takes phylist (a list of subclades, each a phylo object), min.clades (the minimum number of clades to include in a combination), and max.clades (the maximum number of clades to include in a combination)
# it returns a list of subclade combinations, each a list of phylo objects
subcladeCombinations.all <- function(phylist, min.clades=3, max.clades=Inf) {
# cap max.clades at the number of available subclades
if (max.clades > length(phylist)) max.clades <- length(phylist)
# generate matrix of pairwise subclade overlaps
# (precomputing overlap once per pair lets each candidate combination be checked by
# vectorized matrix lookups rather than repeated tip-label comparisons)
subclade.overlap.pairwise <- matrix(nrow=length(phylist), ncol=length(phylist))
for (i in 1:nrow(subclade.overlap.pairwise)) {
for (j in 1:ncol(subclade.overlap.pairwise)) {
subclade.overlap.pairwise[i,j] <- areOverlappingSubclades(list(phylist[[i]], phylist[[j]]))
}
}
subclade.names <- names(phylist) # get the subclade names
combinations <- list() # initialize list to store subclade combinations
complete <- FALSE # boolean to end search
k <- 0
for (nclades in (min.clades:max.clades)) { # loop over number of subclades to include in set
if (!complete) {
length.last <- length(combinations)
combinations.to.test <- combn(x=(1:length(subclade.names)), m=nclades, simplify=TRUE) # generate a matrix of combinations to test
print(paste("Testing ", ncol(combinations.to.test), " combinations for ", nclades, " clades.", sep=""))
# test each proposed combination for reciprocal monophyly; if they are reciprocally monophyletic, add to list
for (i in 1:ncol(combinations.to.test)) {
if ((i %% 10000) == 0) print(paste("Testing combination ",i, sep="")) # progress report every 10,000 combinations
# enumerate all clade pairs within the candidate; the combination is valid only if no
# pair is flagged TRUE in the precomputed pairwise-overlap matrix
pairwise.combinations.temp <- combn(x=combinations.to.test[,i], m=2, simplify=TRUE)
if (!any(subclade.overlap.pairwise[cbind(pairwise.combinations.temp[1,],pairwise.combinations.temp[2,])])) {
k <- k+1
combinations[[k]] <- subclade.names[combinations.to.test[,i]]
}
}
# test if any combinations were added for this number of subclades, and terminate if none were
# (if no set of nclades subclades is non-overlapping, no larger set can be either)
if (length(combinations)==length.last) {
complete <- TRUE
print(paste("No successful combinations for ", nclades, " clades; stopping search.", sep=""))
}
}
}
return(combinations)
}
## the function subcladeCombinations.random() generates a random sample of combinations of reciprocally monophyletic subclades meeting a set of criteria
# this samples by selecting a subclade at random, then selecting another from all the possible subclades that don't overlap the first, and continuing doing that until there aren't any more possibilities; this approach probably leads to the same subclades being selected repeatedly, as certain isolated subclades are almost always going to be suitable
# as input, it takes phylist (a list of subclades, each a phylo object), ncombs (the maximum number of combinations to return), min.clades (the minimum number of subclades to include in a combination), max.clades (the maximum number of subclades to include in a combination), min.taxa (the minimum number of taxa for a subclade to be considered for inclusion), max.fails (the maximum number of failures before halting the search), and report (boolean to output status updates to console)
# it returns a list of subclade combinations, each a list of phylo objects
# NOTE(review): the `report` argument is accepted but never used in the body -- confirm whether
# the status cat()/print() calls were meant to be conditional on it
subcladeCombinations.random <- function(phylist, ncombs=1000, min.clades=5, max.clades=Inf, min.taxa=4, max.fails=1e6, report=TRUE) {
# check if the objects are phylo objects or treedata objects; also drop subclades with fewer taxa than the minimum
# NOTE(review): inherits(phylist[[i]], "phylo") would be more robust than class() == comparison
for (i in names(phylist)) {
if (class(phylist[[i]]) != "phylo") {
if (class(phylist[[i]]$phy) == "phylo") {
phylist[[i]] <- phylist[[i]]$phy
} else {
cat("\nError: item ", i, " in phylist is not a phylo or treedata object.\n", sep="")
return()
}
}
if (length(phylist[[i]]$tip.label) < min.taxa) phylist[[i]] <- NULL # drop subclades with too few taxa
}
if (max.clades > length(phylist)) max.clades <- length(phylist)
subclade.names <- names(phylist) # extract the subclade names
# generate matrix of pairwise subclade overlaps (precomputed so candidate growth below is a lookup)
subclade.overlap.pairwise <- matrix(nrow=length(phylist), ncol=length(phylist), dimnames=list(subclade.names, subclade.names))
for (i in 1:nrow(subclade.overlap.pairwise)) {
for (j in 1:ncol(subclade.overlap.pairwise)) {
subclade.overlap.pairwise[i,j] <- areOverlappingSubclades(list(phylist[[i]], phylist[[j]]))
}
}
combinations <- list() # the combinations that will be returned
all.done <- FALSE
z <- 1
fails <- 0
# outer loop: keep drawing random combinations until ncombs are found or max.fails reached
while ((length(combinations) < ncombs) & (!all.done)) {
combination.done <- FALSE
combination.temp <- sample(x=subclade.names, size=1) # pick the first subclade in the possible combination
q <- 1
# inner loop: grow the combination by adding random non-overlapping subclades until none remain
while ((length(combination.temp) < max.clades) & (!combination.done)) {
subclades.possible.additions <- colnames(subclade.overlap.pairwise)[which(rowSums(as.matrix(subclade.overlap.pairwise[,combination.temp]))==0)] # this finds all subclades that don't overlap with any of the subclades already in the combination
if (length(subclades.possible.additions) == 0) {
combination.done <- TRUE
} else {
q <- q + 1
combination.temp[q] <- sample(x=subclades.possible.additions, size=1)
}
}
# sort so that identical sets of subclades compare equal regardless of draw order
combination.temp <- sort(combination.temp)
# accept only if large enough and not already found (duplicate check via identical())
if ((length(combination.temp) >= min.clades) & (length(which(sapply(combinations, identical, combination.temp, simplify=TRUE)==TRUE)) < 1)) {
combinations[[z]] <- combination.temp
cat("Found combination ", z, "\n", sep="")
z <- z + 1
} else {
fails <- fails+1
}
if (fails == max.fails) {
all.done <- TRUE
print(paste("Reached maximum failures. Returning", length(combinations), "combinations"))
}
}
return(combinations)
}
## the function subcladeCombinations.sequential() determines a set of combinations of reciprocally monophyletic subclades by working its way down the tree; it slices the tree at each node and determines all valid subclades below that slice
# as input, it takes phy (a tree as a phylo object), min.taxa (the minimum number of taxa for a subclade to be included), min.clades (the minimum number of subclades to include in a combination), and max.clades (the maximum number of subclades to include in a combination)
# it returns a list of subclade combinations, each a list of phylo objects
subcladeCombinations.sequential <- function(phy, min.taxa=4, min.clades=5, max.clades=Inf) {
require(ape)
combinations <- list()
# node depths measured from the root (root = 0), sorted shallowest first, so each
# iteration slices the tree at successively deeper branching points
phy.nodedepth.sorted <- sort((max(branching.times(phy)) - branching.times(phy)), decreasing=FALSE) # generate a vector of node depths
l <- 0
for (i in 1:length(phy.nodedepth.sorted)) {
# an edge is "cut" by the current slice when its parent node is at or above the slice
# depth and its child node is below it; keep only edges leading to internal nodes
candidate.nodes <- phy$edge[,2][(node.depth.edgelength(phy)[phy$edge[,1]] <= phy.nodedepth.sorted[i]) & (node.depth.edgelength(phy)[phy$edge[,2]] > phy.nodedepth.sorted[i]) & (phy$edge[,2] > length(phy$tip.label))] # find all the descendant nodes from edges cut at current step in phy.nodedepth.sorted
# identify nodes just below the branching point I'm examining
# (redundant with the internal-node filter in the subset above, but harmless)
candidate.nodes <- candidate.nodes[candidate.nodes > length(phy$tip.label)]
# extract combination (if possible) from list of descendant subclades
if (length(candidate.nodes) >= min.clades) {
candidate.combination <- character()
# keep only the candidate subclades with at least min.taxa tips
for (j in 1:length(candidate.nodes)) {
if (length(extract.clade(phy, node=candidate.nodes[j], root.edge=0)$tip.label) >= min.taxa) {
candidate.combination <- c(candidate.combination, candidate.nodes[j])
}
}
if ((length(candidate.combination) >= min.clades) & (length(candidate.combination) <= max.clades)) {
l <- l + 1
combinations[[l]] <- candidate.combination
}
}
}
# different slice depths can yield the same set of subclades; keep each set once
combinations <- combinations[!duplicated(combinations)]
return(combinations)
}
## this function determines all members of each subclade, including those not in the tree; it uses a list of taxon proxies, and checks these proxies against the actual taxa in the subclade; it has several options for returning these taxa
# as input, it takes phylist (a list of subclades, each a phylo object or treedata object), taxon.proxies (a list containing a vector of proxies for each taxon), and to_return (a switching variable, which allows the user to select whether to return the missing taxa ("missing"), all taxa ("full"), or a list of the included and missing taxa ("split"))
# it returns a list containing vectors with the set of taxa specified by to_return
subclades.fulltaxlist <- function(phylist, taxon.proxies, to_return="full") {
  lapply(phylist, function(clade) {
    # A taxon is attributable to this subclade when every one of its proxy taxa is
    # present among the subclade's tips; tip labels live in $tip.label for phylo
    # objects and in $phy$tip.label for treedata objects, so both are checked.
    attributable <- vapply(
      taxon.proxies,
      function(proxies) all(proxies %in% clade$tip.label) | all(proxies %in% clade$phy$tip.label),
      logical(1)
    )
    missing.taxa <- names(taxon.proxies)[attributable]
    # return the set of taxa requested by to_return
    switch(to_return,
           missing = missing.taxa,
           full = c(clade$tip.label, missing.taxa),
           split = list(included=clade$tip.label, missing=missing.taxa))
  })
}
# the function subclade.fulltaxlist() is the same as subclades.fulltaxlist(), but it acts only on a single subclade; this allows looping or applying over a list of treedata objects and adding the full membership to the treedata object
# as input, it takes phy (a subclades, either a phylo object or treedata object), taxon.proxies (a list containing a vector of proxies for each taxon), and to_return (a switching variable, which allows the user to select whether to return the missing taxa ("missing"), all taxa ("full"), or a list of the included and missing taxa ("split"))
# it returns a vector with the set of taxa specified by to_return
subclade.fulltaxlist <- function(phy, taxon.proxies, to_return="full") {
  # a taxon belongs to this subclade when all of its proxy taxa are among the tips
  has.all.proxies <- vapply(
    taxon.proxies,
    function(proxies) all(proxies %in% phy$tip.label),
    logical(1)
  )
  missing.taxa <- names(taxon.proxies)[has.all.proxies]
  # return the set of taxa requested by to_return
  switch(to_return,
         missing = missing.taxa,
         full = c(phy$tip.label, missing.taxa),
         split = list(included=phy$tip.label, missing=missing.taxa))
}
## the function getTreedata.subclades() extracts the backbone tree with subclades, and builds a treedata object including the subclade data
# as input, it takes phy (the full tree as a phylo object), subclade.combination (a vector containing the node numbers of the subclades), and subclade.data (the data for the subclades, as a matrix with node numbers as the rownames)
# it returns a treedata object, where the returned tree has only the subclades as tips, with the backbone of those nodes retained
# Collapses each selected subclade of phy to a single tip (named with the subclade's node
# number), preserving the backbone topology and each subclade's stem edge length, then
# pairs the resulting backbone tree with the subclade-level data as a treedata object.
getTreedata.subclades <- function(phy, subclade.combination, subclade.data) {
subclade.data.selected <- subset(subclade.data, row.names(subclade.data) %in% subclade.combination)
subclades.temp <- list()
subclades.edge.length.temp <- numeric()
# get the stem edge length for each subclade, and rename one tip in each subclade with the subclade name
for (i in 1:length(subclade.combination)) {
subclades.temp[[i]] <- extract.clade(phy, node=as.numeric(subclade.combination[i]))
subclades.edge.length.temp[i] <- phy$edge.length[which(phy$edge[,2]==as.numeric(subclade.combination[i]))] # find the stem edge length for the subclade
phy$tip.label[phy$tip.label==subclades.temp[[i]]$tip.label[1]] <- subclade.combination[i] # rename one tip with the name of the subclade
}
# loop over subclades, dropping all tips but the one set to the subclade name above; this is done separately, as dropping tips could change the node numbers and make the step above not work properly
for (i in 1:length(subclade.combination)) {
phy <- drop.tip(phy, tip=subclades.temp[[i]]$tip.label[-1]) # drop the remaining tips from the subclade
phy$edge.length[which(phy$edge[,2]==which(phy$tip.label==subclade.combination[i]))] <- subclades.edge.length.temp[i] # finds the edge that has the subclade name as its descendant node, and changes the length
}
phy.treedata <- treedata(phy, data=subclade.data.selected, warnings=FALSE) # generate treedata object with backbone tree and subclade data
return(phy.treedata)
}
### generate full taxon lists (to match taxa not on tree with subclades)
## the function read.taxon.proxy.list() reads a file of taxon proxies and formats them for later use
# as input, it takes filename (the location of the file containing the taxon proxies, with each taxon name followed by all the proxy taxa that must be present for the focal taxon to be included)
# it returns a list of character vectors, where each list element is named with the focal taxon name, and the vector contains all the proxy taxa that must be present for the focal taxon to be included
read.taxon.proxy.list <- function(filename) {
  # each line of the file is "focal_taxon,proxy1,proxy2,..."; read lines and split into fields
  fields <- strsplit(scan(file=filename, what="character", sep="\n"), split=",")
  # the first field names the focal taxon; the remaining fields are its required proxies
  proxy.list <- lapply(fields, function(x) x[-1])
  names(proxy.list) <- vapply(fields, function(x) x[1], character(1))
  # keep only taxa that actually have proxies listed
  proxy.list[lengths(proxy.list) > 0]
}
## read in files of taxon proxies
# build the nested proxy-list structure in a single literal: the full tree has one proxy
# file, while the morphology tree has one file per individual-inclusion variant
picidae.RAxML.taxon.subclade.proxies <- list(
  full_tree = read.taxon.proxy.list(filename="picidae_taxon_proxies_for_automated_subclade_analyses_full_tree.csv"),
  morph_tree = list(
    all_inds = read.taxon.proxy.list(filename="picidae_taxon_proxies_for_automated_subclade_analyses_morph_tree_all_inds.csv"),
    complete_ind_only = read.taxon.proxy.list(filename="picidae_taxon_proxies_for_automated_subclade_analyses_morph_tree_complete_ind_only.csv")
  )
)
### extract subclades from full tree and morph trees
## extract subclades from full trees
picidae.RAxML.all.BEAST_calibrated.with_proxies.subclades <- extractSubclades.all(picidae.RAxML.all.BEAST_calibrated.with_proxies)
# extract subclades from morph trees
# (one set of subclades per individual-inclusion variant of the morphology tree)
picidae.morph.log.fully_reduced.treedata.subclades <- list()
for (i in c("all_inds", "complete_ind_only")) {
picidae.morph.log.fully_reduced.treedata.subclades[[i]] <- extractSubclades.all(picidae.morph.log.fully_reduced.treedata[[i]]$phy)
}
rm(i)
### generate treedata-like objects for each subclade, for the data variants I'm using
## combine all the data into a treedata-like object that has a bunch of different sets of data for each subclade, for picidae
# each element [[i]][[j]] (i = individual-inclusion variant, j = subclade node) collects:
# the subclade tree, its missing taxa (and counts), the corresponding clade in the full
# tree, and the morphological and range-overlap data restricted to the subclade's taxa
picidae.morph.log.fully_reduced.subclades.treedata <- list()
for (i in names(picidae.morph.log.fully_reduced.treedata.subclades)) { # loop over individual inclusion
for (j in names(picidae.morph.log.fully_reduced.treedata.subclades[[i]])) { # loop over subclades
picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]] <- list()
picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["phy"]] <- picidae.morph.log.fully_reduced.treedata.subclades[[i]][[j]]
# taxa attributable to this subclade via proxies but absent from the morph tree
picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["missing_taxa"]] <- subclade.fulltaxlist(phy=picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["phy"]], taxon.proxies=picidae.RAxML.taxon.subclade.proxies[["morph_tree"]][[i]], to_return="missing")
picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["missing_count"]] <- length(picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["missing_taxa"]])
# locate the corresponding clade in the full (molecular) tree via the MRCA of the subclade's tips
picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["node.full_tree"]] <- getMRCA(phy=picidae.RAxML.all.BEAST_calibrated.with_proxies, tip=picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["phy"]]$tip.label)
picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["phy.full_tree"]] <- extract.clade(phy=picidae.RAxML.all.BEAST_calibrated.with_proxies, node=picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["node.full_tree"]])
picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["missing_taxa.full_tree"]] <- subclade.fulltaxlist(phy=picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["phy.full_tree"]], taxon.proxies=picidae.RAxML.taxon.subclade.proxies[["full_tree"]], to_return="missing")
picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["missing_count.full_tree"]] <- length(picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["missing_taxa.full_tree"]])
# morphological data restricted to the subclade's tips
picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["geomean"]] <- picidae.morph.log.fully_reduced.geomean[[i]][[j]][picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["phy"]]$tip.label] # pull in the geomean data
picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["phyl_pca"]] <- picidae.morph.log.fully_reduced.phyl_pca[[i]][[j]]$pca$S[picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["phy"]]$tip.label,] # pull in the unscaled PCA-rotated data
picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["geomean_scaled.phyl_pca"]] <- picidae.morph.log.fully_reduced.geomean_scaled.phyl_pca[[i]][[j]]$pca$S[picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["phy"]]$tip.label,] # pull in the geomean-scaled PCA-rotated data
# range-overlap data for the subclade's tips plus its proxy-attributed missing taxa
picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["overlaps.scaled"]] <- picidae.summed_overlaps.shp.BirdLife.UnaryUnion.buffer0[["mytax"]][["migratory"]][["overlaps.scaled"]][c(picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["phy"]]$tip.label, picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["missing_taxa"]])]
# get overlaps scaled by focal taxon range and similarity in geomean, unscaled PCA, and geomean-scaled PCA
picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["overlaps.euclidean_scaled"]] <- list()
for (q in c("geomean", "phyl_pca", "geomean_scaled.phyl_pca")) {
picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["overlaps.euclidean_scaled"]][[q]] <- picidae.summed_overlaps.shp.BirdLife.UnaryUnion.buffer0.euclidean_scaled[["migratory"]][[q]][[i]][[j]][["inc_no_phylo"]][c(picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["phy"]]$tip.label, picidae.morph.log.fully_reduced.subclades.treedata[[i]][[j]][["missing_taxa"]])]
}
}
}
rm(i,j,q)
### fit models of diversification and morphological evolution to the entire data set
## fit diversification models to full tree
# three DDD-package models are fit by maximum likelihood: constant-rate birth-death,
# time-dependent birth-death, and diversity-dependent; missnumspec accounts for species
# not sampled in the tree (237 appears to be the assumed total species richness -- TODO confirm)
picidae.divrate.models <- list()
picidae.divrate.models[["full_tree"]] <- list()
picidae.divrate.models[["full_tree"]][["constant"]] <- try(bd_ML(branching.times(picidae.RAxML.all.BEAST_calibrated.with_proxies), missnumspec=237-length(picidae.RAxML.all.BEAST_calibrated.with_proxies$tip.label), tdmodel=0))
picidae.divrate.models[["full_tree"]][["time_dependent"]] <- try(bd_ML(branching.times(picidae.RAxML.all.BEAST_calibrated.with_proxies), missnumspec=237-length(picidae.RAxML.all.BEAST_calibrated.with_proxies$tip.label), tdmodel=1, idparsopt=1:3, initparsopt=c(0.1, 0.05, 0.1)))
picidae.divrate.models[["full_tree"]][["diversity_dependent"]] <- try(dd_ML(branching.times(picidae.RAxML.all.BEAST_calibrated.with_proxies), missnumspec=237-length(picidae.RAxML.all.BEAST_calibrated.with_proxies$tip.label), ddmodel=1))
## calculate AICc for divrate models of full tree
# AICc = -2*logLik + 2k + 2k(k+1)/(n - k - 1), using the number of internal nodes as n
for (i in names(picidae.divrate.models[["full_tree"]])) {
picidae.divrate.models[["full_tree"]][[i]][["AICc"]] <- (-2 * picidae.divrate.models[["full_tree"]][[i]]$loglik) + (2 * picidae.divrate.models[["full_tree"]][[i]]$df) + (((2 * picidae.divrate.models[["full_tree"]][[i]]$df) * (picidae.divrate.models[["full_tree"]][[i]]$df + 1)) / (picidae.RAxML.all.BEAST_calibrated.with_proxies$Nnode - picidae.divrate.models[["full_tree"]][[i]]$df - 1))
}
## fit diversification models to morph tree
# same three models, fit to the (smaller) morphology tree; missnumspec adjusted accordingly
picidae.divrate.models[["morph_tree"]] <- list()
picidae.divrate.models[["morph_tree"]][["constant"]] <- try(bd_ML(branching.times(picidae.morph.log.fully_reduced.treedata[["all_inds"]]$phy), missnumspec=237-length(picidae.morph.log.fully_reduced.treedata[["all_inds"]]$phy$tip.label), tdmodel=0))
picidae.divrate.models[["morph_tree"]][["time_dependent"]] <- try(bd_ML(branching.times(picidae.morph.log.fully_reduced.treedata[["all_inds"]]$phy), missnumspec=237-length(picidae.morph.log.fully_reduced.treedata[["all_inds"]]$phy$tip.label), tdmodel=1, idparsopt=1:3, initparsopt=c(0.1, 0.05, 0.1)))
picidae.divrate.models[["morph_tree"]][["diversity_dependent"]] <- try(dd_ML(branching.times(picidae.morph.log.fully_reduced.treedata[["all_inds"]]$phy), missnumspec=237-length(picidae.morph.log.fully_reduced.treedata[["all_inds"]]$phy$tip.label), ddmodel=1))
## calculate AICc for divrate models of morph tree
for (i in names(picidae.divrate.models[["morph_tree"]])) {
picidae.divrate.models[["morph_tree"]][[i]][["AICc"]] <- (-2 * picidae.divrate.models[["morph_tree"]][[i]]$loglik) + (2 * picidae.divrate.models[["morph_tree"]][[i]]$df) + (((2 * picidae.divrate.models[["morph_tree"]][[i]]$df) * (picidae.divrate.models[["morph_tree"]][[i]]$df + 1)) / (picidae.morph.log.fully_reduced.treedata[["all_inds"]]$phy$Nnode - picidae.divrate.models[["morph_tree"]][[i]]$df - 1))
}
## summarize divrate model results with aicc
# print one "tree model AICc" line per fitted model for quick model comparison
for (i in names(picidae.divrate.models)) {
for (q in names(picidae.divrate.models[[i]])) {
cat(i, q, picidae.divrate.models[[i]][[q]]$AICc, "\n", sep=" ")
}
}
rm(i,q)
## fit morphological evolution models to morph tree with geomean, phyl_pca, and geomean_scaled.phyl_pca (with the same models I used below)
# For each tip-sampling scheme (all individuals vs. complete individuals only),
# fit four trait-evolution models (BM, OU, trend, EB) with fitContinuous() to
# three trait summaries: geometric mean, phylogenetic PC scores, and
# geomean-scaled phylogenetic PC scores. The three near-identical per-dataset
# loops of the original are collapsed into one loop over a dataset table; list
# keys and console messages are unchanged.
picidae.morphrate.models <- list()
# names = keys used in the results list; values = labels used in the progress messages
morphrate.labels.tmp <- c(geomean="geomean", phyl_pca="phyl_pca", geomean_scaled.phyl_pca="geomean_scaled phyl_pca")
for (i in c("all_inds", "complete_ind_only")) {
  cat("\nStarting model fitting for", i, "\n", sep=" ")
  # data for each summary; geomean is subset/ordered by the tree's tip labels
  morphrate.data.tmp <- list(
    geomean = picidae.morph.log.fully_reduced.geomean[[i]][picidae.morph.log.fully_reduced.treedata[[i]]$phy$tip.label],
    phyl_pca = picidae.morph.log.fully_reduced.phyl_pca[[i]]$pca$S,
    geomean_scaled.phyl_pca = picidae.morph.log.fully_reduced.geomean_scaled.phyl_pca[[i]]$pca$S
  )
  for (d in names(morphrate.labels.tmp)) {
    cat("\nStarting ", morphrate.labels.tmp[[d]], " models.\n", sep="")
    for (q in c("BM","OU","trend","EB")) {
      cat("Starting ", q, " model\n", sep="")
      picidae.morphrate.models[[i]][[d]][[q]] <- fitContinuous(phy=picidae.morph.log.fully_reduced.treedata[[i]]$phy, dat=morphrate.data.tmp[[d]], model=q)
    }
  }
}
rm(morphrate.labels.tmp, morphrate.data.tmp, d)
# summarize morphological evolution model fits (with AICc)
# Print one line per (sampling scheme, dataset, model): the three names plus
# the model's AICc. fitContinuous() results are either a single "gfit" object
# (univariate data) or a "gfits" list of per-trait fits (multivariate data);
# for "gfits" only the first trait's AICc is reported, as in the original.
# Class checks use inherits() rather than `%in% class(x)` (idiomatic S3 test).
for (i in names(picidae.morphrate.models)) {
  for (q in names(picidae.morphrate.models[[i]])) {
    for (r in names(picidae.morphrate.models[[i]][[q]])) {
      morphfit.tmp <- picidae.morphrate.models[[i]][[q]][[r]]
      if (inherits(morphfit.tmp, "gfit")) {
        # univariate fit: AICc is stored directly in $opt
        cat(i, q, r, morphfit.tmp$opt$aicc, "\n", sep=" ")
      } else if (inherits(morphfit.tmp, "gfits")) {
        # multivariate fit: report the first trait's AICc only
        cat(i, q, r, morphfit.tmp[[1]]$opt$aicc, "\n", sep=" ")
      }
    }
  }
}
rm(i, q, r, morphfit.tmp)
### calculate subclade metrics
## the function calcMetrics.subclades() calculates a huge range of metrics for a list of subclades, including fitting models of diversification and trait evolution to the subclade
# as input, it takes subclades.treedata (a list of treedata-like objects, each containing a phy and other data objects for a single subclade), BAMM_divrates (the subclade average diversification rates from BAMM), BAMM_morphrates (the subclade average trait evolution rates from BAMM), metrics (a character vector containing the metrics to calculate), return_format (format of object to be returned; can be "matrix" or "list"), and quiet (boolean to output status to console)
# it returns either a matrix or list of metrics by subclade
calcMetrics.subclades <- function(subclades.treedata, BAMM_divrates=NULL, BAMM_morphrates=NULL, metrics=c("ntaxa", "ntaxa.on_morph_tree", "total_div", "crown_age", "divrate.ms.e10", "divrate.ms.e50", "divrate.ms.e90", "divrate.ML.constant.rate", "divrate.ML.constant.AICc", "divrate.ML.constant.AIC", "divrate.ML.time_dependent.rate", "divrate.ML.time_dependent.lambda1", "divrate.ML.time_dependent.mu1", "divrate.ML.time_dependent.AICc", "divrate.ML.time_dependent.AIC", "divrate.ML.diversity_dependent.rate", "divrate.ML.diversity_dependent.K", "divrate.ML.diversity_dependent.AICc", "divrate.ML.diversity_dependent.AIC", "divrate.BAMM", "divrate.BAMM.morph_tree", "gamma", "morphrate.geomean.BM.sigsq", "morphrate.geomean.BM.AICc", "morphrate.geomean.BM.AIC", "morphrate.geomean.OU.sigsq", "morphrate.geomean.OU.alpha", "morphrate.geomean.OU.AICc", "morphrate.geomean.OU.AIC", "morphrate.geomean.trend.slope", "morphrate.geomean.trend.sigsq", "morphrate.geomean.trend.AICc", "morphrate.geomean.trend.AIC", "morphrate.geomean.EB.alpha", "morphrate.geomean.EB.sigsq", "morphrate.geomean.EB.AICc", "morphrate.geomean.EB.AIC", "morphrate.geomean.delta.delta", "morphrate.geomean.delta.sigsq", "morphrate.geomean.delta.AICc", "morphrate.geomean.delta.AIC", "morphrate.geomean.BAMM", "morphrate.phyl_pca.BM.sigsq", "morphrate.phyl_pca.PC1.BM.AICc", "morphrate.phyl_pca.PC1.BM.AIC", "morphrate.phyl_pca.PC1.OU.sigsq", "morphrate.phyl_pca.PC1.OU.alpha", "morphrate.phyl_pca.PC1.OU.AICc", "morphrate.phyl_pca.PC1.OU.AIC", "morphrate.phyl_pca.PC1.trend.slope", "morphrate.phyl_pca.PC1.trend.sigsq", "morphrate.phyl_pca.PC1.trend.AICc", "morphrate.phyl_pca.PC1.trend.AIC", "morphrate.phyl_pca.PC1.EB.alpha", "morphrate.phyl_pca.PC1.EB.sigsq", "morphrate.phyl_pca.PC1.EB.AICc", "morphrate.phyl_pca.PC1.EB.AIC", "morphrate.phyl_pca.PC1.delta.delta", "morphrate.phyl_pca.PC1.delta.sigsq", "morphrate.phyl_pca.PC1.delta.AICc", "morphrate.phyl_pca.PC1.delta.AIC", "morphrate.phyl_pca.PC13.BAMM", 
"morphrate.geomean_scaled.phyl_pca.BM.sigsq", "morphrate.geomean_scaled.phyl_pca.PC1.BM.AICc", "morphrate.geomean_scaled.phyl_pca.PC1.BM.AIC", "morphrate.geomean_scaled.phyl_pca.PC1.OU.sigsq", "morphrate.geomean_scaled.phyl_pca.PC1.OU.alpha", "morphrate.geomean_scaled.phyl_pca.PC1.OU.AICc", "morphrate.geomean_scaled.phyl_pca.PC1.OU.AIC", "morphrate.geomean_scaled.phyl_pca.PC1.trend.slope", "morphrate.geomean_scaled.phyl_pca.PC1.trend.sigsq", "morphrate.geomean_scaled.phyl_pca.PC1.trend.AICc", "morphrate.geomean_scaled.phyl_pca.PC1.trend.AIC", "morphrate.geomean_scaled.phyl_pca.PC1.EB.alpha", "morphrate.geomean_scaled.phyl_pca.PC1.EB.sigsq", "morphrate.geomean_scaled.phyl_pca.PC1.EB.AICc", "morphrate.geomean_scaled.phyl_pca.PC1.EB.AIC", "morphrate.geomean_scaled.phyl_pca.PC1.delta.delta", "morphrate.geomean_scaled.phyl_pca.PC1.delta.sigsq", "morphrate.geomean_scaled.phyl_pca.PC1.delta.AICc", "morphrate.geomean_scaled.phyl_pca.PC1.delta.AIC", "morphrate.geomean_scaled.phyl_pca.PC13.BAMM", "avg_overlaps.rangesize_scaled", "avg_overlaps.euclidean_scaled.geomean", "avg_overlaps.euclidean_scaled.phyl_pca", "avg_overlaps.euclidean_scaled.geomean_scaled.phyl_pca"), return_format="matrix", quiet=TRUE) {
# build a list of vectors to store the various metrics (list above is NOT complete)
# loop over subclades.treedata, which is a list of treedata-like objects, each with a phy, and a bunch of different data
# calculate metrics for each subclade, and store them in the vectors
# reformat the list of vectors if necessary (e.g. to matrix)
# return the reformatted subclade metrics
require(geiger)
require(laser)
for (metric in metrics) { # set up vectors to store subclade data in
assign(metric, value=numeric())
if (!quiet) print(metric)
}
for (i in names(subclades.treedata)) { # loop over subclades, calculating metrics
cat("\nStarting clade ", i, ", ", which(names(subclades.treedata)==i), " of ", length(subclades.treedata), " total subclades.\n\n", sep="")
## diversification and tree-shape stuff
cat("Starting diversification analyses.\n")
# calculate total number of taxa
if ("ntaxa" %in% metrics) ntaxa[[i]] <- length(subclades.treedata[[i]]$phy$tip.label) + subclades.treedata[[i]]$missing_count
if ("ntaxa.on_morph_tree" %in% metrics) ntaxa.on_morph_tree[[i]] <- length(subclades.treedata[[i]]$phy$tip.label)
# calculate total diversification
if ("total_div" %in% metrics) total_div[[i]] <- log(length(subclades.treedata[[i]]$phy$tip.label) + subclades.treedata[[i]]$missing_count)
# calculate clade age
if ("crown_age" %in% metrics) crown_age[[i]] <- max(node.depth.edgelength(subclades.treedata[[i]]$phy))
# calculate Magallon-Sanderson diversification rates
if (length(intersect(c("divrate.ms.e10","divrate.ms.e50","divrate.ms.e90"), metrics)) > 0) cat("Calculating Magallon-Sanderson diversification rates.\n")
if ("divrate.ms.e10" %in% metrics) divrate.ms.e10[[i]] <- geiger::bd.ms(phy=subclades.treedata[[i]]$phy.full_tree, missing=subclades.treedata[[i]]$missing_count.full_tree, crown=TRUE, epsilon=0.10) # calculate Magallon-Sanderson diversification rate with extinction fraction 0.10
if ("divrate.ms.e50" %in% metrics) divrate.ms.e50[[i]] <- geiger::bd.ms(phy=subclades.treedata[[i]]$phy.full_tree, missing=subclades.treedata[[i]]$missing_count.full_tree, crown=TRUE, epsilon=0.50) # calculate Magallon-Sanderson diversification rate with extinction fraction 0.50
if ("divrate.ms.e90" %in% metrics) divrate.ms.e90[[i]] <- geiger::bd.ms(phy=subclades.treedata[[i]]$phy.full_tree, missing=subclades.treedata[[i]]$missing_count.full_tree, crown=TRUE, epsilon=0.90) # calculate Magallon-Sanderson diversification rate with extinction fraction 0.90
# calculate diversification rate using laser
if ("divrate.laser" %in% metrics) {
cat("Calculating laser diversification rate.\n")
divrate.laser[[i]] <- laser::bd(subclades.treedata[[i]]$phy.full_tree)$r # get diversification rate from laser model fitting
}
# fit constant-rate model, and return diversification rate (lambda-mu) and/or AICc
if (length(intersect(c("divrate.ML.constant.rate","divrate.ML.constant.AICc", "divrate.ML.constant.AIC"), metrics)) > 1) {
cat("Fitting constant-rate diversification model.\n")
sink("/dev/null")
divmodel.tmp <- try(bd_ML(branching.times(subclades.treedata[[i]]$phy.full_tree), missnumspec=subclades.treedata[[i]]$missing_count.full_tree, tdmodel=0)) # fit a constant-rate model
sink()
if (class(divmodel.tmp) == "try-error") {
if ("divrate.ML.constant.rate" %in% metrics) divrate.ML.constant.rate[[i]] <- NA
if ("divrate.ML.constant.AICc" %in% metrics) divrate.ML.constant.AICc[[i]] <- NA
if ("divrate.ML.constant.AIC" %in% metrics) divrate.ML.constant.AIC[[i]] <- NA
} else {
if ("divrate.ML.constant.rate" %in% metrics) divrate.ML.constant.rate[[i]] <- with(divmodel.tmp, lambda0-mu0) # extract diversification rate (lambda - mu) from the constant-rate model
if ("divrate.ML.constant.AICc" %in% metrics) divrate.ML.constant.AICc[[i]] <- (-2 * divmodel.tmp$loglik) + (2 * divmodel.tmp$df) + (((2 * divmodel.tmp$df) * (divmodel.tmp$df + 1)) / (subclades.treedata[[i]]$phy$Nnode - divmodel.tmp$df - 1)) # calculate AICc for the constant-rate model
if ("divrate.ML.constant.AIC" %in% metrics) divrate.ML.constant.AIC[[i]] <- (-2 * divmodel.tmp$loglik) + (2 * divmodel.tmp$df) # calculate AIC for the constant-rate model
}
rm(divmodel.tmp) # remove the temporary model
}
# fit time-dependent-rate model, and return diversification rate (lambda-mu), lambda1, mu1, and/or AICc
if (length(intersect(c("divrate.ML.time_dependent.rate", "divrate.ML.time_dependent.lambda1", "divrate.ML.time_dependent.mu1", "divrate.ML.time_dependent.AICc", "divrate.ML.time_dependent.AIC"), metrics)) > 1) {
cat("Fitting time-dependent diversification model.\n")
sink("/dev/null")
divmodel.tmp <- try(bd_ML(branching.times(subclades.treedata[[i]]$phy.full_tree), missnumspec=subclades.treedata[[i]]$missing_count.full_tree, tdmodel=1, idparsopt=1:3, initparsopt=c(0.1, 0.05, 0.1))) # fit a time-dependent-rate model
sink()
if (class(divmodel.tmp) == "try-error") {
if ("divrate.ML.time_dependent.rate" %in% metrics) divrate.ML.time_dependent.rate[[i]] <- NA
if ("divrate.ML.time_dependent.lambda1" %in% metrics) divrate.ML.time_dependent.lambda1[[i]] <- NA
if ("divrate.ML.time_dependent.mu1" %in% metrics) divrate.ML.time_dependent.mu1[[i]] <- NA
if ("divrate.ML.time_dependent.AICc" %in% metrics) divrate.ML.time_dependent.AICc[[i]] <- NA
if ("divrate.ML.time_dependent.AIC" %in% metrics) divrate.ML.time_dependent.AIC[[i]] <- NA
} else {
if ("divrate.ML.time_dependent.rate" %in% metrics) divrate.ML.time_dependent.rate[[i]] <- with(divmodel.tmp, lambda0-mu0) # extract diversification rate (lambda - mu) from the time-dependent-rate model
if ("divrate.ML.time_dependent.lambda1" %in% metrics) divrate.ML.time_dependent.lambda1[[i]] <- with(divmodel.tmp, lambda1) # extract diversification rate (lambda - mu) from the time-dependent-rate model
if ("divrate.ML.time_dependent.mu1" %in% metrics) divrate.ML.time_dependent.mu1[[i]] <- with(divmodel.tmp, mu1) # extract diversification rate (lambda - mu) from the time-dependent-rate model
if ("divrate.ML.time_dependent.AICc" %in% metrics) divrate.ML.time_dependent.AICc[[i]] <- (-2 * divmodel.tmp$loglik) + (2 * divmodel.tmp$df) + (((2 * divmodel.tmp$df) * (divmodel.tmp$df + 1)) / (subclades.treedata[[i]]$phy$Nnode - divmodel.tmp$df - 1)) # calculate AICc for the time-dependent-rate model
if ("divrate.ML.time_dependent.AIC" %in% metrics) divrate.ML.time_dependent.AIC[[i]] <- (-2 * divmodel.tmp$loglik) + (2 * divmodel.tmp$df) # calculate AIC for the time-dependent-rate model
}
rm(divmodel.tmp) # remove the temporary model
}
# fit diversity-dependent-rate model, and return diversification rate (lambda-mu), K, and/or AICc
if (length(intersect(c("divrate.ML.diversity_dependent.rate", "divrate.ML.diversity_dependent.K", "divrate.ML.diversity_dependent.AICc", "divrate.ML.diversity_dependent.AIC"), metrics)) > 1) {
cat("Fitting diversity-dependent diversification model.\n")
sink("/dev/null")
divmodel.tmp <- try(dd_ML(branching.times(subclades.treedata[[i]]$phy.full_tree), missnumspec=subclades.treedata[[i]]$missing_count.full_tree, ddmodel=1)) # fit a diversity-dependent-rate model, with exponential dependence in speciation rate
sink()
if (class(divmodel.tmp) == "try-error") {
if ("divrate.ML.diversity_dependent.rate" %in% metrics) divrate.ML.diversity_dependent.rate[[i]] <- NA
if ("divrate.ML.diversity_dependent.K" %in% metrics) divrate.ML.diversity_dependent.K[[i]] <- NA
if ("divrate.ML.diversity_dependent.AICc" %in% metrics) divrate.ML.diversity_dependent.AICc[[i]] <- NA
if ("divrate.ML.diversity_dependent.AIC" %in% metrics) divrate.ML.diversity_dependent.AIC[[i]] <- NA
} else {
if ("divrate.ML.diversity_dependent.rate" %in% metrics) divrate.ML.diversity_dependent.rate[[i]] <- with(divmodel.tmp, lambda-mu) # extract diversification rate (lambda - mu) from the diversity-dependent-rate model
if ("divrate.ML.diversity_dependent.K" %in% metrics) divrate.ML.diversity_dependent.K[[i]] <- with(divmodel.tmp, K) # extract diversification rate (lambda - mu) from the diversity-dependent-rate model
if ("divrate.ML.diversity_dependent.AICc" %in% metrics) divrate.ML.diversity_dependent.AICc[[i]] <- (-2 * divmodel.tmp$loglik) + (2 * divmodel.tmp$df) + (((2 * divmodel.tmp$df) * (divmodel.tmp$df + 1)) / (subclades.treedata[[i]]$phy$Nnode - divmodel.tmp$df - 1)) # calculate AICc for the time-dependent-rate model
if ("divrate.ML.diversity_dependent.AIC" %in% metrics) divrate.ML.diversity_dependent.AIC[[i]] <- (-2 * divmodel.tmp$loglik) + (2 * divmodel.tmp$df) # calculate AIC for the time-dependent-rate model
}
rm(divmodel.tmp) # remove the temporary model
}
# extract average diversification rate from BAMM
if ("divrate.BAMM" %in% metrics) divrate.BAMM[[i]] <- BAMM_divrates$full_tree[as.character(subclades.treedata[[i]]$node.full_tree)] # get average subclade diversification rate from BAMM
# extract average diversification rate from BAMM
if ("divrate.BAMM.morph_tree" %in% metrics) divrate.BAMM.morph_tree[[i]] <- BAMM_divrates$morph_tree[i] # get average subclade diversification rate from BAMM
# calculate gamma
if ("gamma" %in% metrics) gamma[[i]] <- gammaStat(subclades.treedata[[i]]$phy.full_tree)
## morphological evolution stuff; I use fitContinuous because the functions in the mvMORPH package are really slow with more than a few variables
cat("Starting morphological evolution analyses.\n")
# fit BM model to geomean data, and extract sigma-squared and/or AICc
if (length(intersect(c("morphrate.geomean.BM.sigsq", "morphrate.geomean.BM.AICc", "morphrate.geomean.BM.AIC"), metrics)) > 0) {
cat("Fitting BM model to geomean data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$geomean, model="BM"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.geomean.BM.sigsq" %in% metrics) morphrate.geomean.BM.sigsq[[i]] <- NA
if ("morphrate.geomean.BM.AICc" %in% metrics) morphrate.geomean.BM.AICc[[i]] <- NA
if ("morphrate.geomean.BM.AIC" %in% metrics) morphrate.geomean.BM.AIC[[i]] <- NA
} else {
if ("morphrate.geomean.BM.sigsq" %in% metrics) morphrate.geomean.BM.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.geomean.BM.AICc" %in% metrics) morphrate.geomean.BM.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.geomean.BM.AIC" %in% metrics) morphrate.geomean.BM.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# fit Ornstein-Uhlenbeck (OU) model to geomean data, and and extract sigsq, alpha (the stable attractor parameter) and/or AICc
if (length(intersect(c("morphrate.geomean.OU.alpha", "morphrate.geomean.OU.sigsq", "morphrate.geomean.OU.AICc", "morphrate.geomean.OU.AIC"), metrics)) > 0) {
cat("Fitting OU model to geomean data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$geomean, model="OU"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.geomean.OU.alpha" %in% metrics) morphrate.geomean.OU.alpha[[i]] <- NA
if ("morphrate.geomean.OU.sigsq" %in% metrics) morphrate.geomean.OU.sigsq[[i]] <- NA
if ("morphrate.geomean.OU.AICc" %in% metrics) morphrate.geomean.OU.AICc[[i]] <- NA
if ("morphrate.geomean.OU.AIC" %in% metrics) morphrate.geomean.OU.AIC[[i]] <- NA
} else {
if ("morphrate.geomean.OU.alpha" %in% metrics) morphrate.geomean.OU.alpha[[i]] <- morphmodel.tmp$opt$alpha
if ("morphrate.geomean.OU.sigsq" %in% metrics) morphrate.geomean.OU.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.geomean.OU.AICc" %in% metrics) morphrate.geomean.OU.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.geomean.OU.AIC" %in% metrics) morphrate.geomean.OU.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# fit trend model to geomean data, and extract sigma-squared and/or AICc
if (length(intersect(c("morphrate.geomean.trend.slope", "morphrate.geomean.trend.sigsq", "morphrate.geomean.trend.AICc", "morphrate.geomean.trend.AIC"), metrics)) > 0) {
cat("Fitting trend model to geomean data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$geomean, model="trend"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.geomean.trend.slope" %in% metrics) morphrate.geomean.trend.slope[[i]] <- NA
if ("morphrate.geomean.trend.sigsq" %in% metrics) morphrate.geomean.trend.sigsq[[i]] <- NA
if ("morphrate.geomean.trend.AICc" %in% metrics) morphrate.geomean.trend.AICc[[i]] <- NA
if ("morphrate.geomean.trend.AIC" %in% metrics) morphrate.geomean.trend.AIC[[i]] <- NA
} else {
if ("morphrate.geomean.trend.slope" %in% metrics) morphrate.geomean.trend.slope[[i]] <- morphmodel.tmp$opt$slope
if ("morphrate.geomean.trend.sigsq" %in% metrics) morphrate.geomean.trend.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.geomean.trend.AICc" %in% metrics) morphrate.geomean.trend.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.geomean.trend.AIC" %in% metrics) morphrate.geomean.trend.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# fit early burst (EB) model to geomean data, and extract alpha and/or AICc
if (length(intersect(c("morphrate.geomean.EB.alpha", "morphrate.geomean.EB.sigsq", "morphrate.geomean.EB.AICc", "morphrate.geomean.EB.AIC"), metrics)) > 0) {
cat("Fitting EB model to geomean data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$geomean, model="EB"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.geomean.EB.alpha" %in% metrics) morphrate.geomean.EB.alpha[[i]] <- NA
if ("morphrate.geomean.EB.sigsq" %in% metrics) morphrate.geomean.EB.sigsq[[i]] <- NA
if ("morphrate.geomean.EB.AICc" %in% metrics) morphrate.geomean.EB.AICc[[i]] <- NA
if ("morphrate.geomean.EB.AIC" %in% metrics) morphrate.geomean.EB.AIC[[i]] <- NA
} else {
if ("morphrate.geomean.EB.alpha" %in% metrics) morphrate.geomean.EB.alpha[[i]] <- morphmodel.tmp$opt$a
if ("morphrate.geomean.EB.sigsq" %in% metrics) morphrate.geomean.EB.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.geomean.EB.AICc" %in% metrics) morphrate.geomean.EB.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.geomean.EB.AIC" %in% metrics) morphrate.geomean.EB.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# fit delta model to geomean data, and extract delta and/or AICc
if (length(intersect(c("morphrate.geomean.delta.delta", "morphrate.geomean.delta.sigsq", "morphrate.geomean.delta.AICc", "morphrate.geomean.delta.AIC"), metrics)) > 0) {
cat("Fitting delta model to geomean data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$geomean, model="delta"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.geomean.delta.delta" %in% metrics) morphrate.geomean.delta.delta[[i]] <- NA
if ("morphrate.geomean.delta.sigsq" %in% metrics) morphrate.geomean.delta.sigsq[[i]] <- NA
if ("morphrate.geomean.delta.AICc" %in% metrics) morphrate.geomean.delta.AICc[[i]] <- NA
if ("morphrate.geomean.delta.AIC" %in% metrics) morphrate.geomean.delta.AIC[[i]] <- NA
} else {
if ("morphrate.geomean.delta.delta" %in% metrics) morphrate.geomean.delta.delta[[i]] <- morphmodel.tmp$opt$delta
if ("morphrate.geomean.delta.sigsq" %in% metrics) morphrate.geomean.delta.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.geomean.delta.AICc" %in% metrics) morphrate.geomean.delta.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.geomean.delta.AIC" %in% metrics) morphrate.geomean.delta.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# extract average geomean morphological evolution rate from BAMM
if ("morphrate.geomean.BAMM" %in% metrics) morphrate.geomean.BAMM[[i]] <- BAMM_morphrates[[grep("geomean(?!_scaled)", names(BAMM_morphrates), value=TRUE, perl=TRUE)]][i]
# fit BM model to phyl_pca data, and extract sigma-squared and/or AICc
if (length(intersect(c("morphrate.phyl_pca.BM.sigsq", "morphrate.phyl_pca.PC1.BM.AICc", "morphrate.phyl_pca.PC1.BM.AIC"), metrics)) > 0) {
cat("Fitting BM model to phyl_pca data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$phyl_pca, model="BM"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp[["PC1"]]$res[,"convergence"])) > 0)) {
if ("morphrate.phyl_pca.BM.sigsq" %in% metrics) morphrate.phyl_pca.BM.sigsq[[i]] <- NA
if ("morphrate.phyl_pca.PC1.BM.AICc" %in% metrics) morphrate.phyl_pca.PC1.BM.AICc[[i]] <- NA
if ("morphrate.phyl_pca.PC1.BM.AIC" %in% metrics) morphrate.phyl_pca.PC1.BM.AIC[[i]] <- NA
} else {
if ("morphrate.phyl_pca.BM.sigsq" %in% metrics) morphrate.phyl_pca.BM.sigsq[[i]] <- sum(sapply(morphmodel.tmp, function(x) x$opt$sigsq))
if ("morphrate.phyl_pca.PC1.BM.AICc" %in% metrics) morphrate.phyl_pca.PC1.BM.AICc[[i]] <- morphmodel.tmp[["PC1"]]$opt$aicc
if ("morphrate.phyl_pca.PC1.BM.AIC" %in% metrics) morphrate.phyl_pca.PC1.BM.AIC[[i]] <- morphmodel.tmp[["PC1"]]$opt$aic
}
rm(morphmodel.tmp)
}
# fit Ornstein-Uhlenbeck (OU) model to phyl_pca data, and extract sigsq, alpha (the stable attractor parameter) and/or AICc
if (length(intersect(c("morphrate.phyl_pca.PC1.OU.alpha", "morphrate.phyl_pca.PC1.OU.sigsq", "morphrate.phyl_pca.PC1.OU.AICc"), metrics)) > 0) {
cat("Fitting OU model to phyl_pca data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$phyl_pca[,"PC1"], model="OU"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.phyl_pca.PC1.OU.alpha" %in% metrics) morphrate.phyl_pca.PC1.OU.alpha[[i]] <- NA
if ("morphrate.phyl_pca.PC1.OU.sigsq" %in% metrics) morphrate.phyl_pca.PC1.OU.sigsq[[i]] <- NA
if ("morphrate.phyl_pca.PC1.OU.AICc" %in% metrics) morphrate.phyl_pca.PC1.OU.AICc[[i]] <- NA
if ("morphrate.phyl_pca.PC1.OU.AIC" %in% metrics) morphrate.phyl_pca.PC1.OU.AIC[[i]] <- NA
} else {
if ("morphrate.phyl_pca.PC1.OU.alpha" %in% metrics) morphrate.phyl_pca.PC1.OU.alpha[[i]] <- morphmodel.tmp$opt$alpha
if ("morphrate.phyl_pca.PC1.OU.sigsq" %in% metrics) morphrate.phyl_pca.PC1.OU.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.phyl_pca.PC1.OU.AICc" %in% metrics) morphrate.phyl_pca.PC1.OU.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.phyl_pca.PC1.OU.AIC" %in% metrics) morphrate.phyl_pca.PC1.OU.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# fit trend model to phyl_pca data, and extract slope and/or AICc
if (length(intersect(c("morphrate.phyl_pca.PC1.trend.slope", "morphrate.phyl_pca.PC1.trend.sigsq", "morphrate.phyl_pca.PC1.trend.AICc", "morphrate.phyl_pca.PC1.trend.AIC"), metrics)) > 0) {
cat("Fitting trend model to phyl_pca data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$phyl_pca[,"PC1"], model="trend"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.phyl_pca.PC1.trend.slope" %in% metrics) morphrate.phyl_pca.PC1.trend.slope[[i]] <- NA
if ("morphrate.phyl_pca.PC1.trend.sigsq" %in% metrics) morphrate.phyl_pca.PC1.trend.sigsq[[i]] <- NA
if ("morphrate.phyl_pca.PC1.trend.AICc" %in% metrics) morphrate.phyl_pca.PC1.trend.AICc[[i]] <- NA
if ("morphrate.phyl_pca.PC1.trend.AIC" %in% metrics) morphrate.phyl_pca.PC1.trend.AIC[[i]] <- NA
} else {
if ("morphrate.phyl_pca.PC1.trend.slope" %in% metrics) morphrate.phyl_pca.PC1.trend.slope[[i]] <- morphmodel.tmp$opt$slope
if ("morphrate.phyl_pca.PC1.trend.sigsq" %in% metrics) morphrate.phyl_pca.PC1.trend.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.phyl_pca.PC1.trend.AICc" %in% metrics) morphrate.phyl_pca.PC1.trend.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.phyl_pca.PC1.trend.AIC" %in% metrics) morphrate.phyl_pca.PC1.trend.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# fit early burst (EB) model to phyl_pca data, and extract alpha (the rate decline parameter) and/or AICc
if (length(intersect(c("morphrate.phyl_pca.PC1.EB.alpha", "morphrate.phyl_pca.PC1.EB.sigsq", "morphrate.phyl_pca.PC1.EB.AICc", "morphrate.phyl_pca.PC1.EB.AIC"), metrics)) > 0) {
cat("Fitting EB model to phyl_pca data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$phyl_pca[,"PC1"], model="EB"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.phyl_pca.PC1.EB.alpha" %in% metrics) morphrate.phyl_pca.PC1.EB.alpha[[i]] <- NA
if ("morphrate.phyl_pca.PC1.EB.sigsq" %in% metrics) morphrate.phyl_pca.PC1.EB.sigsq[[i]] <- NA
if ("morphrate.phyl_pca.PC1.EB.AICc" %in% metrics) morphrate.phyl_pca.PC1.EB.AICc[[i]] <- NA
if ("morphrate.phyl_pca.PC1.EB.AIC" %in% metrics) morphrate.phyl_pca.PC1.EB.AIC[[i]] <- NA
} else {
if ("morphrate.phyl_pca.PC1.EB.alpha" %in% metrics) morphrate.phyl_pca.PC1.EB.alpha[[i]] <- morphmodel.tmp$opt$a
if ("morphrate.phyl_pca.PC1.EB.sigsq" %in% metrics) morphrate.phyl_pca.PC1.EB.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.phyl_pca.PC1.EB.AICc" %in% metrics) morphrate.phyl_pca.PC1.EB.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.phyl_pca.PC1.EB.AIC" %in% metrics) morphrate.phyl_pca.PC1.EB.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# fit delta model to phyl_pca data, and extract alpha (the rate decline parameter) and/or AICc
if (length(intersect(c("morphrate.phyl_pca.PC1.delta.delta", "morphrate.phyl_pca.PC1.delta.sigsq", "morphrate.phyl_pca.PC1.delta.AICc", "morphrate.phyl_pca.PC1.delta.AIC"), metrics)) > 0) {
cat("Fitting delta model to phyl_pca data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$phyl_pca[,"PC1"], model="delta"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.phyl_pca.PC1.delta.delta" %in% metrics) morphrate.phyl_pca.PC1.delta.delta[[i]] <- NA
if ("morphrate.phyl_pca.PC1.delta.sigsq" %in% metrics) morphrate.phyl_pca.PC1.delta.sigsq[[i]] <- NA
if ("morphrate.phyl_pca.PC1.delta.AICc" %in% metrics) morphrate.phyl_pca.PC1.delta.AICc[[i]] <- NA
if ("morphrate.phyl_pca.PC1.delta.AIC" %in% metrics) morphrate.phyl_pca.PC1.delta.AIC[[i]] <- NA
} else {
if ("morphrate.phyl_pca.PC1.delta.delta" %in% metrics) morphrate.phyl_pca.PC1.delta.delta[[i]] <- morphmodel.tmp$opt$delta
if ("morphrate.phyl_pca.PC1.delta.sigsq" %in% metrics) morphrate.phyl_pca.PC1.delta.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.phyl_pca.PC1.delta.AICc" %in% metrics) morphrate.phyl_pca.PC1.delta.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.phyl_pca.PC1.delta.AIC" %in% metrics) morphrate.phyl_pca.PC1.delta.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# extract average phyl_pca morphological evolution rate from BAMM
if ("morphrate.phyl_pca.PC13.BAMM" %in% metrics) morphrate.phyl_pca.PC13.BAMM[[i]] <- BAMM_morphrates[[grep("(?<!scaled_)phyl_pca_PC1", names(BAMM_morphrates), value=TRUE, perl=TRUE)]][[i]]
# fit BM model to geomean_scaled.phyl_pca data, and extract sigma-squared and/or AICc
if (length(intersect(c("morphrate.geomean_scaled.phyl_pca.BM.sigsq", "morphrate.geomean_scaled.phyl_pca.PC1.BM.AICc", "morphrate.geomean_scaled.phyl_pca.PC1.BM.AIC"), metrics)) > 0) {
cat("Fitting BM model to geomean-scaled phyl_pca data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$geomean_scaled.phyl_pca, model="BM"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp[["PC1"]]$res[,"convergence"])) > 0)) {
if ("morphrate.geomean_scaled.phyl_pca.BM.sigsq" %in% metrics) morphrate.geomean_scaled.phyl_pca.BM.sigsq[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.BM.AICc" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.BM.AICc[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.BM.AIC" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.BM.AIC[[i]] <- NA
} else {
if ("morphrate.geomean_scaled.phyl_pca.BM.sigsq" %in% metrics) morphrate.geomean_scaled.phyl_pca.BM.sigsq[[i]] <- sum(sapply(morphmodel.tmp, function(x) x$opt$sigsq))
if ("morphrate.geomean_scaled.phyl_pca.PC1.BM.AICc" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.BM.AICc[[i]] <- morphmodel.tmp[["PC1"]]$opt$aicc
if ("morphrate.geomean_scaled.phyl_pca.PC1.BM.AIC" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.BM.AIC[[i]] <- morphmodel.tmp[["PC1"]]$opt$aic
}
rm(morphmodel.tmp)
}
# fit Ornstein-Uhlenbeck (OU) model to geomean_scaled.phyl_pca data, and extract sigsq, alpha (the stable attractor parameter) and/or AICc
if (length(intersect(c("morphrate.geomean_scaled.phyl_pca.PC1.OU.alpha", "morphrate.geomean_scaled.phyl_pca.PC1.OU.sigsq", "morphrate.geomean_scaled.phyl_pca.PC1.OU.AICc", "morphrate.geomean_scaled.phyl_pca.PC1.OU.AIC"), metrics)) > 0) {
cat("Fitting OU model to geomean-scaled phyl_pca data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$geomean_scaled.phyl_pca[,"PC1"], model="OU"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.geomean_scaled.phyl_pca.PC1.OU.alpha" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.OU.alpha[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.OU.sigsq" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.OU.sigsq[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.OU.AICc" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.OU.AICc[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.OU.AIC" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.OU.AIC[[i]] <- NA
} else {
if ("morphrate.geomean_scaled.phyl_pca.PC1.OU.alpha" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.OU.alpha[[i]] <- morphmodel.tmp$opt$alpha
if ("morphrate.geomean_scaled.phyl_pca.PC1.OU.sigsq" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.OU.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.geomean_scaled.phyl_pca.PC1.OU.AICc" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.OU.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.geomean_scaled.phyl_pca.PC1.OU.AIC" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.OU.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# fit trend model to geomean_scaled.phyl_pca data, and extract slope and/or AICc
if (length(intersect(c("morphrate.geomean_scaled.phyl_pca.PC1.trend.slope", "morphrate.geomean_scaled.phyl_pca.PC1.trend.sigsq", "morphrate.geomean_scaled.phyl_pca.PC1.trend.AICc", "morphrate.geomean_scaled.phyl_pca.PC1.trend.AIC"), metrics)) > 0) {
cat("Fitting trend model to geomean-scaled phyl_pca data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$geomean_scaled.phyl_pca[,"PC1"], model="trend"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.geomean_scaled.phyl_pca.PC1.trend.slope" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.trend.slope[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.trend.sigsq" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.trend.sigsq[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.trend.AICc" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.trend.AICc[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.trend.AIC" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.trend.AIC[[i]] <- NA
} else {
if ("morphrate.geomean_scaled.phyl_pca.PC1.trend.slope" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.trend.slope[[i]] <- morphmodel.tmp$opt$slope
if ("morphrate.geomean_scaled.phyl_pca.PC1.trend.sigsq" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.trend.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.geomean_scaled.phyl_pca.PC1.trend.AICc" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.trend.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.geomean_scaled.phyl_pca.PC1.trend.AIC" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.trend.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# fit early burst (EB) model to geomean_scaled.phyl_pca data, and extract alpha (the rate decline parameter) and/or AICc
if (length(intersect(c("morphrate.geomean_scaled.phyl_pca.PC1.EB.alpha", "morphrate.geomean_scaled.phyl_pca.PC1.EB.sigsq", "morphrate.geomean_scaled.phyl_pca.PC1.EB.AICc", "morphrate.geomean_scaled.phyl_pca.PC1.EB.AIC"), metrics)) > 0) {
cat("Fitting EB model to geomean-scaled phyl_pca data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$geomean_scaled.phyl_pca[,"PC1"], model="EB"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.geomean_scaled.phyl_pca.PC1.EB.alpha" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.EB.alpha[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.EB.sigsq" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.EB.sigsq[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.EB.AICc" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.EB.AICc[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.EB.AIC" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.EB.AIC[[i]] <- NA
} else {
if ("morphrate.geomean_scaled.phyl_pca.PC1.EB.alpha" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.EB.alpha[[i]] <- morphmodel.tmp$opt$a
if ("morphrate.geomean_scaled.phyl_pca.PC1.EB.sigsq" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.EB.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.geomean_scaled.phyl_pca.PC1.EB.AICc" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.EB.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.geomean_scaled.phyl_pca.PC1.EB.AIC" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.EB.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# fit delta model to geomean_scaled.phyl_pca data, and extract delta (the time-dependence parameter) and/or AICc
if (length(intersect(c("morphrate.geomean_scaled.phyl_pca.PC1.delta.delta", "morphrate.geomean_scaled.phyl_pca.PC1.delta.sigsq", "morphrate.geomean_scaled.phyl_pca.PC1.delta.AICc", "morphrate.geomean_scaled.phyl_pca.PC1.delta.AIC"), metrics)) > 0) {
cat("Fitting delta model to geomean-scaled phyl_pca data.\n")
morphmodel.tmp <- try(fitContinuous(phy=subclades.treedata[[i]]$phy, dat=subclades.treedata[[i]]$geomean_scaled.phyl_pca[,"PC1"], model="delta"))
if ((class(morphmodel.tmp) == "try-error") | (min(abs(morphmodel.tmp$res[,"convergence"])) > 0)) {
if ("morphrate.geomean_scaled.phyl_pca.PC1.delta.delta" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.delta.delta[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.delta.sigsq" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.delta.sigsq[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.delta.AICc" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.delta.AICc[[i]] <- NA
if ("morphrate.geomean_scaled.phyl_pca.PC1.delta.AIC" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.delta.AIC[[i]] <- NA
} else {
if ("morphrate.geomean_scaled.phyl_pca.PC1.delta.delta" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.delta.delta[[i]] <- morphmodel.tmp$opt$delta
if ("morphrate.geomean_scaled.phyl_pca.PC1.delta.sigsq" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.delta.sigsq[[i]] <- morphmodel.tmp$opt$sigsq
if ("morphrate.geomean_scaled.phyl_pca.PC1.delta.AICc" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.delta.AICc[[i]] <- morphmodel.tmp$opt$aicc
if ("morphrate.geomean_scaled.phyl_pca.PC1.delta.AIC" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC1.delta.AIC[[i]] <- morphmodel.tmp$opt$aic
}
rm(morphmodel.tmp)
}
# extract average geomean_scaled.phyl_pca morphological evolution rate from BAMM
if ("morphrate.geomean_scaled.phyl_pca.PC13.BAMM" %in% metrics) morphrate.geomean_scaled.phyl_pca.PC13.BAMM[[i]] <- BAMM_morphrates[[grep("geomean_scaled_phyl_pca_PC1", names(BAMM_morphrates), value=TRUE)]][[i]]
## overlap metrics
cat("Starting overlap metrics.\n")
# calculate average of summed overlaps scaled by focal taxon range
if ("avg_overlaps.rangesize_scaled" %in% metrics) avg_overlaps.rangesize_scaled[[i]] <- mean(subclades.treedata[[i]]$overlaps.scaled)
if ("avg_overlaps.euclidean_scaled.geomean" %in% metrics) avg_overlaps.euclidean_scaled.geomean[[i]] <- mean(subclades.treedata[[i]]$overlaps.euclidean_scaled$geomean)
if ("avg_overlaps.euclidean_scaled.phyl_pca" %in% metrics) avg_overlaps.euclidean_scaled.phyl_pca[[i]] <- mean(subclades.treedata[[i]]$overlaps.euclidean_scaled$phyl_pca)
if ("avg_overlaps.euclidean_scaled.geomean_scaled.phyl_pca" %in% metrics) avg_overlaps.euclidean_scaled.geomean_scaled.phyl_pca[[i]] <- mean(subclades.treedata[[i]]$overlaps.euclidean_scaled$geomean_scaled.phyl_pca)
}
if (return_format == "matrix") {
subclade_data <- matrix(nrow=length(subclades.treedata), ncol=length(metrics), dimnames=list(names(subclades.treedata), metrics))
for (i in 1:length(metrics)) {
if (!quiet) cat(metrics[i], ": ", get(metrics[i]), "\n", sep="")
subclade_data[,i] <- get(metrics[i])
}
}
if (return_format == "list") {
subclade_data <- list()
for (metric in metrics) subclade_data[[metric]] <- get(metric)
}
return(subclade_data)
}
## loop over the list of subclades, calculating metrics for each
# build the per-variant metric tables in one functional pass; sapply with
# simplify=FALSE returns the same named list the equivalent for-loop would assemble
picidae.subclade.data <- sapply(
  names(picidae.morph.log.fully_reduced.subclades.treedata), # loop over individual inclusion
  function(variant) {
    calc.subclade.metrics(
      subclades.treedata = picidae.morph.log.fully_reduced.subclades.treedata[[variant]],
      BAMM_divrates = picidae.BAMM.divrates_by_node,
      BAMM_morphrates = picidae.BAMM.morphrates_by_node
    )
  },
  simplify = FALSE, USE.NAMES = TRUE
)
## calculate delta_aicc for all models vs. basic models (e.g time-dependent and diversity-dependent vs. constant-rate diversification, OU and trend and EB vs. BM for all morph variables)
# each delta column is appended to the metric matrix and named by replacing
# "AICc" with "delta_AICc" in the source column's name
for (i in names(picidae.subclade.data)) { # loop over individual inclusion
# diversification models: negative lookahead excludes the constant-rate AICc
# column itself; every other divrate AICc column is compared against it
for (m in grep("divrate(?!.ML.constant)[a-zA-Z0-9._]+AICc", colnames(picidae.subclade.data[[i]]), value=TRUE, perl=TRUE)) {
picidae.subclade.data[[i]] <- cbind(picidae.subclade.data[[i]], picidae.subclade.data[[i]][,m] - picidae.subclade.data[[i]][,"divrate.ML.constant.AICc"])
colnames(picidae.subclade.data[[i]])[ncol(picidae.subclade.data[[i]])] <- sub("AICc", "delta_AICc", m)
}
# morphological models: negative lookbehind excludes the BM baseline columns;
# the matching BM column name is built by substituting "BM" for the model
# name immediately preceding ".AICc" (lookahead in the sub() pattern)
for (m in grep("morphrate[a-zA-Z0-9._]+(?<!BM.)AICc", colnames(picidae.subclade.data[[i]]), value=TRUE, perl=TRUE)) {
picidae.subclade.data[[i]] <- cbind(picidae.subclade.data[[i]], picidae.subclade.data[[i]][,m] - picidae.subclade.data[[i]][,sub("[a-zA-Z0-9]+(?=.AICc)", "BM", m, perl=TRUE)])
colnames(picidae.subclade.data[[i]])[ncol(picidae.subclade.data[[i]])] <- sub("AICc", "delta_AICc", m)
}
}
rm(i,m)
### generate subclade combinations for my subclade regressions
## generate subclade combinations using the random method (100 iterations), and one set using the sequential selection method; for each, set a minimum of 5 clades and 6 taxa per clade
inclusion_sets <- names(picidae.morph.log.fully_reduced.subclades.treedata)
# random draws of clade combinations (100 per inclusion variant)
picidae.subclade.combinations.6sp.random <- lapply(
  setNames(inclusion_sets, inclusion_sets),
  function(variant) subcladeCombinations.random(phylist=picidae.morph.log.fully_reduced.subclades.treedata[[variant]], ncombs=100, min.clades=5, min.taxa=6)
)
# one sequential (tree-slice) set of combinations per inclusion variant
picidae.subclade.combinations.6sp.sequential <- lapply(
  setNames(inclusion_sets, inclusion_sets),
  function(variant) subcladeCombinations.sequential(phy=picidae.morph.log.fully_reduced.treedata[[variant]]$phy, min.clades=5, min.taxa=6)
)
rm(inclusion_sets)
### generate backbone trees, subclade treedata objects, and fit models to subclade combinations
## general process
## loop over subclade combinations
# generate backbone tree for each subclade combination
# generate treedata object for each subclade combination (using the backbone tree and the subclade data)
# fit models to subclade data using pgls, with the treedata$phy and treedata$data
# save off the relevant bits from the models (slopes and intercepts, R^2)
# examine distributions of r_squared, p_values, slopes, etc.
## the function fit.subcladeModels.bivariate() fits regression models to subclade data, iterating over various combinations of subclades
# as input, it takes phy (the phylogenetic tree of taxa, as phylo object), subclade.combinations (a list of subclade combinations, each containing a vector of node numbers in phy), subclade.data (a matrix of subclade metrics, with rownames as the subclade numbers (the node numbers)), models (an optional character vector containing models to test, formatted as "var1_vs_var2"), models_filename (the name of an optional text file with models to test, with one model on each line, formatted as "var1_vs_var2"), return_format (format to return results in; can be "matrix", "list", or "array"), model_fitting (method for fitting models; either "pgls" or "lm"), quiet.subclade.combinations (boolean to output to console when starting the next subclade combination), quiet.models (boolean to output to console when starting fitting the next model)
# it returns a matrix, list, or array, with the columns of the matrix or the elements of the list containing the parameter values from the specified models fit to each subclade combination
fit.subcladeModels.bivariate <- function(phy, subclade.combinations, subclade.data, models=NULL, models_filename="Picidae_subclade_models_bivariate.txt", return_format="matrix", model_fitting="pgls", quiet.subclade.combinations=TRUE, quiet.models=TRUE) {
  # Fit bivariate regressions (y_var ~ x_var, parsed from "y_var_vs_x_var" model
  # names) to subclade-level metrics for each combination of subclades, and
  # collect r_squared, p_value, slope and intercept for every model/combination.
  #
  # Args:
  #   phy: phylo object for the full set of taxa
  #   subclade.combinations: list; each element is a vector of node numbers in phy
  #   subclade.data: matrix of subclade metrics, rownames = node numbers
  #   models: optional character vector of models, formatted "var1_vs_var2"
  #   models_filename: file with one model name per line, read when models is NULL
  #   return_format: "matrix", "list", or "array"
  #   model_fitting: "pgls" (nlme::gls with a phylogenetic correlation structure)
  #                  or "lm" (ordinary least squares)
  #   quiet.subclade.combinations, quiet.models: suppress progress messages
  #
  # Returns: the collected parameter values, in the requested format.
  # if models not provided as argument, read them from file
  if (is.null(models)) {
    models <- read.table(file=models_filename, header=FALSE, stringsAsFactors=FALSE)[,1]
  }
  # create an empty accumulator vector per model x measure, named "<model>.<measure>"
  for (model in models) {
    for (measure in c("r_squared","p_value","slope","intercept")) {
      assign(paste(model, measure, sep="."), value=numeric())
    }
  }
  for (i in 1:length(subclade.combinations)) { # loop over subclade combinations
    if (!quiet.subclade.combinations) cat("\nStarting model fitting for combination ", i, " of ", length(subclade.combinations), ".\n", sep="")
    # generate backbone tree and treedata object for this combination
    subclade.treedata.tmp <- getTreedata.subclades(phy=phy, subclade.combination=subclade.combinations[[i]], subclade.data=subclade.data)
    for (model in models) { # loop over models
      if (!quiet.models) cat("Starting model ", model, "\n", sep="")
      model_split <- strsplit(model, "_vs_")[[1]] # split the model name into the two component variables
      y_var <- model_split[1] # response variable name
      x_var <- model_split[2] # predictor variable name
      # common data frame so coefficients are named "x"/"(Intercept)" for BOTH
      # fitting methods (previously the lm branch fit on raw expressions, so
      # the "x" coefficient lookups below could not find the slope/p-value)
      model_data <- data.frame(y = subclade.treedata.tmp$data[,y_var], x = subclade.treedata.tmp$data[,x_var])
      if (model_fitting=="pgls") {
        # model with correlation structure based on tree, with lambda estimated
        model.tmp <- try(gls(data = model_data, model = y ~ x, na.action=na.exclude, correlation=corPagel(value=1, phy=subclade.treedata.tmp$phy), method="REML"))
        if (inherits(model.tmp, "try-error")) {
          # fall back to a Brownian Motion correlation structure if lambda estimation fails
          model.tmp <- try(gls(data = model_data, model = y ~ x, na.action=na.exclude, correlation=corBrownian(value=1, phy=subclade.treedata.tmp$phy), method="REML"))
        }
        if (inherits(model.tmp, "try-error")) {
          # both fits failed: record NA for this combination
          assign(paste(model, "r_squared", sep="."), value=c(get(paste(model, "r_squared", sep=".")), NA))
          assign(paste(model, "p_value", sep="."), value=c(get(paste(model, "p_value", sep=".")), NA))
        } else {
          # pseudo-R^2 for gls: squared correlation of observed vs. fitted values
          assign(paste(model, "r_squared", sep="."), value=c(get(paste(model, "r_squared", sep=".")), cor(subclade.treedata.tmp$data[,y_var], model.tmp$fitted)^2))
          assign(paste(model, "p_value", sep="."), value=c(get(paste(model, "p_value", sep=".")), summary(model.tmp)$tTable["x","p-value"]))
        }
      } else if (model_fitting=="lm") {
        # fit model using ordinary least squares
        model.tmp <- try(lm(y ~ x, data = model_data))
        if (inherits(model.tmp, "try-error")) {
          assign(paste(model, "r_squared", sep="."), value=c(get(paste(model, "r_squared", sep=".")), NA))
          assign(paste(model, "p_value", sep="."), value=c(get(paste(model, "p_value", sep=".")), NA))
        } else {
          assign(paste(model, "r_squared", sep="."), value=c(get(paste(model, "r_squared", sep=".")), summary(model.tmp)$r.squared))
          # fixed: previously referenced the undefined object model.tmp.lm
          assign(paste(model, "p_value", sep="."), value=c(get(paste(model, "p_value", sep=".")), summary(model.tmp)$coefficients["x","Pr(>|t|)"]))
        }
      }
      # slope/intercept extraction is shared by both fitting methods
      if (inherits(model.tmp, "try-error")) {
        assign(paste(model, "slope", sep="."), value=c(get(paste(model, "slope", sep=".")), NA))
        assign(paste(model, "intercept", sep="."), value=c(get(paste(model, "intercept", sep=".")), NA))
      } else {
        assign(paste(model, "slope", sep="."), value=c(get(paste(model, "slope", sep=".")), model.tmp$coefficients["x"]))
        assign(paste(model, "intercept", sep="."), value=c(get(paste(model, "intercept", sep=".")), model.tmp$coefficients["(Intercept)"]))
      }
    }
  }
  if (return_format == "matrix") {
    # one row per subclade combination, one column per model x measure
    subclade_combination_model_results <- matrix(nrow=length(subclade.combinations), ncol=length(models)*4, dimnames=list(as.character(1:length(subclade.combinations)), as.vector(sapply(models, function(y) paste(y, c("r_squared","p_value","slope","intercept"), sep=".")))))
    for (i in colnames(subclade_combination_model_results)) {
      subclade_combination_model_results[,i] <- get(i)
    }
  } else if (return_format == "list") {
    # one vector per model x measure, with the subclade combinations as elements
    # fixed: sep="." was previously inside c(), producing malformed element names
    subclade_combination_model_results <- list()
    for (i in as.vector(sapply(models, function(y) paste(y, c("r_squared","p_value","slope","intercept"), sep=".")))) subclade_combination_model_results[[i]] <- get(i)
  } else if (return_format == "array") {
    # dimensions: models x measures x subclade combinations
    subclade_combination_model_results <- array(dim=c(length(models), 4, length(subclade.combinations)), dimnames=list(models, c("r_squared","p_value","slope","intercept"), as.character(1:length(subclade.combinations))))
    for (i in models) {
      for (j in c("r_squared","p_value","slope","intercept")) {
        subclade_combination_model_results[i,j,] <- get(paste(i,j, sep="."))
      }
    }
  }
  return(subclade_combination_model_results)
}
# fit the bivariate subclade models to both the random and the sequential
# combinations, for every individual-inclusion variant
picidae.subclade.combinations.6sp.random.model_params <- list()
picidae.subclade.combinations.6sp.sequential.model_params <- list()
for (variant in names(picidae.subclade.combinations.6sp.random)) { # loop over individual inclusions
  cat("\nStarting subclade model fitting for random combinations of", variant, "\n", sep=" ")
  picidae.subclade.combinations.6sp.random.model_params[[variant]] <-
    fit.subcladeModels.bivariate(
      phy=picidae.morph.log.fully_reduced.treedata[[variant]]$phy,
      subclade.combinations=picidae.subclade.combinations.6sp.random[[variant]],
      subclade.data=picidae.subclade.data[[variant]],
      quiet.subclade.combinations=FALSE
    )
  cat("\nStarting subclade model fitting for sequential combinations of", variant, "\n", sep=" ")
  picidae.subclade.combinations.6sp.sequential.model_params[[variant]] <-
    fit.subcladeModels.bivariate(
      phy=picidae.morph.log.fully_reduced.treedata[[variant]]$phy,
      subclade.combinations=picidae.subclade.combinations.6sp.sequential[[variant]],
      subclade.data=picidae.subclade.data[[variant]],
      quiet.subclade.combinations=FALSE
    )
}
rm(variant)
### summarize results
## histograms of important models (across the different random subclade combinations)
# one PDF per inclusion-variant x model: 2x2 panels of r_squared, p_value,
# slope and intercept across the random subclade combinations
for (i in names(picidae.subclade.combinations.6sp.random.model_params)) {
# recover model names by stripping the ".r_squared" suffix from matching columns
# NOTE(review): the character class "[a-zA-z0-9_.]" uses "A-z", which in ASCII
# also matches the punctuation between "Z" and "a" — presumably harmless for
# these column names, but confirm
for (j in sub(".r_squared", "", grep("[a-zA-z0-9_.]+.r_squared", colnames(picidae.subclade.combinations.6sp.random.model_params[[i]]), perl=TRUE, value=TRUE))) {
pdf(file=paste("picidae_6sp_random", i , j, "histogram.pdf", sep="_"), height=10, width=10)
par(mfrow=c(2,2))
for (k in c("r_squared","p_value","slope","intercept")) {
if (k == "p_value") {
# p-values get fixed bins on [0,1] and a red reference line at 0.05
hist(picidae.subclade.combinations.6sp.random.model_params[[i]][,paste(j, k, sep=".")], xlab=NULL, main=k, col="gray", breaks=seq(0,1, by=0.05))
abline(v=0.05, lwd=2, col="red")
} else {
hist(picidae.subclade.combinations.6sp.random.model_params[[i]][,paste(j, k, sep=".")], xlab=NULL, main=k, col="gray", breaks=10)
}
if (k == "slope") abline(v=0, lwd=2, col="red") # mark zero slope
}
dev.off()
}
}
rm(i,j,k)
## histograms and line plots of important models (across the different sequential subclade combinations)
# one PDF per inclusion-variant x model: for each parameter, a histogram plus a
# line plot of the parameter along the sequential tree slices (root to tips)
for (i in names(picidae.subclade.combinations.6sp.sequential.model_params)) {
  for (j in sub(".r_squared", "", grep("[a-zA-z0-9_.]+.r_squared", colnames(picidae.subclade.combinations.6sp.sequential.model_params[[i]]), perl=TRUE, value=TRUE))) {
    # include the inclusion variant (i) in the filename so variants do not
    # overwrite each other's PDFs (matches the naming of the random-combination plots)
    pdf(file=paste("picidae_6sp_sequential", i, j, "histogram_lineplot.pdf", sep="_"), height=10, width=20)
    par(mfcol=c(2,4))
    for (k in c("r_squared","p_value","slope","intercept")) {
      if (k == "p_value") {
        # p-values get fixed bins on [0,1] and a red reference line at 0.05
        hist(picidae.subclade.combinations.6sp.sequential.model_params[[i]][,paste(j, k, sep=".")], xlab=NULL, main=k, col="gray", breaks=seq(0,1, by=0.05))
        abline(v=0.05, lwd=2, col="red")
      } else {
        hist(picidae.subclade.combinations.6sp.sequential.model_params[[i]][,paste(j, k, sep=".")], xlab=NULL, main=k, col="gray", breaks=10)
      }
      if (k == "slope") abline(v=0, lwd=2, col="red") # mark zero slope
      # convert the character rownames to numeric slice indices so the line
      # plot gets a proper numeric x-axis instead of a factor conversion
      plot(picidae.subclade.combinations.6sp.sequential.model_params[[i]][,paste(j, k, sep=".")] ~ as.numeric(rownames(picidae.subclade.combinations.6sp.sequential.model_params[[i]])), ylab=k, xlab="Tree slice (starting at root)", main=k, type="l")
      if (k == "p_value") abline(h=0.05, lty="dashed")
      if (k == "slope") abline(h=0, lty="dashed")
    }
    dev.off()
  }
}
rm(i,j,k)
## capture median of parameter values
# median of each parameter column (across the random subclade combinations),
# reshaped into a models x parameters matrix and written to CSV per variant
picidae.subclade.combinations.6sp.random.model_params.median <- list()
for (i in names(picidae.subclade.combinations.6sp.random.model_params)) {
# column-wise medians across combinations, ignoring failed (NA) fits
medians.tmp <- apply(picidae.subclade.combinations.6sp.random.model_params[[i]], MARGIN=2, median, na.rm=TRUE)
# NOTE(review): row names are derived from the *sequential* params' colnames —
# this assumes they match the random params' colnames (same models file); confirm
picidae.subclade.combinations.6sp.random.model_params.median[[i]] <- matrix(nrow=ncol(picidae.subclade.combinations.6sp.random.model_params[[i]])/4, ncol=4, dimnames=list(sub(".r_squared", "", grep("[a-zA-z0-9_.]+.r_squared", colnames(picidae.subclade.combinations.6sp.sequential.model_params[[i]]), perl=TRUE, value=TRUE)), c("r_squared", "p_value", "slope", "intercept")))
picidae.subclade.combinations.6sp.random.model_params.median[[i]][,1] <- medians.tmp[grep("r_squared", names(medians.tmp), perl=TRUE, value=TRUE)]
picidae.subclade.combinations.6sp.random.model_params.median[[i]][,2] <- medians.tmp[grep("p_value", names(medians.tmp), perl=TRUE, value=TRUE)]
# lookbehind excludes "trend.slope" (a metric name embedded in some model
# names) so only the regression-slope columns are selected
picidae.subclade.combinations.6sp.random.model_params.median[[i]][,3] <- medians.tmp[grep("(?<!trend.)slope", names(medians.tmp), perl=TRUE, value=TRUE)]
picidae.subclade.combinations.6sp.random.model_params.median[[i]][,4] <- medians.tmp[grep("intercept", names(medians.tmp), perl=TRUE, value=TRUE)]
## output median values to a table
write.csv(picidae.subclade.combinations.6sp.random.model_params.median[[i]], file=paste("picidae_subclade_combinations.6sp_random", i, "model_params.median.csv", sep="."))
}
rm(i,medians.tmp)
## capture median of parameter values without outliers (Picidae clades 234, 235; Picinae clades 208, 209)
# same as above, but restricted to combinations that exclude the outlier subclades
# NOTE(review): only nodes "234"/"235" are filtered here — the "208"/"209" in the
# comment presumably applies to the parallel Picinae analysis; confirm
picidae.subclade.combinations.6sp.random.model_params.no_outliers.median <- list()
for (i in names(picidae.subclade.combinations.6sp.random.model_params)) {
# keep only combinations containing neither outlier node, then take column medians
medians.tmp <- apply(picidae.subclade.combinations.6sp.random.model_params[[i]][!sapply(picidae.subclade.combinations.6sp.random[[i]], function(x) length(intersect(x, c("234","235"))) > 0),], MARGIN=2, median, na.rm=TRUE)
# NOTE(review): as above, row names come from the sequential params' colnames
picidae.subclade.combinations.6sp.random.model_params.no_outliers.median[[i]] <- matrix(nrow=ncol(picidae.subclade.combinations.6sp.random.model_params[[i]])/4, ncol=4, dimnames=list(sub(".r_squared", "", grep("[a-zA-z0-9_.]+.r_squared", colnames(picidae.subclade.combinations.6sp.sequential.model_params[[i]]), perl=TRUE, value=TRUE)), c("r_squared", "p_value", "slope", "intercept")))
picidae.subclade.combinations.6sp.random.model_params.no_outliers.median[[i]][,1] <- medians.tmp[grep("r_squared", names(medians.tmp), perl=TRUE, value=TRUE)]
picidae.subclade.combinations.6sp.random.model_params.no_outliers.median[[i]][,2] <- medians.tmp[grep("p_value", names(medians.tmp), perl=TRUE, value=TRUE)]
picidae.subclade.combinations.6sp.random.model_params.no_outliers.median[[i]][,3] <- medians.tmp[grep("(?<!trend.)slope", names(medians.tmp), perl=TRUE, value=TRUE)]
picidae.subclade.combinations.6sp.random.model_params.no_outliers.median[[i]][,4] <- medians.tmp[grep("intercept", names(medians.tmp), perl=TRUE, value=TRUE)]
}
rm(i,medians.tmp)
### output scatterplots of clade-level metrics for all subclades
# two-panel figure: diversification rate vs. average range overlap, and vs.
# rate of size-scaled shape evolution; regression lines use the median
# slope/intercept from the fits to the random subclade combinations
i <- "all_inds"
picidae.subclade.data.6sp.main_variant <- picidae.subclade.data[[i]][picidae.subclade.data[[i]][,"ntaxa.on_morph_tree"] >= 6,] # trim subclade data to only include clades with at least 6 taxa on the tree, as those were the ones used in model fitting
## plots of diversification rate vs. average range overlap and rate of shape evolution
pdf(file="Picidae_diversification_rates_vs_overlap_and_shape_evolution_rate.pdf", width=10, height=5, useDingbats=FALSE)
par(mfrow=c(1,2))
plot(picidae.subclade.data.6sp.main_variant[,"divrate.ML.constant.rate"] ~ picidae.subclade.data.6sp.main_variant[,"avg_overlaps.rangesize_scaled"], xlab="Average Range Overlap", ylab="Diversification Rate", pch=19)
abline(a=picidae.subclade.combinations.6sp.random.model_params.median[[i]]["divrate.ML.constant.rate_vs_avg_overlaps.rangesize_scaled","intercept"], b=picidae.subclade.combinations.6sp.random.model_params.median[[i]]["divrate.ML.constant.rate_vs_avg_overlaps.rangesize_scaled","slope"])
# label reports the median R^2 across combinations; x/y are hard-coded plot coordinates
text(x=8.5,y=0.13,labels=bquote(paste("median ", R^2, " = ", .(format(round(picidae.subclade.combinations.6sp.random.model_params.median[[i]]["divrate.ML.constant.rate_vs_avg_overlaps.rangesize_scaled","r_squared"], 2), nsmall=2)), sep="")))
plot(picidae.subclade.data.6sp.main_variant[,"divrate.ML.constant.rate"] ~ picidae.subclade.data.6sp.main_variant[,"morphrate.geomean_scaled.phyl_pca.BM.sigsq"], xlab="Rate of Size-scaled Shape Evolution", ylab="", pch=19)
abline(a=picidae.subclade.combinations.6sp.random.model_params.median[[i]]["divrate.ML.constant.rate_vs_morphrate.geomean_scaled.phyl_pca.BM.sigsq","intercept"], b=picidae.subclade.combinations.6sp.random.model_params.median[[i]]["divrate.ML.constant.rate_vs_morphrate.geomean_scaled.phyl_pca.BM.sigsq","slope"])
text(x=0.0105,y=0.13,labels=bquote(paste("median ", R^2, " = ", .(format(round(picidae.subclade.combinations.6sp.random.model_params.median[[i]]["divrate.ML.constant.rate_vs_morphrate.geomean_scaled.phyl_pca.BM.sigsq","r_squared"], 2), nsmall=2)), sep="")))
par(mfrow=c(1,1))
dev.off()
## plots of the three morphological evolution rates vs. overlaps
# three-panel figure: size, overall morphology, and size-scaled shape evolution
# rates, each against average range overlap, with median regression lines
# NOTE(review): the output filename contains the typo "Morpholopgical" — kept
# as-is here, since changing it would alter the script's outputs
pdf(file="Morpholopgical_evolution_rates_vs_overlap.pdf", width=10.5, height=3.5, useDingbats=FALSE)
par(mfrow=c(1,3), mar=c(5,5,4,2)+0.1)
plot(picidae.subclade.data.6sp.main_variant[,"morphrate.geomean.BM.sigsq"] ~ picidae.subclade.data.6sp.main_variant[,"avg_overlaps.rangesize_scaled"], xlab="Average Range Overlap", ylab=expression("Rate " ~ (sigma^2)), pch=19, main="Size Evolution")
abline(a=picidae.subclade.combinations.6sp.random.model_params.median[[i]]["morphrate.geomean.BM.sigsq_vs_avg_overlaps.rangesize_scaled","intercept"], b=picidae.subclade.combinations.6sp.random.model_params.median[[i]]["morphrate.geomean.BM.sigsq_vs_avg_overlaps.rangesize_scaled","slope"])
text(x=8.5,y=0.006,labels=bquote(paste("median ", R^2, " = ", .(format(round(picidae.subclade.combinations.6sp.random.model_params.median[[i]]["morphrate.geomean.BM.sigsq_vs_avg_overlaps.rangesize_scaled","r_squared"], 2), nsmall=2)), sep="")))
plot(picidae.subclade.data.6sp.main_variant[,"morphrate.phyl_pca.BM.sigsq"] ~ picidae.subclade.data.6sp.main_variant[,"avg_overlaps.rangesize_scaled"], xlab="Average Range Overlap", ylab="", main="Overall Morphological Evolution", pch=19)
abline(a=picidae.subclade.combinations.6sp.random.model_params.median[[i]]["morphrate.phyl_pca.BM.sigsq_vs_avg_overlaps.rangesize_scaled","intercept"], b=picidae.subclade.combinations.6sp.random.model_params.median[[i]]["morphrate.phyl_pca.BM.sigsq_vs_avg_overlaps.rangesize_scaled","slope"])
text(x=8.5,y=0.07,labels=bquote(paste("median ", R^2, " = ", .(format(round(picidae.subclade.combinations.6sp.random.model_params.median[[i]]["morphrate.phyl_pca.BM.sigsq_vs_avg_overlaps.rangesize_scaled","r_squared"], 2), nsmall=2)), sep="")))
plot(picidae.subclade.data.6sp.main_variant[,"morphrate.geomean_scaled.phyl_pca.BM.sigsq"] ~ picidae.subclade.data.6sp.main_variant[,"avg_overlaps.rangesize_scaled"], xlab="Average Range Overlap", ylab="", main="Size-scaled Shape Evolution", pch=19)
abline(a=picidae.subclade.combinations.6sp.random.model_params.median[[i]]["morphrate.geomean_scaled.phyl_pca.BM.sigsq_vs_avg_overlaps.rangesize_scaled","intercept"], b=picidae.subclade.combinations.6sp.random.model_params.median[[i]]["morphrate.geomean_scaled.phyl_pca.BM.sigsq_vs_avg_overlaps.rangesize_scaled","slope"])
text(x=8.5,y=0.002,labels=bquote(paste("median ", R^2, " = ", .(format(round(picidae.subclade.combinations.6sp.random.model_params.median[[i]]["morphrate.geomean_scaled.phyl_pca.BM.sigsq_vs_avg_overlaps.rangesize_scaled","r_squared"], 2), nsmall=2)), sep="")))
par(mfrow=c(1,1))
dev.off()
### quantifying the inclusion of subclades in the random combinations and taxa in subclades
# summary statistics printed to the console; uses i set above (currently "all_inds")
mean(sapply(picidae.subclade.combinations.6sp.random[[i]], function(x) length(x))) # calculate the average number of subclades included in the random combinations
mean(sapply(picidae.subclade.combinations.6sp.random[[i]], function(x) sum(sapply(x, function(y) length(picidae.morph.log.fully_reduced.subclades.treedata[[i]][[y]]$phy$tip.label))))) # calculate the average number of taxa from the morph tree included in the subclade
mean(sapply(picidae.subclade.combinations.6sp.random[[i]], function(x) sum(sapply(x, function(y) length(picidae.morph.log.fully_reduced.subclades.treedata[[i]][[y]]$phy.full_tree$tip.label))))) # calculate the average number of taxa from the full tree included in the subclade
## checking median values of variables from subclade combinations with and without the two outlier clades (234 and 235 in picidae analyses, 208 and 209 in picinae analyses)
sapply(picidae.subclade.combinations.6sp.random[[i]], function(x) length(intersect(x, c("234","235"))) > 0) # identify subclade combinations including one of those 2 outlier subclades
apply(picidae.subclade.combinations.6sp.random.model_params[[i]][sapply(picidae.subclade.combinations.6sp.random[[i]], function(x) length(intersect(x, c("234","235"))) > 0),], MARGIN=2, FUN = median) # median values for subclades combinations with the two outlier clades
apply(picidae.subclade.combinations.6sp.random.model_params[[i]][!sapply(picidae.subclade.combinations.6sp.random[[i]], function(x) length(intersect(x, c("234","235"))) > 0),], MARGIN=2, FUN = median) # median values for subclades combinations without the two outlier clades
# difference of the two medians, scaled by the with-outlier range of each column
(apply(picidae.subclade.combinations.6sp.random.model_params[[i]][sapply(picidae.subclade.combinations.6sp.random[[i]], function(x) length(intersect(x, c("234","235"))) > 0),], MARGIN=2, FUN = median) - apply(picidae.subclade.combinations.6sp.random.model_params[[i]][!sapply(picidae.subclade.combinations.6sp.random[[i]], function(x) length(intersect(x, c("234","235"))) > 0),], MARGIN=2, FUN = median)) / (apply(picidae.subclade.combinations.6sp.random.model_params[[i]][sapply(picidae.subclade.combinations.6sp.random[[i]], function(x) length(intersect(x, c("234","235"))) > 0),], MARGIN=2, FUN = max) - apply(picidae.subclade.combinations.6sp.random.model_params[[i]][sapply(picidae.subclade.combinations.6sp.random[[i]], function(x) length(intersect(x, c("234","235"))) > 0),], MARGIN=2, FUN = min)) # quantify the difference between the parameters from the model fits to subclade combinations with and without the outlier clades, as a percentage of the range of values across all subclade combinations
### output table of median AICc and delta-AICc values from fitting diversification models and morphological evolution models by subclade
# medians are taken over subclades with at least 6 taxa on the morphology tree
# (the set actually used in model fitting)
i <- "all_inds"
## output diversification models: AICc for constant-rate; delta AICc for time-dependent, diversity-dependent
for (m in c("divrate.ML.constant.AICc","divrate.ML.time_dependent.delta_AICc", "divrate.ML.diversity_dependent.delta_AICc")) {
  # newline added so consecutive entries do not run together on the console
  cat(m, ": ", median(picidae.subclade.data[[i]][picidae.subclade.data[[i]][,"ntaxa.on_morph_tree"]>=6,m]), "\n", sep="")
}
## generate table of morph evolution models: AICc for BM; delta AICc for OU, EB, trend
morph_vars <- c("geomean", "phyl_pca.PC1", "geomean_scaled.phyl_pca.PC1")
models <- c("OU", "EB", "trend")
picidae.morph_models.delta_AICc <- matrix(nrow=length(morph_vars), ncol=length(models), dimnames=list(morph_vars, models))
for (m in morph_vars) {
  for (n in models) {
    # fixed: previously assigned into the undefined object
    # picidae.morph_models.delta_AICc.table, which errored on first use
    picidae.morph_models.delta_AICc[m,n] <- median(picidae.subclade.data[[i]][picidae.subclade.data[[i]][,"ntaxa.on_morph_tree"]>=6,paste("morphrate.", m, ".", n, ".delta_AICc", sep="")])
  }
}
rm(i,m,n)
### output table of median parameter values (slope, pseudo-R^2, and p-value) from model fits to subclade combinations
i <- "all_inds" # inclusion variant whose medians are tabulated below
# names of the subclade-regression models to report (rows of the output tables)
subclade_models_to_output <- c("total_div_vs_crown_age", "divrate.ML.constant.rate_vs_morphrate.geomean.BM.sigsq", "divrate.ML.constant.rate_vs_morphrate.phyl_pca.BM.sigsq", "divrate.ML.constant.rate_vs_morphrate.geomean_scaled.phyl_pca.BM.sigsq", "divrate.ML.constant.rate_vs_avg_overlaps.rangesize_scaled", "divrate.ML.constant.rate_vs_avg_overlaps.euclidean_scaled.geomean", "divrate.ML.constant.rate_vs_avg_overlaps.euclidean_scaled.phyl_pca", "divrate.ML.constant.rate_vs_avg_overlaps.euclidean_scaled.geomean_scaled.phyl_pca", "morphrate.geomean.BM.sigsq_vs_avg_overlaps.rangesize_scaled", "morphrate.geomean.BM.sigsq_vs_avg_overlaps.euclidean_scaled.geomean", "morphrate.phyl_pca.BM.sigsq_vs_avg_overlaps.rangesize_scaled", "morphrate.phyl_pca.BM.sigsq_vs_avg_overlaps.euclidean_scaled.phyl_pca", "morphrate.geomean_scaled.phyl_pca.BM.sigsq_vs_avg_overlaps.rangesize_scaled", "morphrate.geomean_scaled.phyl_pca.BM.sigsq_vs_avg_overlaps.euclidean_scaled.geomean_scaled.phyl_pca", "gamma_vs_avg_overlaps.rangesize_scaled", "gamma_vs_avg_overlaps.euclidean_scaled.geomean", "gamma_vs_avg_overlaps.euclidean_scaled.phyl_pca", "gamma_vs_avg_overlaps.euclidean_scaled.geomean_scaled.phyl_pca", "divrate.ML.time_dependent.delta_AICc_vs_avg_overlaps.rangesize_scaled", "divrate.ML.time_dependent.lambda1_vs_avg_overlaps.rangesize_scaled", "divrate.ML.diversity_dependent.delta_AICc_vs_avg_overlaps.rangesize_scaled", "morphrate.geomean.EB.delta_AICc_vs_avg_overlaps.rangesize_scaled", "morphrate.geomean.EB.alpha_vs_avg_overlaps.rangesize_scaled", "morphrate.phyl_pca.PC1.EB.delta_AICc_vs_avg_overlaps.rangesize_scaled", "morphrate.phyl_pca.PC1.EB.alpha_vs_avg_overlaps.rangesize_scaled", "morphrate.geomean_scaled.phyl_pca.PC1.EB.delta_AICc_vs_avg_overlaps.rangesize_scaled", "morphrate.geomean_scaled.phyl_pca.PC1.EB.alpha_vs_avg_overlaps.rangesize_scaled")
# parameter columns to report for each model
params_to_output <- c("r_squared", "p_value", "slope")
## generate table to store median parameter values
picidae.subclade_models.params.table <- matrix(nrow=length(subclade_models_to_output), ncol=length(params_to_output), dimnames=list(subclade_models_to_output,params_to_output))
for (m in subclade_models_to_output) {
for (n in params_to_output) {
picidae.subclade_models.params.table[m,n] <- picidae.subclade.combinations.6sp.random.model_params.median[[i]][m,n]
}
}
rm(m,n)
write.csv(picidae.subclade_models.params.table, file="picidae.subclade_models.params.median.csv") # output table to file
## generate table to store median parameter values, without outliers
picidae.subclade_models.params.no_outliers.table <- matrix(nrow=length(subclade_models_to_output), ncol=length(params_to_output), dimnames=list(subclade_models_to_output,params_to_output))
for (m in subclade_models_to_output) {
for (n in params_to_output) {
picidae.subclade_models.params.no_outliers.table[m,n] <- picidae.subclade.combinations.6sp.random.model_params.no_outliers.median[[i]][m,n]
}
}
rm(m,n)
write.csv(picidae.subclade_models.params.no_outliers.table, file="picidae.subclade_models.params.no_outliers.median.csv") # output table to file
### output boxplots of slope, R^2, and p_value for the most important models
## generate vectors of names of models to output
subclade_models_to_output.divrates_morphrates <-c("divrate.ML.constant.rate_vs_morphrate.phyl_pca.BM.sigsq", "divrate.ML.constant.rate_vs_morphrate.geomean.BM.sigsq", "divrate.ML.constant.rate_vs_morphrate.geomean_scaled.phyl_pca.BM.sigsq")
subclade_models_to_output.divrates_overlaps <- c("divrate.ML.constant.rate_vs_avg_overlaps.rangesize_scaled", "divrate.ML.constant.rate_vs_avg_overlaps.euclidean_scaled.phyl_pca", "divrate.ML.constant.rate_vs_avg_overlaps.euclidean_scaled.geomean", "divrate.ML.constant.rate_vs_avg_overlaps.euclidean_scaled.geomean_scaled.phyl_pca")
subclade_models_to_output.morphrates_overlaps <- c("morphrate.phyl_pca.BM.sigsq_vs_avg_overlaps.rangesize_scaled", "morphrate.geomean.BM.sigsq_vs_avg_overlaps.rangesize_scaled", "morphrate.geomean_scaled.phyl_pca.BM.sigsq_vs_avg_overlaps.rangesize_scaled")
subclade_models_to_output.divmodels_overlaps <- c("divrate.ML.time_dependent.delta_AICc_vs_avg_overlaps.rangesize_scaled", "divrate.ML.diversity_dependent.delta_AICc_vs_avg_overlaps.rangesize_scaled")
subclade_models_to_output.morphmodels_overlaps <- c("morphrate.geomean.EB.delta_AICc_vs_avg_overlaps.rangesize_scaled", "morphrate.phyl_pca.PC1.EB.delta_AICc_vs_avg_overlaps.rangesize_scaled", "morphrate.geomean_scaled.phyl_pca.PC1.EB.delta_AICc_vs_avg_overlaps.rangesize_scaled")
## helper: write one boxplot PDF per parameter (r_squared, p_value, slope)
## for a group of models.  Reads the globals `params_to_output`, `i`, and
## `picidae.subclade.combinations.6sp.random.model_params`.
# models.chr : model names whose columns are plotted, in plotting order
# labels.chr : x-axis labels, parallel to models.chr
# tag.chr    : infix for the output PDF file name
# FIX(review): the original three copies of this loop looked up column names
# in `...6sp.sequential.model_params` while indexing the *random* object;
# that only worked because the two objects share identical column names.
# The helper now takes colnames from the same object it indexes.
plot_param_boxplots <- function(models.chr, labels.chr, tag.chr) {
  for (m in params_to_output) {
    pdf(file=paste("picidae_subclade_combinations.6sp_random.model_params.boxplots.", tag.chr, ".", m, ".pdf", sep=""), height=6, width=4)
    data.tmp <- numeric()
    names.tmp <- character()
    # loop over models to output, storing data and the name of the data
    for (n in models.chr) {
      data.tmp <- c(data.tmp, picidae.subclade.combinations.6sp.random.model_params[[i]][,grep(paste(n, m, sep="."), colnames(picidae.subclade.combinations.6sp.random.model_params[[i]]), value=TRUE)]) # get data for current model and parameter
      names.tmp <- c(names.tmp, rep(n, times=nrow(picidae.subclade.combinations.6sp.random.model_params[[i]])))
    }
    names.tmp <- factor(x=names.tmp, levels=models.chr) # set the names of the data to a factor, so that they plot in the correct order
    boxplot(data.tmp ~ names.tmp, names=labels.chr, ylim=switch(m, r_squared = c(0,1), p_value = c(0,1), slope = range(data.tmp))) # create boxplot
    # add horizontal reference lines to boxplots as needed
    switch(m,
      r_squared = NULL,
      p_value = abline(h=0.05, lty=2),
      slope = abline(h=0, lty=2)
    )
    dev.off()
  }
}
## output boxplots for divrates vs. morphrates
plot_param_boxplots(subclade_models_to_output.divrates_morphrates, c("Overall", "Size", "Shape"), "divrates_morphrates")
## output boxplots for divrates vs. overlaps
plot_param_boxplots(subclade_models_to_output.divrates_overlaps, c("Unscaled", "Overall", "Size", "Shape"), "divrates_overlaps")
## output boxplots for morphrates vs. overlaps
plot_param_boxplots(subclade_models_to_output.morphrates_overlaps, c("Overall", "Size", "Shape"), "morphrates_overlaps")
###### run the analyses for Picinae ######
### extract subclades from full tree and morph trees
## extract subclades from full trees
# NOTE(review): `extractSubclades.all` is defined elsewhere in this script --
# it must be in scope before this section runs.
picinae.RAxML.all.BEAST_calibrated.with_proxies.subclades <- extractSubclades.all(picinae.RAxML.all.BEAST_calibrated.with_proxies)
# extract subclades from morph trees, once per morphological dataset variant
picinae.morph.log.fully_reduced.treedata.subclades <- list()
for (i in c("all_inds", "complete_ind_only")) {
picinae.morph.log.fully_reduced.treedata.subclades[[i]] <- extractSubclades.all(picinae.morph.log.fully_reduced.treedata[[i]]$phy)
}
rm(i)
### to complete analyses for Picinae, use same code as above, replacing picidae with picinae for variable and file names
|
# Read the Newick gene tree, strip its root, and write the unrooted version
# next to the original.
library(ape)
rooted_tree <- read.tree("4429_0.txt")
write.tree(unroot(rooted_tree), file = "4429_0_unrooted.txt")
|
/codeml_files/newick_trees_processed/4429_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false
| false
| 135
|
r
|
# Read the Newick gene tree, remove its root with ape::unroot(), and write
# the unrooted tree to a sibling file.
library(ape)
testtree <- read.tree("4429_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="4429_0_unrooted.txt")
|
#'---
#'author: "Thomas Goossens (CRA-W) - t.goossens@cra.wallonie.be"
#'output:
#' html_document:
#' theme: default
#' toc: false
#' toc_depth: 6
#' toc_float:
#' collapsed: false
#' smooth_scroll: true
#' md_document:
#' theme: default
#' toc: false
#' toc_depth: 6
#' toc_float:
#' collapsed: false
#' smooth_scroll: true
#'title: "Collection of R Scripts of the Agromet project"
#'date: \ 20-04-2018\
#'---
#' Benchmark spatial-interpolation learners on hourly datasets.
#'
#' Builds one mlr regression task per hourly record set nested in
#' `nested.records.df` and benchmarks a set of learners on every task using
#' leave-one-out resampling.
#'
#' @param nested.records.df data frame with a list-column `data` of hourly
#'   record tibbles and an `mtime` column used to name each task.
#' @param target.chr name of the target variable column (default "tsa").
#' @return an mlr `BenchmarkResult` object.
benchmark.hourly_sets <- function(nested.records.df, target.chr = "tsa"){
  require(mlr)
  # BUG FIX(review): the original body unconditionally overwrote target.chr
  # with "tsa", silently ignoring the caller's argument.  "tsa" is now the
  # default instead, so existing calls behave identically while other
  # targets are honoured.
  # defining the validation (resampling) strategy: leave-one-out
  resampling.l <- mlr::makeResampleDesc(
    method = "LOO"
  )
  # converting each tibble of the nested records to a strict dataframe
  # (mlr task constructors require plain data.frames)
  # ::todo:: need to use transmute_at
  nested.records.df <- nested.records.df %>%
    mutate(data_as_df = purrr::map(
      .x = data,
      .f = data.frame
    ))
  # defining the regression tasks for each of the hourly datasets,
  # one task per row, named after its mtime
  nested.records.df <- nested.records.df %>%
    mutate(task = purrr::map2(
      as.character(mtime),
      data_as_df,
      mlr::makeRegrTask,
      target = target.chr
    )
    )
  # the list of tasks extracted from the nested records
  tasks.l <- nested.records.df$task
  # defining the learners who will be compared
  lrns.l <- list(
    makeFilterWrapper(
      learner = makeLearner(cl = "regr.lm", id = "linear regression"),
      fw.method = "information.gain", fw.abs = 2),
    # makeLearner(cl = "regr.lm", id="linear regression"),
    # makeLearner(cl = "regr.elmNN", id="single layer neural net"),
    # makeLearner(cl ="regr.kknn", id="nearest neighbours"),
    makeLearner(cl = "regr.km", id = "kriging")
  )
  bmr.l <- benchmark(learners = lrns.l, tasks = tasks.l,
                     resamplings = resampling.l,
                     keep.pred = TRUE, show.info = TRUE)
  # exploratory meuse-grid prediction scratch code removed; see git history.
  # https://mlr-org.github.io/mlr/articles/tutorial/devel/nested_resampling.html
  # https://mlr-org.github.io/mlr/articles/tutorial/devel/feature_selection.html
  bmr.l
}
#+ ---------------------------------
#' ## Terms of service
#' To use the [AGROMET API](https://app.pameseb.be/fr/pages/api_call_test/) you need to provide your own user token.
#' The present script is available under the [GNU-GPL V3](https://www.gnu.org/licenses/gpl-3.0.en.html) license and comes with ABSOLUTELY NO WARRANTY.
#'
#' Copyright : Thomas Goossens - t.goossens@cra.wallonie.be 2018.
#'
#' *(This document was generated using [R software](https://www.r-project.org/) with the [knitr library](https://deanattali.com/2015/03/24/knitrs-best-hidden-gem-spin/))*.
#+ TOS,echo=TRUE,warning=FALSE,message=FALSE,error=FALSE
|
/R/benchmark.hourly_sets.R
|
no_license
|
pokyah/agrometeor-spatial-benchmarking
|
R
| false
| false
| 4,410
|
r
|
#'---
#'author: "Thomas Goossens (CRA-W) - t.goossens@cra.wallonie.be"
#'output:
#' html_document:
#' theme: default
#' toc: false
#' toc_depth: 6
#' toc_float:
#' collapsed: false
#' smooth_scroll: true
#' md_document:
#' theme: default
#' toc: false
#' toc_depth: 6
#' toc_float:
#' collapsed: false
#' smooth_scroll: true
#'title: "Collection of R Scripts of the Agromet project"
#'date: \ 20-04-2018\
#'---
#' Benchmark spatial-interpolation learners on hourly datasets.
#'
#' Builds one mlr regression task per hourly record set nested in
#' `nested.records.df` and benchmarks a set of learners on every task using
#' leave-one-out resampling.
#'
#' @param nested.records.df data frame with a list-column `data` of hourly
#'   record tibbles and an `mtime` column used to name each task.
#' @param target.chr name of the target variable column (default "tsa").
#' @return an mlr `BenchmarkResult` object.
benchmark.hourly_sets <- function(nested.records.df, target.chr = "tsa"){
  require(mlr)
  # BUG FIX(review): the original body unconditionally overwrote target.chr
  # with "tsa", silently ignoring the caller's argument.  "tsa" is now the
  # default instead, so existing calls behave identically while other
  # targets are honoured.
  # defining the validation (resampling) strategy: leave-one-out
  resampling.l <- mlr::makeResampleDesc(
    method = "LOO"
  )
  # converting each tibble of the nested records to a strict dataframe
  # (mlr task constructors require plain data.frames)
  # ::todo:: need to use transmute_at
  nested.records.df <- nested.records.df %>%
    mutate(data_as_df = purrr::map(
      .x = data,
      .f = data.frame
    ))
  # defining the regression tasks for each of the hourly datasets,
  # one task per row, named after its mtime
  nested.records.df <- nested.records.df %>%
    mutate(task = purrr::map2(
      as.character(mtime),
      data_as_df,
      mlr::makeRegrTask,
      target = target.chr
    )
    )
  # the list of tasks extracted from the nested records
  tasks.l <- nested.records.df$task
  # defining the learners who will be compared
  lrns.l <- list(
    makeFilterWrapper(
      learner = makeLearner(cl = "regr.lm", id = "linear regression"),
      fw.method = "information.gain", fw.abs = 2),
    # makeLearner(cl = "regr.lm", id="linear regression"),
    # makeLearner(cl = "regr.elmNN", id="single layer neural net"),
    # makeLearner(cl ="regr.kknn", id="nearest neighbours"),
    makeLearner(cl = "regr.km", id = "kriging")
  )
  bmr.l <- benchmark(learners = lrns.l, tasks = tasks.l,
                     resamplings = resampling.l,
                     keep.pred = TRUE, show.info = TRUE)
  # exploratory meuse-grid prediction scratch code removed; see git history.
  # https://mlr-org.github.io/mlr/articles/tutorial/devel/nested_resampling.html
  # https://mlr-org.github.io/mlr/articles/tutorial/devel/feature_selection.html
  bmr.l
}
#+ ---------------------------------
#' ## Terms of service
#' To use the [AGROMET API](https://app.pameseb.be/fr/pages/api_call_test/) you need to provide your own user token.
#' The present script is available under the [GNU-GPL V3](https://www.gnu.org/licenses/gpl-3.0.en.html) license and comes with ABSOLUTELY NO WARRANTY.
#'
#' Copyright : Thomas Goossens - t.goossens@cra.wallonie.be 2018.
#'
#' *(This document was generated using [R software](https://www.r-project.org/) with the [knitr library](https://deanattali.com/2015/03/24/knitrs-best-hidden-gem-spin/))*.
#+ TOS,echo=TRUE,warning=FALSE,message=FALSE,error=FALSE
|
# Tests for show_segmatrix(): verify the returned object class for both the
# static (ggplot) and interactive (plotly) variants.
# Uses expect_s3_class() instead of the deprecated expect_is(), and TRUE
# instead of the reassignable shorthand T.
test_that("Checking show_segmatrix ggplot class", {
  result <- show_segmatrix(epcdata)
  expect_s3_class(result, 'ggplot')
})
test_that("Checking show_segmatrix plotly class", {
  result <- show_segmatrix(epcdata, plotly = TRUE)
  expect_s3_class(result, 'plotly')
})
|
/tests/testthat/test-show_segmatrix.R
|
permissive
|
tbep-tech/tbeptools
|
R
| false
| false
| 255
|
r
|
# Tests for show_segmatrix(): verify the returned object class for both the
# static (ggplot) and interactive (plotly) variants.
# Uses expect_s3_class() instead of the deprecated expect_is(), and TRUE
# instead of the reassignable shorthand T.
test_that("Checking show_segmatrix ggplot class", {
  result <- show_segmatrix(epcdata)
  expect_s3_class(result, 'ggplot')
})
test_that("Checking show_segmatrix plotly class", {
  result <- show_segmatrix(epcdata, plotly = TRUE)
  expect_s3_class(result, 'plotly')
})
|
# Draw partial-circle ("arc") bubbles on an existing plot, one per cell of
# `mat`, centred at (column, row).  Bubble area scales with the cell value;
# the fraction of the full circle drawn is given by `proportions`.
#
# mat         : numeric matrix of sizes; NA cells are skipped.
# proportions : matrix, same dimensions as mat, each element the proportion
#               (0-1) of the circle drawn for that cell.
# rescale     : if truthy, radii are normalised by this value; otherwise raw.
# inches      : base radius, in inches, for the largest bubble.
# ...         : forwarded to polygon() (e.g. col, border).
#
# NOTE(review): `Max` (capital M) is not base R -- presumably a package-local
# NA-safe maximum helper; confirm it is in scope wherever this is called.
draw.bubble.arc <-
function(mat, proportions, rescale, inches, ...) {
  nx <- ncol(mat)
  ny <- nrow(mat)
  if (rescale) {
    inches.mat <- inches * sqrt(mat / rescale) # per-cell radii; circles are drawn one at a time to deal with NA
    inches <- inches * sqrt(Max(mat) / rescale)
  } else {
    inches.mat <- inches * sqrt(mat)           # per-cell radii; circles are drawn one at a time to deal with NA
    inches <- inches * sqrt(Max(mat))
  }
  mat <- sqrt(mat / pi)
  # seq_len() instead of 1:n so empty matrices iterate zero times.
  for (j in seq_len(ny)) {
    for (i in seq_len(nx)) {
      # && (scalar, short-circuit) is the correct operator inside if().
      if (!is.na(proportions[j, i]) && !is.na(mat[j, i])) {
        rx <- xinch(inches.mat[j, i])
        ry <- yinch(inches.mat[j, i])
        # arc from -p*180 to +p*180 degrees, converted to radians
        angles <- seq(-round(proportions[j, i] * 180), round(proportions[j, i] * 180), by = 1) / 360 * 2 * pi
        x <- i + rx * cos(angles)
        y <- j + ry * sin(angles)
        polygon(c(i, x), c(j, y), ...)
      }
    }
  }
}
|
/R-libraries/myUtilities/R/draw.bubble.arc.R
|
no_license
|
jyqalan/myUtilities
|
R
| false
| false
| 1,071
|
r
|
# Draw partial-circle ("arc") bubbles on an existing plot, one per cell of
# `mat`, centred at (column, row).  `...` is forwarded to polygon().
# NOTE(review): `Max` (capital M) is not base R -- presumably a package-local
# NA-safe maximum helper; confirm it is in scope wherever this is called.
draw.bubble.arc <-
function(mat,proportions,rescale,inches,...){
# the proportions is a matrix of the same dimension as mat, with each element
# representing the proportion (0-1) of the full circle drawn for that cell
nx <- ncol(mat)
ny <- nrow(mat)
if(rescale){
inches.mat <- inches*sqrt(mat/rescale) # because need to plot circle one at a time to deal with NA
inches <- inches*sqrt(Max(mat)/rescale)
} else {
inches.mat <- inches*sqrt(mat) # because need to plot circle one at a time to deal with NA
inches <- inches*sqrt(Max(mat))
}
mat <- sqrt(mat/pi)
for(j in 1:ny){
for(i in 1:nx){
if(!is.na(proportions[j,i]) & !is.na(mat[j,i])){
rx <- xinch(inches.mat[j,i])
ry <- yinch(inches.mat[j,i])
# arc from -p*180 to +p*180 degrees, converted to radians
angles <- seq(-round(proportions[j,i]*180),round(proportions[j,i]*180),by=1)/360*2*pi
x <- i+rx*cos(angles)
y <- j+ry*sin(angles)
polygon(c(i,x),c(j,y),...)
}
}
}
}
|
# Read the Newick gene tree, strip its root, and write the unrooted version
# next to the original.
library(ape)
rooted_tree <- read.tree("4577_8.txt")
write.tree(unroot(rooted_tree), file = "4577_8_unrooted.txt")
|
/codeml_files/newick_trees_processed/4577_8/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false
| false
| 135
|
r
|
# Read the Newick gene tree, remove its root with ape::unroot(), and write
# the unrooted tree to a sibling file.
library(ape)
testtree <- read.tree("4577_8.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="4577_8_unrooted.txt")
|
# DIRECTION TO NEXT IMAGE -------------------------------------------------
# Compute the bearing from each photo towards the next one and store it in
# the ANGLE column of the internal `.exif$base` table.
# NOTE(review): presumably angles_next() yields NA where no "next" photo
# exists; tidyr::fill() then carries the previous angle forward -- confirm.
# NOTE(review): assignment to `.exif$base` persists only if `.exif` is an
# environment (not a list); assumed package-internal state -- confirm.
add_direction <- function() {
  .exif$base <-
    .exif$base %>%
    dplyr::mutate(ANGLE = angles_next(LON, LAT)) %>%
    tidyr::fill(ANGLE)
}
# EXIFTOOL ----------------------------------------------------------------
#' Writing exif tags with exiftool
#'
#' Runs \href{https://sno.phy.queensu.ca/~phil/exiftool/}{exiftool}
#' commands to write computed longitudes, latitudes (and directions) in image
#' files.
#'
#' In order to write image files, \strong{exiftool} must be installed.
#'
#' Files are not overwritten : a copy of the images including new exif tags
#' are written in the \code{output} directory.
#'
#' @param direction should direction to next photo be calculated and included
#' in the file?
#' @inheritParams interp_josm
#' @export
write_exiftool <- function(path = ".",
                           direction = TRUE) {
  init_check()
  if (direction) add_direction()
  # Build one exiftool command per photo from the interpolated coordinates
  # held in the internal .exif$base table; output copies go to "output".
  cmds <-
    with(.exif$base,
         paste0(
           "exiftool ",
           "-GPSLatitude=", LAT, " ",
           "-GPSLongitude=" , LON, " ",
           if (direction) paste0("-GPSImgDirection=", ANGLE, " "),
           "-o \"output\" \"input/photos/", PHOTO, "\""
         )
    )
  # Temporarily switch into `path`; on.exit() guarantees the working
  # directory is restored even if a command errors part-way through
  # (the original restored it only on the success path).
  old_wd <- getwd()
  on.exit(setwd(old_wd), add = TRUE)
  setwd(path)
  for (cmd in cmds) {
    cat(cmd, sep = "\n")
    system(cmd)
  }
  invisible(NULL)
}
# EXPORT CSV --------------------------------------------------------------
#' Write coordinates and direction in a csv file.
#'
#' Write computed longitudes, latitudes (and directions) in a csv file.
#' Images are not modified.
#'
#' @inheritParams write_exiftool
#' @param file the name of the csv file (default : "interp_gps.csv")
#' @param ... other arguments passed to \code{write.csv}
#' @export
#' @importFrom utils write.csv
write_coord_csv <- function(path = ".",
                            file = "interp_gps.csv",
                            direction = TRUE,
                            ...) {
  # Validate internal state, then (optionally) compute bearings before export.
  init_check()
  if (direction) add_direction()
  # Export everything except the internal ACTION bookkeeping column.
  write.csv(
    subset(.exif$base, select = -ACTION),
    file = file.path(path, file),
    row.names = FALSE,
    ...
  )
}
|
/R/write.R
|
no_license
|
py-b/gpsinterp
|
R
| false
| false
| 2,151
|
r
|
# DIRECTION TO NEXT IMAGE -------------------------------------------------
# Compute the bearing from each photo towards the next one and store it in
# the ANGLE column of the internal `.exif$base` table (tidyr::fill() carries
# the previous value into trailing NA rows).
# NOTE(review): assumes `.exif` is an environment so the assignment persists.
add_direction <- function() {
  .exif$base <-
    .exif$base %>%
    dplyr::mutate(ANGLE = angles_next(LON, LAT)) %>%
    tidyr::fill(ANGLE)
}
# EXIFTOOL ----------------------------------------------------------------
#' Writing exif tags with exiftool
#'
#' Runs \href{https://sno.phy.queensu.ca/~phil/exiftool/}{exiftool}
#' commands to write computed longitudes, latitudes (and directions) in image
#' files.
#'
#' In order to write image files, \strong{exiftool} must be installed.
#'
#' Files are not overwritten : a copy of the images including new exif tags
#' are written in the \code{output} directory.
#'
#' @param direction should direction to next photo be calculated and included
#' in the file?
#' @inheritParams interp_josm
#' @export
write_exiftool <- function(path = ".",
                           direction = TRUE) {
  init_check()
  if (direction) add_direction()
  # Build one exiftool command per photo from the coordinates in .exif$base;
  # output copies are written into the "output" directory.
  cmds <-
    with(.exif$base,
         paste0(
           "exiftool ",
           "-GPSLatitude=", LAT, " ",
           "-GPSLongitude=" , LON, " ",
           if (direction) paste0("-GPSImgDirection=", ANGLE, " "),
           "-o \"output\" \"input/photos/", PHOTO, "\""
         )
    )
  # NOTE(review): the working directory is restored only on the success
  # path; an error inside the loop leaves the session in `path`.  Consider
  # on.exit(setwd(old_wd), add = TRUE) immediately after getwd().
  old_wd <- getwd()
  setwd(path)
  for (cmd in cmds) {
    cat(cmd, sep = "\n")
    system(cmd)
  }
  setwd(old_wd)
}
# EXPORT CSV --------------------------------------------------------------
#' Write coordinates and direction in a csv file.
#'
#' Write computed longitudes, latitudes (and directions) in a csv file.
#' Images are not modified.
#'
#' @inheritParams write_exiftool
#' @param file the name of the csv file (default : "interp_gps.csv")
#' @param ... other arguments passed to \code{write.csv}
#' @export
#' @importFrom utils write.csv
write_coord_csv <- function(path = ".",
                            file = "interp_gps.csv",
                            direction = TRUE,
                            ...) {
  # Validate internal state, then (optionally) compute bearings before export.
  init_check()
  if (direction) add_direction()
  # Export everything except the internal ACTION bookkeeping column.
  write.csv(
    subset(.exif$base, select = -ACTION),
    file = file.path(path, file),
    row.names = FALSE,
    ...
  )
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/f_bin.R
\name{f_bin}
\alias{f_bin}
\alias{f_bin_text}
\alias{f_bin_text_right}
\alias{f_bin_right}
\alias{ff_bin}
\alias{ff_bin_text}
\alias{ff_bin_right}
\alias{ff_bin_text_right}
\alias{f_interval}
\alias{f_interval_text}
\alias{f_interval_text_right}
\alias{f_interval_right}
\alias{ff_interval}
\alias{ff_interval_text}
\alias{ff_interval_text_right}
\alias{ff_interval_right}
\title{Convert Binned Intervals to Readable Form}
\usage{
f_bin(x, l = "<", le = "<=", parse = FALSE, ...)
f_bin_text(
x,
greater = "Greater than",
middle = "to",
less = "less than",
equal = "or equal to",
...
)
f_bin_text_right(x, l = "up to", le = "to", equal.digits = FALSE, ...)
f_bin_right(x, l = "<", le = "<=", equal.digits = FALSE, parse = FALSE, ...)
ff_bin(l = "<", le = "<=", parse = TRUE, ...)
ff_bin_text(
greater = "Greater than",
middle = "to",
less = "less than",
equal = "or equal to",
...
)
ff_bin_right(l = "<", le = "<=", equal.digits = FALSE, parse = TRUE, ...)
ff_bin_text_right(l = "up to", le = "to", equal.digits = FALSE, ...)
f_interval(x, l = "<", le = "<=", parse = FALSE, ...)
f_interval_text(
x,
greater = "Greater than",
middle = "to",
less = "less than",
equal = "or equal to",
...
)
f_interval_text_right(x, l = "up to", le = "to", equal.digits = FALSE, ...)
f_interval_right(
x,
l = "<",
le = "<=",
equal.digits = FALSE,
parse = FALSE,
...
)
ff_interval(l = "<", le = "<=", parse = TRUE, ...)
ff_interval_text(
greater = "Greater than",
middle = "to",
less = "less than",
equal = "or equal to",
...
)
ff_interval_text_right(l = "up to", le = "to", equal.digits = FALSE, ...)
ff_interval_right(l = "<", le = "<=", equal.digits = FALSE, parse = TRUE, ...)
}
\arguments{
\item{x}{A vector of binned numbers from \code{cut}.}
\item{l}{Less than symbol.}
\item{le}{Less than or equal to symbol.}
\item{parse}{logical. If \code{TRUE} is parsed for \pkg{ggplot2} facet labels.}
\item{greater}{String to use for greater.}
\item{middle}{String to use for middle (defaults to \code{'to'}).}
\item{less}{String to use for less.}
\item{equal}{String to use for equal to. This is combined with the \code{less} or \code{greater}.}
\item{equal.digits}{logical. If \code{TRUE} digits are given equal number of decimal places.}
\item{\ldots}{ignored.}
}
\value{
\code{f_bin} - Returns human readable intervals in symbol form.
\code{f_bin_text} - Returns human readable intervals in word form.
\code{f_bin_text_right} - Returns human readable right hand of intervals in word form.
\code{f_bin_right} - Returns human readable right hand intervals in symbol form.
}
\description{
\code{f_bin} - Convert binned intervals to symbol form (e.g., \code{"1 < x <= 3"}).
\code{f_bin_text} - Convert binned intervals to text form (e.g., \code{"Greater than or equal to 1 to less than 3"}).
}
\examples{
x <- cut(-1:5, 3, right = FALSE)
y <- cut(-4:10, c(-5, 2, 6, 10), right = TRUE)
z <- cut(-4:10, c(-4, 2, 6, 11), right = FALSE)
f_bin(x)
f_interval(x) #`_interval` and `_bin` are interchangeable aliases in the function names
f_bin(y)
f_bin(z)
## HTML
f_bin(z, le = '≤')
f_bin_text(x)
f_bin_text(y)
f_bin_text(z)
f_bin_text(x, middle = 'but')
f_bin_text(x, greater = 'Above', middle = '', equal = '', less = 'to')
f_bin_text(z, greater = 'From', middle = '', equal = '', less = 'up to')
f_bin_text_right(x)
f_bin_text_right(y)
f_bin_text_right(cut(-4:10, c(-3, 2, 6, 11)))
f_bin_text_right(x, equal.digits = TRUE)
f_bin_right(x)
f_bin_right(y)
f_bin_right(x, equal.digits = TRUE)
## HTML
f_bin_right(y, le = '≤')
\dontrun{
library(tidyverse)
mtcars \%>\%
mutate(mpg2 = cut(mpg, 3)) \%>\%
ggplot(aes(disp, hp)) +
geom_point() +
facet_wrap(~ mpg2,
labeller = ff_bin()
)
mtcars \%>\%
mutate(mpg2 = cut(mpg, 3)) \%>\%
ggplot(aes(disp, hp)) +
geom_point() +
facet_wrap(~ mpg2,
labeller = function(x) f_bin_right(x, parse = TRUE)
)
mtcars \%>\%
mutate(mpg2 = cut(mpg, 3, right = FALSE)) \%>\%
ggplot(aes(disp, hp)) +
geom_point() +
facet_wrap(~ mpg2,
labeller = function(x) f_bin_right(x, parse = TRUE)
)
mtcars \%>\%
mutate(mpg2 = cut(mpg, 5, right = FALSE)) \%>\%
ggplot(aes(mpg2)) +
geom_bar() +
scale_x_discrete(labels = ff_bin_text_right(l = 'Up to')) +
coord_flip()
mtcars \%>\%
mutate(mpg2 = cut(mpg, 10, right = FALSE)) \%>\%
ggplot(aes(mpg2)) +
geom_bar(fill = '#33A1DE') +
scale_x_discrete(labels = function(x) f_wrap(f_bin_text_right(x, l = 'up to'), width = 8)) +
scale_y_continuous(breaks = seq(0, 14, by = 2), limits = c(0, 7)) +
theme_minimal() +
theme(
panel.grid.major.x = element_blank(),
axis.text.x = element_text(size = 14, margin = margin(t = -12)),
axis.text.y = element_text(size = 14),
plot.title = element_text(hjust = .5)
) +
labs(title = 'Histogram', x = NULL, y = NULL)
}
}
|
/man/f_bin.Rd
|
no_license
|
trinker/numform
|
R
| false
| true
| 5,139
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/f_bin.R
\name{f_bin}
\alias{f_bin}
\alias{f_bin_text}
\alias{f_bin_text_right}
\alias{f_bin_right}
\alias{ff_bin}
\alias{ff_bin_text}
\alias{ff_bin_right}
\alias{ff_bin_text_right}
\alias{f_interval}
\alias{f_interval_text}
\alias{f_interval_text_right}
\alias{f_interval_right}
\alias{ff_interval}
\alias{ff_interval_text}
\alias{ff_interval_text_right}
\alias{ff_interval_right}
\title{Convert Binned Intervals to Readable Form}
\usage{
f_bin(x, l = "<", le = "<=", parse = FALSE, ...)
f_bin_text(
x,
greater = "Greater than",
middle = "to",
less = "less than",
equal = "or equal to",
...
)
f_bin_text_right(x, l = "up to", le = "to", equal.digits = FALSE, ...)
f_bin_right(x, l = "<", le = "<=", equal.digits = FALSE, parse = FALSE, ...)
ff_bin(l = "<", le = "<=", parse = TRUE, ...)
ff_bin_text(
greater = "Greater than",
middle = "to",
less = "less than",
equal = "or equal to",
...
)
ff_bin_right(l = "<", le = "<=", equal.digits = FALSE, parse = TRUE, ...)
ff_bin_text_right(l = "up to", le = "to", equal.digits = FALSE, ...)
f_interval(x, l = "<", le = "<=", parse = FALSE, ...)
f_interval_text(
x,
greater = "Greater than",
middle = "to",
less = "less than",
equal = "or equal to",
...
)
f_interval_text_right(x, l = "up to", le = "to", equal.digits = FALSE, ...)
f_interval_right(
x,
l = "<",
le = "<=",
equal.digits = FALSE,
parse = FALSE,
...
)
ff_interval(l = "<", le = "<=", parse = TRUE, ...)
ff_interval_text(
greater = "Greater than",
middle = "to",
less = "less than",
equal = "or equal to",
...
)
ff_interval_text_right(l = "up to", le = "to", equal.digits = FALSE, ...)
ff_interval_right(l = "<", le = "<=", equal.digits = FALSE, parse = TRUE, ...)
}
\arguments{
\item{x}{A vector of binned numbers from \code{cut}.}
\item{l}{Less than symbol.}
\item{le}{Less than or equal to symbol.}
\item{parse}{logical. If \code{TRUE} is parsed for \pkg{ggplot2} facet labels.}
\item{greater}{String to use for greater.}
\item{middle}{String to use for middle (defaults to \code{'to'}).}
\item{less}{String to use for less.}
\item{equal}{String to use for equal to. This is combined with the \code{less} or \code{greater}.}
\item{equal.digits}{logical. If \code{TRUE} digits are given equal number of decimal places.}
\item{\ldots}{ignored.}
}
\value{
\code{f_bin} - Returns human readable intervals in symbol form.
\code{f_bin_text} - Returns human readable intervals in word form.
\code{f_bin_text_right} - Returns human readable right hand of intervals in word form.
\code{f_bin_right} - Returns human readable right hand intervals in symbol form.
}
\description{
\code{f_bin} - Convert binned intervals to symbol form (e.g., \code{"1 < x <= 3"}).
\code{f_bin_text} - Convert binned intervals to text form (e.g., \code{"Greater than or equal to 1 to less than 3"}).
}
\examples{
x <- cut(-1:5, 3, right = FALSE)
y <- cut(-4:10, c(-5, 2, 6, 10), right = TRUE)
z <- cut(-4:10, c(-4, 2, 6, 11), right = FALSE)
f_bin(x)
f_interval(x) #`_interval` and `_bin` are interchangeable aliases in the function names
f_bin(y)
f_bin(z)
## HTML
f_bin(z, le = '≤')
f_bin_text(x)
f_bin_text(y)
f_bin_text(z)
f_bin_text(x, middle = 'but')
f_bin_text(x, greater = 'Above', middle = '', equal = '', less = 'to')
f_bin_text(z, greater = 'From', middle = '', equal = '', less = 'up to')
f_bin_text_right(x)
f_bin_text_right(y)
f_bin_text_right(cut(-4:10, c(-3, 2, 6, 11)))
f_bin_text_right(x, equal.digits = TRUE)
f_bin_right(x)
f_bin_right(y)
f_bin_right(x, equal.digits = TRUE)
## HTML
f_bin_right(y, le = '≤')
\dontrun{
library(tidyverse)
mtcars \%>\%
mutate(mpg2 = cut(mpg, 3)) \%>\%
ggplot(aes(disp, hp)) +
geom_point() +
facet_wrap(~ mpg2,
labeller = ff_bin()
)
mtcars \%>\%
mutate(mpg2 = cut(mpg, 3)) \%>\%
ggplot(aes(disp, hp)) +
geom_point() +
facet_wrap(~ mpg2,
labeller = function(x) f_bin_right(x, parse = TRUE)
)
mtcars \%>\%
mutate(mpg2 = cut(mpg, 3, right = FALSE)) \%>\%
ggplot(aes(disp, hp)) +
geom_point() +
facet_wrap(~ mpg2,
labeller = function(x) f_bin_right(x, parse = TRUE)
)
mtcars \%>\%
mutate(mpg2 = cut(mpg, 5, right = FALSE)) \%>\%
ggplot(aes(mpg2)) +
geom_bar() +
scale_x_discrete(labels = ff_bin_text_right(l = 'Up to')) +
coord_flip()
mtcars \%>\%
mutate(mpg2 = cut(mpg, 10, right = FALSE)) \%>\%
ggplot(aes(mpg2)) +
geom_bar(fill = '#33A1DE') +
scale_x_discrete(labels = function(x) f_wrap(f_bin_text_right(x, l = 'up to'), width = 8)) +
scale_y_continuous(breaks = seq(0, 14, by = 2), limits = c(0, 7)) +
theme_minimal() +
theme(
panel.grid.major.x = element_blank(),
axis.text.x = element_text(size = 14, margin = margin(t = -12)),
axis.text.y = element_text(size = 14),
plot.title = element_text(hjust = .5)
) +
labs(title = 'Histogram', x = NULL, y = NULL)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/M2.R
\name{modelfit}
\alias{modelfit}
\title{Model fit statistics}
\usage{
modelfit(GDINA.obj, CI = 0.9, ...)
}
\arguments{
\item{GDINA.obj}{An estimated model object of class \code{GDINA}}
\item{CI}{numeric value from 0 to 1 indicating the range of the confidence interval for RMSEA. Default returns the 90\% interval.}
\item{...}{arguments passed to the function}
}
\description{
Calculate various model-data fit statistics
}
\details{
Various model-data fit statistics including M2 statistic for G-DINA model with dichotomous responses (Liu, Tian, & Xin, 2016; Hansen, Cai, Monroe, & Li, 2016) and for sequential G-DINA model with graded responses (Ma, under review).
It also calculates SRMSR and RMSEA2.
}
\examples{
\dontrun{
dat <- sim10GDINA$simdat
Q <- sim10GDINA$simQ
mod1 <- GDINA(dat = dat, Q = Q, model = "DINA")
modelfit(mod1)
}
}
\references{
Ma, W. (2019). Evaluating model data fit using limited information statistics for the sequential G-DINA model.\emph{Applied Psychological Measurement.}
Maydeu-Olivares, A. (2013). Goodness-of-Fit Assessment of Item Response Theory Models. \emph{Measurement, 11}, 71-101.
Hansen, M., Cai, L., Monroe, S., & Li, Z. (2016). Limited-information goodness-of-fit testing of diagnostic classification item response models. \emph{British Journal of Mathematical and Statistical Psychology. 69,} 225--252.
Liu, Y., Tian, W., & Xin, T. (2016). An Application of M2 Statistic to Evaluate the Fit of Cognitive Diagnostic Models. \emph{Journal of Educational and Behavioral Statistics, 41}, 3-26.
}
\author{
{Wenchao Ma, The University of Alabama, \email{wenchao.ma@ua.edu}}
}
|
/man/modelfit.Rd
|
no_license
|
cywongnorman/GDINA
|
R
| false
| true
| 1,704
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/M2.R
\name{modelfit}
\alias{modelfit}
\title{Model fit statistics}
\usage{
modelfit(GDINA.obj, CI = 0.9, ...)
}
\arguments{
\item{GDINA.obj}{An estimated model object of class \code{GDINA}}
\item{CI}{numeric value from 0 to 1 indicating the range of the confidence interval for RMSEA. Default returns the 90\% interval.}
\item{...}{arguments passed to the function}
}
\description{
Calculate various model-data fit statistics
}
\details{
Various model-data fit statistics including M2 statistic for G-DINA model with dichotomous responses (Liu, Tian, & Xin, 2016; Hansen, Cai, Monroe, & Li, 2016) and for sequential G-DINA model with graded responses (Ma, under review).
It also calculates SRMSR and RMSEA2.
}
\examples{
\dontrun{
dat <- sim10GDINA$simdat
Q <- sim10GDINA$simQ
mod1 <- GDINA(dat = dat, Q = Q, model = "DINA")
modelfit(mod1)
}
}
\references{
Ma, W. (2019). Evaluating model data fit using limited information statistics for the sequential G-DINA model.\emph{Applied Psychological Measurement.}
Maydeu-Olivares, A. (2013). Goodness-of-Fit Assessment of Item Response Theory Models. \emph{Measurement, 11}, 71-101.
Hansen, M., Cai, L., Monroe, S., & Li, Z. (2016). Limited-information goodness-of-fit testing of diagnostic classification item response models. \emph{British Journal of Mathematical and Statistical Psychology. 69,} 225--252.
Liu, Y., Tian, W., & Xin, T. (2016). An Application of M2 Statistic to Evaluate the Fit of Cognitive Diagnostic Models. \emph{Journal of Educational and Behavioral Statistics, 41}, 3-26.
}
\author{
{Wenchao Ma, The University of Alabama, \email{wenchao.ma@ua.edu}}
}
|
# Serialise a benchmark result to disk as an RDS file.
#
# The file is written into `save_dir` and named
# "<implementation>-<n.workers>-<model>.rds".
#
# output:         object to serialise (e.g. a model-run result).
# save_dir:       destination directory.
# model:          model name used in the file name.
# n.workers:      worker count used in the file name.
# working_dir:    accepted for interface compatibility; not used here.
#                 NOTE(review): confirm whether callers still need it.
# implementation: implementation label used in the file name.
save_output <- function(output,
                        save_dir,
                        model,
                        n.workers,
                        working_dir,
                        implementation){
  # Assemble "<implementation>-<n.workers>-<model>.rds" inside save_dir.
  stem <- paste(implementation, n.workers, model, sep = "-")
  destination <- file.path(save_dir, paste0(stem, ".rds"))
  saveRDS(output, file = destination)
}
|
/R/save.R
|
no_license
|
MultiBUGS/multibugstests
|
R
| false
| false
| 406
|
r
|
# Save a benchmarking result object to disk as an RDS file named
# "<implementation>-<n.workers>-<model>.rds" inside `save_dir`.
#
# output:         the object to serialise.
# save_dir:       directory in which to write the file.
# model:          model name, used in the output file name.
# n.workers:      number of workers, used in the output file name.
# working_dir:    not used by this function -- NOTE(review): confirm
#                 whether it can be dropped from the signature.
# implementation: implementation label, used in the output file name.
save_output <- function(output,
                        save_dir,
                        model,
                        n.workers,
                        working_dir,
                        implementation){
  # Build the destination path from the run metadata.
  file <- file.path(save_dir, paste0(implementation, "-",
                                     n.workers, "-",
                                     model, ".rds"))
  saveRDS(output, file = file)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run.R
\name{get_current_run}
\alias{get_current_run}
\title{Get the context object for a run}
\usage{
get_current_run(allow_offline = TRUE)
}
\arguments{
\item{allow_offline}{If \code{TRUE}, allow the service context to
fall back to offline mode so that the training script can be
tested locally without submitting a job with the SDK.}
}
\value{
The \code{Run} object.
}
\description{
This function is commonly used to retrieve the authenticated
run object inside of a script to be submitted for execution
via \code{submit_experiment()}. Note that the logging functions
(\code{log_*} methods, \code{upload_files_to_run()}, \code{upload_folder_to_run()})
will by default log the specified metrics or files to the
run returned from \code{get_current_run()}.
}
|
/man/get_current_run.Rd
|
permissive
|
revodavid/azureml-sdk-for-r
|
R
| false
| true
| 836
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run.R
\name{get_current_run}
\alias{get_current_run}
\title{Get the context object for a run}
\usage{
get_current_run(allow_offline = TRUE)
}
\arguments{
\item{allow_offline}{If \code{TRUE}, allow the service context to
fall back to offline mode so that the training script can be
tested locally without submitting a job with the SDK.}
}
\value{
The \code{Run} object.
}
\description{
This function is commonly used to retrieve the authenticated
run object inside of a script to be submitted for execution
via \code{submit_experiment()}. Note that the logging functions
(\code{log_*} methods, \code{upload_files_to_run()}, \code{upload_folder_to_run()})
will by default log the specified metrics or files to the
run returned from \code{get_current_run()}.
}
|
library(shiny)
library(plotly)
# Shiny server: renders a fixed 5-node station-flow diagram via DiagrammeR.
shinyServer(function(input, output, session) {
  # Graph libraries loaded inside the server function.
  library(DiagrammeR)
  library(DiagrammeRsvg)
  library(rsvg)
  # Create a node data frame (ndf): five rectangular station nodes ST10..ST50.
  ndf <- create_node_df(n = 5,
                        label = c("ST10", "ST20", "ST30", "ST40", "ST50"),
                        shape = c("rectangle"))
  # Create an edge data frame (edf): chained flows 1->2->3->4->5 plus a
  # self-loop on node 2 and a back edge 5->2; labels carry the flow volumes,
  # and arrow/font sizes grow per edge.
  edf <- create_edge_df(from = c(1, 2, 3, 4, 2, 5),
                        to = c(2, 3, 4, 5, 2, 2),
                        #rel = c("a", "b", "c", "d"),
                        label = c(100,200,300,400, 50, 50),
                        arrowsize = 1:6,
                        fontsize = (1:6)*10
                        )
  # Create a graph with the ndf and edf
  graph <- create_graph(nodes_df = ndf,
                        edges_df = edf
                        )
  # Alternative Graphviz layout engines, kept for experimentation:
  # graph$global_attrs$value[1] = 'dot'
  # graph$global_attrs$value[1] = 'twopi'
  # graph$global_attrs$value[1] = 'circo'
  # Render the static graph into the CHART output slot.
  output$CHART = renderGrViz({
    #DiagrammeR::render_graph(graph)
    DiagrammeR::render_graph(graph)
  })
})
|
/server.R
|
no_license
|
jiayi9/diagrammer
|
R
| false
| false
| 1,062
|
r
|
library(shiny)
library(plotly)
# Shiny server: renders a fixed 5-node station-flow diagram via DiagrammeR.
shinyServer(function(input, output, session) {
  # Graph libraries loaded inside the server function.
  library(DiagrammeR)
  library(DiagrammeRsvg)
  library(rsvg)
  # Create a node data frame (ndf): five rectangular station nodes ST10..ST50.
  ndf <- create_node_df(n = 5,
                        label = c("ST10", "ST20", "ST30", "ST40", "ST50"),
                        shape = c("rectangle"))
  # Create an edge data frame (edf): chained flows 1->2->3->4->5 plus a
  # self-loop on node 2 and a back edge 5->2; labels carry the flow volumes,
  # and arrow/font sizes grow per edge.
  edf <- create_edge_df(from = c(1, 2, 3, 4, 2, 5),
                        to = c(2, 3, 4, 5, 2, 2),
                        #rel = c("a", "b", "c", "d"),
                        label = c(100,200,300,400, 50, 50),
                        arrowsize = 1:6,
                        fontsize = (1:6)*10
                        )
  # Create a graph with the ndf and edf
  graph <- create_graph(nodes_df = ndf,
                        edges_df = edf
                        )
  # Alternative Graphviz layout engines, kept for experimentation:
  # graph$global_attrs$value[1] = 'dot'
  # graph$global_attrs$value[1] = 'twopi'
  # graph$global_attrs$value[1] = 'circo'
  # Render the static graph into the CHART output slot.
  output$CHART = renderGrViz({
    #DiagrammeR::render_graph(graph)
    DiagrammeR::render_graph(graph)
  })
})
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinydashboard)
library(plotly)
library(readr)
library(dplyr)
library(wordcloud)
library(jsonlite)
library(tidyverse)
library(ggplot2)
library(lubridate)
library(gridExtra)
library(formattable)
library(DT)
library(RColorBrewer)
library(randomForest)
library(e1071)
library(rpart)
library(MASS)
library(glmnet)
library(PerformanceAnalytics)
library(corrplot)
library(car)
library(kernlab)
library(keras)
library(xgboost)
library(stringr)
library(caret)
library(Matrix)
library(ROCR)
library(pROC)
# Font spec for plot/axis titles (large grey serif).
f1 = list(
  family = "Old Standard TT, serif",
  size = 14,
  color = "grey"
)
# Font spec for tick labels (small black serif).
f2 = list(
  family = "Old Standard TT, serif",
  size = 10,
  color = "black"
)
# Shared plotly axis options: f1 titles, angled tick labels in f2.
a = list(
  titlefont = f1,
  showticklabels = T,
  tickangle = -45,
  tickfont = f2
)
# Shared plotly margins (left/right/bottom/top, px) plus padding.
m = list(
  l = 50,
  r = 50,
  b = 100,
  t = 100,
  pad = 4
)
# annotations for subplot
# Centered title annotation for the budget histogram panel.
a1 = list(x = 0.5, y = 1.0,
          showarrow = FALSE,
          text = "Distribution of budget",  # fixed typo: "bugdet" -> "budget"
          xanchor = "center",
          xref = "paper",
          yanchor = "bottom",
          yref = "paper",
          font = f1)
# Centered title annotation for the gross histogram panel.
b1 = list(x = 0.5, y = 1.0,
          showarrow = FALSE,
          text = "Distribution of gross",
          xanchor = "center",
          xref = "paper",
          yanchor = "bottom",
          yref = "paper",
          font = f1)
# creating a function called scatter_plot for
# plotting scatter plots using ggplot and plotly
#
# Jittered scatter plot of y vs x over the global `movie` data frame,
# converted to an interactive plotly widget with hover tooltips.
# x, y:                  columns of `movie` to plot.
# xlabel, ylabel, title: axis and plot labels.
# text1, text2, text3:   prefixes used to assemble the tooltip text.
# alpha:                 point transparency; defaults to 0.4.
# NOTE: relies on globals `movie`, `m` (margins) and `a` (axis options).
scatter_plot = function(x, y, xlabel, ylabel, title,
                        text1, text2, text3,
                        alpha = NULL){
  if(is.null(alpha)) alpha = 0.4
  gp = ggplot(data = movie, mapping = aes(x = x, y = y,
                                          text = paste(text1, x,
                                                       text2, y,
                                                       text3, movie_title)))
  plot = gp + geom_point(position = "jitter",
                         show.legend = F, shape = 21,
                         stroke = .2, alpha = alpha) +
    xlab(xlabel) +
    ylab(ylabel) +
    ggtitle(title) +
    theme_minimal() +
    theme(legend.position = "none",
          plot.title = element_text(size = 12, face = "bold",
                                    family = "Times",
                                    color = "darkgrey"))
  # Fix: pass the margin list by name -- plotly::layout() does not treat
  # an unnamed first argument as `margin` (cf. hist_plot below, which
  # already uses `margin = m`).
  ggplotly(plot, tooltip = "text") %>%
    layout(margin = m, xaxis = a, yaxis = a)
}
# creating function for plotting scatter plot using facet wrap
#
# Jittered scatter plot of y vs x over the global `movie` data frame,
# facetted by genre with free scales. Returns a plain ggplot object.
facet_plot = function(x, y, xlabel, ylabel, alpha = NULL){
  if(is.null(alpha)) alpha = 1
  fp = ggplot(data = movie, mapping = aes(x = x, y = y))
  fp + geom_point(aes(fill = genres), position = "jitter",
                  show.legend = F, shape = 21,
                  stroke = 0.2, alpha = alpha) +
    xlab(xlabel) +
    ylab(ylabel) +
    facet_wrap(~genres, scales = "free") +
    theme_minimal()
}
# creating a function for plotting a simple histogram
#
# Histogram of column `x` from the global `movie` data frame, returned as
# an interactive plotly widget using the shared margins `m` / axis opts `a`.
# x:          column to bin.
# xlabel:     x-axis label.
# bwidth:     bin width.
# fill/color: bar fill and outline colours (defaults: orange / black).
hist_plot = function(x, xlabel, bwidth, fill = NULL, color = NULL){
  if(is.null(fill)) fill = "orange"
  if(is.null(color)) color = "black"
  hp = ggplot(data = movie, mapping = aes(x = x))
  gp = hp + geom_histogram(binwidth = bwidth, fill = fill,
                           color = color,
                           size = 0.2,
                           alpha = 0.7,
                           show.legend = F) +
    xlab(xlabel) +
    theme_minimal()
  # The built ggplot is passed to ggplotly explicitly, then laid out with
  # the shared margin/axis settings.
  ggplotly(gp) %>%
    layout(margin = m, xaxis = a, yaxis = a)
}
# creating a function for plotting histogram using facet wrap
#
# Genre-facetted histogram of `x` from the global `movie` data frame
# (free y scales, 4 facet columns). Returns a plain ggplot object.
facet_hist_plot = function(x, xlabel, bwidth){
  hp = ggplot(data = movie, mapping = aes(x = x))
  hp + geom_histogram(aes(fill = genres), binwidth = bwidth,
                      show.legend = F,
                      color = "black", size = 0.2,
                      alpha = 0.8) +
    xlab(xlabel) +
    theme_minimal() +
    theme(legend.position = "none",
          axis.text = element_text(size = 12, angle = 20),
          axis.title = element_text(size = 14,
                                    family = "Times",
                                    color = "darkgrey",
                                    face = "bold")) +
    facet_wrap(~ genres, scales = "free_y", ncol = 4)
}
# creating function for plotting histograms for budget and gross
#
# Log10-scaled histogram of a monetary column of the global `movie` data
# frame, returned as an interactive plotly widget with the shared margins
# `m` and axis options `a`. The fill colour is sampled at random from the
# Spectral palette, so it varies between calls.
budg_gross_hist = function(x){
  bh = ggplot(movie, aes(x = x))
  # Fix: keep a handle on the finished ggplot and pass it to ggplotly()
  # explicitly. The original discarded the plot and called ggplotly()
  # with no argument, which falls back to the last *printed* plot --
  # never updated when the plot is built inside a function body.
  p = bh + geom_histogram(binwidth = 0.05,
                          fill = sample(brewer.pal(11, "Spectral"), 1),
                          color = "black",
                          size = 0.09,
                          alpha = 0.7) +
    scale_x_log10() +
    theme_minimal()
  # Fix: name the margin argument; layout(m, ...) passed `m` unnamed.
  ggplotly(p) %>%
    layout(margin = m, xaxis = a, yaxis = a)
}
# creating a function for plotting bar graphs
#
# Genre-ordered bar chart with hover tooltips, converted to plotly.
# data:  summary data frame containing `genres` and the plotted columns.
# x:     column used to order the genres along the axis.
# y:     bar heights.
# info:  tooltip label placed before the rounded y value.
# xlabl, ylabl, title: axis and plot labels.
# deci:  decimal places for the tooltip value (default 0).
# suf:   suffix appended to the tooltip value (default "").
bar_plot = function(data, x, y, info, xlabl, ylabl, title,
                    deci = NULL, suf = NULL){
  if(is.null(suf)) suf = ""
  if(is.null(deci)) deci = 0
  # Local renamed from `b1` to avoid shadowing the global annotation `b1`.
  base = ggplot(data, aes(x = reorder(genres, x),
                          y = y,
                          text = paste("Genre:", genres,
                                       info,
                                       round(y, deci), suf)))
  p = base + geom_bar(aes(fill = genres), stat = "identity",
                      show.legend = F, color = "black", size = 0.2,
                      width = 0.7, alpha = 0.7) +
    xlab(xlabl) +
    ylab(ylabl) +
    ggtitle(title) +
    theme_minimal() +
    theme(legend.position = "none",
          plot.title = element_text(size = 14,
                                    color = "grey", family = "Times")) +
    scale_fill_brewer(palette = "Spectral")
  # Fix: convert the plot we just built explicitly; ggplotly() without a
  # plot argument falls back to the last *printed* ggplot, which is stale
  # inside a function body.
  ggplotly(p, tooltip = "text") %>%
    layout(margin = m, xaxis = a, yaxis = a)
}
# creating a function for plotting plotly line graph for title_year
#
# Single line trace of `y` against `title_year` with the shared margins.
# data: data frame containing `title_year` and the plotted series.
# y:    numeric series to draw.
# name: trace name shown in the horizontal legend.
# The line colour is sampled at random from the Spectral palette.
line_graph = function(data, y, name){
  scat_p1 = plot_ly(data, x = ~title_year, y = ~ y,
                    name = name, type = 'scatter', mode = 'lines',
                    line = list(color = sample(brewer.pal(11, "Spectral"), 1))) %>%
    layout(xaxis = list(title = "Title Year", zeroline = F,
                        showline = F,
                        showticklabels = T),
           yaxis = list(title = "Average Score"),
           title = "Line Graph for Avg Score/Avg Votes/Avg User Review by Title Year",
           font = list(family = "Serif", color = "grey"),
           legend = list(orientation = "h", size = 6,
                         bgcolor = "#E2E2E2",
                         bordercolor = "darkgrey",
                         borderwidth = 1),
           margin = m)
  scat_p1
}
# Spectral palette used by the pie charts below.
colors = c(brewer.pal(n = 11, name = "Spectral"))
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
  # Movie data set shared by every output below.
  movie = read_csv("../Data/movie.csv")
  # Donut chart of the ten most common genres.
  output$genre <- renderPlotly({
    p = movie %>%
      group_by(genres) %>%
      summarise(count = n()) %>%
      arrange(desc(count)) %>%
      head(10) %>%
      plot_ly(labels = ~genres, values = ~count,
              insidetextfont = list(color = 'Black'),
              marker = list(colors = colors,
                            line = list(color = 'Black', width = .5)),
              opacity = 0.8) %>%
      add_pie(hole = 0.6) %>%
      layout(title = "",
             titlefont = list(family = "Times", size = 20, color = "grey"),
             xaxis = list(showgrid = T, zeroline = F, showticklabels = F),
             yaxis = list(showgrid = T, zeroline = F, showticklabels = F),
             showlegend = T,
             margin = list(t = 50, b = 50))
  })
  # Donut chart of profitable vs. unprofitable movies (if_profit flag).
  output$profit = renderPlotly({
    p = movie %>%
      group_by(if_profit) %>%
      summarise(count = n()) %>%
      plot_ly(labels = ~factor(if_profit), values = ~count,
              insidetextfont = list(color = 'Black'),
              marker = list(colors = sample(brewer.pal(11, "Spectral")),
                            line = list(color = 'Black', width = .5)),
              opacity = 0.8) %>%
      add_pie(hole = 0.6) %>%
      layout(title = "",
             titlefont = list(family = "Times", size = 20, color = "grey"),
             xaxis = list(showgrid = T, zeroline = F, showticklabels = F),
             yaxis = list(showgrid = T, zeroline = F, showticklabels = F),
             margin = list(t = 50, b = 50))
  })
  # Histogram of IMDB scores, fill sampled from the Spectral palette.
  output$score = renderPlotly({
    p = hist_plot(movie$imdb_score, bwidth = 0.1,
                  "IMDB Score",
                  fill = sample(brewer.pal(11, "Spectral"), 1),
                  color = "black")
  })
  # Side-by-side log-scale histograms of budget and gross.
  output$money = renderPlotly({
    options(scipen = 999)
    # transformed budget histograms
    p1 = budg_gross_hist(movie$budget) %>%
      layout(annotations = a1)
    # transformed gross histograms
    p2 = budg_gross_hist(movie$gross) %>%
      layout(annotations = b1)
    p = subplot(p1, p2, widths = c(0.5, 0.5))
  })
  # IMDB score distribution split by release before/after 2000.
  output$year = renderPlotly({
    movie$before_n_after_2000 = ifelse(movie$title_year >= 2000, 1, 0)
    # plotting a histogram separated by before_n_after_2000
    his = movie %>%
      ggplot(aes(x = imdb_score, fill = factor(before_n_after_2000))) +
      geom_histogram(binwidth = 0.1,
                     color = "black",
                     position = "dodge",
                     size = 0.2,
                     alpha = 0.7) +
      xlab("IMDB Score") +
      ggtitle("") +
      scale_fill_manual(values = c(sample(brewer.pal(11, "Spectral")))) +
      theme_minimal() +
      theme(plot.title = element_text(size = 14,
                                      colour = "darkgrey",
                                      family = "Times"))
    p = ggplotly(his) %>%
      layout(margin = list(t = 50, b = 100),
             xaxis = a, yaxis = a,
             legend = list(orientation = "h", size = 4,
                           bgcolor = "#E2E2E2",
                           bordercolor = "darkgrey",
                           borderwidth = 1,
                           x = 0,
                           y = -0.1))
  })
})
|
/Shiny/server.R
|
no_license
|
qswangstat/IMDB-Data-Analysis
|
R
| false
| false
| 10,253
|
r
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinydashboard)
library(plotly)
library(readr)
library(dplyr)
library(wordcloud)
library(jsonlite)
library(tidyverse)
library(ggplot2)
library(lubridate)
library(gridExtra)
library(formattable)
library(DT)
library(RColorBrewer)
library(randomForest)
library(e1071)
library(rpart)
library(MASS)
library(glmnet)
library(PerformanceAnalytics)
library(corrplot)
library(car)
library(kernlab)
library(keras)
library(xgboost)
library(stringr)
library(caret)
library(Matrix)
library(ROCR)
library(pROC)
# Font spec for plot/axis titles (large grey serif).
f1 = list(
  family = "Old Standard TT, serif",
  size = 14,
  color = "grey"
)
# Font spec for tick labels (small black serif).
f2 = list(
  family = "Old Standard TT, serif",
  size = 10,
  color = "black"
)
# Shared plotly axis options: f1 titles, angled tick labels in f2.
a = list(
  titlefont = f1,
  showticklabels = T,
  tickangle = -45,
  tickfont = f2
)
# Shared plotly margins (left/right/bottom/top, px) plus padding.
m = list(
  l = 50,
  r = 50,
  b = 100,
  t = 100,
  pad = 4
)
# annotations for subplot
# Centered title annotation for the budget histogram panel.
a1 = list(x = 0.5, y = 1.0,
          showarrow = FALSE,
          text = "Distribution of budget",  # fixed typo: "bugdet" -> "budget"
          xanchor = "center",
          xref = "paper",
          yanchor = "bottom",
          yref = "paper",
          font = f1)
# Centered title annotation for the gross histogram panel.
b1 = list(x = 0.5, y = 1.0,
          showarrow = FALSE,
          text = "Distribution of gross",
          xanchor = "center",
          xref = "paper",
          yanchor = "bottom",
          yref = "paper",
          font = f1)
# creating a function called scatter_plot for
# plotting scatter plots using ggplot and plotly
#
# Jittered scatter plot of y vs x over the global `movie` data frame,
# converted to an interactive plotly widget with hover tooltips.
# x, y:                  columns of `movie` to plot.
# xlabel, ylabel, title: axis and plot labels.
# text1, text2, text3:   prefixes used to assemble the tooltip text.
# alpha:                 point transparency; defaults to 0.4.
# NOTE: relies on globals `movie`, `m` (margins) and `a` (axis options).
scatter_plot = function(x, y, xlabel, ylabel, title,
                        text1, text2, text3,
                        alpha = NULL){
  if(is.null(alpha)) alpha = 0.4
  gp = ggplot(data = movie, mapping = aes(x = x, y = y,
                                          text = paste(text1, x,
                                                       text2, y,
                                                       text3, movie_title)))
  plot = gp + geom_point(position = "jitter",
                         show.legend = F, shape = 21,
                         stroke = .2, alpha = alpha) +
    xlab(xlabel) +
    ylab(ylabel) +
    ggtitle(title) +
    theme_minimal() +
    theme(legend.position = "none",
          plot.title = element_text(size = 12, face = "bold",
                                    family = "Times",
                                    color = "darkgrey"))
  # Fix: pass the margin list by name -- plotly::layout() does not treat
  # an unnamed first argument as `margin` (cf. hist_plot below, which
  # already uses `margin = m`).
  ggplotly(plot, tooltip = "text") %>%
    layout(margin = m, xaxis = a, yaxis = a)
}
# creating function for plotting scatter plot using facet wrap
#
# Jittered scatter plot of y vs x over the global `movie` data frame,
# facetted by genre with free scales. Returns a plain ggplot object.
facet_plot = function(x, y, xlabel, ylabel, alpha = NULL){
  if(is.null(alpha)) alpha = 1
  fp = ggplot(data = movie, mapping = aes(x = x, y = y))
  fp + geom_point(aes(fill = genres), position = "jitter",
                  show.legend = F, shape = 21,
                  stroke = 0.2, alpha = alpha) +
    xlab(xlabel) +
    ylab(ylabel) +
    facet_wrap(~genres, scales = "free") +
    theme_minimal()
}
# creating a function for plotting a simple histogram
#
# Histogram of column `x` from the global `movie` data frame, returned as
# an interactive plotly widget using the shared margins `m` / axis opts `a`.
# x:          column to bin.
# xlabel:     x-axis label.
# bwidth:     bin width.
# fill/color: bar fill and outline colours (defaults: orange / black).
hist_plot = function(x, xlabel, bwidth, fill = NULL, color = NULL){
  if(is.null(fill)) fill = "orange"
  if(is.null(color)) color = "black"
  hp = ggplot(data = movie, mapping = aes(x = x))
  gp = hp + geom_histogram(binwidth = bwidth, fill = fill,
                           color = color,
                           size = 0.2,
                           alpha = 0.7,
                           show.legend = F) +
    xlab(xlabel) +
    theme_minimal()
  # The built ggplot is passed to ggplotly explicitly, then laid out with
  # the shared margin/axis settings.
  ggplotly(gp) %>%
    layout(margin = m, xaxis = a, yaxis = a)
}
# creating a function for plotting histogram using facet wrap
#
# Genre-facetted histogram of `x` from the global `movie` data frame
# (free y scales, 4 facet columns). Returns a plain ggplot object.
facet_hist_plot = function(x, xlabel, bwidth){
  hp = ggplot(data = movie, mapping = aes(x = x))
  hp + geom_histogram(aes(fill = genres), binwidth = bwidth,
                      show.legend = F,
                      color = "black", size = 0.2,
                      alpha = 0.8) +
    xlab(xlabel) +
    theme_minimal() +
    theme(legend.position = "none",
          axis.text = element_text(size = 12, angle = 20),
          axis.title = element_text(size = 14,
                                    family = "Times",
                                    color = "darkgrey",
                                    face = "bold")) +
    facet_wrap(~ genres, scales = "free_y", ncol = 4)
}
# creating function for plotting histograms for budget and gross
#
# Log10-scaled histogram of a monetary column of the global `movie` data
# frame, returned as an interactive plotly widget with the shared margins
# `m` and axis options `a`. The fill colour is sampled at random from the
# Spectral palette, so it varies between calls.
budg_gross_hist = function(x){
  bh = ggplot(movie, aes(x = x))
  # Fix: keep a handle on the finished ggplot and pass it to ggplotly()
  # explicitly. The original discarded the plot and called ggplotly()
  # with no argument, which falls back to the last *printed* plot --
  # never updated when the plot is built inside a function body.
  p = bh + geom_histogram(binwidth = 0.05,
                          fill = sample(brewer.pal(11, "Spectral"), 1),
                          color = "black",
                          size = 0.09,
                          alpha = 0.7) +
    scale_x_log10() +
    theme_minimal()
  # Fix: name the margin argument; layout(m, ...) passed `m` unnamed.
  ggplotly(p) %>%
    layout(margin = m, xaxis = a, yaxis = a)
}
# creating a function for plotting bar graphs
#
# Genre-ordered bar chart with hover tooltips, converted to plotly.
# data:  summary data frame containing `genres` and the plotted columns.
# x:     column used to order the genres along the axis.
# y:     bar heights.
# info:  tooltip label placed before the rounded y value.
# xlabl, ylabl, title: axis and plot labels.
# deci:  decimal places for the tooltip value (default 0).
# suf:   suffix appended to the tooltip value (default "").
bar_plot = function(data, x, y, info, xlabl, ylabl, title,
                    deci = NULL, suf = NULL){
  if(is.null(suf)) suf = ""
  if(is.null(deci)) deci = 0
  # Local renamed from `b1` to avoid shadowing the global annotation `b1`.
  base = ggplot(data, aes(x = reorder(genres, x),
                          y = y,
                          text = paste("Genre:", genres,
                                       info,
                                       round(y, deci), suf)))
  p = base + geom_bar(aes(fill = genres), stat = "identity",
                      show.legend = F, color = "black", size = 0.2,
                      width = 0.7, alpha = 0.7) +
    xlab(xlabl) +
    ylab(ylabl) +
    ggtitle(title) +
    theme_minimal() +
    theme(legend.position = "none",
          plot.title = element_text(size = 14,
                                    color = "grey", family = "Times")) +
    scale_fill_brewer(palette = "Spectral")
  # Fix: convert the plot we just built explicitly; ggplotly() without a
  # plot argument falls back to the last *printed* ggplot, which is stale
  # inside a function body.
  ggplotly(p, tooltip = "text") %>%
    layout(margin = m, xaxis = a, yaxis = a)
}
# creating a function for plotting plotly line graph for title_year
#
# Single line trace of `y` against `title_year` with the shared margins.
# data: data frame containing `title_year` and the plotted series.
# y:    numeric series to draw.
# name: trace name shown in the horizontal legend.
# The line colour is sampled at random from the Spectral palette.
line_graph = function(data, y, name){
  scat_p1 = plot_ly(data, x = ~title_year, y = ~ y,
                    name = name, type = 'scatter', mode = 'lines',
                    line = list(color = sample(brewer.pal(11, "Spectral"), 1))) %>%
    layout(xaxis = list(title = "Title Year", zeroline = F,
                        showline = F,
                        showticklabels = T),
           yaxis = list(title = "Average Score"),
           title = "Line Graph for Avg Score/Avg Votes/Avg User Review by Title Year",
           font = list(family = "Serif", color = "grey"),
           legend = list(orientation = "h", size = 6,
                         bgcolor = "#E2E2E2",
                         bordercolor = "darkgrey",
                         borderwidth = 1),
           margin = m)
  scat_p1
}
# Spectral palette used by the pie charts below.
colors = c(brewer.pal(n = 11, name = "Spectral"))
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
  # Movie data set shared by every output below.
  movie = read_csv("../Data/movie.csv")
  # Donut chart of the ten most common genres.
  output$genre <- renderPlotly({
    p = movie %>%
      group_by(genres) %>%
      summarise(count = n()) %>%
      arrange(desc(count)) %>%
      head(10) %>%
      plot_ly(labels = ~genres, values = ~count,
              insidetextfont = list(color = 'Black'),
              marker = list(colors = colors,
                            line = list(color = 'Black', width = .5)),
              opacity = 0.8) %>%
      add_pie(hole = 0.6) %>%
      layout(title = "",
             titlefont = list(family = "Times", size = 20, color = "grey"),
             xaxis = list(showgrid = T, zeroline = F, showticklabels = F),
             yaxis = list(showgrid = T, zeroline = F, showticklabels = F),
             showlegend = T,
             margin = list(t = 50, b = 50))
  })
  # Donut chart of profitable vs. unprofitable movies (if_profit flag).
  output$profit = renderPlotly({
    p = movie %>%
      group_by(if_profit) %>%
      summarise(count = n()) %>%
      plot_ly(labels = ~factor(if_profit), values = ~count,
              insidetextfont = list(color = 'Black'),
              marker = list(colors = sample(brewer.pal(11, "Spectral")),
                            line = list(color = 'Black', width = .5)),
              opacity = 0.8) %>%
      add_pie(hole = 0.6) %>%
      layout(title = "",
             titlefont = list(family = "Times", size = 20, color = "grey"),
             xaxis = list(showgrid = T, zeroline = F, showticklabels = F),
             yaxis = list(showgrid = T, zeroline = F, showticklabels = F),
             margin = list(t = 50, b = 50))
  })
  # Histogram of IMDB scores, fill sampled from the Spectral palette.
  output$score = renderPlotly({
    p = hist_plot(movie$imdb_score, bwidth = 0.1,
                  "IMDB Score",
                  fill = sample(brewer.pal(11, "Spectral"), 1),
                  color = "black")
  })
  # Side-by-side log-scale histograms of budget and gross.
  output$money = renderPlotly({
    options(scipen = 999)
    # transformed budget histograms
    p1 = budg_gross_hist(movie$budget) %>%
      layout(annotations = a1)
    # transformed gross histograms
    p2 = budg_gross_hist(movie$gross) %>%
      layout(annotations = b1)
    p = subplot(p1, p2, widths = c(0.5, 0.5))
  })
  # IMDB score distribution split by release before/after 2000.
  output$year = renderPlotly({
    movie$before_n_after_2000 = ifelse(movie$title_year >= 2000, 1, 0)
    # plotting a histogram separated by before_n_after_2000
    his = movie %>%
      ggplot(aes(x = imdb_score, fill = factor(before_n_after_2000))) +
      geom_histogram(binwidth = 0.1,
                     color = "black",
                     position = "dodge",
                     size = 0.2,
                     alpha = 0.7) +
      xlab("IMDB Score") +
      ggtitle("") +
      scale_fill_manual(values = c(sample(brewer.pal(11, "Spectral")))) +
      theme_minimal() +
      theme(plot.title = element_text(size = 14,
                                      colour = "darkgrey",
                                      family = "Times"))
    p = ggplotly(his) %>%
      layout(margin = list(t = 50, b = 100),
             xaxis = a, yaxis = a,
             legend = list(orientation = "h", size = 4,
                           bgcolor = "#E2E2E2",
                           bordercolor = "darkgrey",
                           borderwidth = 1,
                           x = 0,
                           y = -0.1))
  })
})
|
# Problem 7
# https://projecteuler.net/problem=7
# By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13,
# we can see that the 6th prime is 13.
#What is the 10,001st prime number?
library(primes)
# Walk the prime sequence one step at a time: starting from 2 (the 1st
# prime), 10,000 calls to next_prime() land on the 10,001st prime.
# Fix: the original passed a length-3 vector built with `:` into
# next_prime(), which only worked by accident (`:` keeps the first
# element of a vector operand, with warnings). A scalar step is the
# intended logic and produces the same answer.
i <- 1
latest_prime <- 2
while (i < 10001) {
  latest_prime <- next_prime(latest_prime)
  i <- i + 1
}
# Answer is: 104743
cat("Answer is:", latest_prime, "\n")
|
/problem_7.R
|
no_license
|
kbelcher3/project_euler
|
R
| false
| false
| 396
|
r
|
# Problem 7
# https://projecteuler.net/problem=7
# By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13,
# we can see that the 6th prime is 13.
#What is the 10,001st prime number?
library(primes)
# Walk the prime sequence one step at a time: starting from 2 (the 1st
# prime), 10,000 calls to next_prime() land on the 10,001st prime.
# Fix: the original passed a length-3 vector built with `:` into
# next_prime(), which only worked by accident (`:` keeps the first
# element of a vector operand, with warnings). A scalar step is the
# intended logic and produces the same answer.
i <- 1
latest_prime <- 2
while (i < 10001) {
  latest_prime <- next_prime(latest_prime)
  i <- i + 1
}
# Answer is: 104743
cat("Answer is:", latest_prime, "\n")
|
# Intro practice script (ISLR chapter 2 lab): vectors, matrices,
# random numbers, and base graphics.
x=c(1,2,3,5)
x
y=c(2,3,4,6)
plot(x,y)
length(x)
length(y)
ls()
# 2x2 matrices: `a` filled column-wise (default), `b` row-wise.
a=matrix(data=c(1,2,3,4), nrow=2, ncol=2)
a
b=matrix(data=c(1,2,3,4),2,2,byrow=TRUE)
b
sqrt(b)
u=rnorm(50)
# NOTE(review): x has length 4 here, so adding rnorm(50) recycles x with
# a warning; the ISLR lab uses x = rnorm(50) at this point -- confirm intent.
v=x+rnorm(50, mean=50, sd=1)
plot(u,v)
set.seed(3)
y=rnorm(100)
mean(y)
var(y)
x=rnorm(100)
y=rnorm(100)
plot(x,y)
x= seq(1,10)
x
# 50 evenly spaced points on [-pi, pi] for the contour/surface demos.
x=seq(-pi, pi, length=50)
y=x
f=outer(x,y, function(x,y)cos(y)/(1+x^2))
contour(x,y,f)
plot(x,f[1:50]) # f is 50x50, so f[1:50] is its first column
fa=(f-t(f))/2
contour(x,y,fa, nlevels=15)
contour(x,y,f,nlevels=45, add=T)
mean(x)
var(x)
sqrt(var(x))
image(x,y,fa)
persp(x,y,fa)
persp(x,y,fa,theta=30)
# Matrix indexing: A[row, col].
A = matrix(1:16,4,4)
A[2,3]
A
|
/Practice/Ch2Intro.R
|
no_license
|
animohan/s216
|
R
| false
| false
| 622
|
r
|
# Intro practice script (ISLR chapter 2 lab): vectors, matrices,
# random numbers, and base graphics.
x=c(1,2,3,5)
x
y=c(2,3,4,6)
plot(x,y)
length(x)
length(y)
ls()
# 2x2 matrices: `a` filled column-wise (default), `b` row-wise.
a=matrix(data=c(1,2,3,4), nrow=2, ncol=2)
a
b=matrix(data=c(1,2,3,4),2,2,byrow=TRUE)
b
sqrt(b)
u=rnorm(50)
# NOTE(review): x has length 4 here, so adding rnorm(50) recycles x with
# a warning; the ISLR lab uses x = rnorm(50) at this point -- confirm intent.
v=x+rnorm(50, mean=50, sd=1)
plot(u,v)
set.seed(3)
y=rnorm(100)
mean(y)
var(y)
x=rnorm(100)
y=rnorm(100)
plot(x,y)
x= seq(1,10)
x
# 50 evenly spaced points on [-pi, pi] for the contour/surface demos.
x=seq(-pi, pi, length=50)
y=x
f=outer(x,y, function(x,y)cos(y)/(1+x^2))
contour(x,y,f)
plot(x,f[1:50]) # f is 50x50, so f[1:50] is its first column
fa=(f-t(f))/2
contour(x,y,fa, nlevels=15)
contour(x,y,f,nlevels=45, add=T)
mean(x)
var(x)
sqrt(var(x))
image(x,y,fa)
persp(x,y,fa)
persp(x,y,fa,theta=30)
# Matrix indexing: A[row, col].
A = matrix(1:16,4,4)
A[2,3]
A
|
# Load the preprocessed GSE11121 expression set and build the limma
# design/contrast matrices for a lived-vs-died comparison.
load(file='output_data/GSE11121_new_exprSet.Rdata')
exprSet=new_exprSet
dim(exprSet)
colnames(phe)
# First phenotype column encodes survival status (1 = died).
group_list=phe[,1]
table(group_list)
group_list=ifelse(group_list==1,'died','lived')
library(limma)
# NOTE(review): `tmp` is never used afterwards; candidate for removal.
tmp=data.frame(case=c(0,0,0,1,1,1),
               control=c(1,1,1,0,0,0))
(design <- model.matrix(~0+factor(group_list)))
colnames(design)=levels(factor(group_list))
rownames(design)=colnames(exprSet)
head(design)
# The first (generic) contrast is immediately overwritten by the explicit
# "lived-died" contrast below.
contrast.matrix<-makeContrasts(paste0(unique(group_list),collapse = "-"),
                               levels = design)
contrast.matrix<-makeContrasts("lived-died",
                               levels = design)
contrast.matrix ## this matrix declares that we compare the two groups (tutorial text said progres. vs stable) in the differential analysis
# Run a limma differential-expression analysis and return the NA-filtered
# full top table for the first contrast.
#
# exprSet:         expression matrix (genes x samples).
# design:          model/design matrix.
# contrast.matrix: contrast matrix from makeContrasts().
deg = function(exprSet,design,contrast.matrix){
  # Step 1: fit gene-wise linear models.
  model_fit <- lmFit(exprSet, design)
  # Step 2: re-parameterise for the contrast, then apply empirical-Bayes
  # moderation (defaults: no intensity trend).
  contrast_fit <- contrasts.fit(model_fit, contrast.matrix)
  contrast_fit <- eBayes(contrast_fit)
  # Step 3: extract the complete ranked table and drop rows with NAs.
  full_table <- topTable(contrast_fit, coef = 1, n = Inf)
  na.omit(full_table)
}
# Run the analysis, then produce a top-50 heatmap and a volcano plot.
re = deg(exprSet, design, contrast.matrix)
nrDEG=re
## heatmap
library(pheatmap)
# Z-score the 50 top-ranked genes across samples before plotting.
choose_gene=head(rownames(nrDEG),50) ## 50 maybe better
choose_matrix=exprSet[choose_gene,]
choose_matrix=t(scale(t(choose_matrix)))
pheatmap(choose_matrix,filename = 'output_plots/DEG_top50_heatmap.png')
library(ggplot2)
## volcano plot
colnames(nrDEG)
plot(nrDEG$logFC,-log10(nrDEG$P.Value))
DEG=nrDEG
# Data-driven fold-change cutoff: mean(|logFC|) + 2*sd(|logFC|).
logFC_cutoff <- with(DEG,mean(abs( logFC)) + 2*sd(abs( logFC)) )
# logFC_cutoff=1
# Classify each gene as UP/DOWN/NOT using p < 0.05 and the logFC cutoff.
DEG$change = as.factor(ifelse(DEG$P.Value < 0.05 & abs(DEG$logFC) > logFC_cutoff,
                              ifelse(DEG$logFC > logFC_cutoff ,'UP','DOWN'),'NOT')
)
this_tile <- paste0('Cutoff for logFC is ',round(logFC_cutoff,3),
                    '\nThe number of up gene is ',nrow(DEG[DEG$change =='UP',]) ,
                    '\nThe number of down gene is ',nrow(DEG[DEG$change =='DOWN',])
)
g = ggplot(data=DEG,
           aes(x=logFC, y=-log10(P.Value),
               color=change)) +
  geom_point(alpha=0.4, size=1.75) +
  theme_set(theme_set(theme_bw(base_size=20)))+
  xlab("log2 fold change") + ylab("-log10 p-value") +
  ggtitle( this_tile ) + theme(plot.title = element_text(size=15,hjust = 0.5))+
  scale_colour_manual(values = c('blue','black','red')) ## corresponding to the levels(res$change)
print(g)
ggsave(g,filename = 'output_plots/volcano.png')
save(new_exprSet,group_list,nrDEG,DEG, file='output_data/GSE11121_DEG.Rdata')
|
/GSE11121/step3-DEG.R
|
no_license
|
Zheng7AI310/GEO
|
R
| false
| false
| 2,622
|
r
|
# Load the preprocessed GSE11121 expression set and build the limma
# design/contrast matrices for a lived-vs-died comparison.
load(file='output_data/GSE11121_new_exprSet.Rdata')
exprSet=new_exprSet
dim(exprSet)
colnames(phe)
# First phenotype column encodes survival status (1 = died).
group_list=phe[,1]
table(group_list)
group_list=ifelse(group_list==1,'died','lived')
library(limma)
# NOTE(review): `tmp` is never used afterwards; candidate for removal.
tmp=data.frame(case=c(0,0,0,1,1,1),
               control=c(1,1,1,0,0,0))
(design <- model.matrix(~0+factor(group_list)))
colnames(design)=levels(factor(group_list))
rownames(design)=colnames(exprSet)
head(design)
# The first (generic) contrast is immediately overwritten by the explicit
# "lived-died" contrast below.
contrast.matrix<-makeContrasts(paste0(unique(group_list),collapse = "-"),
                               levels = design)
contrast.matrix<-makeContrasts("lived-died",
                               levels = design)
contrast.matrix ## this matrix declares that we compare the two groups (tutorial text said progres. vs stable) in the differential analysis
# Run a limma differential-expression analysis and return the NA-filtered
# full top table for the first contrast.
#
# exprSet:         expression matrix (genes x samples).
# design:          model/design matrix.
# contrast.matrix: contrast matrix from makeContrasts().
deg = function(exprSet,design,contrast.matrix){
  # Step 1: fit gene-wise linear models.
  model_fit <- lmFit(exprSet, design)
  # Step 2: re-parameterise for the contrast, then apply empirical-Bayes
  # moderation (defaults: no intensity trend).
  contrast_fit <- contrasts.fit(model_fit, contrast.matrix)
  contrast_fit <- eBayes(contrast_fit)
  # Step 3: extract the complete ranked table and drop rows with NAs.
  full_table <- topTable(contrast_fit, coef = 1, n = Inf)
  na.omit(full_table)
}
# Run the limma pipeline defined above and keep the full result table.
re = deg(exprSet, design, contrast.matrix)
nrDEG=re
## heatmap
library(pheatmap)
# Top 50 genes by the table's default ranking.
choose_gene=head(rownames(nrDEG),50) ## 50 maybe better
choose_matrix=exprSet[choose_gene,]
# Row-wise z-score so the heatmap shows per-gene relative expression.
choose_matrix=t(scale(t(choose_matrix)))
pheatmap(choose_matrix,filename = 'output_plots/DEG_top50_heatmap.png')
library(ggplot2)
## volcano plot
colnames(nrDEG)
# Quick base-graphics preview before the ggplot version below.
plot(nrDEG$logFC,-log10(nrDEG$P.Value))
DEG=nrDEG
# Data-driven fold-change cutoff: mean(|logFC|) + 2 * SD(|logFC|).
logFC_cutoff <- with(DEG,mean(abs( logFC)) + 2*sd(abs( logFC)) )
# logFC_cutoff=1
# Label each gene UP/DOWN/NOT using P.Value < 0.05 and the logFC cutoff.
DEG$change = as.factor(ifelse(DEG$P.Value < 0.05 & abs(DEG$logFC) > logFC_cutoff,
                              ifelse(DEG$logFC > logFC_cutoff ,'UP','DOWN'),'NOT')
)
# Plot title reporting the cutoff and the up/down gene counts.
this_tile <- paste0('Cutoff for logFC is ',round(logFC_cutoff,3),
                    '\nThe number of up gene is ',nrow(DEG[DEG$change =='UP',]) ,
                    '\nThe number of down gene is ',nrow(DEG[DEG$change =='DOWN',])
)
g = ggplot(data=DEG,
           aes(x=logFC, y=-log10(P.Value),
               color=change)) +
  geom_point(alpha=0.4, size=1.75) +
  theme_set(theme_set(theme_bw(base_size=20)))+
  xlab("log2 fold change") + ylab("-log10 p-value") +
  ggtitle( this_tile ) + theme(plot.title = element_text(size=15,hjust = 0.5))+
  scale_colour_manual(values = c('blue','black','red')) ## colours follow levels(DEG$change): DOWN, NOT, UP
print(g)
ggsave(g,filename = 'output_plots/volcano.png')
# Persist everything needed by downstream steps.
save(new_exprSet,group_list,nrDEG,DEG, file='output_data/GSE11121_DEG.Rdata')
|
# Created by Matthew A. Birk
# Dependencies: birk, marelac
# Converts % air saturation to other O2 units
# Last updated: Feb 2015
#' Convert Units of Oxygen
#'
#' Given the percent of oxygen compared to air-saturated water (at equilibrium with air) (i.e. percent air saturation), a list of commonly used units of oxygen partial pressures and concentrations are returned.
#'
#' Conversions are based on relationships and values from the package \code{\link[marelac]{marelac}}.
#'
#' @param perc_a.s. percent of air saturation. Default is 100\%.
#' @param salinity salinity of water sample (ppt). Default is 35 ppt.
#' @param temp temperature of water sample (°C). Default is 25 °C.
#' @param air_pres pressure of air overlying water sample (bar). Default is 1.013253 bar.
#'
#' @author Matthew A. Birk, \email{matthewabirk@@gmail.com}
#'
#' @examples
#' o2_unit_conv(perc_a.s. = 50)
#' o2_unit_conv(perc_a.s. = 50, salinity = 0, temp = 10, air_pres = 1.2)['umol_per_l']
#'
#' @encoding UTF-8
#' @export
#' @import marelac
#' @import birk
# Convert percent air saturation into a list of common O2 partial-pressure
# and concentration units (see the roxygen block above for parameters).
o2_unit_conv <- function(perc_a.s. = 100, salinity = 35, temp = 25,
                         air_pres = 1.013253) {
  # Shared intermediates: O2 mole fraction of air and saturation fraction.
  frac_o2 <- marelac::atmComp('O2')
  # Partial pressure of O2 in atm, corrected for water vapour pressure.
  pO2_atm <- (air_pres - marelac::vapor(S = salinity, t = temp)) *
    frac_o2 * perc_a.s. / 100
  # O2 saturation concentration (umol/l) at 100% air saturation.
  sat_conc <- marelac::gas_satconc(S = salinity, t = temp, P = air_pres,
                                   species = 'O2')
  out <- list(
    percent_a.s. = perc_a.s.,
    percent_o2 = frac_o2 * perc_a.s.,
    hPa = birk::conv_unit(pO2_atm, 'atm', 'hPa'),
    torr = birk::conv_unit(pO2_atm, 'atm', 'torr'),
    mg_per_l = sat_conc * 1e-6 * marelac::molweight('O2') * 1e3 * perc_a.s. / 100,
    umol_per_l = sat_conc * perc_a.s. / 100
  )
  # marelac functions return named vectors; strip the names so each entry
  # is a bare numeric. percent_a.s. is left exactly as supplied.
  for (nm in c('percent_o2', 'hPa', 'torr', 'mg_per_l', 'umol_per_l')) {
    out[[nm]] <- unname(out[[nm]])
  }
  out
}
|
/presens_1.0.0/presens.Rcheck/00_pkg_src/presens/R/o2_unit_conv.R
|
no_license
|
matthewabirk/presens
|
R
| false
| false
| 1,836
|
r
|
# Created by Matthew A. Birk
# Dependencies: birk, marelac
# Converts % air saturation to other O2 units
# Last updated: Feb 2015
#' Convert Units of Oxygen
#'
#' Given the percent of oxygen compared to air-saturated water (at equilibrium with air) (i.e. percent air saturation), a list of commonly used units of oxygen partial pressures and concentrations are returned.
#'
#' Conversions are based on relationships and values from the package \code{\link[marelac]{marelac}}.
#'
#' @param perc_a.s. percent of air saturation. Default is 100\%.
#' @param salinity salinity of water sample (ppt). Default is 35 ppt.
#' @param temp temperature of water sample (°C). Default is 25 °C.
#' @param air_pres pressure of air overlying water sample (bar). Default is 1.013253 bar.
#'
#' @author Matthew A. Birk, \email{matthewabirk@@gmail.com}
#'
#' @examples
#' o2_unit_conv(perc_a.s. = 50)
#' o2_unit_conv(perc_a.s. = 50, salinity = 0, temp = 10, air_pres = 1.2)['umol_per_l']
#'
#' @encoding UTF-8
#' @export
#' @import marelac
#' @import birk
# Convert percent air saturation into a list of common O2 partial-pressure
# and concentration units (see the roxygen block above for parameters).
o2_unit_conv=function(perc_a.s.=100,salinity=35,temp=25,air_pres=1.013253){
# Every output unit is derived from marelac's gas constants; birk::conv_unit
# handles the atm -> hPa / torr pressure conversions. The O2 partial
# pressure is corrected for water vapour pressure via marelac::vapor.
x=list(
percent_a.s.=perc_a.s.,
# mole fraction of O2 in air times percent saturation
percent_o2=marelac::atmComp('O2')*perc_a.s.,
hPa=birk::conv_unit((air_pres-marelac::vapor(S=salinity,t=temp))*marelac::atmComp('O2')*perc_a.s./100,'atm','hPa'),
torr=birk::conv_unit((air_pres-marelac::vapor(S=salinity,t=temp))*marelac::atmComp('O2')*perc_a.s./100,'atm','torr'),
# saturation concentration (umol/l) -> mg/l via molecular weight
mg_per_l=marelac::gas_satconc(S=salinity,t=temp,P=air_pres,species='O2')*1e-6*marelac::molweight('O2')*1e3*perc_a.s./100,
umol_per_l=marelac::gas_satconc(S=salinity,t=temp,P=air_pres,species='O2')*perc_a.s./100
)
# marelac functions return named vectors; strip the names so each list
# entry is a bare numeric (percent_a.s. is left exactly as supplied).
attr(x[['percent_o2']],'names')=NULL
attr(x[['hPa']],'names')=NULL
attr(x[['torr']],'names')=NULL
attr(x[['mg_per_l']],'names')=NULL
attr(x[['umol_per_l']],'names')=NULL
return(x)
}
|
# Put custom tests in this file.
# Uncommenting the following line of code will disable
# auto-detection of new variables and thus prevent swirl from
# executing every command twice, which can slow things down.
# AUTO_DETECT_NEWVAR <- FALSE
# However, this means that you should detect user-created
# variables when appropriate. The answer test, creates_new_var()
# can be used for for the purpose, but it also re-evaluates the
# expression which the user entered, so care must be taken.
# swirl answer test: succeed when the installed `pkg_name` is at least
# version `pkg_version`; a missing package (or any lookup error) fails.
test_package_version <- function(pkg_name, pkg_version) {
  # Fetch swirl's lesson environment for parity with the other answer
  # tests (it is not otherwise used here).
  e <- get("e", parent.frame())
  version_ok <- function() {
    packageVersion(pkg_name) >= package_version(pkg_version)
  }
  tryCatch(version_ok(), error = function(cond) FALSE)
}
# swirl answer test: TRUE when package `pkg_name` is attached, detected
# by a search-path entry ending in "/<pkg_name>".
test_search_path <- function(pkg_name) {
  pattern <- sprintf("/%s$", pkg_name)
  tryCatch({
    matches <- grep(pattern, searchpaths())
    length(matches) > 0
  }, error = function(cond) FALSE)
}
# Source an R script, choosing the encoding from the session locale:
# multi-byte, non-UTF-8 locales source the file as-is; every other
# locale reads it as UTF-8. Failures are captured by try() and the
# try() result is returned.
source_by_l10n_info <- function(path) {
  loc <- l10n_info()
  utf8_ok <- !loc$MBCS || loc$`UTF-8`
  if (utf8_ok) {
    try(source(path, local = new.env(), encoding = "UTF-8"), silent = TRUE)
  } else {
    try(source(path, local = new.env()), silent = TRUE)
  }
}
# swirl answer test for lesson 01: re-runs the user's script and checks
# that cl1/cl2/cl3 in the global environment equal the reference classes
# of the mlbench `shapes` object. Returns TRUE on success, FALSE otherwise.
rdatamining_01_test <- function() {
  e <- get("e", parent.frame())  # swirl lesson environment
  # NOTE(review): check_then_install() is not defined in this file;
  # presumably supplied by the course setup scripts -- confirm.
  check_then_install("mlbench", "2.1.1")
  # Re-execute the learner's script; bail out early if it fails to source.
  source_result <- source_by_l10n_info(e$script_temp_path)
  if (class(source_result)[1] == "try-error") return(FALSE)
  name.list <- c("cl1", "cl2", "cl3")
  # Reference clustering labels taken from the `shapes` dataset.
  reference <- as.integer(get("shapes", envir = globalenv())$classes)
  tryCatch({
    for(name in name.list) {
      if (!isTRUE(all.equal(
        get(name, envir = globalenv()),
        reference
      ))) stop(sprintf("%s is wrong! Try again.\n", name))
    }
    TRUE
  }, error = function(e) {
    # Report the first failing variable and signal an incorrect answer.
    # (The handler argument shadows the swirl env `e`; harmless here.)
    cat(conditionMessage(e))
    FALSE
  })
}
# swirl answer test for the Taipower lesson.
# NOTE(review): `name.list` and `reference` are not defined in this
# function or anywhere in this file; unless they happen to exist in the
# global environment, the loop errors immediately and the tryCatch
# returns FALSE, so this test can never pass as written -- confirm intent.
rdatamining_taipower_test <- function(){
  e <- get("e", parent.frame())  # swirl lesson environment (unused below)
  tryCatch({
    for(name in name.list) {
      if (!isTRUE(all.equal(
        get(name, envir = globalenv()),
        reference
      ))) stop(sprintf("%s is wrong! Try again.\n", name))
    }
    TRUE
  }, error = function(e) {
    # Print the failure reason and signal an incorrect answer.
    cat(conditionMessage(e))
    FALSE
  })
}
|
/RDM-04-Clustering/customTests.R
|
no_license
|
hjhsu/RDM_hj2016
|
R
| false
| false
| 2,100
|
r
|
# Put custom tests in this file.
# Uncommenting the following line of code will disable
# auto-detection of new variables and thus prevent swirl from
# executing every command twice, which can slow things down.
# AUTO_DETECT_NEWVAR <- FALSE
# However, this means that you should detect user-created
# variables when appropriate. The answer test, creates_new_var()
# can be used for for the purpose, but it also re-evaluates the
# expression which the user entered, so care must be taken.
# swirl answer test: TRUE when the installed version of `pkg_name` is at
# least `pkg_version`; any lookup error (e.g. package missing) yields FALSE.
test_package_version <- function(pkg_name, pkg_version) {
  # `e` is swirl's lesson environment; fetched here but not used below.
  e <- get("e", parent.frame())
  tryCatch(
    packageVersion(pkg_name) >= package_version(pkg_version),
    # NOTE: the handler argument shadows the swirl env `e` (harmless here).
    error = function(e) FALSE)
}
# swirl answer test: TRUE iff a package whose library path ends in
# "/<pkg_name>" is on the search path (i.e. the package is attached).
test_search_path <- function(pkg_name) {
  tryCatch(
    length(grep(sprintf("/%s$", pkg_name), searchpaths())) > 0,
    error = function(e) FALSE)
}
# Source `path` with an encoding choice driven by the current locale:
# in a multi-byte, non-UTF-8 locale the file is sourced as-is; otherwise
# it is read as UTF-8. Errors are captured by try() and returned.
source_by_l10n_info <- function(path) {
  info <- l10n_info()
  if (info$MBCS & !info$`UTF-8`) {
    try(source(path, local = new.env()), silent = TRUE)
  } else {
    try(source(path, local = new.env(), encoding = "UTF-8"), silent = TRUE)
  }
}
# swirl answer test for lesson 01: re-runs the user's script and checks
# that cl1/cl2/cl3 in the global environment equal the reference classes
# of the mlbench `shapes` object. Returns TRUE on success, FALSE otherwise.
rdatamining_01_test <- function() {
  e <- get("e", parent.frame())  # swirl lesson environment
  # NOTE(review): check_then_install() is not defined in this file;
  # presumably supplied by the course setup scripts -- confirm.
  check_then_install("mlbench", "2.1.1")
  # Re-execute the learner's script; bail out early if it fails to source.
  source_result <- source_by_l10n_info(e$script_temp_path)
  if (class(source_result)[1] == "try-error") return(FALSE)
  name.list <- c("cl1", "cl2", "cl3")
  # Reference clustering labels taken from the `shapes` dataset.
  reference <- as.integer(get("shapes", envir = globalenv())$classes)
  tryCatch({
    for(name in name.list) {
      if (!isTRUE(all.equal(
        get(name, envir = globalenv()),
        reference
      ))) stop(sprintf("%s is wrong! Try again.\n", name))
    }
    TRUE
  }, error = function(e) {
    # Report the first failing variable and signal an incorrect answer.
    # (The handler argument shadows the swirl env `e`; harmless here.)
    cat(conditionMessage(e))
    FALSE
  })
}
# swirl answer test for the Taipower lesson.
# NOTE(review): `name.list` and `reference` are not defined in this
# function or anywhere in this file; unless they happen to exist in the
# global environment, the loop errors immediately and the tryCatch
# returns FALSE, so this test can never pass as written -- confirm intent.
rdatamining_taipower_test <- function(){
  e <- get("e", parent.frame())  # swirl lesson environment (unused below)
  tryCatch({
    for(name in name.list) {
      if (!isTRUE(all.equal(
        get(name, envir = globalenv()),
        reference
      ))) stop(sprintf("%s is wrong! Try again.\n", name))
    }
    TRUE
  }, error = function(e) {
    # Print the failure reason and signal an incorrect answer.
    cat(conditionMessage(e))
    FALSE
  })
}
|
#' Compute asymptotically linear IPTW estimators with super learning
#' for the propensity score
#'
#' @param W A \code{data.frame} of named covariates
#' @param A A \code{numeric} vector of binary treatment assignment (assumed to
#' be equal to 0 or 1)
#' @param Y A \code{numeric} numeric of continuous or binary outcomes.
#' @param DeltaY A \code{numeric} indicator of missing outcome (assumed to be
#' equal to 0 if missing 1 if observed)
#' @param DeltaA A \code{numeric} indicator of missing treatment (assumed to be
#' equal to 0 if missing 1 if observed)
#' @param a_0 A vector of \code{numeric} treatment values at which to return
#' marginal mean estimates.
#' @param stratify A \code{logical} indicating whether to estimate the missing
#' outcome regression separately for observations with different levels of
#' \code{A} (if \code{TRUE}) or to pool across \code{A} (if \code{FALSE}).
#' @param family A \code{family} object equal to either \code{binomial()} or
#' \code{gaussian()}, to be passed to the \code{SuperLearner} or \code{glm}
#' function.
#' @param SL_g A vector of characters describing the super learner library to be
#' used for each of the propensity score regressions (\code{DeltaA}, \code{A},
#' and \code{DeltaY}). To use the same library for each of the regressions (or
#' if there is no missing data in \code{A} nor \code{Y}), a single library may
#' be input. See \code{link{SuperLearner::SuperLearner}} for details on how
#' super learner libraries can be specified.
#' @param SL_Qr A vector of characters or a list describing the Super Learner
#' library to be used for the reduced-dimension outcome regression.
#' @param glm_g A list of characters describing the formulas to be used
#' for each of the propensity score regressions (\code{DeltaA}, \code{A}, and
#' \code{DeltaY}). To use the same formula for each of the regressions (or if
#' there is no missing data in \code{A} nor \code{Y}), a single character
#' formula may be input.
#' @param glm_Qr A character describing a formula to be used in the call to
#' \code{glm} for reduced-dimension outcome regression. Ignored if
#' \code{SL_Qr!=NULL}. The formula should use the variable name \code{'gn'}.
#' @param maxIter A numeric that sets the maximum number of iterations the TMLE
#' can perform in its fluctuation step.
#' @param tolIC A numeric that defines the stopping criteria based on the
#' empirical mean of the influence function.
#' @param tolg A numeric indicating the minimum value for estimates of the
#' propensity score.
#' @param verbose A logical indicating whether to print status updates.
#' @param returnModels A logical indicating whether to return model fits for the
#' propensity score and reduced-dimension regressions.
#' @param cvFolds A numeric equal to the number of folds to be used in
#' cross-validated fitting of nuisance parameters. If \code{cvFolds = 1}, no
#' cross-validation is used.
#' @param parallel A logical indicating whether to use parallelization based on
#' \code{future} to estimate nuisance parameters in parallel. Only useful if
#' \code{cvFolds > 1}. By default, a \code{multiprocess} evaluation scheme is
#' invoked, using forked R processes (if supported on the OS) and background R
#' sessions otherwise. Users may also register their own backends using the
#' \code{future.batchtools} package.
#' @param future_hpc A character string identifying a high-performance computing
#' backend to be used with parallelization. This should match exactly one of
#' the options available from the \code{future.batchtools} package.
#' @param gn An optional list of propensity score estimates. If specified, the
#' function will ignore the nuisance parameter estimation specified by
#' \code{SL_g} and \code{glm_g}. The entries in the list should correspond to
#' the propensity for the observed values of \code{W}, with order determined by
#' the input to \code{a_0} (e.g., if \code{a_0 = c(0,1)} then \code{gn[[1]]}
#' should be propensity of \code{A} = 0 and \code{gn[[2]]} should be propensity
#' of \code{A} = 1).
#' @param ... Other options (not currently used).
#' @return An object of class \code{"adaptive_iptw"}.
#' \describe{
#' \item{\code{iptw_tmle}}{A \code{list} of point estimates and
#' covariance matrix for the IPTW estimator based on a targeted
#' propensity score. }
#' \item{\code{iptw_tmle_nuisance}}{A \code{list} of the final TMLE estimates
#' of the propensity score (\code{$gnStar}) and reduced-dimension
#' regression (\code{$QrnStar}) evaluated at the observed data values.}
#' \item{\code{iptw_os}}{A \code{list} of point estimates and covariance matrix
#' for the one-step correct IPTW estimator.}
#' \item{\code{iptw_os_nuisance}}{A \code{list} of the initial estimates of the
#' propensity score and reduced-dimension regression evaluated at the
#' observed data values.}
#' \item{\code{iptw}}{A \code{list} of point estimates for the standard IPTW
#' estimator. No estimate of the covariance matrix is provided because
#' theory does not support asymptotic Normality of the IPTW estimator if
#' super learning is used to estimate the propensity score.}
#' \item{\code{gnMod}}{The fitted object for the propensity score. Returns
#' \code{NULL} if \code{returnModels = FALSE}.}
#' \item{\code{QrnMod}}{The fitted object for the reduced-dimension regression
#' that guards against misspecification of the outcome regression.
#' Returns \code{NULL} if \code{returnModels = FALSE}.}
#' \item{\code{a_0}}{The treatment levels that were requested for computation
#' of covariate-adjusted means.}
#' \item{\code{call}}{The call to \code{adaptive_iptw}.}
#' }
#'
#' @importFrom future plan
#' @importFrom future.apply future_lapply
#' @importFrom doFuture registerDoFuture
#' @importFrom stats cov
#'
#' @export
#'
#' @examples
#' # load super learner
#' library(SuperLearner)
#' # simulate data
#' set.seed(123456)
#' n <- 100
#' W <- data.frame(W1 = runif(n), W2 = rnorm(n))
#' A <- rbinom(n, 1, plogis(W$W1 - W$W2))
#' Y <- rbinom(n, 1, plogis(W$W1 * W$W2 * A))
#' # fit iptw with maxIter = 1 to run fast
#' \donttest{
#' fit1 <- adaptive_iptw(
#' W = W, A = A, Y = Y, a_0 = c(1, 0),
#' SL_g = c("SL.glm", "SL.mean", "SL.step"),
#' SL_Qr = "SL.npreg", maxIter = 1
#' )
#' }
adaptive_iptw <- function(W, A, Y,
                          DeltaY = as.numeric(!is.na(Y)),
                          DeltaA = as.numeric(!is.na(A)),
                          stratify = FALSE,
                          family = if (all(Y %in% c(0, 1))) {
                            stats::binomial()
                          } else {
                            stats::gaussian()
                          },
                          a_0 = unique(A[!is.na(A)]),
                          SL_g = NULL, glm_g = NULL,
                          SL_Qr = NULL, glm_Qr = NULL,
                          returnModels = TRUE,
                          verbose = FALSE,
                          maxIter = 2,
                          tolIC = 1 / length(Y),
                          tolg = 1e-2,
                          cvFolds = 1,
                          parallel = FALSE,
                          future_hpc = NULL,
                          gn = NULL,
                          ...) {
  # Capture the call so it can be returned in the result object.
  call <- match.call()
  # if cvFolds non-null split data into cvFolds pieces
  n <- length(Y)
  if (cvFolds != 1) {
    # Random partition of 1:n into cvFolds validation folds for
    # cross-fitted nuisance estimation.
    validRows <- split(sample(seq_len(n)), rep(1:cvFolds, length = n))
  } else {
    # No cross-fitting: a single "fold" containing every observation.
    validRows <- list(seq_len(n))
  }
  # use futures with foreach if parallel mode
  # NOTE(review): future::plan() is changed globally here and never
  # restored (no on.exit), so this call alters the caller's future
  # strategy for the rest of the session -- confirm intended.
  if (!parallel) {
    future::plan(future::transparent)
  } else {
    doFuture::registerDoFuture()
    if (all(c("sequential", "uniprocess") %in% class(future::plan())) &
        is.null(future_hpc)) {
      future::plan(future::multiprocess)
    } else if (!is.null(future_hpc)) {
      # Map the requested HPC scheduler name onto a batchtools backend.
      if (future_hpc == "batchtools_torque") {
        future::plan(future.batchtools::batchtools_torque)
      } else if (future_hpc == "batchtools_slurm") {
        future::plan(future.batchtools::batchtools_slurm)
      } else if (future_hpc == "batchtools_sge") {
        future::plan(future.batchtools::batchtools_sge)
      } else if (future_hpc == "batchtools_lsf") {
        future::plan(future.batchtools::batchtools_lsf)
      } else if (future_hpc == "batchtools_openlava") {
        future::plan(future.batchtools::batchtools_openlava)
      } else {
        stop("The currently specified HPC backend is not (yet) available.")
      }
    }
  }
  # -------------------------------
  # estimate propensity score
  # -------------------------------
  if (is.null(gn)) {
    # Fit the propensity score on training folds and predict on each
    # validation fold (cross-fitting when cvFolds > 1).
    gnOut <- future.apply::future_lapply(
      X = validRows, FUN = estimateG,
      A = A, W = W,
      DeltaA = DeltaA, DeltaY = DeltaY,
      tolg = tolg, verbose = verbose,
      returnModels = returnModels,
      SL_g = SL_g, glm_g = glm_g,
      a_0 = a_0, stratify = stratify
    )
    # re-order predictions
    # Each fold yields an interleaved (estimates, fit) pair; take the
    # odd entries (estimates), concatenate per treatment level, then
    # stitch them back into the original observation order.
    gnValid <- unlist(gnOut, recursive = FALSE, use.names = FALSE)
    gnUnOrd <- do.call(Map, c(c, gnValid[seq(1, length(gnValid), 2)]))
    gn <- vector(mode = "list", length = length(a_0))
    for (i in seq_along(a_0)) {
      gn[[i]] <- rep(NA, n)
      gn[[i]][unlist(validRows)] <- gnUnOrd[[i]]
    }
    # obtain list of propensity score fits
    gnMod <- gnValid[seq(2, length(gnValid), 2)]
  } else {
    # User-supplied propensity scores: skip estimation entirely.
    gnMod <- NULL
  }
  # compute iptw estimator
  # Plain IPTW mean for each treatment level. Missing A/Y are replaced
  # by a -999 sentinel; the indicator (modA == a & DeltaA == 1 &
  # DeltaY == 1) zeroes those terms out before division by gn.
  psi_n <- mapply(a = split(a_0, seq_along(a_0)), g = gn, function(a, g) {
    modA <- A
    modA[is.na(A)] <- -999
    modY <- Y
    modY[is.na(Y)] <- -999
    mean(as.numeric(modA == a & DeltaA == 1 & DeltaY == 1) / g * modY)
  })
  # estimate influence function
  Dno <- eval_Diptw(
    A = A, Y = Y, DeltaA = DeltaA, DeltaY = DeltaY, gn = gn,
    psi_n = psi_n, a_0 = a_0
  )
  # -------------------------------------
  # estimate reduced dimension Q
  # -------------------------------------
  # note that NULL is input to estimateQrn -- internally the function
  # assign Qn = 0 for all a_0 because estimateQrn estimates the regression
  # of Y - Qn on gn (which is needed for drtmle), while here we just need
  # the regression of Y on gn.
  QrnOut <- future.apply::future_lapply(
    X = validRows, FUN = estimateQrn,
    Y = Y, A = A, W = W,
    DeltaA = DeltaA, DeltaY = DeltaY,
    Qn = NULL, gn = gn, glm_Qr = glm_Qr,
    family = family, SL_Qr = SL_Qr, a_0 = a_0,
    returnModels = returnModels
  )
  # re-order predictions
  # Same interleaved (estimates, fit) structure as gnOut above.
  QrnValid <- unlist(QrnOut, recursive = FALSE, use.names = FALSE)
  QrnUnOrd <- do.call(Map, c(c, QrnValid[seq(1, length(QrnValid), 2)]))
  Qrn <- vector(mode = "list", length = length(a_0))
  for (i in seq_along(a_0)) {
    Qrn[[i]] <- rep(NA, n)
    Qrn[[i]][unlist(validRows)] <- QrnUnOrd[[i]]
  }
  # obtain list of propensity score fits
  QrnMod <- QrnValid[seq(2, length(QrnValid), 2)]
  # Extra influence-function piece due to propensity estimation; its
  # empirical mean is the one-step correction subtracted below.
  Dngo <- eval_Diptw_g(
    A = A, DeltaA = DeltaA, DeltaY = DeltaY,
    Qrn = Qrn, gn = gn, a_0 = a_0
  )
  PnDgn <- lapply(Dngo, mean)
  # one-step iptw estimator
  psi.o <- mapply(
    a = psi_n, b = PnDgn, SIMPLIFY = FALSE,
    FUN = function(a, b) {
      a - b
    }
  )
  # targeted g estimator
  # TMLE: iteratively fluctuate the propensity score until the mean of
  # the extra IC piece falls below tolIC, or maxIter is reached.
  gnStar <- gn
  QrnStar <- Qrn
  PnDgnStar <- Inf
  ct <- 0
  # fluctuate
  # NOTE(review): if maxIter <= 0 the loop never runs and DngoStar is
  # undefined when DnoStarMat is built below -- confirm maxIter >= 1 is
  # an implicit requirement.
  while (max(abs(unlist(PnDgnStar))) > tolIC & ct < maxIter) {
    ct <- ct + 1
    # fluctuate gnStar
    gnStarOut <- fluctuateG(
      Y = Y, A = A, W = W, DeltaA = DeltaA,
      DeltaY = DeltaY, a_0 = a_0, tolg = tolg,
      gn = gnStar, Qrn = QrnStar
    )
    gnStar <- lapply(gnStarOut, function(x) {
      unlist(x$est)
    })
    # re-estimate reduced dimension regression
    QrnStarOut <- future.apply::future_lapply(
      X = validRows, FUN = estimateQrn,
      Y = Y, A = A, W = W,
      DeltaA = DeltaA, DeltaY = DeltaY,
      Qn = NULL, gn = gnStar,
      glm_Qr = glm_Qr, family = family,
      SL_Qr = SL_Qr, a_0 = a_0,
      returnModels = returnModels
    )
    # re-order predictions
    QrnValid <- unlist(QrnStarOut, recursive = FALSE, use.names = FALSE)
    QrnUnOrd <- do.call(Map, c(c, QrnValid[seq(1, length(QrnValid), 2)]))
    QrnStar <- vector(mode = "list", length = length(a_0))
    for (i in seq_along(a_0)) {
      QrnStar[[i]] <- rep(NA, n)
      QrnStar[[i]][unlist(validRows)] <- QrnUnOrd[[i]]
    }
    # obtain list of propensity score fits
    QrnMod <- QrnValid[seq(2, length(QrnValid), 2)]
    # compute influence function for fluctuated estimators
    DngoStar <- eval_Diptw_g(
      A = A, DeltaA = DeltaA, DeltaY = DeltaY,
      Qrn = QrnStar, gn = gnStar, a_0 = a_0
    )
    PnDgnStar <- future.apply::future_lapply(DngoStar, mean)
    if (verbose) {
      cat("Mean of IC =", round(unlist(PnDgnStar), 10), "\n")
    }
  }
  # compute final tmle-iptw estimate
  # compute iptw estimator
  # Same sentinel-based IPTW mean as above, now with the targeted gnStar.
  psi_nStar <- mapply(
    a = split(a_0, seq_along(a_0)), g = gnStar,
    function(a, g) {
      modA <- A
      modA[is.na(A)] <- -999
      modY <- Y
      modY[is.na(Y)] <- -999
      mean(as.numeric(modA == a & DeltaA == 1 &
                        DeltaY == 1) / g * modY)
    }
  )
  # compute variance estimators
  # original influence function
  DnoStar <- eval_Diptw(
    A = A, Y = Y, DeltaA = DeltaA, DeltaY = DeltaY,
    gn = gnStar, psi_n = psi_nStar, a_0 = a_0
  )
  # covariance for tmle iptw
  # Empirical covariance of the estimated influence function, scaled
  # by n, giving the covariance matrix of the point estimates.
  DnoStarMat <- matrix(
    unlist(DnoStar) - unlist(DngoStar),
    nrow = n,
    ncol = length(a_0)
  )
  cov.t <- stats::cov(DnoStarMat) / n
  # covariate for one-step iptw
  DnoMat <- matrix(unlist(Dno) - unlist(Dngo), nrow = n, ncol = length(a_0))
  cov.os <- stats::cov(DnoMat) / n
  # output
  out <- list(
    iptw_tmle = list(est = unlist(psi_nStar), cov = cov.t),
    iptw_tmle_nuisance = list(gn = gnStar, QrnStar = QrnStar),
    iptw_os = list(est = unlist(psi.o), cov = cov.os),
    iptw_os_nuisance = list(gn = gn, Qrn = Qrn),
    iptw = list(est = unlist(psi_n)),
    gnMod = NULL, QrnMod = NULL, a_0 = a_0, call = call
  )
  # Attach fitted nuisance models only on request (they can be large).
  if (returnModels) {
    out$gnMod <- gnMod
    out$QrnMod <- QrnMod
  }
  class(out) <- "adaptive_iptw"
  return(out)
}
|
/R/adaptive_iptw.R
|
permissive
|
kingfish777/drtmle
|
R
| false
| false
| 14,114
|
r
|
#' Compute asymptotically linear IPTW estimators with super learning
#' for the propensity score
#'
#' @param W A \code{data.frame} of named covariates
#' @param A A \code{numeric} vector of binary treatment assignment (assumed to
#' be equal to 0 or 1)
#' @param Y A \code{numeric} numeric of continuous or binary outcomes.
#' @param DeltaY A \code{numeric} indicator of missing outcome (assumed to be
#' equal to 0 if missing 1 if observed)
#' @param DeltaA A \code{numeric} indicator of missing treatment (assumed to be
#' equal to 0 if missing 1 if observed)
#' @param a_0 A vector of \code{numeric} treatment values at which to return
#' marginal mean estimates.
#' @param stratify A \code{logical} indicating whether to estimate the missing
#' outcome regression separately for observations with different levels of
#' \code{A} (if \code{TRUE}) or to pool across \code{A} (if \code{FALSE}).
#' @param family A \code{family} object equal to either \code{binomial()} or
#' \code{gaussian()}, to be passed to the \code{SuperLearner} or \code{glm}
#' function.
#' @param SL_g A vector of characters describing the super learner library to be
#' used for each of the propensity score regressions (\code{DeltaA}, \code{A},
#' and \code{DeltaY}). To use the same library for each of the regressions (or
#' if there is no missing data in \code{A} nor \code{Y}), a single library may
#' be input. See \code{link{SuperLearner::SuperLearner}} for details on how
#' super learner libraries can be specified.
#' @param SL_Qr A vector of characters or a list describing the Super Learner
#' library to be used for the reduced-dimension outcome regression.
#' @param glm_g A list of characters describing the formulas to be used
#' for each of the propensity score regressions (\code{DeltaA}, \code{A}, and
#' \code{DeltaY}). To use the same formula for each of the regressions (or if
#' there is no missing data in \code{A} nor \code{Y}), a single character
#' formula may be input.
#' @param glm_Qr A character describing a formula to be used in the call to
#' \code{glm} for reduced-dimension outcome regression. Ignored if
#' \code{SL_Qr!=NULL}. The formula should use the variable name \code{'gn'}.
#' @param maxIter A numeric that sets the maximum number of iterations the TMLE
#' can perform in its fluctuation step.
#' @param tolIC A numeric that defines the stopping criteria based on the
#' empirical mean of the influence function.
#' @param tolg A numeric indicating the minimum value for estimates of the
#' propensity score.
#' @param verbose A logical indicating whether to print status updates.
#' @param returnModels A logical indicating whether to return model fits for the
#' propensity score and reduced-dimension regressions.
#' @param cvFolds A numeric equal to the number of folds to be used in
#' cross-validated fitting of nuisance parameters. If \code{cvFolds = 1}, no
#' cross-validation is used.
#' @param parallel A logical indicating whether to use parallelization based on
#' \code{future} to estimate nuisance parameters in parallel. Only useful if
#' \code{cvFolds > 1}. By default, a \code{multiprocess} evaluation scheme is
#' invoked, using forked R processes (if supported on the OS) and background R
#' sessions otherwise. Users may also register their own backends using the
#' \code{future.batchtools} package.
#' @param future_hpc A character string identifying a high-performance computing
#' backend to be used with parallelization. This should match exactly one of
#' the options available from the \code{future.batchtools} package.
#' @param gn An optional list of propensity score estimates. If specified, the
#' function will ignore the nuisance parameter estimation specified by
#' \code{SL_g} and \code{glm_g}. The entries in the list should correspond to
#' the propensity for the observed values of \code{W}, with order determined by
#' the input to \code{a_0} (e.g., if \code{a_0 = c(0,1)} then \code{gn[[1]]}
#' should be propensity of \code{A} = 0 and \code{gn[[2]]} should be propensity
#' of \code{A} = 1).
#' @param ... Other options (not currently used).
#' @return An object of class \code{"adaptive_iptw"}.
#' \describe{
#' \item{\code{iptw_tmle}}{A \code{list} of point estimates and
#' covariance matrix for the IPTW estimator based on a targeted
#' propensity score. }
#' \item{\code{iptw_tmle_nuisance}}{A \code{list} of the final TMLE estimates
#' of the propensity score (\code{$gnStar}) and reduced-dimension
#' regression (\code{$QrnStar}) evaluated at the observed data values.}
#' \item{\code{iptw_os}}{A \code{list} of point estimates and covariance matrix
#' for the one-step correct IPTW estimator.}
#' \item{\code{iptw_os_nuisance}}{A \code{list} of the initial estimates of the
#' propensity score and reduced-dimension regression evaluated at the
#' observed data values.}
#' \item{\code{iptw}}{A \code{list} of point estimates for the standard IPTW
#' estimator. No estimate of the covariance matrix is provided because
#' theory does not support asymptotic Normality of the IPTW estimator if
#' super learning is used to estimate the propensity score.}
#' \item{\code{gnMod}}{The fitted object for the propensity score. Returns
#' \code{NULL} if \code{returnModels = FALSE}.}
#' \item{\code{QrnMod}}{The fitted object for the reduced-dimension regression
#' that guards against misspecification of the outcome regression.
#' Returns \code{NULL} if \code{returnModels = FALSE}.}
#' \item{\code{a_0}}{The treatment levels that were requested for computation
#' of covariate-adjusted means.}
#' \item{\code{call}}{The call to \code{adaptive_iptw}.}
#' }
#'
#' @importFrom future plan
#' @importFrom future.apply future_lapply
#' @importFrom doFuture registerDoFuture
#' @importFrom stats cov
#'
#' @export
#'
#' @examples
#' # load super learner
#' library(SuperLearner)
#' # simulate data
#' set.seed(123456)
#' n <- 100
#' W <- data.frame(W1 = runif(n), W2 = rnorm(n))
#' A <- rbinom(n, 1, plogis(W$W1 - W$W2))
#' Y <- rbinom(n, 1, plogis(W$W1 * W$W2 * A))
#' # fit iptw with maxIter = 1 to run fast
#' \donttest{
#' fit1 <- adaptive_iptw(
#' W = W, A = A, Y = Y, a_0 = c(1, 0),
#' SL_g = c("SL.glm", "SL.mean", "SL.step"),
#' SL_Qr = "SL.npreg", maxIter = 1
#' )
#' }
adaptive_iptw <- function(W, A, Y,
DeltaY = as.numeric(!is.na(Y)),
DeltaA = as.numeric(!is.na(A)),
stratify = FALSE,
family = if (all(Y %in% c(0, 1))) {
stats::binomial()
} else {
stats::gaussian()
},
a_0 = unique(A[!is.na(A)]),
SL_g = NULL, glm_g = NULL,
SL_Qr = NULL, glm_Qr = NULL,
returnModels = TRUE,
verbose = FALSE,
maxIter = 2,
tolIC = 1 / length(Y),
tolg = 1e-2,
cvFolds = 1,
parallel = FALSE,
future_hpc = NULL,
gn = NULL,
...) {
call <- match.call()
# if cvFolds non-null split data into cvFolds pieces
n <- length(Y)
if (cvFolds != 1) {
validRows <- split(sample(seq_len(n)), rep(1:cvFolds, length = n))
} else {
validRows <- list(seq_len(n))
}
# use futures with foreach if parallel mode
if (!parallel) {
future::plan(future::transparent)
} else {
doFuture::registerDoFuture()
if (all(c("sequential", "uniprocess") %in% class(future::plan())) &
is.null(future_hpc)) {
future::plan(future::multiprocess)
} else if (!is.null(future_hpc)) {
if (future_hpc == "batchtools_torque") {
future::plan(future.batchtools::batchtools_torque)
} else if (future_hpc == "batchtools_slurm") {
future::plan(future.batchtools::batchtools_slurm)
} else if (future_hpc == "batchtools_sge") {
future::plan(future.batchtools::batchtools_sge)
} else if (future_hpc == "batchtools_lsf") {
future::plan(future.batchtools::batchtools_lsf)
} else if (future_hpc == "batchtools_openlava") {
future::plan(future.batchtools::batchtools_openlava)
} else {
stop("The currently specified HPC backend is not (yet) available.")
}
}
}
# -------------------------------
# estimate propensity score
# -------------------------------
if (is.null(gn)) {
gnOut <- future.apply::future_lapply(
X = validRows, FUN = estimateG,
A = A, W = W,
DeltaA = DeltaA, DeltaY = DeltaY,
tolg = tolg, verbose = verbose,
returnModels = returnModels,
SL_g = SL_g, glm_g = glm_g,
a_0 = a_0, stratify = stratify
)
# re-order predictions
gnValid <- unlist(gnOut, recursive = FALSE, use.names = FALSE)
gnUnOrd <- do.call(Map, c(c, gnValid[seq(1, length(gnValid), 2)]))
gn <- vector(mode = "list", length = length(a_0))
for (i in seq_along(a_0)) {
gn[[i]] <- rep(NA, n)
gn[[i]][unlist(validRows)] <- gnUnOrd[[i]]
}
# obtain list of propensity score fits
gnMod <- gnValid[seq(2, length(gnValid), 2)]
} else {
gnMod <- NULL
}
# compute iptw estimator
psi_n <- mapply(a = split(a_0, seq_along(a_0)), g = gn, function(a, g) {
modA <- A
modA[is.na(A)] <- -999
modY <- Y
modY[is.na(Y)] <- -999
mean(as.numeric(modA == a & DeltaA == 1 & DeltaY == 1) / g * modY)
})
# estimate influence function
Dno <- eval_Diptw(
A = A, Y = Y, DeltaA = DeltaA, DeltaY = DeltaY, gn = gn,
psi_n = psi_n, a_0 = a_0
)
# -------------------------------------
# estimate reduced dimension Q
# -------------------------------------
# note that NULL is input to estimateQrn -- internally the function
# assign Qn = 0 for all a_0 because estimateQrn estimates the regression
# of Y - Qn on gn (which is needed for drtmle), while here we just need
# the regression of Y on gn.
QrnOut <- future.apply::future_lapply(
X = validRows, FUN = estimateQrn,
Y = Y, A = A, W = W,
DeltaA = DeltaA, DeltaY = DeltaY,
Qn = NULL, gn = gn, glm_Qr = glm_Qr,
family = family, SL_Qr = SL_Qr, a_0 = a_0,
returnModels = returnModels
)
# re-order predictions
QrnValid <- unlist(QrnOut, recursive = FALSE, use.names = FALSE)
QrnUnOrd <- do.call(Map, c(c, QrnValid[seq(1, length(QrnValid), 2)]))
Qrn <- vector(mode = "list", length = length(a_0))
for (i in seq_along(a_0)) {
Qrn[[i]] <- rep(NA, n)
Qrn[[i]][unlist(validRows)] <- QrnUnOrd[[i]]
}
# obtain list of propensity score fits
QrnMod <- QrnValid[seq(2, length(QrnValid), 2)]
Dngo <- eval_Diptw_g(
A = A, DeltaA = DeltaA, DeltaY = DeltaY,
Qrn = Qrn, gn = gn, a_0 = a_0
)
PnDgn <- lapply(Dngo, mean)
# one-step iptw estimator
psi.o <- mapply(
a = psi_n, b = PnDgn, SIMPLIFY = FALSE,
FUN = function(a, b) {
a - b
}
)
# targeted g estimator
gnStar <- gn
QrnStar <- Qrn
PnDgnStar <- Inf
ct <- 0
# fluctuate
while (max(abs(unlist(PnDgnStar))) > tolIC & ct < maxIter) {
ct <- ct + 1
# fluctuate gnStar
gnStarOut <- fluctuateG(
Y = Y, A = A, W = W, DeltaA = DeltaA,
DeltaY = DeltaY, a_0 = a_0, tolg = tolg,
gn = gnStar, Qrn = QrnStar
)
gnStar <- lapply(gnStarOut, function(x) {
unlist(x$est)
})
# re-estimate reduced dimension regression
QrnStarOut <- future.apply::future_lapply(
X = validRows, FUN = estimateQrn,
Y = Y, A = A, W = W,
DeltaA = DeltaA, DeltaY = DeltaY,
Qn = NULL, gn = gnStar,
glm_Qr = glm_Qr, family = family,
SL_Qr = SL_Qr, a_0 = a_0,
returnModels = returnModels
)
# re-order predictions
QrnValid <- unlist(QrnStarOut, recursive = FALSE, use.names = FALSE)
QrnUnOrd <- do.call(Map, c(c, QrnValid[seq(1, length(QrnValid), 2)]))
QrnStar <- vector(mode = "list", length = length(a_0))
for (i in seq_along(a_0)) {
QrnStar[[i]] <- rep(NA, n)
QrnStar[[i]][unlist(validRows)] <- QrnUnOrd[[i]]
}
# obtain list of propensity score fits
QrnMod <- QrnValid[seq(2, length(QrnValid), 2)]
# compute influence function for fluctuated estimators
DngoStar <- eval_Diptw_g(
A = A, DeltaA = DeltaA, DeltaY = DeltaY,
Qrn = QrnStar, gn = gnStar, a_0 = a_0
)
PnDgnStar <- future.apply::future_lapply(DngoStar, mean)
if (verbose) {
cat("Mean of IC =", round(unlist(PnDgnStar), 10), "\n")
}
}
# compute final tmle-iptw estimate
# compute iptw estimator
psi_nStar <- mapply(
a = split(a_0, seq_along(a_0)), g = gnStar,
function(a, g) {
modA <- A
modA[is.na(A)] <- -999
modY <- Y
modY[is.na(Y)] <- -999
mean(as.numeric(modA == a & DeltaA == 1 &
DeltaY == 1) / g * modY)
}
)
# compute variance estimators
# original influence function
DnoStar <- eval_Diptw(
A = A, Y = Y, DeltaA = DeltaA, DeltaY = DeltaY,
gn = gnStar, psi_n = psi_nStar, a_0 = a_0
)
# covariance for tmle iptw
DnoStarMat <- matrix(
unlist(DnoStar) - unlist(DngoStar),
nrow = n,
ncol = length(a_0)
)
cov.t <- stats::cov(DnoStarMat) / n
# covariate for one-step iptw
DnoMat <- matrix(unlist(Dno) - unlist(Dngo), nrow = n, ncol = length(a_0))
cov.os <- stats::cov(DnoMat) / n
# output
out <- list(
iptw_tmle = list(est = unlist(psi_nStar), cov = cov.t),
iptw_tmle_nuisance = list(gn = gnStar, QrnStar = QrnStar),
iptw_os = list(est = unlist(psi.o), cov = cov.os),
iptw_os_nuisance = list(gn = gn, Qrn = Qrn),
iptw = list(est = unlist(psi_n)),
gnMod = NULL, QrnMod = NULL, a_0 = a_0, call = call
)
if (returnModels) {
out$gnMod <- gnMod
out$QrnMod <- QrnMod
}
class(out) <- "adaptive_iptw"
return(out)
}
|
\name{standardise}
\alias{standardise}
\title{Standardization of microarray data for clustering.}
\description{Standardisation of the expression values of every gene
is performed, so that the average expression value for each gene
is zero and the standard deviation is one.}
\usage{standardise(eset)}
\arguments{\item{eset}{object of the class \emph{ExpressionSet}.}
}
\value{The function produces an object of the ExpressionSet class with
standardised expression values.}
\author{Matthias E. Futschik (\url{http://itb.biologie.hu-berlin.de/~futschik})}
\examples{
if (interactive()){
data(yeast)
# Data pre-processing
yeastF <- filter.NA(yeast)
yeastF <- fill.NA(yeastF)
yeastF <- standardise(yeastF)
# Soft clustering and visualisation
cl <- mfuzz(yeastF,c=20,m=1.25)
mfuzz.plot(yeastF,cl=cl,mfrow=c(4,5))
}
}
\keyword{utilities}
|
/man/standardise.Rd
|
no_license
|
iansealy/Mfuzz
|
R
| false
| false
| 845
|
rd
|
\name{standardise}
\alias{standardise}
\title{Standardization of microarray data for clustering.}
\description{Standardisation of the expression values of every gene
is performed, so that the average expression value for each gene
is zero and the standard deviation is one.}
\usage{standardise(eset)}
\arguments{\item{eset}{object of the class \emph{ExpressionSet}.}
}
\value{The function produces an object of the ExpressionSet class with
standardised expression values.}
\author{Matthias E. Futschik (\url{http://itb.biologie.hu-berlin.de/~futschik})}
\examples{
if (interactive()){
data(yeast)
# Data pre-processing
yeastF <- filter.NA(yeast)
yeastF <- fill.NA(yeastF)
yeastF <- standardise(yeastF)
# Soft clustering and visualisation
cl <- mfuzz(yeastF,c=20,m=1.25)
mfuzz.plot(yeastF,cl=cl,mfrow=c(4,5))
}
}
\keyword{utilities}
|
# These functions are the go-betweens between parsnip::fit (or parsnip::fit_xy)
# and the underlying model function (such as ranger::ranger). So if `fit_xy()` is
# used to fit a ranger model, there needs to be a conversion from x/y format
# data to formula/data objects and so on.
#' @importFrom stats model.frame model.response terms as.formula model.matrix
# Fit a model spec via the formula interface to an engine that also takes a
# formula (formula in -> formula out).
#
# Args:
#   object:  a parsnip model specification.
#   control: fit control options (verbosity, error catching).
#   env:     environment holding `formula` and `data`; mutated in place.
#   ...:     passed through to `eval_mod()`.
#
# Returns a list with `lvl` (outcome factor levels or NULL), `spec` (the
# translated spec), `fit` (the engine fit result), and `preproc` (NA here,
# since no x/y conversion was needed).
form_form <-
  function(object, control, env, ...) {
    if (object$mode == "classification") {
      # prob rewrite this as simple subset/levels
      y_levels <- levels_from_formula(env$formula, env$data)
      # Spark data cannot be inspected locally, so the factor-outcome check
      # is only enforced for non-Spark data.
      if (!inherits(env$data, "tbl_spark") && is.null(y_levels))
        stop("For classification models, the outcome should be a factor.",
             call. = FALSE)
    } else {
      y_levels <- NULL
    }
    object <- check_mode(object, y_levels)
    # if descriptors are needed, update descr_env with the calculated values
    if (requires_descrs(object)) {
      data_stats <- get_descr_form(env$formula, env$data)
      scoped_descrs(data_stats)
    }
    # evaluate quoted args once here to check them
    object <- check_args(object)
    # sub in arguments to actual syntax for corresponding engine
    object <- translate(object, engine = object$engine)
    fit_args <- object$method$fit$args
    # Spark engines take the data as `x`; everything else takes `data`.
    # The quoted symbols are resolved later inside `env` by eval_mod().
    if (is_spark(object)) {
      fit_args$x <- quote(x)
      env$x <- env$data
    } else {
      fit_args$data <- quote(data)
    }
    fit_args$formula <- quote(formula)
    fit_call <- make_call(
      fun = object$method$fit$func["fun"],
      ns = object$method$fit$func["pkg"],
      fit_args
    )
    res <- list(
      lvl = y_levels,
      spec = object
    )
    # Evaluate the constructed engine call inside `env`, optionally capturing
    # output and/or catching errors according to the control settings.
    res$fit <- eval_mod(
      fit_call,
      capture = control$verbosity == 0,
      catch = control$catch,
      env = env,
      ...
    )
    res$preproc <- NA
    res
  }
# Fit a model spec via the x/y interface to an engine that also takes x/y
# (x/y in -> x/y out).
#
# Args:
#   object:  a parsnip model specification.
#   env:     environment holding `x` and `y`.
#   control: fit control options.
#   target:  representation the engine wants for `x`: "none" (as is),
#            "data.frame", or "matrix".
#   ...:     passed through to `eval_mod()`.
#
# Returns a list with `lvl`, `spec`, `fit`, and `preproc` (NA).
xy_xy <- function(object, env, control, target = "none", ...) {
  if (inherits(env$x, "tbl_spark") | inherits(env$y, "tbl_spark"))
    stop("spark objects can only be used with the formula interface to `fit()`",
         call. = FALSE)
  object <- check_mode(object, levels(env$y))
  if (object$mode == "classification") {
    if (is.null(levels(env$y)))
      stop("For classification models, the outcome should be a factor.",
           call. = FALSE)
  }
  # if descriptors are needed, update descr_env with the calculated values
  # NOTE(review): this reads `env$formula`/`env$data`, but in the direct x/y
  # path `env` typically carries only `x`/`y` -- confirm whether an
  # x/y-based descriptor helper was intended here.
  if (requires_descrs(object)) {
    data_stats <- get_descr_form(env$formula, env$data)
    scoped_descrs(data_stats)
  }
  # evaluate quoted args once here to check them
  object <- check_args(object)
  # sub in arguments to actual syntax for corresponding engine
  object <- translate(object, engine = object$engine)
  object$method$fit$args[["y"]] <- quote(y)
  # Coerce `x` at fit time to whatever representation the engine expects.
  object$method$fit$args[["x"]] <-
    switch(
      target,
      none = quote(x),
      data.frame = quote(as.data.frame(x)),
      matrix = quote(as.matrix(x)),
      stop("Invalid data type target: ", target)
    )
  fit_call <- make_call(
    fun = object$method$fit$func["fun"],
    ns = object$method$fit$func["pkg"],
    object$method$fit$args
  )
  res <- list(lvl = levels(env$y), spec = object)
  # Evaluate the constructed engine call inside `env`.
  res$fit <- eval_mod(
    fit_call,
    capture = control$verbosity == 0,
    catch = control$catch,
    env = env,
    ...
  )
  res$preproc <- NA
  res
}
# Fit via the formula interface when the engine wants x/y: convert
# formula+data into x/y, then delegate to xy_xy()
# (formula in -> x/y out).
form_xy <- function(object, control, env,
                    target = "none", ...) {
  data_obj <- convert_form_to_xy_fit(
    formula = env$formula,
    data = env$data,
    ...,
    composition = target
    # indicators
  )
  env$x <- data_obj$x
  env$y <- data_obj$y
  # NOTE(review): this `res` is only used for the factor-outcome check
  # below; it is overwritten by the xy_xy() result afterwards.
  res <- list(lvl = levels_from_formula(env$formula, env$data), spec = object)
  if (object$mode == "classification") {
    if (is.null(res$lvl))
      stop("For classification models, the outcome should be a factor.",
           call. = FALSE)
  }
  res <- xy_xy(
    object = object,
    env = env, #weights! offsets!
    control = control,
    target = target
  )
  # Keep only the preprocessing metadata (terms, factor levels, etc.);
  # the raw data and weights/offsets are dropped from the stored result.
  data_obj$x <- NULL
  data_obj$y <- NULL
  data_obj$weights <- NULL
  data_obj$offset <- NULL
  res$preproc <- data_obj
  res
}
# Fit via the x/y interface when the engine wants a formula: build a
# formula+data pair from x/y, then delegate to form_form()
# (x/y in -> formula out).
xy_form <- function(object, env, control, ...) {
  if (object$mode == "classification") {
    if (is.null(levels(env$y)))
      stop("For classification models, the outcome should be a factor.",
           call. = FALSE)
  }
  # The synthetic outcome column is named `..y` to avoid clashing with
  # predictor names in `x`.
  data_obj <-
    convert_xy_to_form_fit(
      x = env$x,
      y = env$y,
      weights = NULL,
      y_name = "..y"
    )
  env$formula <- data_obj$formula
  env$data <- data_obj$data
  # which terms etc goes in the preproc slot here?
  res <- form_form(
    object = object,
    env = env,
    control = control,
    ...
  )
  res$preproc <- data_obj["x_var"]
  res
}
|
/R/fit_helpers.R
|
no_license
|
conradbm/parsnip
|
R
| false
| false
| 4,680
|
r
|
# These functions are the go-betweens between parsnip::fit (or parsnip::fit_xy)
# and the underlying model function (such as ranger::ranger). So if `fit_xy()` is
# used to fit a ranger model, there needs to be a conversion from x/y format
# data to formula/data objects and so on.
#' @importFrom stats model.frame model.response terms as.formula model.matrix
# Fit a model spec via the formula interface to an engine that also takes a
# formula (formula in -> formula out).
#
# Args:
#   object:  a parsnip model specification.
#   control: fit control options (verbosity, error catching).
#   env:     environment holding `formula` and `data`; mutated in place.
#   ...:     passed through to `eval_mod()`.
#
# Returns a list with `lvl` (outcome factor levels or NULL), `spec` (the
# translated spec), `fit` (the engine fit result), and `preproc` (NA here,
# since no x/y conversion was needed).
form_form <-
  function(object, control, env, ...) {
    if (object$mode == "classification") {
      # prob rewrite this as simple subset/levels
      y_levels <- levels_from_formula(env$formula, env$data)
      # Spark data cannot be inspected locally, so the factor-outcome check
      # is only enforced for non-Spark data.
      if (!inherits(env$data, "tbl_spark") && is.null(y_levels))
        stop("For classification models, the outcome should be a factor.",
             call. = FALSE)
    } else {
      y_levels <- NULL
    }
    object <- check_mode(object, y_levels)
    # if descriptors are needed, update descr_env with the calculated values
    if (requires_descrs(object)) {
      data_stats <- get_descr_form(env$formula, env$data)
      scoped_descrs(data_stats)
    }
    # evaluate quoted args once here to check them
    object <- check_args(object)
    # sub in arguments to actual syntax for corresponding engine
    object <- translate(object, engine = object$engine)
    fit_args <- object$method$fit$args
    # Spark engines take the data as `x`; everything else takes `data`.
    # The quoted symbols are resolved later inside `env` by eval_mod().
    if (is_spark(object)) {
      fit_args$x <- quote(x)
      env$x <- env$data
    } else {
      fit_args$data <- quote(data)
    }
    fit_args$formula <- quote(formula)
    fit_call <- make_call(
      fun = object$method$fit$func["fun"],
      ns = object$method$fit$func["pkg"],
      fit_args
    )
    res <- list(
      lvl = y_levels,
      spec = object
    )
    # Evaluate the constructed engine call inside `env`, optionally capturing
    # output and/or catching errors according to the control settings.
    res$fit <- eval_mod(
      fit_call,
      capture = control$verbosity == 0,
      catch = control$catch,
      env = env,
      ...
    )
    res$preproc <- NA
    res
  }
# Fit a model spec via the x/y interface to an engine that also takes x/y
# (x/y in -> x/y out).
#
# Args:
#   object:  a parsnip model specification.
#   env:     environment holding `x` and `y`.
#   control: fit control options.
#   target:  representation the engine wants for `x`: "none" (as is),
#            "data.frame", or "matrix".
#   ...:     passed through to `eval_mod()`.
#
# Returns a list with `lvl`, `spec`, `fit`, and `preproc` (NA).
xy_xy <- function(object, env, control, target = "none", ...) {
  if (inherits(env$x, "tbl_spark") | inherits(env$y, "tbl_spark"))
    stop("spark objects can only be used with the formula interface to `fit()`",
         call. = FALSE)
  object <- check_mode(object, levels(env$y))
  if (object$mode == "classification") {
    if (is.null(levels(env$y)))
      stop("For classification models, the outcome should be a factor.",
           call. = FALSE)
  }
  # if descriptors are needed, update descr_env with the calculated values
  # NOTE(review): this reads `env$formula`/`env$data`, but in the direct x/y
  # path `env` typically carries only `x`/`y` -- confirm whether an
  # x/y-based descriptor helper was intended here.
  if (requires_descrs(object)) {
    data_stats <- get_descr_form(env$formula, env$data)
    scoped_descrs(data_stats)
  }
  # evaluate quoted args once here to check them
  object <- check_args(object)
  # sub in arguments to actual syntax for corresponding engine
  object <- translate(object, engine = object$engine)
  object$method$fit$args[["y"]] <- quote(y)
  # Coerce `x` at fit time to whatever representation the engine expects.
  object$method$fit$args[["x"]] <-
    switch(
      target,
      none = quote(x),
      data.frame = quote(as.data.frame(x)),
      matrix = quote(as.matrix(x)),
      stop("Invalid data type target: ", target)
    )
  fit_call <- make_call(
    fun = object$method$fit$func["fun"],
    ns = object$method$fit$func["pkg"],
    object$method$fit$args
  )
  res <- list(lvl = levels(env$y), spec = object)
  # Evaluate the constructed engine call inside `env`.
  res$fit <- eval_mod(
    fit_call,
    capture = control$verbosity == 0,
    catch = control$catch,
    env = env,
    ...
  )
  res$preproc <- NA
  res
}
# Fit via the formula interface when the engine wants x/y: convert
# formula+data into x/y, then delegate to xy_xy()
# (formula in -> x/y out).
form_xy <- function(object, control, env,
                    target = "none", ...) {
  data_obj <- convert_form_to_xy_fit(
    formula = env$formula,
    data = env$data,
    ...,
    composition = target
    # indicators
  )
  env$x <- data_obj$x
  env$y <- data_obj$y
  # NOTE(review): this `res` is only used for the factor-outcome check
  # below; it is overwritten by the xy_xy() result afterwards.
  res <- list(lvl = levels_from_formula(env$formula, env$data), spec = object)
  if (object$mode == "classification") {
    if (is.null(res$lvl))
      stop("For classification models, the outcome should be a factor.",
           call. = FALSE)
  }
  res <- xy_xy(
    object = object,
    env = env, #weights! offsets!
    control = control,
    target = target
  )
  # Keep only the preprocessing metadata (terms, factor levels, etc.);
  # the raw data and weights/offsets are dropped from the stored result.
  data_obj$x <- NULL
  data_obj$y <- NULL
  data_obj$weights <- NULL
  data_obj$offset <- NULL
  res$preproc <- data_obj
  res
}
# Fit via the x/y interface when the engine wants a formula: build a
# formula+data pair from x/y, then delegate to form_form()
# (x/y in -> formula out).
xy_form <- function(object, env, control, ...) {
  if (object$mode == "classification") {
    if (is.null(levels(env$y)))
      stop("For classification models, the outcome should be a factor.",
           call. = FALSE)
  }
  # The synthetic outcome column is named `..y` to avoid clashing with
  # predictor names in `x`.
  data_obj <-
    convert_xy_to_form_fit(
      x = env$x,
      y = env$y,
      weights = NULL,
      y_name = "..y"
    )
  env$formula <- data_obj$formula
  env$data <- data_obj$data
  # which terms etc goes in the preproc slot here?
  res <- form_form(
    object = object,
    env = env,
    control = control,
    ...
  )
  res$preproc <- data_obj["x_var"]
  res
}
|
#' Multiple knockoff path
#'
#' This function generates a path of selected variables using multiple knockoff
#' given the test statistics (kappa, tau)
#'
#' @param kappa A \code{p} vector of test statistics, with kappa_i = 1 indicating the original variable winning
#' @param tau A \code{p} vector of test statistics, showing the magnitude/importance of the variable
#'
#' @return A list of selected variable sets: element \code{k} holds the
#'   indices of the original-winning variables (\code{kappa == 1}) among the
#'   \code{k} largest \code{tau} values.
#'
#' @examples
#' library(cheapknockoff)
#' set.seed(123)
#' n <- 100
#' p <- 30
#' x <- matrix(data = rnorm(n * p), nrow = n, ncol = p)
#' y <- x[, 1] - 2 * x[, 2] + rnorm(n)
#' omega <- c(2, 9, sample(seq(2, 9), size = 28, replace = TRUE))
#' # construct multiple knockoffs
#' X_k <- multiple_knockoff_Gaussian(X = x, mu = rep(0, p), Sigma = diag(1, p), omega = omega)
#' # compute knockoff statistics
#' stat <- cheapknockoff::stat_glmnet_coef(X = x, X_k = X_k, y = y, omega = omega)
#' # yield the path of selected variables
#' path <- cheapknockoff::generate_path(kappa = stat$kappa, tau = stat$tau)
#' @export
generate_path <- function(kappa, tau) {
  # input check: the two statistic vectors must be aligned
  stopifnot(length(kappa) == length(tau))
  p <- length(kappa)
  # tau[ord] is in non-increasing order of importance
  ord <- order(tau, decreasing = TRUE)
  # kp is kappa permuted into that order
  kp <- kappa[ord]
  # Preallocate instead of growing the list in the loop; seq_len() is safe
  # for p == 0 (seq(p) would not be).
  path <- vector("list", p)
  for (k in seq_len(p)) {
    # indices among the top-k tau values where the original variable won
    path[[k]] <- ord[which(kp[1:k] == 1)]
  }
  path
}
|
/R/path.R
|
no_license
|
hugogogo/cheapknockoff
|
R
| false
| false
| 1,444
|
r
|
#' Multiple knockoff path
#'
#' This function generates a path of selected variables using multiple knockoff
#' given the test statistics (kappa, tau)
#'
#' @param kappa A \code{p} vector of test statistics, with kappa_i = 1 indicating the original variable winning
#' @param tau A \code{p} vector of test statistics, showing the magnitude/importance of the variable
#'
#' @return A list of selected variable sets: element \code{k} holds the
#'   indices of the original-winning variables (\code{kappa == 1}) among the
#'   \code{k} largest \code{tau} values.
#'
#' @examples
#' library(cheapknockoff)
#' set.seed(123)
#' n <- 100
#' p <- 30
#' x <- matrix(data = rnorm(n * p), nrow = n, ncol = p)
#' y <- x[, 1] - 2 * x[, 2] + rnorm(n)
#' omega <- c(2, 9, sample(seq(2, 9), size = 28, replace = TRUE))
#' # construct multiple knockoffs
#' X_k <- multiple_knockoff_Gaussian(X = x, mu = rep(0, p), Sigma = diag(1, p), omega = omega)
#' # compute knockoff statistics
#' stat <- cheapknockoff::stat_glmnet_coef(X = x, X_k = X_k, y = y, omega = omega)
#' # yield the path of selected variables
#' path <- cheapknockoff::generate_path(kappa = stat$kappa, tau = stat$tau)
#' @export
generate_path <- function(kappa, tau) {
  # input check: the two statistic vectors must be aligned
  stopifnot(length(kappa) == length(tau))
  p <- length(kappa)
  # tau[ord] is in non-increasing order of importance
  ord <- order(tau, decreasing = TRUE)
  # kp is kappa permuted into that order
  kp <- kappa[ord]
  # Preallocate instead of growing the list in the loop; seq_len() is safe
  # for p == 0 (seq(p) would not be).
  path <- vector("list", p)
  for (k in seq_len(p)) {
    # indices among the top-k tau values where the original variable won
    path[[k]] <- ord[which(kp[1:k] == 1)]
  }
  path
}
|
# Plot 1: histogram of household Global Active Power.
# Downloads the UCI "Individual household electric power consumption" data,
# subsets 2007-02-01 and 2007-02-02, and writes a 480x480 PNG histogram.
library(dplyr)
#get data
url<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url, "hhpowerconsumption.zip")
unzip(zipfile="hhpowerconsumption.zip")
# "?" encodes missing values in this data set
hh = read.delim("household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?", stringsAsFactors=FALSE)
# NOTE(review): strptime() returns POSIXlt; wrapping in as.POSIXct() before
# cbind()-ing into a data frame is the usual convention -- confirm this
# behaves as intended with cbind().
newDateTime = strptime(paste(hh$Date, hh$Time), "%d/%m/%Y %H:%M:%S")
hh2 = cbind(hh, newDateTime)
#subset data by date: 2007-02-01 and 2007-02-02
hh_feb = filter(hh2, between(newDateTime, as.POSIXct("2007-02-01"), as.POSIXct("2007-02-02 23:59:59")))
str(hh_feb)
summary(hh_feb)
#width of 480 pixels and a height of 480 pixels
png("plot1.png", 480, 480)
#plot1: frequency of Global Active Power
hist(hh_feb$Global_active_power, col="red", main="Global Active Power",xlab = "Global Active Power (kilowatts)")
dev.off() ## Don't forget to close the PNG device!
|
/plot1.R
|
no_license
|
mityan99/ExData_Plotting1
|
R
| false
| false
| 884
|
r
|
# Plot 1: histogram of household Global Active Power.
# Downloads the UCI "Individual household electric power consumption" data,
# subsets 2007-02-01 and 2007-02-02, and writes a 480x480 PNG histogram.
library(dplyr)
#get data
url<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url, "hhpowerconsumption.zip")
unzip(zipfile="hhpowerconsumption.zip")
# "?" encodes missing values in this data set
hh = read.delim("household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?", stringsAsFactors=FALSE)
# NOTE(review): strptime() returns POSIXlt; wrapping in as.POSIXct() before
# cbind()-ing into a data frame is the usual convention -- confirm this
# behaves as intended with cbind().
newDateTime = strptime(paste(hh$Date, hh$Time), "%d/%m/%Y %H:%M:%S")
hh2 = cbind(hh, newDateTime)
#subset data by date: 2007-02-01 and 2007-02-02
hh_feb = filter(hh2, between(newDateTime, as.POSIXct("2007-02-01"), as.POSIXct("2007-02-02 23:59:59")))
str(hh_feb)
summary(hh_feb)
#width of 480 pixels and a height of 480 pixels
png("plot1.png", 480, 480)
#plot1: frequency of Global Active Power
hist(hh_feb$Global_active_power, col="red", main="Global Active Power",xlab = "Global Active Power (kilowatts)")
dev.off() ## Don't forget to close the PNG device!
|
#MCMC for Stat221 Pset4
#select the dataset to run
# Job id comes from the command line; jobs > 10 analyze the impala data,
# jobs <= 10 the waterbuck data.
args <- as.numeric(commandArgs(trailingOnly = TRUE))
if(length(args) != 1) {
  args[1] = 1
}
job.id = args[1]
select = 0
if (job.id > 10){
  select = 1
}
library(MASS)
library(scales)
# Draw n binomial observations with size N and success probability theta.
sample.data <- function(n, N, theta) {
  rbinom(n, N, theta)
}
# Load one of the two animal-count data sets from the working directory.
get.data <- function(n){
  #get waterbucks data if n==0
  #get impala data if n==1
  if (n==0){
    return (read.table('waterbuck.txt', header=T)$waterbuck)
  }
  if (n==1){
    return(read.table('impala.txt', header=T)$impala)
  }
}
# Binomial log-likelihood of the sample Y given (N, theta).
log.lik <- function(N, theta, Y) {
  # Log-likelihood of the data
  sum(dbinom(Y, N, theta, log = T))
}
# Improper prior p(N, theta) proportional to 1/N (theta is unused).
log.prior <- function(N, theta) {
  log(1/N)
}
# Unnormalized log posterior = log-likelihood + log-prior.
log.posterior <- function(N, theta, Y) {
  log.lik(N, theta, Y) + log.prior(N, theta)
}
# Propose a new (N, theta): draw a new "success scale" S and a new theta
# from Beta distributions centered at the current values (concentrations
# come from the globals c.s and c.t), then set N from S/theta. theta is
# redrawn until N is admissible (> max(y) and <= 10000).
rpropose <- function(N.old, theta.old, y){
  S.old = N.old * theta.old
  S.new = rbeta(1, theta.old*c.s, c.s-theta.old*c.s)*N.old
  theta.new = rbeta(1, theta.old*c.t, c.t-theta.old*c.t)
  #theta.new = rbeta(1, 6, 6)
  N.new = ceiling(S.new/theta.new)
  while(N.new <= max(y) || N.new > 10000){
    theta.new = rbeta(1, theta.old*c.t, c.t-theta.old*c.t)
    #theta.new = rbeta(1, 6, 6)
    # NOTE(review): the first draw uses ceiling() but redraws use round()
    # -- confirm this asymmetry is intended.
    N.new = round(S.new/theta.new)
  }
  c(N.new, theta.new)
}
# Log proposal density matching rpropose(): Beta log-density of theta.new
# plus Beta log-density of the implied S ratio. Reads globals c.s and c.t.
log.dpropose <- function(N.old, theta.old, N.new, theta.new){
  dbeta(theta.new, theta.old*c.t, c.t-theta.old*c.t, log=T)+
    dbeta(N.new*theta.new/N.old, theta.old*c.s, c.s-theta.old*c.s, log=T)
  #dbeta(theta.new, 6, 6, log=T)+
  # dbeta(N.new*theta.new/N.old, theta.old*c.s, c.s-theta.old*c.s, log=T)
}
# Alternative proposal: draw S as above but draw N from a Poisson centered
# at N.old and set theta = S/N; N is redrawn until the state is valid.
rpropose1 <- function(N.old, theta.old, y){
  S.old = N.old * theta.old
  S.new = rbeta(1, theta.old*c.s, c.s-theta.old*c.s)*N.old
  N.new = rpois(1, N.old)
  #N.new = round(rnorm(1, N.old, 3))
  theta.new = S.new/N.new
  while(N.new <= max(y) || theta.new >= 1){
    N.new = rpois(1, N.old)
    #N.new = round(rnorm(1, N.old, 3))
    theta.new = S.new/N.new
  }
  #theta.new = min(1-1e-10, theta.new)
  c(N.new, theta.new)
}
# Log proposal density matching rpropose1(): Poisson log-density of N.new
# plus Beta log-density of the implied S ratio. Reads the global c.s.
#
# Bug fix: the commented-out dnorm alternative left a stray uncommented
# dbeta() as the function's final expression, so only that term was
# returned and the intended dpois + dbeta sum was silently discarded.
log.dpropose1 <- function(N.old, theta.old, N.new, theta.new){
  dpois(N.new, N.old, log=TRUE) +
    dbeta(N.new*theta.new/N.old, theta.old*c.s, c.s-theta.old*c.s, log=TRUE)
  #dnorm(N.new, N.old, 3, log=T)+
  #  dbeta(N.new*theta.new/N.old, theta.old*c.s, c.s-theta.old*c.s, log=T)
}
# Visualize the joint sample: scatter of (N, theta) after a 30% burn-in,
# overlaid with 2-D kernel density contours (MASS::kde2d). The top decile
# is trimmed before plotting.
plot.chain2 <- function(mcmc.chain){
  mcmc.niters = nrow(mcmc.chain)
  burnin = 0.3 * mcmc.niters
  mcmc.chain = mcmc.chain[burnin:mcmc.niters, ]
  # NOTE(review): this quantile is computed over the whole matrix (both the
  # N and theta columns pooled), yet the filter below applies it to column 1
  # only -- confirm a column-1 quantile wasn't intended.
  cutoff = quantile(mcmc.chain, 0.90)
  mcmc.chain = data.frame(mcmc.chain)
  mcmc.chain = mcmc.chain[which(mcmc.chain$X1 < cutoff),]
  f = kde2d(x=mcmc.chain[, 1], y=mcmc.chain[, 2], n=100)
  # scales::alpha() makes heavily overplotted points translucent
  plot(mcmc.chain$X1, mcmc.chain$X2, col = alpha('black', 0.005), xlab='N', ylab='theta')
  contour(f, col='red', lwd=2.5, add=TRUE)
}
# Metropolis-Hastings sampler for the binomial (N, theta) posterior.
#
# Args:
#   y:           vector of observed counts.
#   mcmc.niters: number of MCMC iterations.
#   rpropose:    function(N.old, theta.old, y) returning c(N.new, theta.new).
#   dpropose:    function(N.old, theta.old, N.new, theta.new) returning
#                the log proposal density q(new | old).
#
# Returns list(chain matrix with columns (N, theta), acceptance percentage).
#
# Bug fix: the `dpropose` argument was accepted but ignored -- the MH ratio
# called the global `log.dpropose` directly. It now uses the supplied
# function; the call site passes dpropose = log.dpropose, so existing
# behavior is unchanged.
mcmc <- function(y, mcmc.niters=1e5, rpropose, dpropose) {
  S = sum(y)
  n = length(y)
  y.max = max(y)
  mcmc.chain <- matrix(nrow=mcmc.niters, ncol=2)
  # Start at roughly twice the sample mean (bounded below by max(y))
  # with theta = 0.5.
  mcmc.chain[1, ] = c(max(ceiling(S/n*2), y.max), 0.5)
  nacc <- 0
  for(i in 2:mcmc.niters) {
    # 1. Current state
    N.old = mcmc.chain[i-1, 1]
    theta.old = mcmc.chain[i-1, 2]
    # 2. Propose new state
    param.new = rpropose(N.old, theta.old, y)
    N.new = param.new[1]
    theta.new = param.new[2]
    # 3. Log MH acceptance ratio (posterior ratio times proposal ratio)
    mh.ratio = min(0, log.posterior(N.new, theta.new, y) -
                     log.posterior(N.old, theta.old, y) +
                     dpropose(N.new, theta.new, N.old, theta.old) -
                     dpropose(N.old, theta.old, N.new, theta.new))
    if(runif(1) < exp(mh.ratio)) {
      # Accept the proposed state
      mcmc.chain[i, ] <- c(N.new, theta.new)
      nacc <- nacc + 1
    } else {
      # Reject: keep the current state
      mcmc.chain[i, ] <- c(N.old, theta.old)
    }
  }
  print(sprintf("Acceptance ratio %.2f%%", 100 * nacc / mcmc.niters))
  #plot.chain2(mcmc.chain)
  return(list(mcmc.chain, 100 * nacc / mcmc.niters))
}
# Proposal concentration parameters (globals read by rpropose/log.dpropose).
c.s = 1000
c.t = 400
data = get.data(select)
# Run 1e6 MH iterations with the Beta/Beta proposal.
mcmc.chain = mcmc(data,mcmc.niters=1e6,rpropose = rpropose, dpropose = log.dpropose)
# Save the posterior scatter/contour plot and the chain, keyed by job id.
jpeg(filename=sprintf("mcmc_job_%d.jpg", job.id), width=900, height=600)
plot.chain2(mcmc.chain[[1]])
dev.off()
accept = mcmc.chain[[2]]
mcmc.chain = mcmc.chain[[1]]
save(accept, mcmc.chain, file=sprintf("mcmc_job_%d.rda", job.id))
|
/HW4/tianlan_mcmc.R
|
no_license
|
lantian2012/STAT221
|
R
| false
| false
| 4,212
|
r
|
#MCMC for Stat221 Pset4
#select the dataset to run
# Job id comes from the command line; jobs > 10 analyze the impala data,
# jobs <= 10 the waterbuck data.
args <- as.numeric(commandArgs(trailingOnly = TRUE))
if(length(args) != 1) {
  args[1] = 1
}
job.id = args[1]
select = 0
if (job.id > 10){
  select = 1
}
library(MASS)
library(scales)
# Draw n binomial observations with size N and success probability theta.
sample.data <- function(n, N, theta) {
  rbinom(n, N, theta)
}
# Load one of the two animal-count data sets from the working directory.
get.data <- function(n){
  #get waterbucks data if n==0
  #get impala data if n==1
  if (n==0){
    return (read.table('waterbuck.txt', header=T)$waterbuck)
  }
  if (n==1){
    return(read.table('impala.txt', header=T)$impala)
  }
}
# Binomial log-likelihood of the sample Y given (N, theta).
log.lik <- function(N, theta, Y) {
  # Log-likelihood of the data
  sum(dbinom(Y, N, theta, log = T))
}
# Improper prior p(N, theta) proportional to 1/N (theta is unused).
log.prior <- function(N, theta) {
  log(1/N)
}
# Unnormalized log posterior = log-likelihood + log-prior.
log.posterior <- function(N, theta, Y) {
  log.lik(N, theta, Y) + log.prior(N, theta)
}
# Propose a new (N, theta): draw a new "success scale" S and a new theta
# from Beta distributions centered at the current values (concentrations
# come from the globals c.s and c.t), then set N from S/theta. theta is
# redrawn until N is admissible (> max(y) and <= 10000).
rpropose <- function(N.old, theta.old, y){
  S.old = N.old * theta.old
  S.new = rbeta(1, theta.old*c.s, c.s-theta.old*c.s)*N.old
  theta.new = rbeta(1, theta.old*c.t, c.t-theta.old*c.t)
  #theta.new = rbeta(1, 6, 6)
  N.new = ceiling(S.new/theta.new)
  while(N.new <= max(y) || N.new > 10000){
    theta.new = rbeta(1, theta.old*c.t, c.t-theta.old*c.t)
    #theta.new = rbeta(1, 6, 6)
    # NOTE(review): the first draw uses ceiling() but redraws use round()
    # -- confirm this asymmetry is intended.
    N.new = round(S.new/theta.new)
  }
  c(N.new, theta.new)
}
# Log proposal density matching rpropose(): Beta log-density of theta.new
# plus Beta log-density of the implied S ratio. Reads globals c.s and c.t.
log.dpropose <- function(N.old, theta.old, N.new, theta.new){
  dbeta(theta.new, theta.old*c.t, c.t-theta.old*c.t, log=T)+
    dbeta(N.new*theta.new/N.old, theta.old*c.s, c.s-theta.old*c.s, log=T)
  #dbeta(theta.new, 6, 6, log=T)+
  # dbeta(N.new*theta.new/N.old, theta.old*c.s, c.s-theta.old*c.s, log=T)
}
# Alternative proposal: draw S as above but draw N from a Poisson centered
# at N.old and set theta = S/N; N is redrawn until the state is valid.
rpropose1 <- function(N.old, theta.old, y){
  S.old = N.old * theta.old
  S.new = rbeta(1, theta.old*c.s, c.s-theta.old*c.s)*N.old
  N.new = rpois(1, N.old)
  #N.new = round(rnorm(1, N.old, 3))
  theta.new = S.new/N.new
  while(N.new <= max(y) || theta.new >= 1){
    N.new = rpois(1, N.old)
    #N.new = round(rnorm(1, N.old, 3))
    theta.new = S.new/N.new
  }
  #theta.new = min(1-1e-10, theta.new)
  c(N.new, theta.new)
}
# Log proposal density matching rpropose1(): Poisson log-density of N.new
# plus Beta log-density of the implied S ratio. Reads the global c.s.
#
# Bug fix: the commented-out dnorm alternative left a stray uncommented
# dbeta() as the function's final expression, so only that term was
# returned and the intended dpois + dbeta sum was silently discarded.
log.dpropose1 <- function(N.old, theta.old, N.new, theta.new){
  dpois(N.new, N.old, log=TRUE) +
    dbeta(N.new*theta.new/N.old, theta.old*c.s, c.s-theta.old*c.s, log=TRUE)
  #dnorm(N.new, N.old, 3, log=T)+
  #  dbeta(N.new*theta.new/N.old, theta.old*c.s, c.s-theta.old*c.s, log=T)
}
# Visualize the joint sample: scatter of (N, theta) after a 30% burn-in,
# overlaid with 2-D kernel density contours (MASS::kde2d). The top decile
# is trimmed before plotting.
plot.chain2 <- function(mcmc.chain){
  mcmc.niters = nrow(mcmc.chain)
  burnin = 0.3 * mcmc.niters
  mcmc.chain = mcmc.chain[burnin:mcmc.niters, ]
  # NOTE(review): this quantile is computed over the whole matrix (both the
  # N and theta columns pooled), yet the filter below applies it to column 1
  # only -- confirm a column-1 quantile wasn't intended.
  cutoff = quantile(mcmc.chain, 0.90)
  mcmc.chain = data.frame(mcmc.chain)
  mcmc.chain = mcmc.chain[which(mcmc.chain$X1 < cutoff),]
  f = kde2d(x=mcmc.chain[, 1], y=mcmc.chain[, 2], n=100)
  # scales::alpha() makes heavily overplotted points translucent
  plot(mcmc.chain$X1, mcmc.chain$X2, col = alpha('black', 0.005), xlab='N', ylab='theta')
  contour(f, col='red', lwd=2.5, add=TRUE)
}
# Metropolis-Hastings sampler for the binomial (N, theta) posterior.
#
# Args:
#   y:           vector of observed counts.
#   mcmc.niters: number of MCMC iterations.
#   rpropose:    function(N.old, theta.old, y) returning c(N.new, theta.new).
#   dpropose:    function(N.old, theta.old, N.new, theta.new) returning
#                the log proposal density q(new | old).
#
# Returns list(chain matrix with columns (N, theta), acceptance percentage).
#
# Bug fix: the `dpropose` argument was accepted but ignored -- the MH ratio
# called the global `log.dpropose` directly. It now uses the supplied
# function; the call site passes dpropose = log.dpropose, so existing
# behavior is unchanged.
mcmc <- function(y, mcmc.niters=1e5, rpropose, dpropose) {
  S = sum(y)
  n = length(y)
  y.max = max(y)
  mcmc.chain <- matrix(nrow=mcmc.niters, ncol=2)
  # Start at roughly twice the sample mean (bounded below by max(y))
  # with theta = 0.5.
  mcmc.chain[1, ] = c(max(ceiling(S/n*2), y.max), 0.5)
  nacc <- 0
  for(i in 2:mcmc.niters) {
    # 1. Current state
    N.old = mcmc.chain[i-1, 1]
    theta.old = mcmc.chain[i-1, 2]
    # 2. Propose new state
    param.new = rpropose(N.old, theta.old, y)
    N.new = param.new[1]
    theta.new = param.new[2]
    # 3. Log MH acceptance ratio (posterior ratio times proposal ratio)
    mh.ratio = min(0, log.posterior(N.new, theta.new, y) -
                     log.posterior(N.old, theta.old, y) +
                     dpropose(N.new, theta.new, N.old, theta.old) -
                     dpropose(N.old, theta.old, N.new, theta.new))
    if(runif(1) < exp(mh.ratio)) {
      # Accept the proposed state
      mcmc.chain[i, ] <- c(N.new, theta.new)
      nacc <- nacc + 1
    } else {
      # Reject: keep the current state
      mcmc.chain[i, ] <- c(N.old, theta.old)
    }
  }
  print(sprintf("Acceptance ratio %.2f%%", 100 * nacc / mcmc.niters))
  #plot.chain2(mcmc.chain)
  return(list(mcmc.chain, 100 * nacc / mcmc.niters))
}
# Proposal concentration parameters (globals read by rpropose/log.dpropose).
c.s = 1000
c.t = 400
data = get.data(select)
# Run 1e6 MH iterations with the Beta/Beta proposal.
mcmc.chain = mcmc(data,mcmc.niters=1e6,rpropose = rpropose, dpropose = log.dpropose)
# Save the posterior scatter/contour plot and the chain, keyed by job id.
jpeg(filename=sprintf("mcmc_job_%d.jpg", job.id), width=900, height=600)
plot.chain2(mcmc.chain[[1]])
dev.off()
accept = mcmc.chain[[2]]
mcmc.chain = mcmc.chain[[1]]
save(accept, mcmc.chain, file=sprintf("mcmc_job_%d.rda", job.id))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/record-batch-reader.R
\docType{class}
\name{RecordBatchReader}
\alias{RecordBatchReader}
\alias{RecordBatchStreamReader}
\alias{RecordBatchFileReader}
\title{RecordBatchReader classes}
\description{
Apache Arrow defines two formats for \href{https://arrow.apache.org/docs/format/Columnar.html#serialization-and-interprocess-communication-ipc}{serializing data for interprocess communication (IPC)}:
a "stream" format and a "file" format, known as Feather.
\code{RecordBatchStreamReader} and \code{RecordBatchFileReader} are
interfaces for accessing record batches from input sources in those formats,
respectively.
For guidance on how to use these classes, see the examples section.
}
\section{Factory}{
The \code{RecordBatchFileReader$create()} and \code{RecordBatchStreamReader$create()}
factory methods instantiate the object and
take a single argument, named according to the class:
\itemize{
\item \code{file} A character file name, raw vector, or Arrow file connection object
(e.g. \link{RandomAccessFile}).
\item \code{stream} A raw vector, \link{Buffer}, or \link{InputStream}.
}
}
\section{Methods}{
\itemize{
\item \verb{$read_next_batch()}: Returns a \code{RecordBatch}, iterating through the
Reader. If there are no further batches in the Reader, it returns \code{NULL}.
\item \verb{$schema}: Returns a \link{Schema} (active binding)
\item \verb{$batches()}: Returns a list of \code{RecordBatch}es
\item \verb{$read_table()}: Collects the reader's \code{RecordBatch}es into a \link{Table}
\item \verb{$get_batch(i)}: For \code{RecordBatchFileReader}, return a particular batch
by an integer index.
\item \verb{$num_record_batches()}: For \code{RecordBatchFileReader}, see how many batches
are in the file.
}
}
\examples{
\donttest{
tf <- tempfile()
on.exit(unlink(tf))
batch <- record_batch(iris)
# This opens a connection to the file in Arrow
file_obj <- FileOutputStream$create(tf)
# Pass that to a RecordBatchWriter to write data conforming to a schema
writer <- RecordBatchFileWriter$create(file_obj, batch$schema)
writer$write(batch)
# You may write additional batches to the stream, provided that they have
# the same schema.
# Call "close" on the writer to indicate end-of-file/stream
writer$close()
# Then, close the connection--closing the IPC message does not close the file
file_obj$close()
# Now, we have a file we can read from. Same pattern: open file connection,
# then pass it to a RecordBatchReader
read_file_obj <- ReadableFile$create(tf)
reader <- RecordBatchFileReader$create(read_file_obj)
# RecordBatchFileReader knows how many batches it has (StreamReader does not)
reader$num_record_batches
# We could consume the Reader by calling $read_next_batch() until all are
# consumed, or we can call $read_table() to pull them all into a Table
tab <- reader$read_table()
# Call as.data.frame to turn that Table into an R data.frame
df <- as.data.frame(tab)
# This should be the same data we sent
all.equal(df, iris, check.attributes = FALSE)
# Unlike the Writers, we don't have to close RecordBatchReaders,
# but we do still need to close the file connection
read_file_obj$close()
}
}
\seealso{
\code{\link[=read_ipc_stream]{read_ipc_stream()}} and \code{\link[=read_feather]{read_feather()}} provide a much simpler interface
for reading data from these formats and are sufficient for many use cases.
}
|
/deps/arrow-0.17.1/r/man/RecordBatchReader.Rd
|
permissive
|
snowflakedb/libsnowflakeclient
|
R
| false
| true
| 3,416
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/record-batch-reader.R
\docType{class}
\name{RecordBatchReader}
\alias{RecordBatchReader}
\alias{RecordBatchStreamReader}
\alias{RecordBatchFileReader}
\title{RecordBatchReader classes}
\description{
Apache Arrow defines two formats for \href{https://arrow.apache.org/docs/format/Columnar.html#serialization-and-interprocess-communication-ipc}{serializing data for interprocess communication (IPC)}:
a "stream" format and a "file" format, known as Feather.
\code{RecordBatchStreamReader} and \code{RecordBatchFileReader} are
interfaces for accessing record batches from input sources in those formats,
respectively.
For guidance on how to use these classes, see the examples section.
}
\section{Factory}{
The \code{RecordBatchFileReader$create()} and \code{RecordBatchStreamReader$create()}
factory methods instantiate the object and
take a single argument, named according to the class:
\itemize{
\item \code{file} A character file name, raw vector, or Arrow file connection object
(e.g. \link{RandomAccessFile}).
\item \code{stream} A raw vector, \link{Buffer}, or \link{InputStream}.
}
}
\section{Methods}{
\itemize{
\item \verb{$read_next_batch()}: Returns a \code{RecordBatch}, iterating through the
Reader. If there are no further batches in the Reader, it returns \code{NULL}.
\item \verb{$schema}: Returns a \link{Schema} (active binding)
\item \verb{$batches()}: Returns a list of \code{RecordBatch}es
\item \verb{$read_table()}: Collects the reader's \code{RecordBatch}es into a \link{Table}
\item \verb{$get_batch(i)}: For \code{RecordBatchFileReader}, return a particular batch
by an integer index.
\item \verb{$num_record_batches()}: For \code{RecordBatchFileReader}, see how many batches
are in the file.
}
}
\examples{
\donttest{
tf <- tempfile()
on.exit(unlink(tf))
batch <- record_batch(iris)
# This opens a connection to the file in Arrow
file_obj <- FileOutputStream$create(tf)
# Pass that to a RecordBatchWriter to write data conforming to a schema
writer <- RecordBatchFileWriter$create(file_obj, batch$schema)
writer$write(batch)
# You may write additional batches to the stream, provided that they have
# the same schema.
# Call "close" on the writer to indicate end-of-file/stream
writer$close()
# Then, close the connection--closing the IPC message does not close the file
file_obj$close()
# Now, we have a file we can read from. Same pattern: open file connection,
# then pass it to a RecordBatchReader
read_file_obj <- ReadableFile$create(tf)
reader <- RecordBatchFileReader$create(read_file_obj)
# RecordBatchFileReader knows how many batches it has (StreamReader does not)
reader$num_record_batches
# We could consume the Reader by calling $read_next_batch() until all are
# consumed, or we can call $read_table() to pull them all into a Table
tab <- reader$read_table()
# Call as.data.frame to turn that Table into an R data.frame
df <- as.data.frame(tab)
# This should be the same data we sent
all.equal(df, iris, check.attributes = FALSE)
# Unlike the Writers, we don't have to close RecordBatchReaders,
# but we do still need to close the file connection
read_file_obj$close()
}
}
\seealso{
\code{\link[=read_ipc_stream]{read_ipc_stream()}} and \code{\link[=read_feather]{read_feather()}} provide a much simpler interface
for reading data from these formats and are sufficient for many use cases.
}
|
# Enumerate all n = 2^m binary vectors as the rows of an n x m matrix.
# Column j alternates blocks of 0s and 1s so that the rows count through
# every combination (most significant "bit" in column 1).
.binary.seq = function (m, n)
{   reps <- round(2 ^ (seq_len(m) - 1))
    verts <- matrix(0, nrow = n, ncol = m)
    for (j in seq_len(m))
        verts[, j] <- rep(c(0, 1), times = reps[j], each = reps[1 + m - j])
    verts
}
.binary.sign = function (binary)
{
    # Inclusion-exclusion sign for each binary row.  When the column count is
    # even, rows with an odd number of ones get sign -1; when the column
    # count is odd, rows with an even number of ones get sign -1.  All other
    # rows get sign +1.  Returns an integer vector of length nrow(binary).
    odd.count = (rowSums (binary) %% 2 == 1)
    negate = if (ncol (binary) %% 2 == 0) odd.count else !odd.count
    result = rep (1L, nrow (binary))
    result [negate] = -1L
    result
}
comb.prob = function (F, a, b)
{
    # Inclusion-exclusion over the 2^m corners of the box [a, b]: returns
    # sum over corners of sign * F(corner).  When F is a joint CDF this is
    # the probability mass of the box.  `a` and `b` are either numeric
    # vectors of equal length, or matrices of equal dimensions (one
    # evaluation point per row).
    matrix.args = (is.matrix (a) && is.matrix (b) )
    if (matrix.args)
    {   if (nrow (a) != nrow (b) )
            stop ("nrow(a) must equal nrow(b)")
        if (ncol (a) != ncol (b) )
            stop ("ncol(a) must equal ncol(b)")
        m = ncol (a)
    }
    else
    {   if (length (a) != length (b) )
            stop ("length(a) must equal length(b)")
        m = length (a)
    }
    ncorners = as.integer (round (2 ^ m) )
    corners = .binary.seq (m, ncorners)
    signs = .binary.sign (corners)
    total = 0
    for (k in seq_len (ncorners) )
    {   # Build the corner: start at a, replace with b where the bit is set.
        corner = a
        use.b = as.logical (corners [k,])
        if (matrix.args)
            corner [,use.b] = b [,use.b]
        else
            corner [use.b] = b [use.b]
        total = total + signs [k] * F (corner)
    }
    total
}
|
/R/comb.prob.r
|
no_license
|
cran/empirical
|
R
| false
| false
| 1,257
|
r
|
.binary.seq = function (m, n)
{
    # Enumerate the n = 2^m binary m-tuples, one per row, in counting order:
    # column 1 is the most significant bit, column m the least significant.
    block.sizes = round (2 ^ (0:(m - 1)))
    out = matrix (0, nrow=n, ncol=m)
    for (col in seq_len (m))
    {
        # Each column alternates runs of 0s and 1s; the run length halves
        # (relative to the table height) as the column index increases.
        half.period = block.sizes [1 + m - col]
        out [, col] = rep (rep (c (0, 1), each=half.period), times=block.sizes [col])
    }
    out
}
.binary.sign = function (binary)
{
    # Inclusion-exclusion sign for each binary row.  When the column count is
    # even, rows with an odd number of ones get sign -1; when the column
    # count is odd, rows with an even number of ones get sign -1.  All other
    # rows get sign +1.  Returns an integer vector of length nrow(binary).
    odd.count = (rowSums (binary) %% 2 == 1)
    negate = if (ncol (binary) %% 2 == 0) odd.count else !odd.count
    result = rep (1L, nrow (binary))
    result [negate] = -1L
    result
}
comb.prob = function (F, a, b)
{
    # Inclusion-exclusion over the 2^m corners of the box [a, b]: returns
    # sum over corners of sign * F(corner).  When F is a joint CDF this is
    # the probability mass of the box.  `a` and `b` are either numeric
    # vectors of equal length, or matrices of equal dimensions (one
    # evaluation point per row).
    matrix.args = (is.matrix (a) && is.matrix (b) )
    if (matrix.args)
    {   if (nrow (a) != nrow (b) )
            stop ("nrow(a) must equal nrow(b)")
        if (ncol (a) != ncol (b) )
            stop ("ncol(a) must equal ncol(b)")
        m = ncol (a)
    }
    else
    {   if (length (a) != length (b) )
            stop ("length(a) must equal length(b)")
        m = length (a)
    }
    ncorners = as.integer (round (2 ^ m) )
    corners = .binary.seq (m, ncorners)
    signs = .binary.sign (corners)
    total = 0
    for (k in seq_len (ncorners) )
    {   # Build the corner: start at a, replace with b where the bit is set.
        corner = a
        use.b = as.logical (corners [k,])
        if (matrix.args)
            corner [,use.b] = b [,use.b]
        else
            corner [use.b] = b [use.b]
        total = total + signs [k] * F (corner)
    }
    total
}
|
## Matrix inversion is usually a costly computation, so it makes sense to
## cache the inverse of a matrix rather than compute it repeatedly: when
## the inverse is needed again, it can be looked up in the cache rather
## than recomputed.
## Creates a special "matrix" object: a list of closures sharing a matrix `x`
## and its (lazily computed) cached inverse.  Accessors returned, in order:
## set(y) replaces the matrix and drops any cached inverse, get() returns the
## matrix, setinverse(solve) stores an inverse, getinverse() retrieves it
## (NULL when nothing has been cached yet).
makeCacheMatrix <- function(x = matrix()) {
        cached.inverse <- NULL
        list(
                set = function(y) {
                        # Replacing the matrix invalidates the cached inverse.
                        x <<- y
                        cached.inverse <<- NULL
                },
                get = function() x,
                setinverse = function(solve) cached.inverse <<- solve,
                getinverse = function() cached.inverse
        )
}
## Computes the inverse of the special "matrix" produced by makeCacheMatrix.
## If an inverse has already been calculated (and the matrix has not
## changed), the cached value is returned directly, with a message;
## otherwise the inverse is computed via solve(), stored in the cache, and
## returned.  Extra arguments `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
        ## Return a matrix that is the inverse of 'x'
        cached <- x$getinverse()
        if (is.null(cached)) {
                inverse <- solve(x$get(), ...)
                x$setinverse(inverse)
                return(inverse)
        }
        message("getting cached data")
        cached
}
|
/cachematrix.R
|
no_license
|
DyOrFly/ProgrammingAssignment2
|
R
| false
| false
| 1,122
|
r
|
## Matrix inversion is usually a costly computation, so it makes sense to
## cache the inverse of a matrix rather than compute it repeatedly: when
## the inverse is needed again, it can be looked up in the cache rather
## than recomputed.
## Creates a special "matrix" object: a list of closures sharing a matrix `x`
## and its (lazily computed) cached inverse.  Accessors returned, in order:
## set(y) replaces the matrix and drops any cached inverse, get() returns the
## matrix, setinverse(solve) stores an inverse, getinverse() retrieves it
## (NULL when nothing has been cached yet).
makeCacheMatrix <- function(x = matrix()) {
        cached.inverse <- NULL
        list(
                set = function(y) {
                        # Replacing the matrix invalidates the cached inverse.
                        x <<- y
                        cached.inverse <<- NULL
                },
                get = function() x,
                setinverse = function(solve) cached.inverse <<- solve,
                getinverse = function() cached.inverse
        )
}
## Computes the inverse of the special "matrix" produced by makeCacheMatrix.
## If an inverse has already been calculated (and the matrix has not
## changed), the cached value is returned directly, with a message;
## otherwise the inverse is computed via solve(), stored in the cache, and
## returned.  Extra arguments `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
        ## Return a matrix that is the inverse of 'x'
        cached <- x$getinverse()
        if (is.null(cached)) {
                inverse <- solve(x$get(), ...)
                x$setinverse(inverse)
                return(inverse)
        }
        message("getting cached data")
        cached
}
|
\name{getSteps}
\alias{getSteps}
\title{Number of Steps of a One-Dimensional Cellular Automaton}
\description{
This method extracts the number of steps (generations) from an instance of class \code{CellularAutomaton}.
}
\details{
\code{ca$getSteps()}
}
\value{
\code{getSteps} returns an integer >= 1.
}
\author{John Hughes}
\keyword{methods}
|
/man/getSteps.Rd
|
no_license
|
cran/CellularAutomaton
|
R
| false
| false
| 359
|
rd
|
\name{getSteps}
\alias{getSteps}
\title{Number of Steps of a One-Dimensional Cellular Automaton}
\description{
This method extracts the number of steps (generations) from an instance of class \code{CellularAutomaton}.
}
\details{
\code{ca$getSteps()}
}
\value{
\code{getSteps} returns an integer >= 1.
}
\author{John Hughes}
\keyword{methods}
|
##############################################################################################
# check upper atom count bounds ##############################################################
##############################################################################################
# Simulates isotope patterns for known chemical formulas, centroids them, and
# checks that the atom-count upper bounds estimated by enviMass:::atoms()
# never fall below the true atom counts of the generating formula; stop()s on
# the first violation.
data(chemforms)
# NOTE(review): `centro` is only defined inside the loop below, so the next
# four assignments require a `centro` object left over in the workspace
# (e.g. from a previous interactive run) -- confirm before sourcing fresh.
masses<-centro[[1]][,1]
intensities<-centro[[1]][,2]
int_cut<-(max(intensities)*0.1)
# Default parameter values; several are overridden explicitly in the
# enviMass:::atoms() call further down.
inttol=0.2
use_C=FALSE
charges=c(1,2)
ppm=TRUE
dmz=c(20,20,20,20,3,3,0)
elements=c("C","H","N","O","Cl","Br","P","S")
must_peak=FALSE
##############################################################################################
# NOTE(review): the outer index `j` is shadowed by the innermost
# `for(j in 1:length(counts))` loop.  R's for() still performs all 20 outer
# repeats (the iteration sequence is fixed up front), but renaming one of the
# two indices would make this much clearer.
for(j in 1:20){ # different settings
	for(i in 1:length(chemforms)){
		use_chem<-chemforms[i]
		counts<-check_chemform(isotopes,use_chem,get_sorted=FALSE,get_list=TRUE)[[1]]
		at_charge<-sample(1:4,1)
		#at_charge<-3
		# Theoretical isotope pattern for the formula at the sampled charge
		pattern<-isopattern(
			isotopes,
			use_chem,
			threshold=0.1,
			plot_it=FALSE,
			charge=at_charge,
			emass=0.00054858,
			algo=2
		)
		# NOTE(review): `res` is sampled but never used afterwards.
		res<-sample(seq(1E4,5E5,1E4),1)
		# Convolve the stick pattern into Gaussian profiles
		profiles<-envelope(
			pattern,
			ppm=FALSE,
			dmz=0.0001,
			frac=1/10,
			env="Gaussian",
			resolution=1E5,
			plot_it=FALSE
		)
		# Reduce the profiles back to centroided peaks
		centro<-vdetect(
			profiles,
			detect="centroid",
			plot_it=FALSE
		)
		###########################################################################################
		# estimate bounds #########################################################################
		bounds<-enviMass:::atoms(
			masses=centro[[1]][,1],
			intensities=centro[[1]][,2],
			elements=names(counts),
			dmz=rep(100,length(names(counts))),
			ppm=TRUE,
			charges=at_charge,
			isotopes,
			int_cut=(max(centro[[1]][,2])*0.1),
			inttol=0.2,
			use_C=TRUE,
			must_peak=FALSE
		)
		# Every true atom count must lie within its estimated upper bound
		for(j in 1:length(counts)){
			here<-which(colnames(bounds)==names(counts)[j])
			if(counts[j]>max(bounds[,here])){
				stop("")
			}
		}
	}
}
|
/inst/test_atoms.r
|
no_license
|
uweschmitt/enviMass
|
R
| false
| false
| 1,880
|
r
|
##############################################################################################
# check upper atom count bounds ##############################################################
##############################################################################################
# Simulates isotope patterns for known chemical formulas, centroids them, and
# checks that the atom-count upper bounds estimated by enviMass:::atoms()
# never fall below the true atom counts of the generating formula; stop()s on
# the first violation.
data(chemforms)
# NOTE(review): `centro` is only defined inside the loop below, so the next
# four assignments require a `centro` object left over in the workspace
# (e.g. from a previous interactive run) -- confirm before sourcing fresh.
masses<-centro[[1]][,1]
intensities<-centro[[1]][,2]
int_cut<-(max(intensities)*0.1)
# Default parameter values; several are overridden explicitly in the
# enviMass:::atoms() call further down.
inttol=0.2
use_C=FALSE
charges=c(1,2)
ppm=TRUE
dmz=c(20,20,20,20,3,3,0)
elements=c("C","H","N","O","Cl","Br","P","S")
must_peak=FALSE
##############################################################################################
# NOTE(review): the outer index `j` is shadowed by the innermost
# `for(j in 1:length(counts))` loop.  R's for() still performs all 20 outer
# repeats (the iteration sequence is fixed up front), but renaming one of the
# two indices would make this much clearer.
for(j in 1:20){ # different settings
	for(i in 1:length(chemforms)){
		use_chem<-chemforms[i]
		counts<-check_chemform(isotopes,use_chem,get_sorted=FALSE,get_list=TRUE)[[1]]
		at_charge<-sample(1:4,1)
		#at_charge<-3
		# Theoretical isotope pattern for the formula at the sampled charge
		pattern<-isopattern(
			isotopes,
			use_chem,
			threshold=0.1,
			plot_it=FALSE,
			charge=at_charge,
			emass=0.00054858,
			algo=2
		)
		# NOTE(review): `res` is sampled but never used afterwards.
		res<-sample(seq(1E4,5E5,1E4),1)
		# Convolve the stick pattern into Gaussian profiles
		profiles<-envelope(
			pattern,
			ppm=FALSE,
			dmz=0.0001,
			frac=1/10,
			env="Gaussian",
			resolution=1E5,
			plot_it=FALSE
		)
		# Reduce the profiles back to centroided peaks
		centro<-vdetect(
			profiles,
			detect="centroid",
			plot_it=FALSE
		)
		###########################################################################################
		# estimate bounds #########################################################################
		bounds<-enviMass:::atoms(
			masses=centro[[1]][,1],
			intensities=centro[[1]][,2],
			elements=names(counts),
			dmz=rep(100,length(names(counts))),
			ppm=TRUE,
			charges=at_charge,
			isotopes,
			int_cut=(max(centro[[1]][,2])*0.1),
			inttol=0.2,
			use_C=TRUE,
			must_peak=FALSE
		)
		# Every true atom count must lie within its estimated upper bound
		for(j in 1:length(counts)){
			here<-which(colnames(bounds)==names(counts)[j])
			if(counts[j]>max(bounds[,here])){
				stop("")
			}
		}
	}
}
|
#' Convert \code{vars::varprd} to \code{data.frame}
#'
#' @param model \code{vars::varprd} instance
#' @inheritParams fortify_base
#' @param is.date Logical flag indicating whether the \code{stats::ts} index is a date or not.
#' If not provided, regard the input as date when the frequency is 4 or 12.
#' @param ts.connect Logical flag indicating whether to connect the original time-series and predicted values
#' @param melt Logical flag indicating whether to melt each timeseries as variable
#' @return data.frame
#' @examples
#' data(Canada, package = 'vars')
#' d.var <- vars::VAR(Canada, p = 3, type = 'const')
#' fortify(stats::predict(d.var, n.ahead = 50))
#' @export
fortify.varprd <- function(model, data = NULL, is.date = NULL,
                           ts.connect = FALSE, melt = FALSE, ...){
  # Observed series underlying the VAR fit, fortified to a data.frame
  fitted <- ggplot2::fortify(model$model$y)
  # model$fcst holds one forecast table per series, keyed by series name
  fcst <- model$fcst
  # Time index continuing past the observed series for the forecast horizon
  dtindex.cont <- get.dtindex.continuous(model$model$y, length = nrow(fcst[[1]]),
                                         is.date = is.date)
  cols <- names(fcst)
  if (melt) {
    # for autoplot conversion: stack all series long-wise, with the observed
    # values in a 'Data' column and a 'variable' column naming the series
    for (col in cols){
      pred <- data.frame(fcst[[col]])
      pred$Index <- dtindex.cont
      obs <- fitted[, c('Index', col)]
      colnames(obs) <- c('Index', 'Data')
      binded <- ggfortify::rbind_ts(pred, obs, ts.connect = ts.connect)
      binded$variable <- col
      fcst[[col]] <- binded
    }
    return(dplyr::bind_rows(fcst))
  } else {
    # Wide format: prefix each forecast column with its series name
    # (e.g. 'e.fcst', 'e.lower') and bind all forecast tables side by side
    for (col in cols){
      colnames(fcst[[col]]) <- paste0(col, '.', colnames(fcst[[col]]))
    }
    pred <- data.frame(do.call(cbind, fcst))
    pred$Index <- dtindex.cont
    binded <- ggfortify::rbind_ts(pred, fitted, ts.connect = ts.connect)
    return(binded)
  }
}
#' Autoplot \code{vars::varprd}
#'
#' @param object \code{vars::varprd} instance
#' @param is.date Logical flag indicating whether the \code{stats::ts} index is a date or not.
#' If not provided, regard the input as date when the frequency is 4 or 12.
#' @param ts.connect Logical flag indicating whether to connect the original time-series and predicted values
#' @param scales Scale value passed to \code{ggplot2}
#' @inheritParams autoplot.tsmodel
#' @inheritParams plot_confint
#' @param ... other arguments passed to \code{autoplot.ts}
#' @return ggplot
#' @examples
#' data(Canada, package = 'vars')
#' d.var <- vars::VAR(Canada, p = 3, type = 'const')
#' autoplot(stats::predict(d.var, n.ahead = 50), is.date = TRUE)
#' autoplot(stats::predict(d.var, n.ahead = 50), conf.int = FALSE)
#' @export
autoplot.varprd <- function(object, is.date = NULL, ts.connect = TRUE,
                            scales = 'free_y',
                            predict.geom = 'line',
                            predict.colour = '#0000FF', predict.size = NULL,
                            predict.linetype = NULL, predict.alpha = NULL,
                            predict.fill = NULL, predict.shape = NULL,
                            conf.int = TRUE,
                            conf.int.colour = '#0000FF', conf.int.linetype = 'none',
                            conf.int.fill = '#000000', conf.int.alpha = 0.3,
                            ...) {
  # Melted fortify output: observed values in 'Data', forecasts in 'fcst',
  # one 'variable' level per series
  plot.data <- ggplot2::fortify(object, is.date = is.date,
                                ts.connect = ts.connect, melt = TRUE)
  # Filter existing values to avoid warnings
  original.data <- dplyr::filter_(plot.data, '!is.na(Data)')
  predict.data <- dplyr::filter_(plot.data, '!is.na(fcst)')
  # Layer the observed series first, then the forecast on top of it
  p <- autoplot.ts(original.data, columns = 'Data', ...)
  p <- autoplot.ts(predict.data, columns = 'fcst', p = p,
                   geom = predict.geom,
                   colour = predict.colour, size = predict.size,
                   linetype = predict.linetype, alpha = predict.alpha,
                   fill = predict.fill, shape = predict.shape)
  # One facet row per series; add the confidence band last
  p <- p + ggplot2::facet_grid(variable ~ ., scales = scales)
  p <- plot_confint(p = p, data = predict.data, conf.int = conf.int,
                    conf.int.colour = conf.int.colour,
                    conf.int.linetype = conf.int.linetype,
                    conf.int.fill = conf.int.fill, conf.int.alpha = conf.int.alpha)
  p
}
|
/R/fortify_vars.R
|
no_license
|
richierocks/ggfortify
|
R
| false
| false
| 4,107
|
r
|
#' Convert \code{vars::varprd} to \code{data.frame}
#'
#' @param model \code{vars::varprd} instance
#' @inheritParams fortify_base
#' @param is.date Logical flag indicating whether the \code{stats::ts} index is a date or not.
#' If not provided, regard the input as date when the frequency is 4 or 12.
#' @param ts.connect Logical flag indicating whether to connect the original time-series and predicted values
#' @param melt Logical flag indicating whether to melt each timeseries as variable
#' @return data.frame
#' @examples
#' data(Canada, package = 'vars')
#' d.var <- vars::VAR(Canada, p = 3, type = 'const')
#' fortify(stats::predict(d.var, n.ahead = 50))
#' @export
fortify.varprd <- function(model, data = NULL, is.date = NULL,
                           ts.connect = FALSE, melt = FALSE, ...){
  # Observed series underlying the VAR fit, fortified to a data.frame
  fitted <- ggplot2::fortify(model$model$y)
  # model$fcst holds one forecast table per series, keyed by series name
  fcst <- model$fcst
  # Time index continuing past the observed series for the forecast horizon
  dtindex.cont <- get.dtindex.continuous(model$model$y, length = nrow(fcst[[1]]),
                                         is.date = is.date)
  cols <- names(fcst)
  if (melt) {
    # for autoplot conversion: stack all series long-wise, with the observed
    # values in a 'Data' column and a 'variable' column naming the series
    for (col in cols){
      pred <- data.frame(fcst[[col]])
      pred$Index <- dtindex.cont
      obs <- fitted[, c('Index', col)]
      colnames(obs) <- c('Index', 'Data')
      binded <- ggfortify::rbind_ts(pred, obs, ts.connect = ts.connect)
      binded$variable <- col
      fcst[[col]] <- binded
    }
    return(dplyr::bind_rows(fcst))
  } else {
    # Wide format: prefix each forecast column with its series name
    # (e.g. 'e.fcst', 'e.lower') and bind all forecast tables side by side
    for (col in cols){
      colnames(fcst[[col]]) <- paste0(col, '.', colnames(fcst[[col]]))
    }
    pred <- data.frame(do.call(cbind, fcst))
    pred$Index <- dtindex.cont
    binded <- ggfortify::rbind_ts(pred, fitted, ts.connect = ts.connect)
    return(binded)
  }
}
#' Autoplot \code{vars::varprd}
#'
#' @param object \code{vars::varprd} instance
#' @param is.date Logical flag indicating whether the \code{stats::ts} index is a date or not.
#' If not provided, regard the input as date when the frequency is 4 or 12.
#' @param ts.connect Logical flag indicating whether to connect the original time-series and predicted values
#' @param scales Scale value passed to \code{ggplot2}
#' @inheritParams autoplot.tsmodel
#' @inheritParams plot_confint
#' @param ... other arguments passed to \code{autoplot.ts}
#' @return ggplot
#' @examples
#' data(Canada, package = 'vars')
#' d.var <- vars::VAR(Canada, p = 3, type = 'const')
#' autoplot(stats::predict(d.var, n.ahead = 50), is.date = TRUE)
#' autoplot(stats::predict(d.var, n.ahead = 50), conf.int = FALSE)
#' @export
autoplot.varprd <- function(object, is.date = NULL, ts.connect = TRUE,
                            scales = 'free_y',
                            predict.geom = 'line',
                            predict.colour = '#0000FF', predict.size = NULL,
                            predict.linetype = NULL, predict.alpha = NULL,
                            predict.fill = NULL, predict.shape = NULL,
                            conf.int = TRUE,
                            conf.int.colour = '#0000FF', conf.int.linetype = 'none',
                            conf.int.fill = '#000000', conf.int.alpha = 0.3,
                            ...) {
  # Melted fortify output: observed values in 'Data', forecasts in 'fcst',
  # one 'variable' level per series
  plot.data <- ggplot2::fortify(object, is.date = is.date,
                                ts.connect = ts.connect, melt = TRUE)
  # Filter existing values to avoid warnings
  original.data <- dplyr::filter_(plot.data, '!is.na(Data)')
  predict.data <- dplyr::filter_(plot.data, '!is.na(fcst)')
  # Layer the observed series first, then the forecast on top of it
  p <- autoplot.ts(original.data, columns = 'Data', ...)
  p <- autoplot.ts(predict.data, columns = 'fcst', p = p,
                   geom = predict.geom,
                   colour = predict.colour, size = predict.size,
                   linetype = predict.linetype, alpha = predict.alpha,
                   fill = predict.fill, shape = predict.shape)
  # One facet row per series; add the confidence band last
  p <- p + ggplot2::facet_grid(variable ~ ., scales = scales)
  p <- plot_confint(p = p, data = predict.data, conf.int = conf.int,
                    conf.int.colour = conf.int.colour,
                    conf.int.linetype = conf.int.linetype,
                    conf.int.fill = conf.int.fill, conf.int.alpha = conf.int.alpha)
  p
}
|
\name{idw}
\alias{idw}
\title{Inverse-distance weighted smoothing of observations at irregular points}
\description{
Performs spatial smoothing of numeric values observed
at a set of irregular locations using inverse-distance weighting.
}
\usage{
idw(X, power=2, at=c("pixels", "points"), ..., se=FALSE)
}
\arguments{
\item{X}{A marked point pattern (object of class \code{"ppp"}).}
\item{power}{Numeric. Power of distance used in the weighting.}
\item{at}{
Character string specifying whether to compute the intensity values
at a grid of pixel locations (\code{at="pixels"}) or
only at the points of \code{X} (\code{at="points"}).
String is partially matched.
}
\item{\dots}{Arguments passed to \code{\link{as.mask}}
to control the pixel resolution of the result.}
\item{se}{
Logical value specifying whether to calculate a standard error.
}
}
\details{
This function performs spatial smoothing of numeric values
observed at a set of irregular locations.
Smoothing is performed by inverse distance weighting. If the
observed values are \eqn{v_1,\ldots,v_n}{v[1],...,v[n]}
at locations \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} respectively,
then the smoothed value at a location \eqn{u} is
\deqn{
g(u) = \frac{\sum_i w_i v_i}{\sum_i w_i}
}{
g(u) = (sum of w[i] * v[i])/(sum of w[i])
}
where the weights are the inverse \eqn{p}-th powers of distance,
\deqn{
w_i = \frac 1 {d(u,x_i)^p}
}{
w[i] = 1/d(u,x[i])^p
}
where \eqn{d(u,x_i) = ||u - x_i||}{d(u,x[i])}
is the Euclidean distance from \eqn{u} to \eqn{x_i}{x[i]}.
The argument \code{X} must be a marked point pattern (object
of class \code{"ppp"}, see \code{\link{ppp.object}}).
The points of the pattern are taken to be the
observation locations \eqn{x_i}{x[i]}, and the marks of the pattern
are taken to be the numeric values \eqn{v_i}{v[i]} observed at these
locations.
The marks are allowed to be a data frame.
Then the smoothing procedure is applied to each
column of marks.
If \code{at="pixels"} (the default), the smoothed mark value
is calculated at a grid of pixels, and the result is a pixel image.
The arguments \code{\dots} control the pixel resolution.
See \code{\link{as.mask}}.
If \code{at="points"}, the smoothed mark values are calculated
at the data points only, using a leave-one-out rule (the mark value
at a data point is excluded when calculating the smoothed value
for that point).
An estimate of standard error is also calculated, if \code{se=TRUE}.
The calculation assumes that the data point locations are fixed,
that is, the standard error only takes into account the variability
in the mark values, and not the variability due to randomness of the
data point locations.
An alternative to inverse-distance weighting is kernel smoothing,
which is performed by \code{\link{Smooth.ppp}}.
}
\value{
\emph{If \code{X} has a single column of marks:}
\itemize{
\item
If \code{at="pixels"} (the default), the result is
a pixel image (object of class \code{"im"}).
Pixel values are values of the interpolated function.
\item
If \code{at="points"}, the result is a numeric vector
of length equal to the number of points in \code{X}.
Entries are values of the interpolated function at the points of \code{X}.
}
\emph{If \code{X} has a data frame of marks:}
\itemize{
\item
If \code{at="pixels"} (the default), the result is a named list of
pixel images (object of class \code{"im"}). There is one
image for each column of marks. This list also belongs to
the class \code{"solist"}, for which there is a plot method.
\item
If \code{at="points"}, the result is a data frame
with one row for each point of \code{X},
and one column for each column of marks.
Entries are values of the interpolated function at the points of \code{X}.
}
If \code{se=TRUE}, then the result is a list
with two entries named \code{estimate} and \code{SE}, which
each have the format described above.
}
\seealso{
\code{\link{density.ppp}},
\code{\link{ppp.object}},
\code{\link{im.object}}.
See \code{\link{Smooth.ppp}} for kernel smoothing
and \code{\link{nnmark}} for nearest-neighbour interpolation.
To perform other kinds of interpolation, see also the \code{akima} package.
}
\examples{
# data frame of marks: trees marked by diameter and height
plot(idw(finpines))
idw(finpines, at="points")[1:5,]
plot(idw(finpines, se=TRUE)$SE)
idw(finpines, at="points", se=TRUE)$SE[1:5, ]
}
\references{
Shepard, D. (1968) A two-dimensional interpolation function for
irregularly-spaced data.
\emph{Proceedings of the 1968 ACM National Conference},
1968, pages 517--524. DOI: 10.1145/800186.810616
}
\author{
\spatstatAuthors.
Variance calculation by Andrew P Wheeler with modifications by
Adrian Baddeley.
}
\keyword{spatial}
\keyword{methods}
\keyword{smooth}
|
/man/idw.Rd
|
no_license
|
spatstat/spatstat.core
|
R
| false
| false
| 4,984
|
rd
|
\name{idw}
\alias{idw}
\title{Inverse-distance weighted smoothing of observations at irregular points}
\description{
Performs spatial smoothing of numeric values observed
at a set of irregular locations using inverse-distance weighting.
}
\usage{
idw(X, power=2, at=c("pixels", "points"), ..., se=FALSE)
}
\arguments{
\item{X}{A marked point pattern (object of class \code{"ppp"}).}
\item{power}{Numeric. Power of distance used in the weighting.}
\item{at}{
Character string specifying whether to compute the intensity values
at a grid of pixel locations (\code{at="pixels"}) or
only at the points of \code{X} (\code{at="points"}).
String is partially matched.
}
\item{\dots}{Arguments passed to \code{\link{as.mask}}
to control the pixel resolution of the result.}
\item{se}{
Logical value specifying whether to calculate a standard error.
}
}
\details{
This function performs spatial smoothing of numeric values
observed at a set of irregular locations.
Smoothing is performed by inverse distance weighting. If the
observed values are \eqn{v_1,\ldots,v_n}{v[1],...,v[n]}
at locations \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} respectively,
then the smoothed value at a location \eqn{u} is
\deqn{
g(u) = \frac{\sum_i w_i v_i}{\sum_i w_i}
}{
g(u) = (sum of w[i] * v[i])/(sum of w[i])
}
where the weights are the inverse \eqn{p}-th powers of distance,
\deqn{
w_i = \frac 1 {d(u,x_i)^p}
}{
w[i] = 1/d(u,x[i])^p
}
where \eqn{d(u,x_i) = ||u - x_i||}{d(u,x[i])}
is the Euclidean distance from \eqn{u} to \eqn{x_i}{x[i]}.
The argument \code{X} must be a marked point pattern (object
of class \code{"ppp"}, see \code{\link{ppp.object}}).
The points of the pattern are taken to be the
observation locations \eqn{x_i}{x[i]}, and the marks of the pattern
are taken to be the numeric values \eqn{v_i}{v[i]} observed at these
locations.
The marks are allowed to be a data frame.
Then the smoothing procedure is applied to each
column of marks.
If \code{at="pixels"} (the default), the smoothed mark value
is calculated at a grid of pixels, and the result is a pixel image.
The arguments \code{\dots} control the pixel resolution.
See \code{\link{as.mask}}.
If \code{at="points"}, the smoothed mark values are calculated
at the data points only, using a leave-one-out rule (the mark value
at a data point is excluded when calculating the smoothed value
for that point).
An estimate of standard error is also calculated, if \code{se=TRUE}.
The calculation assumes that the data point locations are fixed,
that is, the standard error only takes into account the variability
in the mark values, and not the variability due to randomness of the
data point locations.
An alternative to inverse-distance weighting is kernel smoothing,
which is performed by \code{\link{Smooth.ppp}}.
}
\value{
\emph{If \code{X} has a single column of marks:}
\itemize{
\item
If \code{at="pixels"} (the default), the result is
a pixel image (object of class \code{"im"}).
Pixel values are values of the interpolated function.
\item
If \code{at="points"}, the result is a numeric vector
of length equal to the number of points in \code{X}.
Entries are values of the interpolated function at the points of \code{X}.
}
\emph{If \code{X} has a data frame of marks:}
\itemize{
\item
If \code{at="pixels"} (the default), the result is a named list of
pixel images (object of class \code{"im"}). There is one
image for each column of marks. This list also belongs to
the class \code{"solist"}, for which there is a plot method.
\item
If \code{at="points"}, the result is a data frame
with one row for each point of \code{X},
and one column for each column of marks.
Entries are values of the interpolated function at the points of \code{X}.
}
If \code{se=TRUE}, then the result is a list
with two entries named \code{estimate} and \code{SE}, which
each have the format described above.
}
\seealso{
\code{\link{density.ppp}},
\code{\link{ppp.object}},
\code{\link{im.object}}.
See \code{\link{Smooth.ppp}} for kernel smoothing
and \code{\link{nnmark}} for nearest-neighbour interpolation.
To perform other kinds of interpolation, see also the \code{akima} package.
}
\examples{
# data frame of marks: trees marked by diameter and height
plot(idw(finpines))
idw(finpines, at="points")[1:5,]
plot(idw(finpines, se=TRUE)$SE)
idw(finpines, at="points", se=TRUE)$SE[1:5, ]
}
\references{
Shepard, D. (1968) A two-dimensional interpolation function for
irregularly-spaced data.
\emph{Proceedings of the 1968 ACM National Conference},
1968, pages 517--524. DOI: 10.1145/800186.810616
}
\author{
\spatstatAuthors.
Variance calculation by Andrew P Wheeler with modifications by
Adrian Baddeley.
}
\keyword{spatial}
\keyword{methods}
\keyword{smooth}
|
# Generates one SLURM batch script per (site id, random seed) pair that runs
# scripts/SPUE_generic_sites_seeds_upcov.r on the cluster, plus a
# batch_runner.sh that dos2unix-converts and sbatch-submits every job.
batch.file.initial.lines<-c(
  "#!/bin/bash",
  "#SBATCH --job-name=spueuc",
  "#SBATCH --time=96:00:00",
  "#SBATCH --nodes=1",
  #"#SBATCH --ntasks-per-node=20",
  "#SBATCH --cpus-per-task=1",
  "#SBATCH --mem=2500",
  "",
  "module load R/3.4.0"
)
# Pick the working directory by platform: cluster-side path on Linux, UNC
# network share when generating from Windows.
if(Sys.info()["sysname"]=="Linux"){
  # wd<-"/data/kim079/model_optimisation_framework_v2"
  wd<-"/datasets/work/LW_TVD_MDBA_WORK/8_Working/7_Shaun/data_backup/kim079/model_optimisation_framework_v2"
} else {
  # wd<-"C:/Users/kim079/Documents/model_optimisation_framework"
  # wd<-"//pearceydata.csiro.au/data/kim079/model_optimisation_framework_v2"
  wd<-"//gpfs2-cbr.san.csiro.au/lw_tvd_mdba_work/8_Working/7_Shaun/data_backup/kim079/model_optimisation_framework_v2"
}
# Substitutions applied to every generated batch line so Windows UNC paths
# become cluster-side paths.
remove_for_linux<-"//pearceydata.csiro.au|//pearceyflush1.csiro.au|//pearceyflush2.csiro.au"
replace_this<-"//gpfs2-cbr.san.csiro.au/lw_tvd_mdba_work" #"//lw-osm-03-cdc.it.csiro.au/OSM_CBR_LW_TVD_MDBA_work"
replace_with<-"/datasets/work/LW_TVD_MDBA_WORK" #"/OSM/CBR/LW_TVD_MDBA/work"
preprocess_dir<-"output/gr4j.calib.param.state.all.sites.preprocess"
# output_dir<-"output/gibbs_sampler_param_and_state_uncertainty_on_state_errors_real_data_split_periods_all_sites_state_values"
batch.write.dir<-"scripts/SPUE_generic_sites_seeds_upcov"
setwd(wd)
# Site ids are encoded in the preprocess file names
# (state_error_simulation_data_<id>.csv); three sites are excluded.
simulation_data_files<-list.files(preprocess_dir,pattern = "state_error_simulation_data_",full.names = T)
all_ids<-gsub("state_error_simulation_data_|.csv","",basename(simulation_data_files))
remove_sites<-"110014|415202|415214"
all_ids<-all_ids[grep(remove_sites,all_ids,invert = T)]
# dir.create(output_dir, showWarnings = F)
dir.create(batch.write.dir, showWarnings = F)
all_batch_names<-c()
for(ii in 1:4){  # one pass per random seed
  for(i in 1:length(all_ids)){
    output_dir<-paste0("output/SPUE_all_sites_upcov_seed_",ii)
    # The same Rscript command is repeated 11 times within a single job.
    command<-rep(paste0("Rscript /datasets/work/LW_TVD_MDBA_WORK/8_Working/7_Shaun/data_backup/kim079/model_optimisation_framework_v2/scripts/SPUE_generic_sites_seeds_upcov.r ",
                        "\"",all_ids[i],"\" ",output_dir),11)
    batch.file.lines<-c(batch.file.initial.lines,command)
    batch_fn<-paste0(batch.write.dir,"/",all_ids[i],"_seed_",ii)
    all_batch_names<-c(all_batch_names,basename(batch_fn))
    batch.file.lines<-gsub(remove_for_linux,"",batch.file.lines)
    batch.file.lines<-gsub(replace_this,replace_with,batch.file.lines)
    writeLines(batch.file.lines,batch_fn)
  }
}
batch_runner_fn<-paste0(batch.write.dir,"/batch_runner.sh")
all_cluster_lines<-c(paste("dos2unix",all_batch_names),paste("sbatch",all_batch_names))
writeLines(all_cluster_lines,batch_runner_fn)
# batch_cancel<-paste("scancel -u kim079 -n",all_job_names)
# writeLines(batch_cancel,paste0(batch.file.write.dir,"/kill_all_jobs"))
|
/multi-site_study/scripts/SPUE/SPUE_generic_sites_seeds_upcov_gen_bat.r
|
no_license
|
shaunkim079/SPUE
|
R
| false
| false
| 2,724
|
r
|
# Generates one SLURM batch script per (site id, random seed) pair that runs
# scripts/SPUE_generic_sites_seeds_upcov.r on the cluster, plus a
# batch_runner.sh that dos2unix-converts and sbatch-submits every job.
batch.file.initial.lines<-c(
  "#!/bin/bash",
  "#SBATCH --job-name=spueuc",
  "#SBATCH --time=96:00:00",
  "#SBATCH --nodes=1",
  #"#SBATCH --ntasks-per-node=20",
  "#SBATCH --cpus-per-task=1",
  "#SBATCH --mem=2500",
  "",
  "module load R/3.4.0"
)
# Pick the working directory by platform: cluster-side path on Linux, UNC
# network share when generating from Windows.
if(Sys.info()["sysname"]=="Linux"){
  # wd<-"/data/kim079/model_optimisation_framework_v2"
  wd<-"/datasets/work/LW_TVD_MDBA_WORK/8_Working/7_Shaun/data_backup/kim079/model_optimisation_framework_v2"
} else {
  # wd<-"C:/Users/kim079/Documents/model_optimisation_framework"
  # wd<-"//pearceydata.csiro.au/data/kim079/model_optimisation_framework_v2"
  wd<-"//gpfs2-cbr.san.csiro.au/lw_tvd_mdba_work/8_Working/7_Shaun/data_backup/kim079/model_optimisation_framework_v2"
}
# Substitutions applied to every generated batch line so Windows UNC paths
# become cluster-side paths.
remove_for_linux<-"//pearceydata.csiro.au|//pearceyflush1.csiro.au|//pearceyflush2.csiro.au"
replace_this<-"//gpfs2-cbr.san.csiro.au/lw_tvd_mdba_work" #"//lw-osm-03-cdc.it.csiro.au/OSM_CBR_LW_TVD_MDBA_work"
replace_with<-"/datasets/work/LW_TVD_MDBA_WORK" #"/OSM/CBR/LW_TVD_MDBA/work"
preprocess_dir<-"output/gr4j.calib.param.state.all.sites.preprocess"
# output_dir<-"output/gibbs_sampler_param_and_state_uncertainty_on_state_errors_real_data_split_periods_all_sites_state_values"
batch.write.dir<-"scripts/SPUE_generic_sites_seeds_upcov"
setwd(wd)
# Site ids are encoded in the preprocess file names
# (state_error_simulation_data_<id>.csv); three sites are excluded.
simulation_data_files<-list.files(preprocess_dir,pattern = "state_error_simulation_data_",full.names = T)
all_ids<-gsub("state_error_simulation_data_|.csv","",basename(simulation_data_files))
remove_sites<-"110014|415202|415214"
all_ids<-all_ids[grep(remove_sites,all_ids,invert = T)]
# dir.create(output_dir, showWarnings = F)
dir.create(batch.write.dir, showWarnings = F)
all_batch_names<-c()
for(ii in 1:4){  # one pass per random seed
  for(i in 1:length(all_ids)){
    output_dir<-paste0("output/SPUE_all_sites_upcov_seed_",ii)
    # The same Rscript command is repeated 11 times within a single job.
    command<-rep(paste0("Rscript /datasets/work/LW_TVD_MDBA_WORK/8_Working/7_Shaun/data_backup/kim079/model_optimisation_framework_v2/scripts/SPUE_generic_sites_seeds_upcov.r ",
                        "\"",all_ids[i],"\" ",output_dir),11)
    batch.file.lines<-c(batch.file.initial.lines,command)
    batch_fn<-paste0(batch.write.dir,"/",all_ids[i],"_seed_",ii)
    all_batch_names<-c(all_batch_names,basename(batch_fn))
    batch.file.lines<-gsub(remove_for_linux,"",batch.file.lines)
    batch.file.lines<-gsub(replace_this,replace_with,batch.file.lines)
    writeLines(batch.file.lines,batch_fn)
  }
}
batch_runner_fn<-paste0(batch.write.dir,"/batch_runner.sh")
all_cluster_lines<-c(paste("dos2unix",all_batch_names),paste("sbatch",all_batch_names))
writeLines(all_cluster_lines,batch_runner_fn)
# batch_cancel<-paste("scancel -u kim079 -n",all_job_names)
# writeLines(batch_cancel,paste0(batch.file.write.dir,"/kill_all_jobs"))
|
###################################################
### code chunk number 1: RUNFIRST
###################################################
# Setup chunk: load the package, narrow console output to fit the typeset
# page, blank the prompts so echoed code can be copied verbatim, and snapshot
# the graphics parameters so later chunks can restore them with par(op).
library(MARSS)
options(width = 60)
options(prompt = " ", continue = " ")
op <- par(no.readonly = TRUE)
|
/inst/userguide/figures/CS5--RUNFIRST.R
|
permissive
|
nwfsc-timeseries/MARSS
|
R
| false
| false
| 243
|
r
|
###################################################
### code chunk number 1: RUNFIRST
###################################################
# Setup chunk: load the package, narrow console output to fit the typeset
# page, blank the prompts so echoed code can be copied verbatim, and snapshot
# the graphics parameters so later chunks can restore them with par(op).
library(MARSS)
options(width = 60)
options(prompt = " ", continue = " ")
op <- par(no.readonly = TRUE)
|
##chapter 4 -- exploratory data analysis checklist on US EPA 2017 data
library(readr)
#read csv data
ozone = read_csv('US EPA data 2017.csv')
#replace space as . for columns name (make syntactically valid names)
names(ozone) <- make.names(names(ozone))
#check numbers of rows and columns
nrow(ozone)
ncol(ozone)
#check detail of the data set
str(ozone)
#look at top and bottom of the data
head(ozone)
tail(ozone)
#counting elements for a column
table(ozone$Sample.Duration)
#use dplyr to look into data
library(dplyr)
filter(ozone, Sample.Duration == "24 HOUR") %>%
  select(State.Name, County.Name, Arithmetic.Mean)
#convert the data to dataframe
filter(ozone, Sample.Duration == "24 HOUR") %>%
  select(State.Name, County.Name, Arithmetic.Mean) %>%
  as.data.frame
#find unique records
select(ozone, State.Name) %>% unique %>% nrow
unique(ozone$State.Name)
#summarise data: mean measurement per state/county, highest first
ranking <- group_by(ozone, State.Name, County.Name) %>%
  summarize(ozone = mean(Arithmetic.Mean)) %>%
  as.data.frame %>% arrange(desc(ozone))
|
/code_portfolio/week2/EDA_checlist.R
|
no_license
|
lydiakan310/analytic
|
R
| false
| false
| 938
|
r
|
##chapter 4 -- exploratory data analysis checklist on US EPA 2017 data
library(readr)
#read csv data
ozone = read_csv('US EPA data 2017.csv')
#replace space as . for columns name (make syntactically valid names)
names(ozone) <- make.names(names(ozone))
#check numbers of rows and columns
nrow(ozone)
ncol(ozone)
#check detail of the data set
str(ozone)
#look at top and bottom of the data
head(ozone)
tail(ozone)
#counting elements for a column
table(ozone$Sample.Duration)
#use dplyr to look into data
library(dplyr)
filter(ozone, Sample.Duration == "24 HOUR") %>%
  select(State.Name, County.Name, Arithmetic.Mean)
#convert the data to dataframe
filter(ozone, Sample.Duration == "24 HOUR") %>%
  select(State.Name, County.Name, Arithmetic.Mean) %>%
  as.data.frame
#find unique records
select(ozone, State.Name) %>% unique %>% nrow
unique(ozone$State.Name)
#summarise data: mean measurement per state/county, highest first
ranking <- group_by(ozone, State.Name, County.Name) %>%
  summarize(ozone = mean(Arithmetic.Mean)) %>%
  as.data.frame %>% arrange(desc(ozone))
|
# CART trees similar & slightly worse than log reg
# Much easier to understand
# Selects only significant variables
# Template for fitting, plotting, and predicting with a CART model.
# NOTE(review): `targetfield`, `inputfields`, `dataset`, and `test` are
# placeholders -- substitute real column names and data frames before
# running; this script does not execute as written.
library(rpart)
library(rpart.plot)
# Regression Tree
# Remove method = "class" for numerical regression output
# add cp or minbucket to adjust tree
CARTmodel = rpart(targetfield ~ inputfields, data=dataset, method = "class")
# Plot tree and see summary
prp(CARTmodel)
summary(CARTmodel)
# predict
# remove type = "class" to generate:
# probabilities if model's method = class
# numbers if model's method was not defined
pred <- predict(CARTmodel, newdata = test, type = "class")
# Confusion matrix: actual classes (rows) vs predicted classes (columns).
predtable <- table(test$targetfield,pred)
|
/Common Scripts/CART_trees.R
|
no_license
|
umarmf/AnalyticsEdgeAssignments
|
R
| false
| false
| 640
|
r
|
# CART trees similar & slightly worse than log reg
# Much easier to understand
# Selects only significant variables
# Template for fitting, plotting, and predicting with a CART model.
# NOTE(review): `targetfield`, `inputfields`, `dataset`, and `test` are
# placeholders -- substitute real column names and data frames before
# running; this script does not execute as written.
library(rpart)
library(rpart.plot)
# Regression Tree
# Remove method = "class" for numerical regression output
# add cp or minbucket to adjust tree
CARTmodel = rpart(targetfield ~ inputfields, data=dataset, method = "class")
# Plot tree and see summary
prp(CARTmodel)
summary(CARTmodel)
# predict
# remove type = "class" to generate:
# probabilities if model's method = class
# numbers if model's method was not defined
pred <- predict(CARTmodel, newdata = test, type = "class")
# Confusion matrix: actual classes (rows) vs predicted classes (columns).
predtable <- table(test$targetfield,pred)
|
#' recodeTable Function
#'
#' Writes a "recode table" skeleton CSV for a data frame: one row per level
#' of every factor column, with an empty \code{variableValueRecode} column
#' for the analyst to fill in.
#'
#' @param df A data.frame; only its factor columns are enumerated.
#' @param path Directory in which \code{recodeTable.csv} is written.
#' @keywords recode
#' @export
#' @examples
#' recodeTable(data.frame(g = factor(c("a", "b"))), tempdir())
recodeTable <- function(df, path) {
  # column name to camel case
  names(df) <- rapportools::tocamel(tolower(names(df)), upper = FALSE)
  # summary stats (computed for interactive inspection; not written out)
  sumStats <- psych::describe(df)
  sumStats$varname <- rownames(sumStats)
  names(sumStats)[names(sumStats) == "vars"] <- "varindex"
  sumStats <- sumStats[, c("varname", "varindex", "n", "mean", "sd", "min", "median", "max", "range")]
  # variable name/type overview (also not written out)
  column <- data.frame(
    names(df), sapply(df, class)
  )
  names(column) <- c("variableName", "variableTypeOriginal")
  row.names(column) <- seq_along(df)
  column$variableTypeNew <- NA
  # Accumulator for one recode row per factor level.
  recodeMaster <- data.frame(
    variableIndex = character(),
    variableName = character(),
    variableValueOriginal = character(),
    variableValueRecode = character(),
    stringsAsFactors = FALSE
  )
  for (i in seq_along(df)) {
    varName <- names(df)[i]
    if (is.factor(df[[varName]])) {
      # BUG FIX: the original paired n = nlevels() with unique(), but
      # unique() returns only the *observed* values; when a level is
      # unobserved the lengths differ and data.frame() fails with
      # "arguments imply differing number of rows". levels() enumerates
      # every declared level, which is also what a recode table needs.
      lvls <- levels(df[[varName]])
      n <- length(lvls)
      recode <- data.frame(
        variableIndex = rep(as.character(i), n),
        variableName = rep(varName, n),
        variableValueOriginal = lvls,
        variableValueRecode = rep("", n),
        stringsAsFactors = FALSE
      )
      recodeMaster <- rbind(recodeMaster, recode)
    }
  }
  # paste0() has no `sep` argument (the original's sep="" was silently
  # concatenated as another string); file.path() builds the path correctly.
  write.csv(recodeMaster, file.path(path, "recodeTable.csv"), row.names = FALSE)
}
|
/R/recodeTable.R
|
no_license
|
yubinx/dataPrepAid
|
R
| false
| false
| 1,497
|
r
|
#' recodeTable Function
#'
#' Writes a "recode table" skeleton CSV for a data frame: one row per level
#' of every factor column, with an empty \code{variableValueRecode} column
#' for the analyst to fill in.
#'
#' @param df A data.frame; only its factor columns are enumerated.
#' @param path Directory in which \code{recodeTable.csv} is written.
#' @keywords recode
#' @export
#' @examples
#' recodeTable(data.frame(g = factor(c("a", "b"))), tempdir())
recodeTable <- function(df, path) {
  # column name to camel case
  names(df) <- rapportools::tocamel(tolower(names(df)), upper = FALSE)
  # summary stats (computed for interactive inspection; not written out)
  sumStats <- psych::describe(df)
  sumStats$varname <- rownames(sumStats)
  names(sumStats)[names(sumStats) == "vars"] <- "varindex"
  sumStats <- sumStats[, c("varname", "varindex", "n", "mean", "sd", "min", "median", "max", "range")]
  # variable name/type overview (also not written out)
  column <- data.frame(
    names(df), sapply(df, class)
  )
  names(column) <- c("variableName", "variableTypeOriginal")
  row.names(column) <- seq_along(df)
  column$variableTypeNew <- NA
  # Accumulator for one recode row per factor level.
  recodeMaster <- data.frame(
    variableIndex = character(),
    variableName = character(),
    variableValueOriginal = character(),
    variableValueRecode = character(),
    stringsAsFactors = FALSE
  )
  for (i in seq_along(df)) {
    varName <- names(df)[i]
    if (is.factor(df[[varName]])) {
      # BUG FIX: the original paired n = nlevels() with unique(), but
      # unique() returns only the *observed* values; when a level is
      # unobserved the lengths differ and data.frame() fails with
      # "arguments imply differing number of rows". levels() enumerates
      # every declared level, which is also what a recode table needs.
      lvls <- levels(df[[varName]])
      n <- length(lvls)
      recode <- data.frame(
        variableIndex = rep(as.character(i), n),
        variableName = rep(varName, n),
        variableValueOriginal = lvls,
        variableValueRecode = rep("", n),
        stringsAsFactors = FALSE
      )
      recodeMaster <- rbind(recodeMaster, recode)
    }
  }
  # paste0() has no `sep` argument (the original's sep="" was silently
  # concatenated as another string); file.path() builds the path correctly.
  write.csv(recodeMaster, file.path(path, "recodeTable.csv"), row.names = FALSE)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.