content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assign_parameters.R
\name{assign_parameters.storage}
\alias{assign_parameters.storage}
\title{conversion helper}
\usage{
\method{assign_parameters}{storage}(x, infiltration = NULL,
subcatchment = NULL, subcatchment_typologies = NULL,
conduit_material = NULL, junction_parameters = NULL)
}
\description{
conversion helper
}
\keyword{internal}
| /man/assign_parameters.storage.Rd | no_license | SumathyS/swmmr | R | false | true | 424 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assign_parameters.R
\name{assign_parameters.storage}
\alias{assign_parameters.storage}
\title{conversion helper}
\usage{
\method{assign_parameters}{storage}(x, infiltration = NULL,
subcatchment = NULL, subcatchment_typologies = NULL,
conduit_material = NULL, junction_parameters = NULL)
}
\description{
conversion helper
}
\keyword{internal}
|
# firstscript.R -- fetch current weather for Warszawa from the OpenWeatherMap
# REST API and flatten the JSON reply into a one-row data frame.

# Install the two dependencies only when missing; the original script
# unconditionally re-downloaded both packages on every run.
if (!requireNamespace("httr", quietly = TRUE)) install.packages("httr")
if (!requireNamespace("jsonlite", quietly = TRUE)) install.packages("jsonlite")
library(httr)
library(jsonlite)

print("hello")  # debug trace kept from the original script

# SECURITY(review): the API key is hard-coded in the URL; prefer reading it
# from an environment variable, e.g. Sys.getenv("OWM_API_KEY").
endpoint <- "https://api.openweathermap.org/data/2.5/weather?q=Warszawa&appid=1765994b51ed366c506d5dc0d0b07b77"

getWeather <- GET(endpoint)                           # HTTP GET request
weatherText <- content(getWeather, "text")            # raw JSON body as text
weatherJson <- fromJSON(weatherText, flatten = TRUE)  # nested JSON -> flat list
weatherDF <- as.data.frame(weatherJson)               # one-row data frame
View(weatherDF)  # NOTE: View() only works in interactive sessions (e.g. RStudio)
| /firstscript.R | no_license | bartw21/rzajecia1 | R | false | false | 399 | r | install.packages("httr")
install.packages("jsonlite")
library(httr)
library(jsonlite)
#require()
print("hello")
endpoint <- "https://api.openweathermap.org/data/2.5/weather?q=Warszawa&appid=1765994b51ed366c506d5dc0d0b07b77"
getWeather <- GET(endpoint)
weatherText<- content(getWeather,"text")
weatherJson<- fromJSON(weatherText,flatten = TRUE)
weatherDF<-as.data.frame(weatherJson)
View(weatherDF)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dimension_reduction.R
\name{isomds}
\alias{isomds}
\title{Convenience function to run isoMDS on a SingleCellExperiment object.}
\usage{
isomds(object, exprs_values, features = NULL, method = "spearman", ...)
}
\arguments{
\item{object}{A SingleCellExperiment object.}
\item{exprs_values}{String indicating which assay contains the data that should be used to perform isoMDS.}
\item{features}{A character vector (of feature names), a logical vector or numeric vector (of indices) specifying the features to use for isoMDS. The default of NULL will use all features.}
\item{method}{A character string specifying the method to be used to calculate a dissimilarity structure using WGCNA::cor.}
\item{...}{Additional parameters to be passed on to MASS::isoMDS.}
}
\value{
A matrix with the k-dimensional embedding.
}
\description{
Convenience function to run isoMDS on a SingleCellExperiment object.
}
| /man/isomds.Rd | permissive | jenzopr/singlecellutils | R | false | true | 980 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dimension_reduction.R
\name{isomds}
\alias{isomds}
\title{Convenience function to run isoMDS on a SingleCellExperiment object.}
\usage{
isomds(object, exprs_values, features = NULL, method = "spearman", ...)
}
\arguments{
\item{object}{A SingleCellExperiment object.}
\item{exprs_values}{String indicating which assay contains the data that should be used to perform isoMDS.}
\item{features}{A character vector (of feature names), a logical vector or numeric vector (of indices) specifying the features to use for isoMDS. The default of NULL will use all features.}
\item{method}{A character string specifying the method to be used to calculate a dissimilarity structure using WGCNA::cor.}
\item{...}{Additional parameters to be passed on to MASS::isoMDS.}
}
\value{
A matrix with the k-dimensional embedding.
}
\description{
Convenience function to run isoMDS on a SingleCellExperiment object.
}
|
library(shiny)
fillmap <- function(map, figtitle, y, n.col, bk = "e", cuts, legendtxt = "",
                    leg.loc = "bottomright", leg.cex = 1.5, main.cex = 1.5,
                    main.line = -2, leg.horiz = FALSE, map.lty = 1) {
  # Draw a grey-scale choropleth of y over a polygon map, with a class legend.
  #
  # map       : object with a plot() method accepting col=/lty= (e.g. an sp
  #             SpatialPolygons* object), one polygon per element of y.
  # figtitle  : plot title.
  # y         : numeric vector of values to shade.
  # n.col     : number of shading classes.
  # bk        : break selection: "q" quantiles, "e" equal width (default),
  #             "c" user-supplied `cuts`.
  # cuts      : vector of n.col+1 break points, required when bk == "c".
  # legendtxt : "" = build interval labels automatically; NA = no legend;
  #             otherwise a character vector of n.col labels.
  #
  # NOTE(review): the guards below compare min(y)/max(y) against themselves,
  # so they can never trigger; they likely were meant to compare y against a
  # separate reference vector. Left unchanged to preserve behavior.
  if (bk == "q") {
    if (min(y) < min(y) | max(y) > max(y)) {
      print("The minimum or maximum values of y fall outside of those for y")
    } else {
      p <- seq(0, 1, length = n.col + 1)
      br <- round(quantile(y, probs = p), 2)
    }
  }
  if (bk == "e") {
    if (min(y) < min(y) | max(y) > max(y)) {
      print("The minimum or maximum values of y fall outside of those for y")
    } else {
      br <- round(seq(min(y), max(y), length = n.col + 1), 6)
    }
  }
  if (bk == "c") {
    if (length(cuts) != (n.col + 1)) {
      # Was `cat(...); break` -- `break` outside a loop is a runtime error in
      # R, so the function aborted with a confusing message; stop() reports
      # the real problem.
      stop("Cut off and color categories do not match.")
    } else {
      br <- cuts
    }
  }
  # Shading from darkest (lowest class) to light grey; pure white is avoided
  # so lakes / missing polygons remain distinguishable.
  shading <- gray(rev(0:(n.col - 1) / (n.col - 1)))
  y.grp <- findInterval(y, vec = br, rightmost.closed = TRUE, all.inside = TRUE)
  y.shad <- shading[y.grp]
  plot(map, col = y.shad, axes = FALSE, lty = map.lty)
  title(main = figtitle, cex.main = main.cex, line = main.line)
  br <- round(br, 2)
  if (is.na(legendtxt[1])) {
    print("No legend specified")  # fixed typo in the original message
  } else if (legendtxt[1] == "") {
    # Build "[a,b)" style interval labels from the break points, last
    # interval closed on both sides.
    leg.txt <- paste("[", br[n.col], ",", br[n.col + 1], "]", sep = "")
    for (j in (n.col - 1):1) {
      leg.txt <- append(leg.txt, paste("[", br[j], ",", br[j + 1], ")", sep = ""))
    }
    leg.txt <- rev(leg.txt)
    legend(leg.loc, legend = leg.txt, fill = shading, cex = leg.cex,
           ncol = 1, bty = "n", horiz = leg.horiz)
  } else if (length(legendtxt) != n.col) {
    stop("Length of legendtxt must equal n.col")
  } else {
    leg.txt <- legendtxt
    legend(leg.loc, legend = leg.txt, fill = shading, cex = leg.cex,
           ncol = 1, bty = "n", horiz = leg.horiz)
  }
}
fillmap2<-function(map, figtitle, y , leg.loc="beside", y.scl=NULL,
main.cex=1.5,main.line=0,map.lty=1,leg.rnd=0,
leg.cex=1){
# Draw a viridis-coloured choropleth of y over `map`, with a continuous
# colour-bar legend drawn either beside or below the map.
#
# map      : object with a plot() method accepting col=/lty= (e.g. an sp
#            SpatialPolygons* object), one polygon per element of y.
# figtitle : plot title.
# y        : numeric vector of values to shade.
# leg.loc  : "beside" (vertical bar on the right) or "below" (horizontal bar).
# y.scl    : optional extra values merged into the colour scale so several
#            maps (e.g. one per year) share a common scale.
# leg.rnd  : digits used to round the legend labels.
# leg.cex  : legend label size.
#
# One colour per unique value over c(y, y.scl); reversed viridis so higher
# values get darker colours (white is avoided, to distinguish lakes).
y.uq=sort(unique(c(y,y.scl)))
cols<-viridis(length(y.uq),direction=-1)
shading=y
for (i in 1:length(y)){
shading[i]<-cols[which(y.uq==y[i])]
}
par(mar=c(0,0,2,0))
# Split the device into a map panel and a legend panel.
if (leg.loc=="beside"){
layout(matrix(1:2,ncol=2),width=c(.8,.2))
} else
if (leg.loc=="below"){
layout(matrix(1:2,nrow=2),height=c(.6,.4))
} else (print("leg.loc options are below or beside"))
plot(map,col=shading,axes=F, lty=map.lty)
title(main=figtitle,cex.main=main.cex,line=main.line)
par(mar=c(5, 4, 4, 2) + 0.1)
# Empty plot used as a canvas for the raster colour bar and its labels.
plot(c(0,2),c(0,1),type = 'n', axes = F,xlab = '', ylab = '', main = '')
# Pick 5 evenly spaced colours and recover the data value of each one to
# use as legend tick labels.
cols.5=cols[seq(1,length(y.uq),length.out=5)]
lab.5=cols.5
for (i in 1:5){lab.5[i]=y.uq[which(cols==cols.5[i])[1]]}
lab.5=round(as.numeric(lab.5),leg.rnd)
par(mar=c(0,0,0,0))
if (leg.loc=="beside"){
legend_image <- as.raster(matrix(cols, ncol=1))
text(x=1.6,
y = seq(0,length(y.uq),length.out=5)/length(y.uq),
labels = rev(lab.5), cex=leg.cex)
rasterImage(legend_image, 0, 0, 1,1)
} else{
legend_image <- as.raster(matrix(cols, nrow=1))
text(y=-0.25,
x = seq(0,length(y.uq),length.out=5)/(length(y.uq)*.5),
labels = lab.5, cex=leg.cex)
rasterImage(legend_image, 0, 0, 2,1)
}
}
library(INLA)
#library(fillmap)
library(rgdal)
library(spdep)
library(maptools)
library(corrplot)
library(visdat)
library(viridis)
# Load the INLA model results, the arrest data, and the census-tract
# shapefile. Paths are built with file.path() so they work on every OS --
# the original escaped-backslash paths ("data\\...") only resolve on Windows,
# and Shiny apps are usually deployed on Linux.
d.inla = read.csv(file.path("data", "d.INLA.csv"))
data = read.csv(file.path("data", "data1.csv"))
NHtracts = readOGR(file.path("data", "NHtracts", "NHtracts.shp"))
# Shiny UI: a sidebar with the year slider (animatable) and two radio-button
# groups selecting the data series and the adjustment; the main panel shows
# the rendered map (plus placeholder text/table outputs).
ui = (fluidPage(
titlePanel("2010-2018 WPD Arrest Data"),
sidebarLayout(
sidebarPanel(
# Year slider; animate= loops through 2010-2018 every 500 ms.
sliderInput("year", label="Year",
min=2010,max=2018,value=2010,sep="",animate=animationOptions(interval=500,loop=TRUE)),
# Choice values are matched verbatim in server's renderPlot branches.
radioButtons("data",label="Data:",c("Total Arrests","White Only Arrests","Black Only Arrests")),
radioButtons("adj",label="Data Adjustment:",c("None","As a Percent of the Population","As a Percent of Total Arrests",
"Standardized Incidence Ratio","Poisson Regression")),
br(),
div(img(src = "Handcuffs.jpg", height = 70, width = 150), style="text-align: center;"),
br(),
div(p(a("Cape Fear Collective",
href = "https://capefearcollective.org/")), style="text-align: center;")
),
# Main panel: "map" is rendered by the server; "text" and "table" outputs
# are declared but not populated by the visible server code.
mainPanel(
textOutput("text"),
plotOutput("map"),
tableOutput("table"))
)))
server = function(input, output) {
  # Render the choropleth for the selected year / series / adjustment.
  # Every branch of the original code shared the same call pattern:
  #   fillmap2(NHtracts, title, <full vector subset to the chosen year>,
  #            ..., y.scl = <full vector>)
  # where y.scl fixes the colour scale across years so the slider animation
  # is comparable. The per-combination vector is now chosen via switch().
  output$map = renderPlot({
    if (input$adj == 'Poisson Regression') {
      # Fitted values from the INLA Poisson models, exponentiated to the
      # response scale: res = total, res2 = white-only, res1 = black-only.
      # (Fixes the original code, which scaled the black-only map with
      # exp(d.inla$res) instead of exp(d.inla$res1).)
      y.all = switch(input$data,
        'Total Arrests'      = exp(d.inla$res),
        'White Only Arrests' = exp(d.inla$res2),
        'Black Only Arrests' = exp(d.inla$res1))
    } else {
      # Column of `data` holding the selected adjustment/series combination.
      col.name = switch(input$adj,
        'None' = switch(input$data,
          'Total Arrests'      = 'arrests_total',
          'White Only Arrests' = 'arrests_W',
          'Black Only Arrests' = 'arrests_B'),
        'As a Percent of Total Arrests' = switch(input$data,
          'Total Arrests'      = 't_ar_pct',
          'White Only Arrests' = 'w_ar_pct',
          'Black Only Arrests' = 'b_ar_pct'),
        'As a Percent of the Population' = switch(input$data,
          'Total Arrests'      = 'ttt_ar_pct',
          'White Only Arrests' = 'ww_ar_pct',
          'Black Only Arrests' = 'bb_ar_pct'),
        'Standardized Incidence Ratio' = switch(input$data,
          'Total Arrests'      = 'sir_tot',
          'White Only Arrests' = 'sir_w',
          'Black Only Arrests' = 'sir_b'))
      y.all = data[[col.name]]
    }
    fillmap2(NHtracts, paste(input$year, input$data),
             y.all[which(data$year == input$year)],
             map.lty = 0, leg.rnd = 2, leg.loc = 'below', y.scl = y.all)
  })
}
shinyApp(ui = ui, server = server) | /App.R | no_license | sethticles/UNCW_Arrests | R | false | false | 8,404 | r | library(shiny)
fillmap<-function(map, figtitle, y , n.col, bk="e", cuts,legendtxt="",
leg.loc="bottomright",leg.cex=1.5,main.cex=1.5,main.line=-2,leg.horiz=F,map.lty=1){
if(bk=="q"){if (min(y)<min(y) | max(y)>max(y)){
print("The minimum or maximum values of y fall outside of those for y")
} else {p <- seq(0,1, length=n.col+1)
br <- round(quantile(y, probs=p),2)}}
if(bk=="e"){if (min(y)<min(y) | max(y)>max(y)){
print("The minimum or maximum values of y fall outside of those for y")
} else {br <- round(seq(min(y), max(y), length=n.col+1),6)}}
if(bk=="c"){if (length(cuts)!= (n.col+1)) {cat("Cut off and color categories
do not match. ", "\n")
break} else {br <- cuts} }
# 0: dark 1: light light Current shading ranges from darkest to light gray white (to distinguish with lakes).
shading<-gray(rev(0:(n.col-1)/(n.col-1)))
#shading<-hsv(.6,alpha=0:(n.col-1)/(n.col-1))
y.grp<-findInterval(y, vec=br, rightmost.closed = TRUE, all.inside = TRUE)
y.shad<-shading[y.grp]
plot(map,col=y.shad,axes=F, lty=map.lty)
title(main=figtitle,cex.main=main.cex,line=main.line)
br<-round(br, 2)
if (is.na(legendtxt[1])){print("No legend specifed")
} else if (legendtxt[1]==""){
leg.txt<-paste("[",br[n.col],",",br[n.col+1],"]",sep="")
for(j in (n.col-1):1){
leg.txt<-append(leg.txt,paste("[",br[j],",",br[j+1],")",sep="")) }
leg.txt<-rev(leg.txt)
legend(leg.loc,legend=leg.txt,fill=shading,cex=leg.cex,ncol=1,bty="n",
horiz=leg.horiz)
} else if (length(legendtxt) != n.col){cat("Length of lengendtxt must equal
n.col", "\n")
break
} else {leg.txt<-legendtxt
legend(leg.loc,legend=leg.txt,fill=shading,cex=leg.cex,ncol=1,bty="n",
horiz=leg.horiz)}
}
fillmap2<-function(map, figtitle, y , leg.loc="beside", y.scl=NULL,
main.cex=1.5,main.line=0,map.lty=1,leg.rnd=0,
leg.cex=1){
# 0: dark 1: light light Current shading ranges from darkest to light gray white (to distinguish with lakes).
y.uq=sort(unique(c(y,y.scl)))
cols<-viridis(length(y.uq),direction=-1)
shading=y
for (i in 1:length(y)){
shading[i]<-cols[which(y.uq==y[i])]
}
par(mar=c(0,0,2,0))
if (leg.loc=="beside"){
layout(matrix(1:2,ncol=2),width=c(.8,.2))
} else
if (leg.loc=="below"){
layout(matrix(1:2,nrow=2),height=c(.6,.4))
} else (print("leg.loc options are below or beside"))
plot(map,col=shading,axes=F, lty=map.lty)
title(main=figtitle,cex.main=main.cex,line=main.line)
par(mar=c(5, 4, 4, 2) + 0.1)
plot(c(0,2),c(0,1),type = 'n', axes = F,xlab = '', ylab = '', main = '')
cols.5=cols[seq(1,length(y.uq),length.out=5)]
lab.5=cols.5
for (i in 1:5){lab.5[i]=y.uq[which(cols==cols.5[i])[1]]}
lab.5=round(as.numeric(lab.5),leg.rnd)
par(mar=c(0,0,0,0))
if (leg.loc=="beside"){
legend_image <- as.raster(matrix(cols, ncol=1))
text(x=1.6,
y = seq(0,length(y.uq),length.out=5)/length(y.uq),
labels = rev(lab.5), cex=leg.cex)
rasterImage(legend_image, 0, 0, 1,1)
} else{
legend_image <- as.raster(matrix(cols, nrow=1))
text(y=-0.25,
x = seq(0,length(y.uq),length.out=5)/(length(y.uq)*.5),
labels = lab.5, cex=leg.cex)
rasterImage(legend_image, 0, 0, 2,1)
}
}
library(INLA)
#library(fillmap)
library(rgdal)
library(spdep)
library(maptools)
library(corrplot)
library(visdat)
library(viridis)
d.inla = read.csv("data\\d.INLA.csv")
data = read.csv("data\\data1.csv")
NHtracts = readOGR("data\\NHtracts\\NHtracts.shp")
ui = (fluidPage(
titlePanel("2010-2018 WPD Arrest Data"),
sidebarLayout(
sidebarPanel(
sliderInput("year", label="Year",
min=2010,max=2018,value=2010,sep="",animate=animationOptions(interval=500,loop=TRUE)),
radioButtons("data",label="Data:",c("Total Arrests","White Only Arrests","Black Only Arrests")),
radioButtons("adj",label="Data Adjustment:",c("None","As a Percent of the Population","As a Percent of Total Arrests",
"Standardized Incidence Ratio","Poisson Regression")),
br(),
div(img(src = "Handcuffs.jpg", height = 70, width = 150), style="text-align: center;"),
br(),
div(p(a("Cape Fear Collective",
href = "https://capefearcollective.org/")), style="text-align: center;")
),
mainPanel(
textOutput("text"),
plotOutput("map"),
tableOutput("table"))
)))
server = function(input,output){
output$map = renderPlot({
if(input$adj=='None'){
if(input$data=='Total Arrests'){
fillmap2(NHtracts, paste(input$year,input$data),data$arrests_total[which(data$year == input$year)], map.lty = 0,leg.rnd = 2,leg.loc = 'below', y.scl = data$arrests_total)
}
if(input$data=='White Only Arrests'){
fillmap2(NHtracts, paste(input$year,input$data),data$arrests_W[which(data$year == input$year)], map.lty = 0,leg.rnd = 2,leg.loc = 'below', y.scl = data$arrests_W)
}
if(input$data=='Black Only Arrests'){
fillmap2(NHtracts, paste(input$year,input$data),data$arrests_B[which(data$year == input$year)], map.lty = 0,leg.rnd = 2,leg.loc = 'below', y.scl = data$arrests_B)
}
}
if(input$adj=='As a Percent of Total Arrests'){
if(input$data=='Total Arrests'){
fillmap2(NHtracts, paste(input$year,input$data),data$t_ar_pct[which(data$year == input$year)], map.lty = 0,leg.rnd = 2,leg.loc = 'below', y.scl = data$t_ar_pct)
}
if(input$data=='White Only Arrests'){
fillmap2(NHtracts, paste(input$year,input$data),data$w_ar_pct[which(data$year == input$year)], map.lty = 0,leg.rnd = 2,leg.loc = 'below', y.scl = data$w_ar_pct)
}
if(input$data=='Black Only Arrests'){
fillmap2(NHtracts, paste(input$year,input$data),data$b_ar_pct[which(data$year == input$year)], map.lty = 0,leg.rnd = 2,leg.loc = 'below', y.scl = data$b_ar_pct)
}
}
if(input$adj=='As a Percent of the Population'){
if(input$data=='Total Arrests'){
fillmap2(NHtracts, paste(input$year,input$data),data$ttt_ar_pct[which(data$year == input$year)], map.lty = 0,leg.rnd = 2,leg.loc = 'below', y.scl = data$ttt_ar_pct)
}
if(input$data=='White Only Arrests'){
fillmap2(NHtracts, paste(input$year,input$data),data$ww_ar_pct[which(data$year == input$year)], map.lty = 0,leg.rnd = 2,leg.loc = 'below', y.scl = data$ww_ar_pct)
}
if(input$data=='Black Only Arrests'){
fillmap2(NHtracts, paste(input$year,input$data),data$bb_ar_pct[which(data$year == input$year)], map.lty = 0,leg.rnd = 2,leg.loc = 'below', y.scl = data$bb_ar_pct)
}
}
if(input$adj=='Standardized Incidence Ratio'){
if(input$data=='Total Arrests'){
fillmap2(NHtracts, paste(input$year,input$data),data$sir_tot[which(data$year == input$year)], map.lty = 0,leg.rnd = 2,leg.loc = 'below', y.scl = data$sir_tot)
}
if(input$data=='White Only Arrests'){
fillmap2(NHtracts, paste(input$year,input$data),data$sir_w[which(data$year == input$year)], map.lty = 0,leg.rnd = 2,leg.loc = 'below', y.scl = data$sir_w)
}
if(input$data=='Black Only Arrests'){
fillmap2(NHtracts, paste(input$year,input$data),data$sir_b[which(data$year == input$year)], map.lty = 0,leg.rnd = 2,leg.loc = 'below', y.scl = data$sir_b)
}
}
if(input$adj=='Poisson Regression'){
if(input$data=='Total Arrests'){
fillmap2(NHtracts, paste(input$year,input$data),exp(d.inla$res[which(data$year == input$year)]), map.lty = 0,leg.rnd = 2,leg.loc = 'below', y.scl = exp(d.inla$res))
}
if(input$data=='White Only Arrests'){
fillmap2(NHtracts, paste(input$year,input$data),exp(d.inla$res2[which(data$year == input$year)]), map.lty = 0,leg.rnd = 2,leg.loc = 'below', y.scl = exp(d.inla$res2))
}
if(input$data=='Black Only Arrests'){
fillmap2(NHtracts, paste(input$year,input$data),exp(d.inla$res1[which(data$year == input$year)]), map.lty = 0,leg.rnd = 2,leg.loc = 'below', y.scl = exp(d.inla$res))
}
}
}
)
}
shinyApp(ui = ui, server = server) |
# lib.r ProbMetab version="1.0.0"
# Author: Misharl Monsoor ABIMS TEAM mmonsoor@sb-roscoff.fr
# Contributors: Yann Guitton and Jean-francois Martin
##Main probmetab function launch by the Galaxy ProbMetab wrapper
probmetab = function(xa, xaP, xaN, variableMetadata, variableMetadataP, variableMetadataN, listArguments){
# Main ProbMetab workflow launched by the Galaxy ProbMetab wrapper.
#
# xa                : CAMERA xsAnnotate object (single-mode acquisition)
# xaP, xaN          : xsAnnotate objects for positive / negative mode
#                     (two-mode acquisition)
# variableMetadata  : variable metadata table, single-mode acquisition
# variableMetadataP : variable metadata table, positive mode
# variableMetadataN : variable metadata table, negative mode
# listArguments     : named list of wrapper options (mode_acquisition,
#                     inputs_mode, kegg_db, ppm_tol, allowMiss, html, prob,
#                     opt, corths, corprob, pcorprob, ...)
#
# Returns list(ansConn = classification result, corList = correlations) and,
# as side effects, writes TSV outputs (variableMetadata*.tsv,
# CombineMolIon.tsv, Analysis_Report.tsv, sif.tsv) and optionally zips the
# HTML report figures.
##ONE MODE ACQUISITION##
if(listArguments[["mode_acquisition"]]=="one") {
comb=NULL
#Get the polarity from xa object
polarity=xa@polarity
#SNR option
# load() is expected to define `xset` (a pre-fill-in xcmsSet) in this scope.
if ("xsetnofill" %in% names(listArguments)) {
load(listArguments[["xsetnofill"]])
xsetnofill=xset
}
else{
xsetnofill=NULL
}
#Exclude samples
if ("toexclude" %in% names(listArguments)) {
toexclude=listArguments[["toexclude"]]
}
else {
toexclude=NULL
}
ionAnnot=get.annot(xa, polarity=polarity, allowMiss=listArguments[["allowMiss"]],xset=xsetnofill,toexclude=toexclude)
comb=NULL
}
##TWO MODES ACQUISITION##
#Mode annotatediffreport
else if(listArguments[["inputs_mode"]]=="two"){
##Prepare the objects that will be used for the get.annot function
comb=1
xsetPnofill=NULL
xsetNnofill=NULL
# TODO: re-enable (left disabled upstream)
#if ("xsetPnofill" %in% names(listArguments)) {
# load(listArguments[["xsetPnofill"]])
# xsetPnofill=xset
#}
#if ("xsetNnofill" %in% names(listArguments)) {
# load(listArguments[["xsetNnofill"]])
# xsetNnofill=xset
#}
# include CAMERA non-annotated compounds, and snr retrieval
# comb 2+ - used on Table 1
ionAnnotP2plus = get.annot(xaP, allowMiss=listArguments[["allowMiss"]], xset=xsetPnofill,toexclude=listArguments[["toexclude"]])
ionAnnotN2plus = get.annot(xaN, polarity="negative", allowMiss=listArguments[["allowMiss"]], xset=xsetNnofill,toexclude=listArguments[["toexclude"]])
ionAnnot = combineMolIon(ionAnnotP2plus, ionAnnotN2plus)
# Diagnostic counts printed to the log: molecular ions flagged 1 vs 0 in
# the combined table.
print(sum(ionAnnot$molIon[,3]==1))
print(sum(ionAnnot$molIon[,3]==0))
write.table(ionAnnot[1], sep="\t", quote=FALSE, row.names=FALSE, file="CombineMolIon.tsv")
#Merge variableMetadata Negative and positive acquisitions mode
#Mode combinexsannos TODO: bug with tables produced by combinexsannos
#else {
#load(listArguments[["image_combinexsannos"]])
#image_combinexsannos=cAnnot
##Prepare the objects that will be used for the combineMolIon function
#load(listArguments[["image_pos"]])
#image_pos=xa
#ionAnnot=combineMolIon(peaklist=cAnnot, cameraobj=image_pos, polarity="pos")
#}
}
##DATABASE MATCHING##
if (listArguments[["kegg_db"]]=="KEGG"){
DB=build.database.kegg(orgID = NULL)
}
else{
# NOTE(review): `<<-` writes table_list to the global environment; a local
# `table_list <- list()` would avoid the side effect -- confirm before changing.
table_list <<- NULL
ids=strsplit(listArguments[["kegg_db"]],",")
ids=ids[[1]]
# One organism id -> single KEGG query; several comma-separated ids ->
# query each and deduplicate the row-bound result.
if(length(ids)>1){
for(i in 1:length(ids)){
table_list[[i]] <- build.database.kegg(ids[i])
}
db_table=do.call("rbind",table_list)
DB=unique(db_table)
}
else{
DB=build.database.kegg(listArguments[["kegg_db"]])
}
}
# Match measured exact masses against KEGG compound masses (not M+H or M-H)
reactionM = create.reactionM(DB, ionAnnot, ppm.tol=listArguments[["ppm_tol"]])
##PROBABILITY RANKING##
# number of masses with candidates inside the fixed mass window
# and masses with more than one candidate
# (diagnostic expressions only; their values are discarded)
length(unique(reactionM[reactionM[,"id"]!="unknown",1]))
sum(table(reactionM[reactionM[,"id"]!="unknown",1])>1)
#if (listArguments[["useIso"]]){
#BUG TODO
# Calculate the ratio between observed and theoretical isotopic patterns.
# If you don't have an assessment of carbon offset to carbon number prediction
# skip this step and use the reactionM as input to weigthM function.
#isoPatt < incorporate.isotopes(comb2plus, reactionM, , samp=12:23, DB=DB)
# calculate the likelihood of each mass to compound assignment using mass accuracy,and isotopic pattern, when present
#wl < weightM(isoPatt,intervals=seq(0,1000,by=500), offset=c(3.115712, 3.434146, 2.350798))
#isoPatt=incorporate.isotopes(ionAnnot, reactionM,comb=comb,var=listArguments[["var"]],DB=DB)
#wl = weightM(reactionM, useIso=true)
#}
#else {
#wl = weightM(reactionM, useIso=FALSE)
#}
wl =weightM(reactionM, useIso=FALSE)
w = design.connection(reactionM)
# Probability calculations
# Gibbs sampling (5000 iterations) over the mass x compound weight matrix.
x = 1:ncol(wl$wm)
y = 1:nrow(wl$wm)
conn = gibbs.samp(x, y, 5000, w, wl$wm)
ansConn = export.class.table(conn, reactionM, ionAnnot, DB=DB,html=listArguments[["html"]],filename="AnalysisExample",prob=listArguments[["prob"]])
if(listArguments[["html"]]){
#Zip the EICS plot
system(paste('zip -rq "Analysis_Report.zip" "AnalysisExample_fig"'))
}
# calculate the correlations and partial correlations and cross reference them with reactions
mw=which(w==1,arr.ind=TRUE)
#reac2cor function : Use the intensity of putative molecules in repeated samples to calculate correlations and partial
#correlation in a user defined threshold of false discovery rate for significance testing. After the
#correlation test the function also overlays significant correlations with all putative reactions between
#two masses.
#It generates a list of estimated correlations and reactions.
corList=reac2cor(mw,ansConn$classTable,listArguments[["opt"]],listArguments[["corths"]],listArguments[["corprob"]],listArguments[["pcorprob"]])
ans=list("ansConn"=ansConn,"corList"=corList)
#Generate the sif table for CytoScape
cytoscape_output(corList,ansConn)
#Execute the merge_probmetab function to merge the variableMetadata table and annotations from ProbMetab results
if(listArguments[["mode_acquisition"]]=="one") {
#Retrocompatibility with previous annotateDiffreport variableMetadata dataframe (must replace mzmed column by mz, and rtmed by rt)
names(variableMetadata)[names(variableMetadata)=="mzmed"] <- "mz"
names(variableMetadata)[names(variableMetadata)=="rtmed"] <- "rt"
variableM=merge_probmetab(variableMetadata, ansConn)
write.table(variableM, sep="\t", quote=FALSE, row.names=FALSE, file="variableMetadata.tsv")
} else if (listArguments[["mode_acquisition"]]=="two") {
#Retrocompatibility with previous annotateDiffreport variableMetadata dataframe (must replace mzmed column by mz, and rtmed by rt)
names(variableMetadataP)[names(variableMetadataP)=="mzmed"] <- "mz"
names(variableMetadataP)[names(variableMetadataP)=="rtmed"] <- "rt"
names(variableMetadataN)[names(variableMetadataN)=="mzmed"] <- "mz"
names(variableMetadataN)[names(variableMetadataN)=="rtmed"] <- "rt"
variableMP=merge_probmetab(variableMetadataP, ansConn)
write.table(variableMP, sep="\t", quote=FALSE, row.names=FALSE, file="variableMetadata_Positive.tsv")
variableMN=merge_probmetab(variableMetadataN, ansConn)
write.table(variableMN, sep="\t", quote=FALSE, row.names=FALSE, file="variableMetadata_Negative.tsv")
}
return(ans)
}
##Function that generates a siff table for CytoScape
cytoscape_output=function(corList,ansConn){
# Build Cytoscape import files from ProbMetab results.
#
# corList : list returned by reac2cor(); corList$signif.cor holds the
#           significant correlations (columns node1, cor, node2).
# ansConn : list returned by export.class.table(); ansConn$classTable holds
#           the mass-to-compound assignment table.
#
# Side effects: writes "Analysis_Report.tsv" (node attribute table) and
# "sif.tsv" (edge/SIF table). Returns the attribute matrix.
signif_cor=as.data.frame(corList$signif.cor)
classTable=as.data.frame(ansConn$classTable)
#Sif table
# Edge list: node1 <-> node2, weighted by the correlation estimate.
siff_table=cbind(signif_cor["node1"],signif_cor["cor"],signif_cor["node2"])
#attribute table output for Cytoscape
## START Code part from the export2cytoscape function of ProbMetab written by Ricardo R. Silva
# Rows with an empty first column are continuation rows of a multi-candidate
# mass: inherit the id/annotation fields from the previous row.
# NOTE(review): if the very first row were blank this would index row 0 --
# assumed not to happen in ProbMetab output; confirm before changing.
for (i in 1:nrow(classTable)) if (classTable[i, 1] == ""){
classTable[i, c(1, 4, 6, 7)] <- classTable[i - 1, c(1, 4, 6, 7)]
}
msel <- as.matrix(classTable[, 1:7])
# Move column 6 (the ion id) to the front and label it "Id".
msel <- cbind(msel[, 6], msel[,-6])
colnames(msel)[1] <- "Id"
msel[, 1] <- sub("^\\s+", "", msel[, 1])  # strip leading whitespace from ids
colnames(msel)[1] <- "Id"
ids <- unique(msel[, 1])
# One attribute row per unique ion id; multi-candidate fields are collapsed
# into "[a, b, ...]" strings.
attrMatrix <- matrix("", nrow = length(ids), ncol = ncol(msel)-1)
for (i in 1:length(ids)) {
attrMatrix[i, 1] <- unique(msel[msel[, 1] == ids[i],
2])
attrMatrix[i, 2] <- paste("[", paste(msel[msel[,
1] == ids[i], 3], collapse = ", "), "]", sep = "")
attrMatrix[i, 3] <- paste("[", paste(msel[msel[,
1] == ids[i], 4], collapse = ", "), "]", sep = "")
attrMatrix[i, 4] <- unique(msel[msel[, 1] == ids[i],
5])
attrMatrix[i, 5] <- paste("[", paste(msel[msel[,
1] == ids[i], 6], collapse = ", "), "]", sep = "")
attrMatrix[i, 6] <- unique(msel[msel[, 1] == ids[i],
7])
}
ids <- as.numeric(unique(msel[, 1]))
attrMatrix <- cbind(ids, attrMatrix)
colnames(attrMatrix) <- colnames(msel)
## END Code part from the export2cytoscape function of ProbMetab written by Ricardo R. Silva
write.table(attrMatrix, sep="\t", quote=FALSE, row.names=FALSE, file="Analysis_Report.tsv")
write.table(siff_table, sep="\t", quote=FALSE, row.names=FALSE, file="sif.tsv")
return(attrMatrix)
}
##Functions written by Jean-Francois Martin
deter_ioni <- function (aninfo, pm)
{
  # Determine the ionisation mode of one ion from a ProbMetab result file
  # (helper used by merge_probmetab).
  #
  # aninfo : character vector obtained by splitting the ProbMetab annotation
  #          on "#"; aninfo[1] = measured m/z, aninfo[4] = CAMERA
  #          adduct/fragment string (may be NA or absent).
  # pm     : proposed mass reported by ProbMetab.
  #
  # Returns "p" (positive), "n" (negative) or "u" (unknown).
  #
  # If |m/z - pm| rounds to 1 (one proton gained or lost), the sign of the
  # difference gives the mode. Otherwise the CAMERA annotation string is
  # searched for "]+"/"]2+"/"]3+" (positive) or "]-"/"]2-"/"]3-" (negative).
  #
  # Fix: default to "u". The original left `esi` undefined when the
  # annotation string matched neither charge pattern, so return(esi) failed
  # with "object 'esi' not found".
  esi <- "u"
  if (round(abs(as.numeric(aninfo[1]) - pm), 0) == 1) {
    if (as.numeric(aninfo[1]) - pm < 0) {esi <- "n"} else {esi <- "p"}
  } else
  if (!is.na(aninfo[4])) {
    anstr <- aninfo[4]
    if ((grepl("]+", anstr, fixed = T) == T) || (grepl("]2+", anstr, fixed = T) == T) || (grepl("]3+", anstr, fixed = T) == T)) { esi <- "p"}
    else
    if ((grepl("]-", anstr, fixed = T) == T) || (grepl("]2-", anstr, fixed = T) == T) || (grepl("]3-", anstr, fixed = T) == T)) { esi <- "n"}
  }
  return(esi)
}
merge_probmetab <- function(metaVar,ansConn) {
## Parse the ProbMetab result and merge its annotations into the initial
## variableMetadata table.
## inputs :
## metaVar : data.frame, the variableMetadata input of the probmetab function
## ansConn : list returned by export.class.table(); ansConn$classTable is
##           the ProbMetab result table
## output : data.frame, variableMetadata with ProbMetab annotations merged in
## Constant
## iannot : index of the annotation column in the ProbMetab result
iannot <- 4
## definition of an unique identification of ions mz with 3 decimals and rt(sec) with 1 decimal to avoid
## duplicate ion names in the diffreport result file
ions <- paste ("M",round(metaVar$mz,3),"T",round(metaVar$rt,1),sep="")
metaVar <- data.frame(ions,metaVar)
###### Result data.frame from ProbMetab result list
an_ini <- ansConn$classTable
## Drop "unknown" rows and keep only the id/annotation columns.
## NOTE(review): COLUMN SUBSCRIPTS HAVE TO BE CHECKED WITH DIFFERENT RESULT FILES
an <- an_ini[(an_ini[,2]!="unknown"),c(1,2,3,7)]
## initialisation of vectors receiving the result of parsing the annotation column (subscript iannot)
mz <- rep(0,dim(an)[1])
rt <- rep(0,dim(an)[1])
propmz <- rep(0,dim(an)[1])
ioni <- rep("u",dim(an)[1])
## parse the annotation column and define the ionisation mode.
## Rows with an empty first column are continuation rows (extra candidates
## for the previous mass): they inherit mz/rt/ionisation from the row above.
for (i in 1:dim(an)[1]) {
if (an[i,1] != "") {
info_mzrt <- unlist(strsplit(an[i,iannot],"#"))
propmz[i] <- as.numeric(an[i,1])
mz[i] <- as.numeric(info_mzrt[1])
rt[i] <- as.numeric(info_mzrt[2])
ioni[i] <- deter_ioni(info_mzrt,as.numeric(an[i,1]))
}
else {
propmz[i] <- as.numeric(propmz[i-1])
mz[i] <- as.numeric(mz[i-1])
rt[i] <- as.numeric(rt[i-1])
ioni[i] <- ioni[i-1]
}
}
## definition of an unique identification of ions : mz with 3 decimals and rt(sec) with 1 decimal
## The same as for the metadataVariable data.frame to match with.
ions <- paste ("M",round(mz,3),"T",round(rt,1),sep="")
an <- data.frame(ions,ioni,propmz,mz,rt,an)
## collapse the multiple ProbMetab annotations (one row per candidate in the
## initial result data.frame) onto a single row per ion, ";"-separated
li <- as.matrix(table(an$propmz))
li <- data.frame(dimnames(li)[1],li)
dimnames(li)[[2]][1] <- "propmz"
ions <- rep("u",dim(li)[1])
propmz <- rep(0,dim(li)[1])
mpc <- rep("c",dim(li)[1])
proba <- rep("p",dim(li)[1])
# NOTE(review): the loop counter is named `c`, same as base::c; calls to
# c() as a function still resolve correctly, but the name is best avoided.
c <- 0
while (c < dim(li)[1]) {
c <- c + 1
suban <- an[an$propmz==li[c,1],]
ions[c] <- as.character(suban[1,1])
propmz[c] <- as.numeric(suban[1,3])
mpc[c] <- paste(suban[,7],collapse=";")
proba[c] <- paste(as.character(suban[,8]),collapse=";")
}
## Creation of the data.frame with 1 row per ion
anc <- data.frame(ions,propmz,mpc,proba)
anc <- anc[order(anc[,1]),]
## Outer merge on the ion identifier, then drop the helper id column.
metaVarFinal <- merge(metaVar, anc, by.x=1, by.y=1, all.x=T, all.y=T)
metaVarFinal <- metaVarFinal[,-1]
#write.table(metaVarFinal,file="res.txt", sep="\t", row.names=F, quote=F)
return (metaVarFinal)
}
# Retro-compatibility with older versions of CAMERA's annotate():
# rebuild the variableMetadata data.frame from an xsAnnotate object.
#
# Args:
#   xa: a CAMERA xsAnnotate object (wraps an xcmsSet in the @xcmsSet slot).
# Returns:
#   A data.frame with one row per peak group, a leading "name" column and
#   without the per-sample intensity columns.
getVariableMetadata <- function(xa) {
    group_ids <- groupnames(xa@xcmsSet)
    peak_list <- cbind(group_ids, getPeaklist(xa))
    colnames(peak_list)[1] <- "name"
    # keep only the metadata columns, dropping per-sample intensities
    meta_cols <- !(colnames(peak_list) %in% sampnames(xa@xcmsSet))
    variableMetadata <- peak_list[, meta_cols]
    # re-assign as character (cbind above may have coerced the column)
    variableMetadata$name <- group_ids
    variableMetadata
}
# This function gets the raw file path(s) from the wrapper arguments.
#
# Args:
#   singlefile: value returned unchanged when no single-file argument is
#               present (callers usually pass NULL).
#   zipfile: value returned unchanged when no zip-file argument is present.
#   listArguments: named list of command-line arguments; may contain
#                  "zipfile"/"zipfilePositive"/"zipfileNegative" and/or
#                  "singlefile_galaxyPath[Positive|Negative]" plus matching
#                  "singlefile_sampleName[...]" comma-separated strings.
# Returns:
#   list(zipfile = <path or default>,
#        singlefile = <named list: sample name -> galaxy path, or default>)
getRawfilePathFromArguments <- function(singlefile, zipfile, listArguments) {
    # last matching key wins, mirroring the historical chain of ifs
    for (zip_key in c("zipfile", "zipfilePositive", "zipfileNegative")) {
        if (!is.null(listArguments[[zip_key]])) zipfile <- listArguments[[zip_key]]
    }
    # use explicit locals instead of exists(): exists() searches enclosing
    # environments and could pick up an unrelated global of the same name
    singlefile_galaxyPaths <- NULL
    singlefile_sampleNames <- NULL
    for (suffix in c("", "Positive", "Negative")) {
        path_key <- paste0("singlefile_galaxyPath", suffix)
        name_key <- paste0("singlefile_sampleName", suffix)
        if (!is.null(listArguments[[path_key]])) {
            singlefile_galaxyPaths <- listArguments[[path_key]]
            singlefile_sampleNames <- listArguments[[name_key]]
        }
    }
    if (!is.null(singlefile_galaxyPaths)) {
        paths <- unlist(strsplit(singlefile_galaxyPaths, ","))
        sample_names <- unlist(strsplit(singlefile_sampleNames, ","))
        # build a named list: sample name -> file path
        # (seq_along instead of seq(1:length(...)) so zero paths is safe)
        singlefile <- list()
        for (i in seq_along(paths)) {
            singlefile[[sample_names[i]]] <- paths[i]
        }
    }
    return(list(zipfile = zipfile, singlefile = singlefile))
}
# This function retrieves the raw files in the working directory:
# - if zipfile: unzip the archive, keeping its directory tree
# - if singlefile(s): create a symlink named after each sample
#
# Args:
#   singlefile: named list mapping sample name -> source path, or NULL.
#   zipfile: path to a zip archive, or NULL / "".
# Side effects: creates symlinks / extracted files in the current working
# directory and cat()s the root directory of the extracted archive.
retrieveRawfileInTheWorkingDirectory <- function(singlefile, zipfile) {
    # BUGFIX: the historical test was length("singlefile") > 0, which is the
    # length of a string literal and therefore always TRUE; test the list
    if (!is.null(singlefile) && (length(singlefile) > 0)) {
        for (singlefile_sampleName in names(singlefile)) {
            singlefile_galaxyPath <- singlefile[[singlefile_sampleName]]
            if (!file.exists(singlefile_galaxyPath)) {
                error_message <- paste("Cannot access the sample:", singlefile_sampleName, "located:", singlefile_galaxyPath, ". Please, contact your administrator ... if you have one!")
                print(error_message)
                stop(error_message)
            }
            file.symlink(singlefile_galaxyPath, singlefile_sampleName)
        }
        directory <- "."
    }
    if (!is.null(zipfile) && (zipfile != "")) {
        if (!file.exists(zipfile)) {
            error_message <- paste("Cannot access the Zip file:", zipfile, ". Please, contact your administrator ... if you have one!")
            print(error_message)
            stop(error_message)
        }
        # unzip with the system "unzip" tool so the directory tree is kept
        suppressWarnings(unzip(zipfile, unzip = "unzip"))
        # get the root directory name of the extracted archive
        filesInZip <- unzip(zipfile, list = TRUE)
        directories <- unique(unlist(lapply(strsplit(filesInZip$Name, "/"), function(x) x[1])))
        directories <- directories[!(directories %in% c("__MACOSX")) & file.info(directories)$isdir]
        directory <- "."
        if (length(directories) == 1) directory <- directories
        cat("files_root_directory\t", directory, "\n")
    }
}
| /galaxy/probmetab/lib.r | no_license | workflow4metabolomics/probmetab | R | false | false | 17,623 | r | # lib.r ProbMetab version="1.0.0"
# Author: Misharl Monsoor ABIMS TEAM mmonsoor@sb-roscoff.fr
# Contributors: Yann Guitton and Jean-francois Martin
##Main probmetab function launched by the Galaxy ProbMetab wrapper.
##
## Arguments:
##   xa                  : CAMERA xsAnnotate object (single acquisition mode)
##   xaP / xaN           : xsAnnotate objects for positive / negative mode
##   variableMetadata    : variable metadata for the single-mode run
##   variableMetadataP/N : variable metadata for the positive / negative run
##   listArguments       : named list of wrapper options (mode_acquisition,
##                         allowMiss, kegg_db, ppm_tol, html, prob, opt,
##                         corths, corprob, pcorprob, ...)
## Returns: list(ansConn = ProbMetab classification result,
##               corList = correlation/reaction overlay result).
## Side effects: writes several tsv outputs (and optionally an HTML report
## plus a zip of EIC figures) into the working directory.
probmetab = function(xa, xaP, xaN, variableMetadata, variableMetadataP, variableMetadataN, listArguments){
    ##ONE MODE ACQUISITION##
    if(listArguments[["mode_acquisition"]]=="one") {
        comb=NULL
        #Get the polarity from xa object
        polarity=xa@polarity
        #SNR option: load a fill-free xcmsSet image if one was provided
        if ("xsetnofill" %in% names(listArguments)) {
            load(listArguments[["xsetnofill"]])
            xsetnofill=xset
        }
        else{
            xsetnofill=NULL
        }
        #Exclude samples
        if ("toexclude" %in% names(listArguments)) {
            toexclude=listArguments[["toexclude"]]
        }
        else {
            toexclude=NULL
        }
        ionAnnot=get.annot(xa, polarity=polarity, allowMiss=listArguments[["allowMiss"]],xset=xsetnofill,toexclude=toexclude)
        comb=NULL
    }
    ##TWO MODES ACQUISITION##
    #Mode annotatediffreport
    # NOTE(review): this branch tests "inputs_mode" while the first tests
    # "mode_acquisition"; this mirrors the Galaxy wrapper's argument names —
    # confirm against the wrapper before changing
    else if(listArguments[["inputs_mode"]]=="two"){
        ##Prepare the objects that will be used for the get.annot function
        comb=1
        xsetPnofill=NULL
        xsetNnofill=NULL
        # TODO: to reactivate
        #if ("xsetPnofill" %in% names(listArguments)) {
        # load(listArguments[["xsetPnofill"]])
        # xsetPnofill=xset
        #}
        #if ("xsetNnofill" %in% names(listArguments)) {
        # load(listArguments[["xsetNnofill"]])
        # xsetNnofill=xset
        #}
        # include CAMERA non-annotated compounds, and snr retrieval
        # comb 2+ - used on Table 1
        ionAnnotP2plus = get.annot(xaP, allowMiss=listArguments[["allowMiss"]], xset=xsetPnofill,toexclude=listArguments[["toexclude"]])
        ionAnnotN2plus = get.annot(xaN, polarity="negative", allowMiss=listArguments[["allowMiss"]], xset=xsetNnofill,toexclude=listArguments[["toexclude"]])
        ionAnnot = combineMolIon(ionAnnotP2plus, ionAnnotN2plus)
        print(sum(ionAnnot$molIon[,3]==1))
        print(sum(ionAnnot$molIon[,3]==0))
        write.table(ionAnnot[1], sep="\t", quote=FALSE, row.names=FALSE, file="CombineMolIon.tsv")
        #Merge variableMetadata Negative and positive acquisitions mode
        #Mode combinexsannos TODO: bug with tables produced by combinexsannos
        #else {
        #load(listArguments[["image_combinexsannos"]])
        #image_combinexsannos=cAnnot
        ##Prepare the objects that will be used for the combineMolIon function
        #load(listArguments[["image_pos"]])
        #image_pos=xa
        #ionAnnot=combineMolIon(peaklist=cAnnot, cameraobj=image_pos, polarity="pos")
        #}
    }
    ##DATABASE MATCHING##
    if (listArguments[["kegg_db"]]=="KEGG"){
        DB=build.database.kegg(orgID = NULL)
    }
    else{
        # BUGFIX: was `table_list <<- NULL`, a superassignment that polluted
        # the global environment; a plain local list is all that is needed
        table_list <- list()
        ids=strsplit(listArguments[["kegg_db"]],",")
        ids=ids[[1]]
        if(length(ids)>1){
            # one KEGG organism table per requested id, then stack them
            for(i in seq_along(ids)){
                table_list[[i]] <- build.database.kegg(ids[i])
            }
            db_table=do.call("rbind",table_list)
            DB=unique(db_table)
        }
        else{
            DB=build.database.kegg(listArguments[["kegg_db"]])
        }
    }
    # Match measured exact masses against KEGG compound masses (not M+H or M-H)
    reactionM = create.reactionM(DB, ionAnnot, ppm.tol=listArguments[["ppm_tol"]])
    ##PROBABILITY RANKING##
    # number of masses with candidates inside the fixed mass window
    # and masses with more than one candidate
    # NOTE(review): the two expressions below are no-ops inside a function
    # (their values are neither printed nor stored); kept for documentation
    length(unique(reactionM[reactionM[,"id"]!="unknown",1]))
    sum(table(reactionM[reactionM[,"id"]!="unknown",1])>1)
    #if (listArguments[["useIso"]]){
    #BUG TODO
    # Calculate the ratio between observed and theoretical isotopic patterns.
    # If you don't have an assessment of carbon offset to carbon number prediction
    # skip this step and use the reactionM as input to weigthM function.
    #isoPatt < incorporate.isotopes(comb2plus, reactionM, , samp=12:23, DB=DB)
    # calculate the likelihood of each mass to compound assignment using mass accuracy,and isotopic pattern, when present
    #wl < weightM(isoPatt,intervals=seq(0,1000,by=500), offset=c(3.115712, 3.434146, 2.350798))
    #isoPatt=incorporate.isotopes(ionAnnot, reactionM,comb=comb,var=listArguments[["var"]],DB=DB)
    #wl = weightM(reactionM, useIso=true)
    #}
    #else {
    #wl = weightM(reactionM, useIso=FALSE)
    #}
    wl =weightM(reactionM, useIso=FALSE)
    w = design.connection(reactionM)
    # Probability calculations: Gibbs sampling over the mass/compound grid
    x = 1:ncol(wl$wm)
    y = 1:nrow(wl$wm)
    conn = gibbs.samp(x, y, 5000, w, wl$wm)
    ansConn = export.class.table(conn, reactionM, ionAnnot, DB=DB,html=listArguments[["html"]],filename="AnalysisExample",prob=listArguments[["prob"]])
    if(listArguments[["html"]]){
        #Zip the EICS plot
        system(paste('zip -rq "Analysis_Report.zip" "AnalysisExample_fig"'))
    }
    # calculate the correlations and partial correlations and cross reference them with reactions
    mw=which(w==1,arr.ind=TRUE)
    #reac2cor function : Use the intensity of putative molecules in repeated samples to calculate correlations and partial
    #correlation in a user defined threshold of false discovery rate for significance testing. After the
    #correlation test the function also overlays significant correlations with all putative reactions between
    #two masses.
    #It generates a list of estimated correlations and reactions.
    corList=reac2cor(mw,ansConn$classTable,listArguments[["opt"]],listArguments[["corths"]],listArguments[["corprob"]],listArguments[["pcorprob"]])
    ans=list("ansConn"=ansConn,"corList"=corList)
    #Generate the sif table for CytoScape
    cytoscape_output(corList,ansConn)
    #Execute the merge_probmetab function to merge the variableMetadata table and annotations from ProbMetab results
    if(listArguments[["mode_acquisition"]]=="one") {
        #Retrocompatibility with previous annotateDiffreport variableMetadata dataframe (must replace mzmed column by mz, and rtmed by rt)
        names(variableMetadata)[names(variableMetadata)=="mzmed"] <- "mz"
        names(variableMetadata)[names(variableMetadata)=="rtmed"] <- "rt"
        variableM=merge_probmetab(variableMetadata, ansConn)
        write.table(variableM, sep="\t", quote=FALSE, row.names=FALSE, file="variableMetadata.tsv")
    } else if (listArguments[["mode_acquisition"]]=="two") {
        #Retrocompatibility with previous annotateDiffreport variableMetadata dataframe (must replace mzmed column by mz, and rtmed by rt)
        names(variableMetadataP)[names(variableMetadataP)=="mzmed"] <- "mz"
        names(variableMetadataP)[names(variableMetadataP)=="rtmed"] <- "rt"
        names(variableMetadataN)[names(variableMetadataN)=="mzmed"] <- "mz"
        names(variableMetadataN)[names(variableMetadataN)=="rtmed"] <- "rt"
        variableMP=merge_probmetab(variableMetadataP, ansConn)
        write.table(variableMP, sep="\t", quote=FALSE, row.names=FALSE, file="variableMetadata_Positive.tsv")
        variableMN=merge_probmetab(variableMetadataN, ansConn)
        write.table(variableMN, sep="\t", quote=FALSE, row.names=FALSE, file="variableMetadata_Negative.tsv")
    }
    return(ans)
}
##Function that generates a sif table (plus a node attribute table) for CytoScape.
##
## Args:
##   corList: list as returned by reac2cor; must contain a data.frame
##            `signif.cor` with columns node1, cor and node2.
##   ansConn: list as returned by export.class.table; its `classTable`
##            data.frame is used (columns 1..7, column 6 = ion Id).
## Returns:
##   The node attribute matrix (one row per unique ion Id).
## Side effects: writes "Analysis_Report.tsv" (attributes) and "sif.tsv"
## (edges) into the working directory.
cytoscape_output = function(corList, ansConn) {
    signif_cor = as.data.frame(corList$signif.cor)
    classTable = as.data.frame(ansConn$classTable)
    # Sif (edge) table: node1 <cor> node2
    siff_table = cbind(signif_cor["node1"], signif_cor["cor"], signif_cor["node2"])
    #attribute table output for Cytoscape
    ## START Code part from the export2cytoscape function of ProbMetab written by Ricardo R. Silva
    # rows with an empty first column are extra annotations of the previous
    # ion: copy down Id / mass / retention info
    # (seq_len instead of 1:nrow so an empty table does not loop over c(1, 0))
    for (i in seq_len(nrow(classTable))) if (classTable[i, 1] == "") {
        classTable[i, c(1, 4, 6, 7)] <- classTable[i - 1, c(1, 4, 6, 7)]
    }
    msel <- as.matrix(classTable[, 1:7])
    # move the Id column (6) to the front
    msel <- cbind(msel[, 6], msel[, -6])
    colnames(msel)[1] <- "Id"
    # strip leading whitespace from the Ids
    msel[, 1] <- sub("^\\s+", "", msel[, 1])
    ids <- unique(msel[, 1])
    # one attribute row per unique Id; multi-valued columns are collapsed
    # to a "[v1, v2, ...]" string
    attrMatrix <- matrix("", nrow = length(ids), ncol = ncol(msel) - 1)
    for (i in seq_along(ids)) {
        sel <- msel[, 1] == ids[i]
        attrMatrix[i, 1] <- unique(msel[sel, 2])
        attrMatrix[i, 2] <- paste("[", paste(msel[sel, 3], collapse = ", "), "]", sep = "")
        attrMatrix[i, 3] <- paste("[", paste(msel[sel, 4], collapse = ", "), "]", sep = "")
        attrMatrix[i, 4] <- unique(msel[sel, 5])
        attrMatrix[i, 5] <- paste("[", paste(msel[sel, 6], collapse = ", "), "]", sep = "")
        attrMatrix[i, 6] <- unique(msel[sel, 7])
    }
    ids <- as.numeric(unique(msel[, 1]))
    attrMatrix <- cbind(ids, attrMatrix)
    colnames(attrMatrix) <- colnames(msel)
    ## END Code part from the export2cytoscape function of ProbMetab written by Ricardo R. Silva
    write.table(attrMatrix, sep = "\t", quote = FALSE, row.names = FALSE, file = "Analysis_Report.tsv")
    write.table(siff_table, sep = "\t", quote = FALSE, row.names = FALSE, file = "sif.tsv")
    return(attrMatrix)
}
##Functions written by Jean-Francois Martin
# Determine the ionisation mode of one ion in a ProbMetab result file
# (used by merge_probmetab).
#
# Args:
#   aninfo: character vector from strsplit(annotation, "#"):
#           [1] measured m/z, [2] rt, [4] (optional) CAMERA adduct/fragment
#           annotation string such as "[M+H]+" or "[2M-H]-".
#   pm: the ProbMetab proposed (neutral) mass for this ion.
# Returns:
#   "p" (positive), "n" (negative) or "u" (unknown).
deter_ioni <- function (aninfo, pm)
{
    mz <- as.numeric(aninfo[1])
    delta <- mz - pm
    # if m/z differs from the proposed mass by ~1 (one proton), the sign of
    # the difference gives the ionisation mode directly
    if (round(abs(delta), 0) == 1) {
        esi <- if (delta < 0) "n" else "p"
    } else if (!is.na(aninfo[4])) {
        # adduct or fragment detected (diff >> 1): look for a charge marker
        # in the CAMERA annotation string
        anstr <- aninfo[4]
        if (grepl("]+", anstr, fixed = TRUE) || grepl("]2+", anstr, fixed = TRUE) || grepl("]3+", anstr, fixed = TRUE)) {
            esi <- "p"
        } else if (grepl("]-", anstr, fixed = TRUE) || grepl("]2-", anstr, fixed = TRUE) || grepl("]3-", anstr, fixed = TRUE)) {
            esi <- "n"
        } else {
            # BUGFIX: the original left `esi` undefined (runtime error at
            # return) when the annotation matched neither polarity marker
            esi <- "u"
        }
    } else {
        esi <- "u"
    }
    return(esi)
}
merge_probmetab <- function(metaVar,ansConn) {
    ## Parse ProbMetab information result file and merge in variable_metaData initial file
    ## inputs :
    ## metaVar : data.frame of metadataVariable input of probmetab function (must contain mz and rt columns)
    ## ansConn : list returned by export.class.table; its classTable element is parsed here
    ## output : dataframe with Probmetab results merged with variableMetadata
    ## Constant
    ## iannot : index of the annotation column in the ProbMetab result (after the column subset below)
    iannot <- 4
    ## definition of an unique identification of ions mz with 3 decimals and rt(sec) with 1 decimal to avoid
    ## duplicate ions name in the diffreport result file
    ions <- paste ("M",round(metaVar$mz,3),"T",round(metaVar$rt,1),sep="")
    metaVar <- data.frame(ions,metaVar)
    ###### Result data.frame from ProbMetab result list
    an_ini <- ansConn$classTable
    ## Suppression of rows without mz and rt or unknown and columns of intensities
    ## COLUMNS SUBSCRIPTS HAVE TO BE CHECKED WITh DIFFERENT RESULTS FILES
    ## (positional assumption: col 1 = propmz, col 2 = compound, col 3 = proba, col 7 = "mz#rt#..." annotation)
    an <- an_ini[(an_ini[,2]!="unknown"),c(1,2,3,7)]
    ## initialisation of vectors receiving the result of the parse of the column annotation (subscript iannot)
    mz <- rep(0,dim(an)[1])
    rt <- rep(0,dim(an)[1])
    propmz <- rep(0,dim(an)[1])
    ioni <- rep("u",dim(an)[1])
    ## parse the column annotation and define ionisation mode
    ## NOTE(review): rows with an empty first column inherit the values of the
    ## previous row; this assumes the first row is never empty (i-1 would be 0) — confirm
    for (i in 1:dim(an)[1]) {
        if (an[i,1] != "") {
            info_mzrt <- unlist(strsplit(an[i,iannot],"#"))
            propmz[i] <- as.numeric(an[i,1])
            mz[i] <- as.numeric(info_mzrt[1])
            rt[i] <- as.numeric(info_mzrt[2])
            ioni[i] <- deter_ioni(info_mzrt,as.numeric(an[i,1]))
        }
        else {
            propmz[i] <- as.numeric(propmz[i-1])
            mz[i] <- as.numeric(mz[i-1])
            rt[i] <- as.numeric(rt[i-1])
            ioni[i] <- ioni[i-1]
        }
    }
    ## definition of an unique identification of ions : mz with 3 decimals and rt(sec) with 1 decimal
    ## The same as for the metadataVariable data.frame to match with.
    ions <- paste ("M",round(mz,3),"T",round(rt,1),sep="")
    an <- data.frame(ions,ioni,propmz,mz,rt,an)
    ## transposition of the different probmetab annotations which are in different rows in the initial result data.frame
    ## on only 1 row separated with a ";"
    ## li holds one row per distinct propmz value (counts come from table())
    li <- as.matrix(table(an$propmz))
    li <- data.frame(dimnames(li)[1],li)
    dimnames(li)[[2]][1] <- "propmz"
    ions <- rep("u",dim(li)[1])
    propmz <- rep(0,dim(li)[1])
    mpc <- rep("c",dim(li)[1])
    proba <- rep("p",dim(li)[1])
    c <- 0
    while (c < dim(li)[1]) {
        c <- c + 1
        ## all annotation rows belonging to this propmz; collapse compound
        ## names (col 7) and probabilities (col 8) with ";"
        suban <- an[an$propmz==li[c,1],]
        ions[c] <- as.character(suban[1,1])
        propmz[c] <- as.numeric(suban[1,3])
        mpc[c] <- paste(suban[,7],collapse=";")
        proba[c] <- paste(as.character(suban[,8]),collapse=";")
    }
    ## Creation of the data.frame with 1 row per ion
    anc <- data.frame(ions,propmz,mpc,proba)
    anc <- anc[order(anc[,1]),]
    ## outer merge on the ion identifier, then drop the helper id column
    metaVarFinal <- merge(metaVar, anc, by.x=1, by.y=1, all.x=T, all.y=T)
    metaVarFinal <- metaVarFinal[,-1]
    #write.table(metaVarFinal,file="res.txt", sep="\t", row.names=F, quote=F)
    return (metaVarFinal)
}
# Retro-compatibility with older versions of CAMERA's annotate():
# rebuild the variableMetadata data.frame from an xsAnnotate object.
#
# Args:
#   xa: a CAMERA xsAnnotate object (wraps an xcmsSet in the @xcmsSet slot).
# Returns:
#   A data.frame with one row per peak group, a leading "name" column and
#   without the per-sample intensity columns.
getVariableMetadata <- function(xa) {
    group_ids <- groupnames(xa@xcmsSet)
    peak_list <- cbind(group_ids, getPeaklist(xa))
    colnames(peak_list)[1] <- "name"
    # keep only the metadata columns, dropping per-sample intensities
    meta_cols <- !(colnames(peak_list) %in% sampnames(xa@xcmsSet))
    variableMetadata <- peak_list[, meta_cols]
    # re-assign as character (cbind above may have coerced the column)
    variableMetadata$name <- group_ids
    variableMetadata
}
# This function gets the raw file path(s) from the wrapper arguments.
#
# Args:
#   singlefile: value returned unchanged when no single-file argument is
#               present (callers usually pass NULL).
#   zipfile: value returned unchanged when no zip-file argument is present.
#   listArguments: named list of command-line arguments; may contain
#                  "zipfile"/"zipfilePositive"/"zipfileNegative" and/or
#                  "singlefile_galaxyPath[Positive|Negative]" plus matching
#                  "singlefile_sampleName[...]" comma-separated strings.
# Returns:
#   list(zipfile = <path or default>,
#        singlefile = <named list: sample name -> galaxy path, or default>)
getRawfilePathFromArguments <- function(singlefile, zipfile, listArguments) {
    # last matching key wins, mirroring the historical chain of ifs
    for (zip_key in c("zipfile", "zipfilePositive", "zipfileNegative")) {
        if (!is.null(listArguments[[zip_key]])) zipfile <- listArguments[[zip_key]]
    }
    # use explicit locals instead of exists(): exists() searches enclosing
    # environments and could pick up an unrelated global of the same name
    singlefile_galaxyPaths <- NULL
    singlefile_sampleNames <- NULL
    for (suffix in c("", "Positive", "Negative")) {
        path_key <- paste0("singlefile_galaxyPath", suffix)
        name_key <- paste0("singlefile_sampleName", suffix)
        if (!is.null(listArguments[[path_key]])) {
            singlefile_galaxyPaths <- listArguments[[path_key]]
            singlefile_sampleNames <- listArguments[[name_key]]
        }
    }
    if (!is.null(singlefile_galaxyPaths)) {
        paths <- unlist(strsplit(singlefile_galaxyPaths, ","))
        sample_names <- unlist(strsplit(singlefile_sampleNames, ","))
        # build a named list: sample name -> file path
        # (seq_along instead of seq(1:length(...)) so zero paths is safe)
        singlefile <- list()
        for (i in seq_along(paths)) {
            singlefile[[sample_names[i]]] <- paths[i]
        }
    }
    return(list(zipfile = zipfile, singlefile = singlefile))
}
# This function retrieves the raw files in the working directory:
# - if zipfile: unzip the archive, keeping its directory tree
# - if singlefile(s): create a symlink named after each sample
#
# Args:
#   singlefile: named list mapping sample name -> source path, or NULL.
#   zipfile: path to a zip archive, or NULL / "".
# Side effects: creates symlinks / extracted files in the current working
# directory and cat()s the root directory of the extracted archive.
retrieveRawfileInTheWorkingDirectory <- function(singlefile, zipfile) {
    # BUGFIX: the historical test was length("singlefile") > 0, which is the
    # length of a string literal and therefore always TRUE; test the list
    if (!is.null(singlefile) && (length(singlefile) > 0)) {
        for (singlefile_sampleName in names(singlefile)) {
            singlefile_galaxyPath <- singlefile[[singlefile_sampleName]]
            if (!file.exists(singlefile_galaxyPath)) {
                error_message <- paste("Cannot access the sample:", singlefile_sampleName, "located:", singlefile_galaxyPath, ". Please, contact your administrator ... if you have one!")
                print(error_message)
                stop(error_message)
            }
            file.symlink(singlefile_galaxyPath, singlefile_sampleName)
        }
        directory <- "."
    }
    if (!is.null(zipfile) && (zipfile != "")) {
        if (!file.exists(zipfile)) {
            error_message <- paste("Cannot access the Zip file:", zipfile, ". Please, contact your administrator ... if you have one!")
            print(error_message)
            stop(error_message)
        }
        # unzip with the system "unzip" tool so the directory tree is kept
        suppressWarnings(unzip(zipfile, unzip = "unzip"))
        # get the root directory name of the extracted archive
        filesInZip <- unzip(zipfile, list = TRUE)
        directories <- unique(unlist(lapply(strsplit(filesInZip$Name, "/"), function(x) x[1])))
        directories <- directories[!(directories %in% c("__MACOSX")) & file.info(directories)$isdir]
        directory <- "."
        if (length(directories) == 1) directory <- directories
        cat("files_root_directory\t", directory, "\n")
    }
}
|
# bind global variables
# (declares the non-standard-evaluation / ggplot2 aesthetic names used in
#  the plotting code below so R CMD check does not flag them as undefined)
utils::globalVariables(c("xn", "vld", "conf.low", "conf.high"))
#' @importFrom dplyr filter
# Plot estimated marginal means (least-squares means) of all significant
# two-way factor interactions of a fitted linear model.
#
# fit            : fitted lm; lmerMod / merModLmerTest fits are dispatched
#                  to sjp.emm.lmer.
# swapPredictors : swap which interaction term goes on the x-axis.
# plevel         : max. p-value an interaction coefficient must have to be
#                  plotted.
# The remaining arguments control titles, labels, colors, CI ribbons,
# value labels, axis limits/breaks, faceting and printing.
# Returns (invisibly) a c("sjPlot", "sjpemmint") structure with plot.list
# (ggplot objects) and data.list (the underlying data frames).
sjp.emm <- function(fit,
                    swapPredictors = FALSE,
                    plevel = 0.05,
                    title = NULL,
                    geom.colors = "Set1",
                    geom.size = 0.7,
                    axisTitle.x = NULL,
                    axisTitle.y = NULL,
                    axisLabels.x = NULL,
                    legendTitle = NULL,
                    legendLabels = NULL,
                    showValueLabels = FALSE,
                    valueLabel.digits = 2,
                    showCI = FALSE,
                    breakTitleAt = 50,
                    breakLegendTitleAt = 20,
                    breakLegendLabelsAt = 20,
                    y.offset = 0.07,
                    axisLimits.y = NULL,
                    gridBreaksAt = NULL,
                    facet.grid = FALSE,
                    printPlot = TRUE) {
  # --------------------------------------------------------
  # check default geom.size
  # --------------------------------------------------------
  if (is.null(geom.size)) geom.size = .7
  # ------------------------
  # check if suggested packages are available
  # ------------------------
  if (!requireNamespace("lsmeans", quietly = TRUE)) {
    stop("Package `lsmeans` needed for this function to work. Please install it.", call. = FALSE)
  }
  # BUGFIX: the original condition was
  #   any(class(fit) == "lmerMod" || any(class(fit) == "merModLmerTest"))
  # which applies `||` to the whole class() vector (an error for objects
  # with more than one class); parenthesize the two any() calls instead,
  # matching the correct form used in the dispatch check below
  if ((any(class(fit) == "lmerMod") || any(class(fit) == "merModLmerTest")) && !requireNamespace("lmerTest", quietly = TRUE)) {
    stop("Package `lmerTest` needed for this function to work. Please install it.", call. = FALSE)
  }
  # -----------------------------------------------------------
  # go to sub-function if class = lmerMod
  # -----------------------------------------------------------
  if (any(class(fit) == "lmerMod") || any(class(fit) == "merModLmerTest")) {
    return(sjp.emm.lmer(fit, swapPredictors, plevel, title, geom.colors, geom.size,
                        axisTitle.x, axisTitle.y, axisLabels.x, legendLabels,
                        showValueLabels, valueLabel.digits, showCI, breakTitleAt,
                        breakLegendLabelsAt, y.offset, axisLimits.y, gridBreaksAt,
                        facet.grid, printPlot))
  }
  # init vector that saves ggplot objects
  plotlist <- list()
  dflist <- list()
  # -----------------------------------------------------------
  # parameter check
  # -----------------------------------------------------------
  if (is.null(gridBreaksAt)) gridbreaks.y <- ggplot2::waiver()
  # --------------------------------------------------------
  # unlist labels
  # --------------------------------------------------------
  if (!is.null(legendLabels) && is.list(legendLabels)) legendLabels <- unlistlabels(legendLabels)
  if (!is.null(legendTitle) && is.list(legendTitle)) legendTitle <- unlist(legendTitle)
  # -----------------------------------------------------------
  # retrieve p-values, without intercept
  # -----------------------------------------------------------
  pval <- summary(fit)$coefficients[-1, 4]
  # -----------------------------------------------------------
  # find all significant interactions
  # we start looking for significant p-values beginning
  # with the first interaction, not the first single term!
  # thus, the starting point is first position after all single
  # predictor variables
  # -----------------------------------------------------------
  # save all term labels
  it <- attr(fit$terms, "term.labels")
  # save coefficients
  cf <- names(fit$coefficients[-1])
  # init counter
  it.nr <- 0
  it.pos <- c()
  it.names <- c()
  # check whether current term name contains a ":",
  # thus if it is an interaction term
  pos <- grep(":", it)
  # if yes...
  if (length(pos) > 0) it.names <- it[pos]
  # check whether current coefficient contains a ":",
  # thus if it is an interaction term
  pos <- grep(":", cf)
  # if yes...
  if (length(pos) > 0) {
    # ... set count of interactions
    it.nr <- length(pos)
    # ... and save position of coefficient in model
    it.pos <- pos
  }
  # check whether we have any interaction terms included at all
  if (it.nr == 0) {
    warning("No interaction term found in fitted model...", call. = FALSE)
    return(invisible(NULL))
  }
  # save names of interaction predictor variables into this object
  # but only those with a specific p-level
  intnames <- c()
  for (i in seq_along(it.pos)) {
    # treat missing p-values as "not significant"
    if (is.na(pval[it.pos[i]])) pval[it.pos[i]] <- 1
    if (pval[it.pos[i]] < plevel) {
      intnames <- c(intnames, cf[it.pos[i]])
    }
  }
  # check for any significant interactions, stop if nothing found
  if (is.null(intnames) || 0 == length(intnames)) {
    warning("No significant interactions found...", call. = FALSE)
    return(invisible(NULL))
  }
  # -----------------------------------------------------------
  # Now iterate all interaction terms from model
  # -----------------------------------------------------------
  interactionterms <- c()
  for (i in seq_along(it.names)) {
    # -----------------------------------------------------------
    # retrieve interaction terms
    # -----------------------------------------------------------
    terms <- unlist(strsplit(it.names[i], ":"))
    # -----------------------------------------------------------
    # check if both interaction terms are factors
    # -----------------------------------------------------------
    if (is.factor(fit$model[[terms[1]]]) && is.factor(fit$model[[terms[2]]])) {
      # -----------------------------------------------------------
      # Iterate all interactions on factor-level-basis from model
      # -----------------------------------------------------------
      for (cnt in seq_along(intnames)) {
        # -----------------------------------------------------------
        # first, retrieve and split interaction term so we know
        # the two predictor variables, or factor levels of the
        # interaction term
        # -----------------------------------------------------------
        lvls <- unlist(strsplit(intnames[cnt], ":"))
        # -----------------------------------------------------------
        # since we may have factors with more levels, the original
        # term labels differ from what we have as coefficient-
        # e.g., "ChickWeight$Diet", becomes "Diet1", "Diet2", etc.
        # to calculate marginal means, we only need "Diet". So here
        # we have to find, which terms match the significant coefficients
        # found, and use the term labels for ls means...
        # -----------------------------------------------------------
        if (grepl(terms[1], lvls[1], fixed = T) && grepl(terms[2], lvls[2], fixed = T)) {
          # we found a match
          interactionterms <- rbind(interactionterms, terms)
          # leave loop
          break
        }
      }
    } else {
      warning(sprintf("Both %s and %s need to be factors! Skipping these interaction terms...", terms[1], terms[2]), call. = F)
    }
  }
  # -----------------------------------------------------------
  # check if we have any valid interaction terms
  # for lsmeans function
  # BUGFIX: guard against interactionterms still being NULL (no term
  # matched above); nrow(NULL) would make the `if` fail at runtime
  # -----------------------------------------------------------
  if (!is.null(interactionterms) && nrow(interactionterms) > 0) {
    for (cnt in seq_len(nrow(interactionterms))) {
      # -----------------------------------------------------------
      # retrieve each pair of interaction terms
      # -----------------------------------------------------------
      term.pairs <- interactionterms[cnt, ]
      if (swapPredictors) term.pairs <- rev(term.pairs)
      # -----------------------------------------------------------
      # retrieve estimated marginal means
      # -----------------------------------------------------------
      emm <- summary(lsmeans::lsmeans.character(fit, term.pairs))
      # create data frame from lsmeans
      intdf <- data.frame(emm[2],
                          emm[3],
                          emm[1],
                          emm[6],
                          emm[7],
                          rep(valueLabel.digits, times = nrow(emm[1])))
      colnames(intdf) <- c("x", "y", "grp", "conf.low", "conf.high", "vld")
      # -----------------------------------------------------------
      # remove missings
      # -----------------------------------------------------------
      if (anyNA(intdf$y)) {
        # warn user
        warning("fitted model had estimates with missing values. Output may be incomplete.", call. = F)
        # remove missings
        intdf <- dplyr::filter(intdf, !is.na(y))
      }
      # -----------------------------------------------------------
      # convert df-values to numeric
      # -----------------------------------------------------------
      intdf$y <- as.numeric(as.character(intdf$y))
      # add numeric x for geom_line
      intdf$xn <- as.numeric(intdf$x)
      # ci to numeric, y-scale is continuous
      intdf$conf.low <- as.numeric(intdf$conf.low)
      intdf$conf.high <- as.numeric(intdf$conf.high)
      # order data frame
      intdf <- intdf[order(intdf$grp), ]
      # -----------------------------------------------------------
      # retrieve lowest and highest x and y position to determine
      # the scale limits
      # -----------------------------------------------------------
      if (is.null(axisLimits.y)) {
        lowerLim.y <- ifelse(isTRUE(showCI), floor(min(intdf$conf.low)), floor(min(intdf$y)))
        upperLim.y <- ifelse(isTRUE(showCI), ceiling(max(intdf$conf.high)), ceiling(max(intdf$y)))
      } else {
        lowerLim.y <- axisLimits.y[1]
        upperLim.y <- axisLimits.y[2]
      }
      # -----------------------------------------------------------
      # check whether user defined grid breaks / tick marks are used
      # -----------------------------------------------------------
      if (!is.null(gridBreaksAt)) {
        gridbreaks.y <- c(seq(lowerLim.y, upperLim.y, by = gridBreaksAt))
      }
      # -----------------------------------------------------------
      # prepare label and name from dependend variable
      # -----------------------------------------------------------
      # get response name, which is variable name
      response.name <- colnames(fit$model)[1]
      # get variable label attribute
      response.label <- sjmisc::get_label(fit$model[[1]], def.value = response.name)
      # -----------------------------------------------------------
      # prepare label for x-axix
      # -----------------------------------------------------------
      alx <- sjmisc::get_labels(fit$model[[term.pairs[2]]],
                                attr.only = F,
                                include.values = NULL,
                                include.non.labelled = T)
      # check if we have any
      if (is.null(alx)) alx <- term.pairs[2]
      # -----------------------------------------------------------
      # prepare plot title and axis titles
      # -----------------------------------------------------------
      if (is.null(title)) {
        labtitle <- paste0("Estimated marginal means of ", response.name,
                           " between ", term.pairs[2],
                           " and ", term.pairs[1])
      } else {
        labtitle <- title
      }
      # -----------------------------------------------------------
      # legend labels
      # -----------------------------------------------------------
      if (is.null(legendLabels)) {
        # try to get labels
        lLabels <- sjmisc::get_labels(fit$model[term.pairs[1]][, 1], attr.only = F)
        # if we still have no labels, get factor levels
        # BUGFIX: the original discarded the result of levels(); assign it
        if (is.null(lLabels)) lLabels <- levels(fit$model[term.pairs[1]][, 1])
      } else {
        lLabels <- legendLabels
      }
      # -----------------------------------------------------------
      # legend title
      # -----------------------------------------------------------
      if (is.null(legendTitle)) {
        lTitle <- term.pairs[1]
      } else {
        # set legend title for plot
        lTitle <- legendTitle
      }
      if (is.null(axisLabels.x)) axisLabels.x <- alx
      if (!is.null(axisTitle.x)) {
        labx <- axisTitle.x
      } else {
        labx <- term.pairs[2]
      }
      if (!is.null(axisTitle.y)) {
        laby <- axisTitle.y
      } else {
        laby <- response.label
      }
      # -----------------------------------------------------------
      # prepare annotation labels
      # -----------------------------------------------------------
      # wrap title
      labtitle <- sjmisc::word_wrap(labtitle, breakTitleAt)
      # wrap legend labels
      lLabels <- sjmisc::word_wrap(lLabels, breakLegendLabelsAt)
      # wrap legend title
      lTitle <- sjmisc::word_wrap(lTitle, breakLegendTitleAt)
      # -----------------------------------------------------------
      # prepare base plot of interactions
      # -----------------------------------------------------------
      baseplot <- ggplot(intdf)
      # -----------------------------------------------------------
      # Confidence intervals?
      # -----------------------------------------------------------
      if (showCI) baseplot <- baseplot +
          geom_ribbon(aes(x = xn, ymin = conf.low, ymax = conf.high, fill = grp), alpha = .3)
      # -----------------------------------------------------------
      # continue with plot. point and line layers above ribbon
      # -----------------------------------------------------------
      baseplot <- baseplot +
        geom_point(aes(x = x, y = y, colour = grp)) +
        geom_line(aes(x = xn, y = y, colour = grp), size = geom.size) +
        scale_x_discrete(labels = axisLabels.x)
      # ------------------------------------------------------------
      # plot value labels
      # ------------------------------------------------------------
      if (showValueLabels) {
        baseplot <- baseplot +
          geom_text(aes(label = round(y, vld), x = x, y = y),
                    nudge_y = y.offset,
                    show.legend = FALSE)
      }
      # ------------------------------------------------------------------------------------
      # build plot object with theme and labels
      # ------------------------------------------------------------------------------------
      baseplot <- baseplot +
        # set plot and axis titles
        labs(title = labtitle,
             x = labx,
             y = laby,
             colour = lTitle) +
        # set axis scale breaks
        scale_y_continuous(limits = c(lowerLim.y, upperLim.y), breaks = gridbreaks.y)
      # ---------------------------------------------------------
      # facet grid?
      # ---------------------------------------------------------
      if (facet.grid) baseplot <- baseplot + facet_grid( ~grp)
      # ---------------------------------------------------------
      # set geom colors
      # ---------------------------------------------------------
      baseplot <- sj.setGeomColors(baseplot, geom.colors, length(lLabels), TRUE, lLabels) + guides(fill = FALSE)
      # ---------------------------------------------------------
      # Check whether ggplot object should be returned or plotted
      # ---------------------------------------------------------
      if (printPlot) print(baseplot)
      # concatenate plot object
      plotlist[[length(plotlist) + 1]] <- baseplot
      dflist[[length(dflist) + 1]] <- intdf
    }
  }
  # -------------------------------------
  # return results
  # -------------------------------------
  invisible(structure(class = c("sjPlot", "sjpemmint"),
                      list(plot.list = plotlist,
                           data.list = dflist)))
}
#' @importFrom stats model.frame
# Workhorse for (linear) mixed effects models fitted with lme4 / lmerTest:
# plots estimated marginal means of all significant two-way factor
# interactions. Called by sjp.emm when 'fit' is a merMod object; returns
# (invisibly) a structure with the list of ggplot objects and the list of
# underlying data frames, mirroring the return value of sjp.emm.
sjp.emm.lmer <- function(fit, swapPredictors, plevel, title, geom.colors, geom.size, axisTitle.x,
                         axisTitle.y, axisLabels.x, legendLabels, showValueLabels,
                         valueLabel.digits, showCI, breakTitleAt, breakLegendLabelsAt,
                         y.offset, axisLimits.y, gridBreaksAt, facet.grid, printPlot) {
  # p-values and lsmeans for merMod objects require the 'lmerTest' package
  if ((inherits(fit, "lmerMod") || inherits(fit, "merModLmerTest")) && !requireNamespace("lmerTest", quietly = TRUE)) {
    stop("Package 'lmerTest' needed for this function to work. Please install it.", call. = FALSE)
  }
  # --------------------------------------------------------
  # check default geom.size
  # --------------------------------------------------------
  if (is.null(geom.size)) geom.size <- 0.7
  # init lists that collect ggplot objects and their data frames
  plotlist <- list()
  dflist <- list()
  # -----------------------------------------------------------
  # parameter check: default y-axis breaks, unless user-defined
  # grid breaks are computed further below
  # -----------------------------------------------------------
  if (is.null(gridBreaksAt)) gridbreaks.y <- ggplot2::waiver()
  # --------------------------------------------------------
  # unlist labels
  # --------------------------------------------------------
  if (!is.null(legendLabels) && is.list(legendLabels)) legendLabels <- unlistlabels(legendLabels)
  # -----------------------------------------------------------
  # get terms of fitted model
  # -----------------------------------------------------------
  preds <- attr(terms(fit), "term.labels")
  # interaction terms contain colons
  it.names <- c()
  # any predictors with colon?
  pos <- grep(":", preds)
  # if yes, we have our interaction terms
  if (length(pos) > 0) {
    it.names <- preds[pos]
  } else {
    warning("No interaction term found in fitted model...", call. = FALSE)
    return(invisible(NULL))
  }
  # -----------------------------------------------------------
  # find all significant interactions
  # we start looking for significant p-values beginning
  # with the first interaction, not the first single term!
  # thus, the starting point is first position after all single
  # predictor variables
  # -----------------------------------------------------------
  # get model summary
  fit.coef <- summary(fit)$coefficients
  # save coefficients
  cf <- rownames(fit.coef)
  # find first interaction terms
  pos <- grep(":", cf)
  # get all p-values
  pval <- get_lmerMod_pvalues(fit)[pos]
  # get significant interactions
  intnames <- cf[pos[which(pval < plevel)]]
  # check for any significant interactions, stop if nothing found
  if (is.null(intnames) || 0 == length(intnames)) {
    warning("No significant interactions found...", call. = FALSE)
    return(invisible(NULL))
  }
  # -----------------------------------------------------------
  # get model frame
  # -----------------------------------------------------------
  m_f <- stats::model.frame(fit)
  # -----------------------------------------------------------
  # Now iterate all interaction terms from model
  # -----------------------------------------------------------
  interactionterms <- c()
  for (i in seq_along(it.names)) {
    # -----------------------------------------------------------
    # retrieve interaction terms
    # -----------------------------------------------------------
    terms <- unlist(strsplit(it.names[i], ":"))
    # -----------------------------------------------------------
    # check if both interaction terms are factors
    # -----------------------------------------------------------
    if (is.factor(m_f[[terms[1]]]) && is.factor(m_f[[terms[2]]])) {
      # -----------------------------------------------------------
      # Iterate all interactions on factor-level-basis from model
      # -----------------------------------------------------------
      for (cnt in seq_along(intnames)) {
        # -----------------------------------------------------------
        # first, retrieve and split interaction term so we know
        # the two predictor variables, or factor levels of the
        # interaction term
        # -----------------------------------------------------------
        lvls <- unlist(strsplit(intnames[cnt], ":"))
        # -----------------------------------------------------------
        # since we may have factors with more levels, the original
        # term labels differ from what we have as coefficient-
        # e.g., "ChickWeight$Diet", becomes "Diet1", "Diet2", etc.
        # to calculate marginal means, we only need "Diet". So here
        # we have to find, which terms match the significant coefficients
        # found, and use the term labels for ls means...
        # -----------------------------------------------------------
        if (grepl(terms[1], lvls[1], fixed = TRUE) && grepl(terms[2], lvls[2], fixed = TRUE)) {
          # we found a match
          interactionterms <- rbind(interactionterms, terms)
          # leave loop
          break
        }
      }
    } else {
      warning(sprintf("Both %s and %s need to be factors! Skipping these interaction terms...", terms[1], terms[2]), call. = FALSE)
    }
  }
  # -----------------------------------------------------------
  # check if we have any valid interaction terms
  # for lsmeans function
  # -----------------------------------------------------------
  is.em <- suppressWarnings(sjmisc::is_empty(interactionterms))
  if (!is.em && nrow(interactionterms) > 0) {
    for (cnt in seq_len(nrow(interactionterms))) {
      # -----------------------------------------------------------
      # retrieve each pair of interaction terms
      # -----------------------------------------------------------
      term.pairs <- interactionterms[cnt, ]
      # -----------------------------------------------------------
      # retrieve estimated marginal means for all predictors of
      # the model, with various statistics in a data frame format
      # -----------------------------------------------------------
      emm.df <- lmerTest::lsmeans(fit, paste(term.pairs, collapse = ":"))[[1]]
      # swap predictors?
      if (swapPredictors) term.pairs <- rev(term.pairs)
      # -----------------------------------------------------------
      # get column indices of interaction terms, estimate and
      # confidence intervals. latter term in interaction is considered
      # as "within subject" (x-pos), first term is considered as
      # "between subjects" (group)
      # -----------------------------------------------------------
      emm.col <- c(which(colnames(emm.df) == term.pairs[2]),
                   which(colnames(emm.df) == "Estimate"),
                   which(colnames(emm.df) == term.pairs[1]),
                   which(colnames(emm.df) == "Lower CI"),
                   which(colnames(emm.df) == "Upper CI"))
      # -----------------------------------------------------------
      # create data frame from lsmeans
      # -----------------------------------------------------------
      intdf <- data.frame(emm.df[, emm.col],
                          rep(valueLabel.digits, times = nrow(emm.df)))
      colnames(intdf) <- c("x", "y", "grp", "conf.low", "conf.high", "vld")
      # -----------------------------------------------------------
      # convert df-values to numeric
      # -----------------------------------------------------------
      intdf$y <- as.numeric(as.character(intdf$y))
      # add numeric x for geom_line
      intdf$xn <- as.numeric(intdf$x)
      # ci to numeric, y-scale is continuous
      intdf$conf.low <- as.numeric(intdf$conf.low)
      intdf$conf.high <- as.numeric(intdf$conf.high)
      # order data frame
      intdf <- intdf[order(intdf$grp), ]
      # -----------------------------------------------------------
      # retrieve lowest and highest x and y position to determine
      # the scale limits
      # -----------------------------------------------------------
      if (is.null(axisLimits.y)) {
        lowerLim.y <- ifelse(isTRUE(showCI), floor(min(intdf$conf.low)), floor(min(intdf$y)))
        upperLim.y <- ifelse(isTRUE(showCI), ceiling(max(intdf$conf.high)), ceiling(max(intdf$y)))
      } else {
        lowerLim.y <- axisLimits.y[1]
        upperLim.y <- axisLimits.y[2]
      }
      # -----------------------------------------------------------
      # check whether user defined grid breaks / tick marks are used
      # -----------------------------------------------------------
      if (!is.null(gridBreaksAt)) {
        gridbreaks.y <- c(seq(lowerLim.y, upperLim.y, by = gridBreaksAt))
      }
      # -----------------------------------------------------------
      # prepare label and name from dependent variable
      # -----------------------------------------------------------
      # get response name, which is variable name
      response.name <- colnames(m_f)[1]
      # get variable label attribute
      response.label <- sjmisc::get_label(m_f[[1]], def.value = response.name)
      # -----------------------------------------------------------
      # prepare label for x-axis
      # -----------------------------------------------------------
      # get value label attribute
      alx <- sjmisc::get_labels(m_f[[term.pairs[2]]],
                                attr.only = FALSE,
                                include.values = NULL,
                                include.non.labelled = TRUE)
      # check if we have any
      if (is.null(alx)) alx <- term.pairs[2]
      # -----------------------------------------------------------
      # prepare plot title and axis titles
      # -----------------------------------------------------------
      if (is.null(title)) {
        labtitle <- paste0("Estimated marginal means of ", response.name,
                           " between ", term.pairs[2],
                           " and ", term.pairs[1])
      } else {
        labtitle <- title
      }
      if (is.null(legendLabels)) {
        # try to get labels
        lLabels <- sjmisc::get_labels(m_f[[term.pairs[1]]], attr.only = FALSE)
        # if we still have no labels, fall back to factor levels
        # (fix: result was discarded before, leaving lLabels NULL)
        if (is.null(lLabels)) lLabels <- levels(m_f[[term.pairs[1]]])
      } else {
        lLabels <- legendLabels
      }
      if (is.null(axisLabels.x)) axisLabels.x <- alx
      if (!is.null(axisTitle.x)) {
        labx <- axisTitle.x
      } else {
        labx <- term.pairs[2]
      }
      if (!is.null(axisTitle.y)) {
        laby <- axisTitle.y
      } else {
        laby <- response.label
      }
      # -----------------------------------------------------------
      # prepare annotation labels
      # -----------------------------------------------------------
      # wrap title(s)
      labtitle <- sjmisc::word_wrap(labtitle, breakTitleAt)
      labx <- sjmisc::word_wrap(labx, breakTitleAt)
      laby <- sjmisc::word_wrap(laby, breakTitleAt)
      # wrap legend labels
      lLabels <- sjmisc::word_wrap(lLabels, breakLegendLabelsAt)
      # -----------------------------------------------------------
      # prepare base plot of interactions
      # -----------------------------------------------------------
      baseplot <- ggplot(intdf)
      # -----------------------------------------------------------
      # Confidence intervals?
      # -----------------------------------------------------------
      if (showCI) baseplot <- baseplot +
          geom_ribbon(aes(x = xn, ymin = conf.low, ymax = conf.high, fill = grp), alpha = .3)
      # -----------------------------------------------------------
      # continue with plot. point and line layers above ribbon
      # -----------------------------------------------------------
      baseplot <- baseplot +
        geom_point(aes(x = x, y = y, colour = grp)) +
        geom_line(aes(x = xn, y = y, colour = grp), size = geom.size) +
        scale_x_discrete(labels = axisLabels.x)
      # ------------------------------------------------------------
      # plot value labels
      # ------------------------------------------------------------
      if (showValueLabels) {
        baseplot <- baseplot +
          geom_text(aes(label = round(y, vld), x = x, y = y),
                    nudge_y = y.offset,
                    show.legend = FALSE)
      }
      # ------------------------------------------------------------------------------------
      # build plot object with theme and labels
      # ------------------------------------------------------------------------------------
      baseplot <- baseplot +
        # set plot and axis titles
        labs(title = labtitle,
             x = labx,
             y = laby,
             colour = term.pairs[1]) +
        # set axis scale breaks
        scale_y_continuous(limits = c(lowerLim.y, upperLim.y), breaks = gridbreaks.y)
      # ---------------------------------------------------------
      # facet grid?
      # ---------------------------------------------------------
      if (facet.grid) baseplot <- baseplot + facet_grid( ~grp)
      # ---------------------------------------------------------
      # set geom colors
      # ---------------------------------------------------------
      baseplot <- sj.setGeomColors(baseplot, geom.colors, length(lLabels), TRUE, lLabels) + guides(fill = FALSE)
      # ---------------------------------------------------------
      # Check whether ggplot object should be returned or plotted
      # ---------------------------------------------------------
      if (printPlot) print(baseplot)
      # concatenate plot object
      plotlist[[length(plotlist) + 1]] <- baseplot
      dflist[[length(dflist) + 1]] <- intdf
    }
  }
  # -------------------------------------
  # return results
  # -------------------------------------
  invisible(structure(class = c("sjPlot", "sjpemmint"),
                      list(plot.list = plotlist,
                           data.list = dflist)))
}
utils::globalVariables(c("xn", "vld", "conf.low", "conf.high"))
#' @importFrom dplyr filter
# Plot estimated marginal means of significant two-way factor interactions
# of a fitted model. For merMod objects (lme4 / lmerTest), work is delegated
# to sjp.emm.lmer. Only interactions between two factors with a p-value
# below 'plevel' are plotted. Returns (invisibly) a structure of class
# c("sjPlot", "sjpemmint") with the list of ggplot objects ('plot.list')
# and the list of underlying data frames ('data.list').
sjp.emm <- function(fit,
                    swapPredictors = FALSE,
                    plevel = 0.05,
                    title = NULL,
                    geom.colors = "Set1",
                    geom.size = 0.7,
                    axisTitle.x = NULL,
                    axisTitle.y = NULL,
                    axisLabels.x = NULL,
                    legendTitle = NULL,
                    legendLabels = NULL,
                    showValueLabels = FALSE,
                    valueLabel.digits = 2,
                    showCI = FALSE,
                    breakTitleAt = 50,
                    breakLegendTitleAt = 20,
                    breakLegendLabelsAt = 20,
                    y.offset = 0.07,
                    axisLimits.y = NULL,
                    gridBreaksAt = NULL,
                    facet.grid = FALSE,
                    printPlot = TRUE) {
  # --------------------------------------------------------
  # check default geom.size
  # --------------------------------------------------------
  if (is.null(geom.size)) geom.size <- 0.7
  # ------------------------
  # check if suggested packages are available
  # ------------------------
  if (!requireNamespace("lsmeans", quietly = TRUE)) {
    stop("Package `lsmeans` needed for this function to work. Please install it.", call. = FALSE)
  }
  # fix: original condition had `||` inside any(), applying scalar `||`
  # to a length > 1 vector; use inherits() for a proper class check
  if ((inherits(fit, "lmerMod") || inherits(fit, "merModLmerTest")) && !requireNamespace("lmerTest", quietly = TRUE)) {
    stop("Package `lmerTest` needed for this function to work. Please install it.", call. = FALSE)
  }
  # -----------------------------------------------------------
  # go to sub-function if class = lmerMod
  # -----------------------------------------------------------
  if (inherits(fit, "lmerMod") || inherits(fit, "merModLmerTest")) {
    return(sjp.emm.lmer(fit, swapPredictors, plevel, title, geom.colors, geom.size,
                        axisTitle.x, axisTitle.y, axisLabels.x, legendLabels,
                        showValueLabels, valueLabel.digits, showCI, breakTitleAt,
                        breakLegendLabelsAt, y.offset, axisLimits.y, gridBreaksAt,
                        facet.grid, printPlot))
  }
  # init lists that collect ggplot objects and their data frames
  plotlist <- list()
  dflist <- list()
  # -----------------------------------------------------------
  # parameter check: default y-axis breaks, unless user-defined
  # grid breaks are computed further below
  # -----------------------------------------------------------
  if (is.null(gridBreaksAt)) gridbreaks.y <- ggplot2::waiver()
  # --------------------------------------------------------
  # unlist labels
  # --------------------------------------------------------
  if (!is.null(legendLabels) && is.list(legendLabels)) legendLabels <- unlistlabels(legendLabels)
  if (!is.null(legendTitle) && is.list(legendTitle)) legendTitle <- unlist(legendTitle)
  # -----------------------------------------------------------
  # retrieve p-values, without intercept
  # -----------------------------------------------------------
  pval <- summary(fit)$coefficients[-1, 4]
  # -----------------------------------------------------------
  # find all significant interactions
  # we start looking for significant p-values beginning
  # with the first interaction, not the first single term!
  # thus, the starting point is first position after all single
  # predictor variables
  # -----------------------------------------------------------
  # save all term labels
  it <- attr(fit$terms, "term.labels")
  # save coefficients
  cf <- names(fit$coefficients[-1])
  # init counter
  it.nr <- 0
  it.pos <- c()
  it.names <- c()
  # check whether current term name contains a ":",
  # thus if it is an interaction term
  pos <- grep(":", it)
  # if yes...
  if (length(pos) > 0) it.names <- it[pos]
  # check whether current coefficient contains a ":",
  # thus if it is an interaction term
  pos <- grep(":", cf)
  # if yes...
  if (length(pos) > 0) {
    # ... set count of interactions
    it.nr <- length(pos)
    # ... and save position of coefficient in model
    it.pos <- pos
  }
  # check whether we have any interaction terms included at all
  if (it.nr == 0) {
    warning("No interaction term found in fitted model...", call. = FALSE)
    return(invisible(NULL))
  }
  # save names of interaction predictor variables into this object
  # but only those with a specific p-level
  intnames <- c()
  for (i in seq_along(it.pos)) {
    # missing p-values count as non-significant
    if (is.na(pval[it.pos[i]])) pval[it.pos[i]] <- 1
    if (pval[it.pos[i]] < plevel) {
      intnames <- c(intnames, cf[it.pos[i]])
    }
  }
  # check for any significant interactions, stop if nothing found
  if (is.null(intnames) || 0 == length(intnames)) {
    warning("No significant interactions found...", call. = FALSE)
    return(invisible(NULL))
  }
  # -----------------------------------------------------------
  # Now iterate all interaction terms from model
  # -----------------------------------------------------------
  interactionterms <- c()
  for (i in seq_along(it.names)) {
    # -----------------------------------------------------------
    # retrieve interaction terms
    # -----------------------------------------------------------
    terms <- unlist(strsplit(it.names[i], ":"))
    # -----------------------------------------------------------
    # check if both interaction terms are factors
    # -----------------------------------------------------------
    if (is.factor(fit$model[[terms[1]]]) && is.factor(fit$model[[terms[2]]])) {
      # -----------------------------------------------------------
      # Iterate all interactions on factor-level-basis from model
      # -----------------------------------------------------------
      for (cnt in seq_along(intnames)) {
        # -----------------------------------------------------------
        # first, retrieve and split interaction term so we know
        # the two predictor variables, or factor levels of the
        # interaction term
        # -----------------------------------------------------------
        lvls <- unlist(strsplit(intnames[cnt], ":"))
        # -----------------------------------------------------------
        # since we may have factors with more levels, the original
        # term labels differ from what we have as coefficient-
        # e.g., "ChickWeight$Diet", becomes "Diet1", "Diet2", etc.
        # to calculate marginal means, we only need "Diet". So here
        # we have to find, which terms match the significant coefficients
        # found, and use the term labels for ls means...
        # -----------------------------------------------------------
        if (grepl(terms[1], lvls[1], fixed = TRUE) && grepl(terms[2], lvls[2], fixed = TRUE)) {
          # we found a match
          interactionterms <- rbind(interactionterms, terms)
          # leave loop
          break
        }
      }
    } else {
      warning(sprintf("Both %s and %s need to be factors! Skipping these interaction terms...", terms[1], terms[2]), call. = FALSE)
    }
  }
  # -----------------------------------------------------------
  # check if we have any valid interaction terms
  # for lsmeans function
  # -----------------------------------------------------------
  if (nrow(interactionterms) > 0) {
    for (cnt in seq_len(nrow(interactionterms))) {
      # -----------------------------------------------------------
      # retrieve each pair of interaction terms
      # -----------------------------------------------------------
      term.pairs <- interactionterms[cnt, ]
      if (swapPredictors) term.pairs <- rev(term.pairs)
      # -----------------------------------------------------------
      # retrieve estimated marginal means
      # -----------------------------------------------------------
      emm <- summary(lsmeans::lsmeans.character(fit, term.pairs))
      # create data frame from lsmeans
      intdf <- data.frame(emm[2],
                          emm[3],
                          emm[1],
                          emm[6],
                          emm[7],
                          rep(valueLabel.digits, times = nrow(emm[1])))
      colnames(intdf) <- c("x", "y", "grp", "conf.low", "conf.high", "vld")
      # -----------------------------------------------------------
      # remove missings
      # -----------------------------------------------------------
      if (anyNA(intdf$y)) {
        # warn user
        warning("fitted model had estimates with missing values. Output may be incomplete.", call. = FALSE)
        # remove missings
        intdf <- dplyr::filter(intdf, !is.na(y))
      }
      # -----------------------------------------------------------
      # convert df-values to numeric
      # -----------------------------------------------------------
      intdf$y <- as.numeric(as.character(intdf$y))
      # add numeric x for geom_line
      intdf$xn <- as.numeric(intdf$x)
      # ci to numeric, y-scale is continuous
      intdf$conf.low <- as.numeric(intdf$conf.low)
      intdf$conf.high <- as.numeric(intdf$conf.high)
      # order data frame
      intdf <- intdf[order(intdf$grp), ]
      # -----------------------------------------------------------
      # retrieve lowest and highest x and y position to determine
      # the scale limits
      # -----------------------------------------------------------
      if (is.null(axisLimits.y)) {
        lowerLim.y <- ifelse(isTRUE(showCI), floor(min(intdf$conf.low)), floor(min(intdf$y)))
        upperLim.y <- ifelse(isTRUE(showCI), ceiling(max(intdf$conf.high)), ceiling(max(intdf$y)))
      } else {
        lowerLim.y <- axisLimits.y[1]
        upperLim.y <- axisLimits.y[2]
      }
      # -----------------------------------------------------------
      # check whether user defined grid breaks / tick marks are used
      # -----------------------------------------------------------
      if (!is.null(gridBreaksAt)) {
        gridbreaks.y <- c(seq(lowerLim.y, upperLim.y, by = gridBreaksAt))
      }
      # -----------------------------------------------------------
      # prepare label and name from dependent variable
      # -----------------------------------------------------------
      # get response name, which is variable name
      response.name <- colnames(fit$model)[1]
      # get variable label attribute
      response.label <- sjmisc::get_label(fit$model[[1]], def.value = response.name)
      # -----------------------------------------------------------
      # prepare label for x-axis
      # -----------------------------------------------------------
      alx <- sjmisc::get_labels(fit$model[[term.pairs[2]]],
                                attr.only = FALSE,
                                include.values = NULL,
                                include.non.labelled = TRUE)
      # check if we have any
      if (is.null(alx)) alx <- term.pairs[2]
      # -----------------------------------------------------------
      # prepare plot title and axis titles
      # -----------------------------------------------------------
      if (is.null(title)) {
        labtitle <- paste0("Estimated marginal means of ", response.name,
                           " between ", term.pairs[2],
                           " and ", term.pairs[1])
      } else {
        labtitle <- title
      }
      # -----------------------------------------------------------
      # legend labels
      # -----------------------------------------------------------
      if (is.null(legendLabels)) {
        # try to get labels
        lLabels <- sjmisc::get_labels(fit$model[term.pairs[1]][, 1], attr.only = FALSE)
        # if we still have no labels, fall back to factor levels
        # (fix: result was discarded before, leaving lLabels NULL)
        if (is.null(lLabels)) lLabels <- levels(fit$model[term.pairs[1]][, 1])
      } else {
        lLabels <- legendLabels
      }
      # -----------------------------------------------------------
      # legend title
      # -----------------------------------------------------------
      if (is.null(legendTitle)) {
        lTitle <- term.pairs[1]
      } else {
        # set legend title for plot
        lTitle <- legendTitle
      }
      if (is.null(axisLabels.x)) axisLabels.x <- alx
      if (!is.null(axisTitle.x)) {
        labx <- axisTitle.x
      } else {
        labx <- term.pairs[2]
      }
      if (!is.null(axisTitle.y)) {
        laby <- axisTitle.y
      } else {
        laby <- response.label
      }
      # -----------------------------------------------------------
      # prepare annotation labels
      # -----------------------------------------------------------
      # wrap title
      labtitle <- sjmisc::word_wrap(labtitle, breakTitleAt)
      # wrap legend labels
      lLabels <- sjmisc::word_wrap(lLabels, breakLegendLabelsAt)
      # wrap legend title
      lTitle <- sjmisc::word_wrap(lTitle, breakLegendTitleAt)
      # -----------------------------------------------------------
      # prepare base plot of interactions
      # -----------------------------------------------------------
      baseplot <- ggplot(intdf)
      # -----------------------------------------------------------
      # Confidence intervals?
      # -----------------------------------------------------------
      if (showCI) baseplot <- baseplot +
          geom_ribbon(aes(x = xn, ymin = conf.low, ymax = conf.high, fill = grp), alpha = .3)
      # -----------------------------------------------------------
      # continue with plot. point and line layers above ribbon
      # -----------------------------------------------------------
      baseplot <- baseplot +
        geom_point(aes(x = x, y = y, colour = grp)) +
        geom_line(aes(x = xn, y = y, colour = grp), size = geom.size) +
        scale_x_discrete(labels = axisLabels.x)
      # ------------------------------------------------------------
      # plot value labels
      # ------------------------------------------------------------
      if (showValueLabels) {
        baseplot <- baseplot +
          geom_text(aes(label = round(y, vld), x = x, y = y),
                    nudge_y = y.offset,
                    show.legend = FALSE)
      }
      # ------------------------------------------------------------------------------------
      # build plot object with theme and labels
      # ------------------------------------------------------------------------------------
      baseplot <- baseplot +
        # set plot and axis titles
        labs(title = labtitle,
             x = labx,
             y = laby,
             colour = lTitle) +
        # set axis scale breaks
        scale_y_continuous(limits = c(lowerLim.y, upperLim.y), breaks = gridbreaks.y)
      # ---------------------------------------------------------
      # facet grid?
      # ---------------------------------------------------------
      if (facet.grid) baseplot <- baseplot + facet_grid( ~grp)
      # ---------------------------------------------------------
      # set geom colors
      # ---------------------------------------------------------
      baseplot <- sj.setGeomColors(baseplot, geom.colors, length(lLabels), TRUE, lLabels) + guides(fill = FALSE)
      # ---------------------------------------------------------
      # Check whether ggplot object should be returned or plotted
      # ---------------------------------------------------------
      if (printPlot) print(baseplot)
      # concatenate plot object
      plotlist[[length(plotlist) + 1]] <- baseplot
      dflist[[length(dflist) + 1]] <- intdf
    }
  }
  # -------------------------------------
  # return results
  # -------------------------------------
  invisible(structure(class = c("sjPlot", "sjpemmint"),
                      list(plot.list = plotlist,
                           data.list = dflist)))
}
#' @importFrom stats model.frame
sjp.emm.lmer <- function(fit, swapPredictors, plevel, title, geom.colors, geom.size, axisTitle.x,
axisTitle.y, axisLabels.x, legendLabels, showValueLabels,
valueLabel.digits, showCI, breakTitleAt, breakLegendLabelsAt,
y.offset, axisLimits.y, gridBreaksAt, facet.grid, printPlot) {
if ((any(class(fit) == "lmerMod") || any(class(fit) == "merModLmerTest")) && !requireNamespace("lmerTest", quietly = TRUE)) {
stop("Package 'lmerTest' needed for this function to work. Please install it.", call. = FALSE)
}
# --------------------------------------------------------
# check default geom.size
# --------------------------------------------------------
if (is.null(geom.size)) geom.size = .7
# init vector that saves ggplot objects
plotlist <- list()
dflist <- list()
# -----------------------------------------------------------
# parameter check
# -----------------------------------------------------------
if (is.null(gridBreaksAt)) gridbreaks.y <- ggplot2::waiver()
# --------------------------------------------------------
# unlist labels
# --------------------------------------------------------
if (!is.null(legendLabels) && is.list(legendLabels)) legendLabels <- unlistlabels(legendLabels)
# -----------------------------------------------------------
# get terms of fitted model
# -----------------------------------------------------------
preds <- attr(terms(fit), "term.labels")
# interaction terms contain colons
it.names <- c()
# any predictors with colon?
pos <- grep(":", preds)
# if yes, we have our interaction terms
if (length(pos) > 0) {
it.names <- preds[pos]
} else {
warning("No interaction term found in fitted model...", call. = F)
return(invisible(NULL))
}
# -----------------------------------------------------------
# find all significant interactions
# we start looking for significant p-values beginning
# with the first interaction, not the first single term!
# thus, the starting point is first position after all single
# predictor variables
# -----------------------------------------------------------
# get model summary
fit.coef <- summary(fit)$coefficients
# save coefficients
cf <- rownames(fit.coef)
# find first interaction terms
pos <- grep(":", cf)
# get all p-values
pval <- get_lmerMod_pvalues(fit)[pos]
# get significant interactions
intnames <- cf[pos[which(pval < plevel)]]
# check for any signigicant interactions, stop if nothing found
if (is.null(intnames) || 0 == length(intnames)) {
warning("No significant interactions found...", call. = FALSE)
return(invisible(NULL))
}
# -----------------------------------------------------------
# get model frame
# -----------------------------------------------------------
m_f <- stats::model.frame(fit)
# -----------------------------------------------------------
# Now iterate all interaction terms from model
# -----------------------------------------------------------
interactionterms <- c()
for (i in 1:length(it.names)) {
# -----------------------------------------------------------
# retrieve interaction terms
# -----------------------------------------------------------
terms <- unlist(strsplit(it.names[i], ":"))
# -----------------------------------------------------------
# check if both interaction terms are factors
# -----------------------------------------------------------
if (is.factor(m_f[[terms[1]]]) && is.factor(m_f[[terms[2]]])) {
# -----------------------------------------------------------
# Iterate all interactions on factor-level-basis from model
# -----------------------------------------------------------
for (cnt in 1:length(intnames)) {
# -----------------------------------------------------------
# first, retrieve and split interaction term so we know
# the two predictor variables, or factor levels of the
# interaction term
# -----------------------------------------------------------
lvls <- unlist(strsplit(intnames[cnt], ":"))
# -----------------------------------------------------------
# since we may have factors with more levels, the original
# term labels differ from what we have as coefficient-
# e.g., "ChickWeight$Diet", becomes "Diet1", "Diet2", etc.
# to calculate marginal means, we only need "Diet". So here
# we have to find, which terms match the significant coefficients
# found, and use the term labels for ls means...
# -----------------------------------------------------------
if (grepl(terms[1], lvls[1], fixed = T) && grepl(terms[2], lvls[2], fixed = T)) {
# we found a match
interactionterms <- rbind(interactionterms, terms)
# leave loop
break
}
}
} else {
warning(sprintf("Both %s and %s need to be factors! Skipping these interaction terms...", terms[1], terms[2]), call. = F)
}
}
# -----------------------------------------------------------
# check if we have any valid interaction terms
# for lsmeans function
# -----------------------------------------------------------
is.em <- suppressWarnings(sjmisc::is_empty(interactionterms));
if (!is.em && nrow(interactionterms) > 0) {
for (cnt in 1:nrow(interactionterms)) {
# -----------------------------------------------------------
# retrieve each pair of interaction terms
# -----------------------------------------------------------
term.pairs <- interactionterms[cnt, ]
# -----------------------------------------------------------
# retrieve estimated marginal means for all predictors of
# the model, with various statistics in a data frame format
# -----------------------------------------------------------
emm.df <- lmerTest::lsmeans(fit, paste(term.pairs, collapse = ":"))[[1]]
# swap predictors?
if (swapPredictors) term.pairs <- rev(term.pairs)
# -----------------------------------------------------------
# get column indices of interaction terms, estimate and
# confidence intervals. latter term in interaction is considered
# as "within subject" (x-pos), first term is considered as
# "between subjects" (group)
# -----------------------------------------------------------
emm.col <- c(which(colnames(emm.df) == term.pairs[2]),
which(colnames(emm.df) == "Estimate"),
which(colnames(emm.df) == term.pairs[1]),
which(colnames(emm.df) == "Lower CI"),
which(colnames(emm.df) == "Upper CI"))
# -----------------------------------------------------------
# create data frame from lsmeans
# -----------------------------------------------------------
intdf <- data.frame(emm.df[, emm.col],
rep(valueLabel.digits, times = nrow(emm.df)))
colnames(intdf) <- c("x", "y", "grp", "conf.low", "conf.high", "vld")
# -----------------------------------------------------------
# convert df-values to numeric
# -----------------------------------------------------------
intdf$y <- as.numeric(as.character(intdf$y))
# add numeric x for geom_line
intdf$xn <- as.numeric(intdf$x)
# ci to numeric, y-scale is continuous
intdf$conf.low <- as.numeric(intdf$conf.low)
intdf$conf.high <- as.numeric(intdf$conf.high)
# order data frame
intdf <- intdf[order(intdf$grp), ]
# -----------------------------------------------------------
# retrieve lowest and highest x and y position to determine
# the scale limits
# -----------------------------------------------------------
if (is.null(axisLimits.y)) {
lowerLim.y <- ifelse(isTRUE(showCI), floor(min(intdf$conf.low)), floor(min(intdf$y)))
upperLim.y <- ifelse(isTRUE(showCI), ceiling(max(intdf$conf.high)), ceiling(max(intdf$y)))
} else {
lowerLim.y <- axisLimits.y[1]
upperLim.y <- axisLimits.y[2]
}
# -----------------------------------------------------------
# check whether user defined grid breaks / tick marks are used
# -----------------------------------------------------------
if (!is.null(gridBreaksAt)) {
gridbreaks.y <- c(seq(lowerLim.y, upperLim.y, by = gridBreaksAt))
}
# -----------------------------------------------------------
# prepare label and name from depend variable
# -----------------------------------------------------------
# get response name, which is variable name
response.name <- colnames(m_f)[1]
# get variable label attribute
response.label <- sjmisc::get_label(m_f[[1]], def.value = response.name)
# -----------------------------------------------------------
# prepare label for x-axix
# -----------------------------------------------------------
# get value label attribute
alx <- sjmisc::get_labels(m_f[[term.pairs[2]]],
attr.only = F,
include.values = NULL,
include.non.labelled = T)
# check if we have any
if (is.null(alx)) alx <- term.pairs[2]
# -----------------------------------------------------------
# prepare plot title and axis titles
# -----------------------------------------------------------
if (is.null(title)) {
labtitle <- paste0("Estimated marginal means of ", response.name,
" between ", term.pairs[2],
" and ", term.pairs[1])
} else {
labtitle <- title
}
if (is.null(legendLabels)) {
# try to get labels
lLabels <- sjmisc::get_labels(m_f[[term.pairs[1]]], attr.only = F)
# if we still have no labels, get factor levels
if (is.null(lLabels)) levels(m_f[[term.pairs[1]]])
} else {
lLabels <- legendLabels
}
if (is.null(axisLabels.x)) axisLabels.x <- alx
if (!is.null(axisTitle.x)) {
labx <- axisTitle.x
} else {
labx <- term.pairs[2]
}
if (!is.null(axisTitle.y)) {
laby <- axisTitle.y
} else {
laby <- response.label
}
# -----------------------------------------------------------
# prepare annotation labels
# -----------------------------------------------------------
# wrap title(s)
labtitle <- sjmisc::word_wrap(labtitle, breakTitleAt)
labx <- sjmisc::word_wrap(labx, breakTitleAt)
laby <- sjmisc::word_wrap(laby, breakTitleAt)
# wrap legend labels
lLabels <- sjmisc::word_wrap(lLabels, breakLegendLabelsAt)
# -----------------------------------------------------------
# prepare base plot of interactions
# -----------------------------------------------------------
baseplot <- ggplot(intdf)
# -----------------------------------------------------------
# Confidence intervals?
# -----------------------------------------------------------
if (showCI) baseplot <- baseplot +
geom_ribbon(aes(x = xn, ymin = conf.low, ymax = conf.high, fill = grp), alpha = .3)
# -----------------------------------------------------------
# continue with plot. point and line layers above ribbon
# -----------------------------------------------------------
baseplot <- baseplot +
geom_point(aes(x = x, y = y, colour = grp)) +
geom_line(aes(x = xn, y = y, colour = grp), size = geom.size) +
scale_x_discrete(labels = axisLabels.x)
# ------------------------------------------------------------
# plot value labels
# ------------------------------------------------------------
if (showValueLabels) {
baseplot <- baseplot +
geom_text(aes(label = round(y, vld), x = x, y = y),
nudge_y = y.offset,
show.legend = FALSE)
}
# ------------------------------------------------------------------------------------
# build plot object with theme and labels
# ------------------------------------------------------------------------------------
baseplot <- baseplot +
# set plot and axis titles
labs(title = labtitle,
x = labx,
y = laby,
colour = term.pairs[1]) +
# set axis scale breaks
scale_y_continuous(limits = c(lowerLim.y, upperLim.y), breaks = gridbreaks.y)
# ---------------------------------------------------------
# facet grid?
# ---------------------------------------------------------
if (facet.grid) baseplot <- baseplot + facet_grid( ~grp)
# ---------------------------------------------------------
# set geom colors
# ---------------------------------------------------------
baseplot <- sj.setGeomColors(baseplot, geom.colors, length(lLabels), TRUE, lLabels) + guides(fill = FALSE)
# ---------------------------------------------------------
# Check whether ggplot object should be returned or plotted
# ---------------------------------------------------------
if (printPlot) print(baseplot)
# concatenate plot object
plotlist[[length(plotlist) + 1]] <- baseplot
dflist[[length(dflist) + 1]] <- intdf
}
}
# -------------------------------------
# return results
# -------------------------------------
invisible(structure(class = c("sjPlot", "sjpemmint"),
list(plot.list = plotlist,
data.list = dflist)))
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/thesis.R
\name{thesis_epub}
\alias{thesis_epub}
\title{Creates an R Markdown epub Thesis document}
\usage{
thesis_epub(...)
}
\arguments{
\item{...}{additional arguments passed to the bookdown::epub_book function}
}
\value{
An ebook version of the thesis
}
\description{
This is a function called in output in the YAML of the driver Rmd file
to specify the creation of an epub version of the thesis.
}
\examples{
\dontrun{
output: thesisdown::thesis_epub
}
}
| /man/thesis_epub.Rd | permissive | dr-harper/sotonthesis | R | false | true | 536 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/thesis.R
\name{thesis_epub}
\alias{thesis_epub}
\title{Creates an R Markdown epub Thesis document}
\usage{
thesis_epub(...)
}
\arguments{
\item{...}{additional arguments passed to the bookdown::epub_book function}
}
\value{
An ebook version of the thesis
}
\description{
This is a function called in output in the YAML of the driver Rmd file
to specify the creation of an epub version of the thesis.
}
\examples{
\dontrun{
output: thesisdown::thesis_epub
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build.R
\name{build_pkg}
\alias{build_pkg}
\alias{get_pkg_tar_ball}
\title{Build R tar gz file}
\usage{
build_pkg()
get_pkg_tar_ball()
}
\description{
Builds and sets the PKG_TARBALL & PKG_TARBALL_PATH variables.
}
| /inst/good_pkg/man/build_pkg.Rd | no_license | jumpingrivers/inteRgrate | R | false | true | 294 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build.R
\name{build_pkg}
\alias{build_pkg}
\alias{get_pkg_tar_ball}
\title{Build R tar gz file}
\usage{
build_pkg()
get_pkg_tar_ball()
}
\description{
Builds and sets the PKG_TARBALL & PKG_TARBALL_PATH variables.
}
|
# modification on git from copied files
#setMethod("dim", "segMeth", function(x) {
# nrows <- c(nrow(x@Cs), nrow(x@Ts), length(x@coordinates), nrow(x@locLikelihoods))
# ncols <- c(length(x@replicates), ncol(x@Cs), ncol(x@Ts))
# if(any(nrows != 0)) nrow <- nrows[nrows != 0][1] else nrow <- 0
# if(any(ncols != 0)) ncol <- ncols[ncols != 0][1] else ncol <- 0
# c(nrow, ncol)
#})
#setMethod("show", "segMeth", function(object) {
# callNextMethod(object)
# cat('\nSlot "nonconversion":\n')
# print(object@nonconversion)
# cat('\nSlot "Cs":\n')
# .printIRangesMatrix(round(object@Cs))
# cat('\nSlot "Ts":\n')
# .printIRangesMatrix(round(object@Ts))
# })
#setMethod("[", "segMeth", function(x, i, j, ..., drop = FALSE) {
# x <- callNextMethod(x, i, j, ..., drop = FALSE)
# if(!missing(j)) {
# j <- as.vector(j)
# if(nrow(x@Cs) > 0) x@Cs <- x@Cs[,j, drop = FALSE]
# if(nrow(x@Ts) > 0) x@Ts <- x@Ts[,j, drop = FALSE]
# if(length(x@nonconversion)) x@nonconversion <- x@nonconversion[j]
# }
# if(!missing(i)) {
# i <- as.vector(i)
# if(nrow(x@Cs) > 0) x@Cs <- x@Cs[i,, drop = FALSE]
# if(nrow(x@Ts) > 0) x@Ts <- x@Ts[i,, drop = FALSE]
# }
# x
#})
| /R/segMeth-accessors.R | no_license | tjh48/segmentSeq | R | false | false | 1,188 | r | # modification on git from copied files
#setMethod("dim", "segMeth", function(x) {
# nrows <- c(nrow(x@Cs), nrow(x@Ts), length(x@coordinates), nrow(x@locLikelihoods))
# ncols <- c(length(x@replicates), ncol(x@Cs), ncol(x@Ts))
# if(any(nrows != 0)) nrow <- nrows[nrows != 0][1] else nrow <- 0
# if(any(ncols != 0)) ncol <- ncols[ncols != 0][1] else ncol <- 0
# c(nrow, ncol)
#})
#setMethod("show", "segMeth", function(object) {
# callNextMethod(object)
# cat('\nSlot "nonconversion":\n')
# print(object@nonconversion)
# cat('\nSlot "Cs":\n')
# .printIRangesMatrix(round(object@Cs))
# cat('\nSlot "Ts":\n')
# .printIRangesMatrix(round(object@Ts))
# })
#setMethod("[", "segMeth", function(x, i, j, ..., drop = FALSE) {
# x <- callNextMethod(x, i, j, ..., drop = FALSE)
# if(!missing(j)) {
# j <- as.vector(j)
# if(nrow(x@Cs) > 0) x@Cs <- x@Cs[,j, drop = FALSE]
# if(nrow(x@Ts) > 0) x@Ts <- x@Ts[,j, drop = FALSE]
# if(length(x@nonconversion)) x@nonconversion <- x@nonconversion[j]
# }
# if(!missing(i)) {
# i <- as.vector(i)
# if(nrow(x@Cs) > 0) x@Cs <- x@Cs[i,, drop = FALSE]
# if(nrow(x@Ts) > 0) x@Ts <- x@Ts[i,, drop = FALSE]
# }
# x
#})
|
# Download the full `offervisitor` table from SQL Server in 50,000-row pages,
# bind the pages into one data frame, and time the transfer.
# NOTE(review): database credentials are hard-coded in the connection string;
# move them to environment variables or a config file kept out of version control.
library(RODBC)
library(MASS)
channel <- odbcDriverConnect('driver={SQL Server};server=79.175.181.141,11296;database=AffiliateNetwork;uid=mohammadi;pwd=123456789;')
sleep_for_a_minute <- function() { Sys.sleep(60) }
page_size <- 50000
n_pages <- 100
start_time <- Sys.time()
# Collect each page in a pre-allocated list and bind once at the end:
# rbind() inside the loop copies the accumulated data frame every iteration.
pages <- vector("list", n_pages)
for (n in seq_len(n_pages)) {
  # BUG FIX: the original passed the bare page index `n` as the OFFSET, so
  # successive queries returned almost entirely overlapping row windows
  # (rows 1..50000, 2..50001, ...). Its trailing `n <- (n-1)*50000` had no
  # effect because `for` reassigns the loop variable each iteration. The
  # offset must advance by a whole page.
  offset_rows <- (n - 1) * page_size
  pages[[n]] <- sqlQuery(channel, paste(
    "SELECT
    offerId,
    AffiliatorId,
    SiteId,
    TrackingTime,
    Province,
    SUBSTRING(ReferrerUrl,
    (CASE WHEN CHARINDEX('//', ReferrerUrl)= 0 THEN 1 ELSE CHARINDEX('//', ReferrerUrl) + 2 END),
    CASE
    WHEN CHARINDEX('/', ReferrerUrl, CHARINDEX('//', ReferrerUrl) + 2) > 0 THEN CHARINDEX('/', ReferrerUrl, CHARINDEX('//', ReferrerUrl) + 2) - (CASE WHEN CHARINDEX('//', ReferrerUrl)= 0 THEN 1 ELSE CHARINDEX('//', ReferrerUrl) + 2 END)
    WHEN CHARINDEX('?', ReferrerUrl, CHARINDEX('//', ReferrerUrl) + 2) > 0 THEN CHARINDEX('?', ReferrerUrl, CHARINDEX('//', ReferrerUrl) + 2) - (CASE WHEN CHARINDEX('//', ReferrerUrl)= 0 THEN 1 ELSE CHARINDEX('//', ReferrerUrl) + 2 END)
    ELSE LEN(ReferrerUrl)
    END
    ) AS 'ReferrerUrl'
    FROM offervisitor
    ORDER BY TrackingTime DESC
    OFFSET ", offset_rows, " ROWS
    FETCH NEXT ", page_size, " ROWS ONLY"))
}
df_total <- do.call(rbind, pages)
sleep_for_a_minute()
end_time <- Sys.time()
end_time - start_time
total.offer <- sqlQuery(channel, paste("select top 10 * from offervisitor"))
# NOTE(review): the original file ended with `lm(medv~., data= )` (empty data
# argument). `medv` is the response column of MASS::Boston, the only dataset
# with that name in scope here -- confirm this is the intended model data.
fit <- lm(medv ~ ., data = Boston)
| /Deema2.R | no_license | sinnsalagrim/R | R | false | false | 2,111 | r | library(RODBC)
library(MASS)
channel <- odbcDriverConnect('driver={SQL Server};server=79.175.181.141,11296;database=AffiliateNetwork;uid=mohammadi;pwd=123456789;')
df_total <- data.frame()
sleep_for_a_minute <- function() { Sys.sleep(60) }
start_time <- Sys.time()
for (n in 1:100){
offers <- sqlQuery(channel,paste(
"SELECT
offerId,
AffiliatorId,
SiteId,
TrackingTime,
Province,
SUBSTRING(ReferrerUrl,
(CASE WHEN CHARINDEX('//', ReferrerUrl)= 0 THEN 1 ELSE CHARINDEX('//', ReferrerUrl) + 2 END),
CASE
WHEN CHARINDEX('/', ReferrerUrl, CHARINDEX('//', ReferrerUrl) + 2) > 0 THEN CHARINDEX('/', ReferrerUrl, CHARINDEX('//', ReferrerUrl) + 2) - (CASE WHEN CHARINDEX('//', ReferrerUrl)= 0 THEN 1 ELSE CHARINDEX('//', ReferrerUrl) + 2 END)
WHEN CHARINDEX('?', ReferrerUrl, CHARINDEX('//', ReferrerUrl) + 2) > 0 THEN CHARINDEX('?', ReferrerUrl, CHARINDEX('//', ReferrerUrl) + 2) - (CASE WHEN CHARINDEX('//', ReferrerUrl)= 0 THEN 1 ELSE CHARINDEX('//', ReferrerUrl) + 2 END)
ELSE LEN(ReferrerUrl)
END
) AS 'ReferrerUrl'
FROM offervisitor
ORDER BY TrackingTime DESC
OFFSET ", n, " ROWS
FETCH NEXT 50000 ROWS ONLY"))
df<- offers
df_total <- rbind(df_total,df)
n <- (n-1)*50000
}
sleep_for_a_minute()
end_time <- Sys.time()
end_time - start_time
total.offer <- sqlQuery(channel, paste("select top 10 * from offervisitor"))
fit <- lm(medv~., data= )
|
####################################################################################
### Compute the area of each polygon in a shapefile
### Land-use data: ANA 2016
### By: Danielle de Oliveira Moreira
### created: 11/06/2020
### Updated:
####################################################################################
## load spatial packages: rgdal for shapefile I/O, raster for area()
library(rgdal)
library(raster)
## load the land-use polygon shapefile
uso_solo <- readOGR(dsn = "F:/Dados_ArcGis/BHRD/Uso_Ocupao_do_Solo_Bacia_do_Rio_Doce", layer = "uph_sao_jose_uso")
#################################
# Add a new column ($area2) to the layer above, holding the area of each
# polygon as computed by area(uso_solo).
# NOTE(review): area() values are in the units of the layer's CRS (m^2 for a
# projected CRS) -- confirm the shapefile's projection before interpreting.
uso_solo$area2 <- area(uso_solo)
head(uso_solo)
# Save the shapefile with the new column under a new layer name
writeOGR(uso_solo, "F:/Dados_ArcGis/BHRD/Uso_Ocupao_do_Solo_Bacia_do_Rio_Doce", "uph_sao_jose_uso2", driver="ESRI Shapefile")
| /R/00_calcular_area_poligono.R | no_license | daphnespier/uso_ANA | R | false | false | 885 | r | ####################################################################################
### Calcular área de um polígono shp
### Dados de uso do solo: ANA 2016
### Por: Danielle de Oliveira Moreira
### criado em: 11/06/2020
### Atualizado em:
####################################################################################
## load rgdal package
library(rgdal)
library(raster)
## load your polygone shapefile
uso_solo <- readOGR(dsn = "F:/Dados_ArcGis/BHRD/Uso_Ocupao_do_Solo_Bacia_do_Rio_Doce", layer = "uph_sao_jose_uso")
#################################
#Adicionar uma nova coluna do arquivo acima ($area2), para calcular a área do polígono (area(uso_solo))
uso_solo$area2 <- area(uso_solo)
head(uso_solo)
#Salvar o arquivo com a nova coluna
writeOGR(uso_solo, "F:/Dados_ArcGis/BHRD/Uso_Ocupao_do_Solo_Bacia_do_Rio_Doce", "uph_sao_jose_uso2", driver="ESRI Shapefile")
|
#' kmr_read_tab
#'
#' Read a k-mer count table from a file in tab format
#' (as written by \code{kmr_write_tab}).
#'
#' @param afile file name; the \code{.tab.gz} extension is ensured via
#'   \code{assure_file_ext()}, so it may be passed with or without it
#' @import vroom
#'
#' @return a tibble (data.frame) with a character column \code{kmer} and a
#'   numeric column \code{count}
#' @export
#' @family kmer_core
#' @examples
#' if (interactive()) {
#'
#' fa <- system.file("testdata/phix174_m-pe_w_err_5k_30q.fastq.gz",
#'                   package = "kmerize")
#'
#' out_file <- file.path(tempdir(), "phx")
#' k <- 7
#' kmr_count(fa, out_file, k = k, f = "q")
#'
#' kmr_write_tab(out_file)
#'
#' dat <- kmr_read_tab(out_file)
#' head(dat)
#'
#' }
kmr_read_tab <- function(afile) {
  # Normalise the path; the resulting file must already exist.
  afile <- assure_file_ext(afile, ".tab.gz")
  stopifnot(file.exists(afile))
  # Headerless two-column file: k-mer string ("c") and its count ("n").
  vroom::vroom(afile, col_types = "cn", col_names = c("kmer", "count"))
}
| /R/kmr_read_tab.R | no_license | c5sire/kmerize | R | false | false | 759 | r | #' kmr_read_tab
#'
#' Read a file in tab format.
#'
#' @param afile file name (obligatory extension .tab)
#' @import vroom
#'
#' @return data.frame with kmers as DNAStringSet
#' @export
#' @family kmer_core
#' @examples
#' if (interactive()) {
#'
#' fa <- system.file("testdata/phix174_m-pe_w_err_5k_30q.fastq.gz",
#' package = "kmerize")
#'
#' out_file <- file.path(tempdir(), "phx")
#' k <- 7
#' kmr_count(fa, out_file, k = k, f = "q")
#'
#' kmr_write_tab(out_file)
#'
#' dat <- kmr_read_tab(out_file)
#' head(dat)
#'
#' }
kmr_read_tab <- function(afile) {
afile <- assure_file_ext(afile, ".tab.gz")
stopifnot(file.exists(afile))
kmers <- vroom::vroom(afile, col_types = "cn", col_names = c("kmer", "count"))
return(kmers)
}
|
# Canned "what to try next" hints that vcr appends to its error messages.
# Each entry pairs:
#   text - a character vector, joined into one paragraph when displayed
#          (some entries contain a sprintf-style %s placeholder that is
#          substituted by the caller)
#   url  - the matching chapter of the "HTTP testing in R" book
error_suggestions <- list(
  # A new, unmatched request hit an already-recorded cassette.
  use_new_episodes = list(
    text = c("You can use the :new_episodes record mode to allow vcr to",
             "record this new request to the existing cassette"),
    url = "https://books.ropensci.org/http-testing/record-modes.html#new_episodes"
  ),
  # Record mode 'once' refuses to extend an existing cassette.
  delete_cassette_for_once = list(
    text = c("The current record mode ('once') does not allow new requests to be recorded",
             "to a previously recorded cassette. You can delete the cassette file and re-run",
             "your tests to allow the cassette to be recorded with this request"),
    url = "https://books.ropensci.org/http-testing/record-modes.html#once"
  ),
  # Record mode 'none' refuses to record anything at all.
  deal_with_none = list(
    text = c("The current record mode ('none') does not allow requests to be recorded. You",
             "can temporarily change the record mode to :once, delete the cassette file ",
             "and re-run your tests to allow the cassette to be recorded with this request"),
    url = "https://books.ropensci.org/http-testing/record-modes.html#none"
  ),
  # A request was made while no cassette was inserted.
  use_a_cassette = list(
    text = c("If you want vcr to record this request and play it back during future test",
             "runs, you should wrap your test (or this portion of your test) in a",
             "`vcr::use_cassette` block"),
    url = "https://books.ropensci.org/http-testing/intro"
  ),
  # Let real HTTP through whenever no cassette is active.
  allow_http_connections_when_no_cassette = list(
    text = c("If you only want vcr to handle requests made while a cassette is in use,",
             "configure `allow_http_connections_when_no_cassette = TRUE`. vcr will",
             "ignore this request since it is made when there is no cassette"),
    url = "https://books.ropensci.org/http-testing/vcr-configuration#allow-http-connections-when-no-cassette"
  ),
  # Exclude this class of request from vcr handling entirely.
  ignore_request = list(
    text = c("If you want vcr to ignore this request (and others like it), you can",
             "set an `ignore_request` function"),
    url = "https://books.ropensci.org/http-testing/vcr-configuration#config-ignore-requests"
  ),
  # A matching interaction exists but has already been consumed once.
  allow_playback_repeats = list(
    text = c("The cassette contains an HTTP interaction that matches this request,",
             "but it has already been played back. If you wish to allow a single HTTP",
             "interaction to be played back multiple times, set the `allow_playback_repeats`",
             "cassette option"),
    url = "https://books.ropensci.org/http-testing/request-matching#playback-repeats"
  ),
  # No interaction matched; the %s placeholder is filled in by the caller.
  match_requests_on = list(
    text = c("The cassette contains %s not been",
             "played back. If your request is non-deterministic, you may need to",
             "change your 'match_requests_on' cassette option to be more lenient",
             "or use a custom request matcher to allow it to match"),
    url = "https://books.ropensci.org/http-testing/request-matching"
  ),
  # Pointer to vcr's logging facility for diagnosing matching behaviour.
  try_debug_logger = list(
    text = c("If you're surprised vcr is raising this error",
             "and want insight about how vcr attempted to handle the request,",
             "you can use 'logging' to see more details"),
    url = "https://books.ropensci.org/http-testing/debugging-your-tests-that-use-vcr.html#logging-1"
  )
)
| /R/error_suggestions.R | permissive | ropensci/vcr | R | false | false | 3,048 | r | error_suggestions <- list(
use_new_episodes = list(
text = c("You can use the :new_episodes record mode to allow vcr to",
"record this new request to the existing cassette"),
url = "https://books.ropensci.org/http-testing/record-modes.html#new_episodes"
),
delete_cassette_for_once = list(
text = c("The current record mode ('once') does not allow new requests to be recorded",
"to a previously recorded cassette. You can delete the cassette file and re-run",
"your tests to allow the cassette to be recorded with this request"),
url = "https://books.ropensci.org/http-testing/record-modes.html#once"
),
deal_with_none = list(
text = c("The current record mode ('none') does not allow requests to be recorded. You",
"can temporarily change the record mode to :once, delete the cassette file ",
"and re-run your tests to allow the cassette to be recorded with this request"),
url = "https://books.ropensci.org/http-testing/record-modes.html#none"
),
use_a_cassette = list(
text = c("If you want vcr to record this request and play it back during future test",
"runs, you should wrap your test (or this portion of your test) in a",
"`vcr::use_cassette` block"),
url = "https://books.ropensci.org/http-testing/intro"
),
allow_http_connections_when_no_cassette = list(
text = c("If you only want vcr to handle requests made while a cassette is in use,",
"configure `allow_http_connections_when_no_cassette = TRUE`. vcr will",
"ignore this request since it is made when there is no cassette"),
url = "https://books.ropensci.org/http-testing/vcr-configuration#allow-http-connections-when-no-cassette"
),
ignore_request = list(
text = c("If you want vcr to ignore this request (and others like it), you can",
"set an `ignore_request` function"),
url = "https://books.ropensci.org/http-testing/vcr-configuration#config-ignore-requests"
),
allow_playback_repeats = list(
text = c("The cassette contains an HTTP interaction that matches this request,",
"but it has already been played back. If you wish to allow a single HTTP",
"interaction to be played back multiple times, set the `allow_playback_repeats`",
"cassette option"),
url = "https://books.ropensci.org/http-testing/request-matching#playback-repeats"
),
match_requests_on = list(
text = c("The cassette contains %s not been",
"played back. If your request is non-deterministic, you may need to",
"change your 'match_requests_on' cassette option to be more lenient",
"or use a custom request matcher to allow it to match"),
url = "https://books.ropensci.org/http-testing/request-matching"
),
try_debug_logger = list(
text = c("If you're surprised vcr is raising this error",
"and want insight about how vcr attempted to handle the request,",
"you can use 'logging' to see more details"),
url = "https://books.ropensci.org/http-testing/debugging-your-tests-that-use-vcr.html#logging-1"
)
)
|
#'@title Create a Trie
#'@description \code{create_trie} creates a trie (a key-value store optimised
#'for matching) out of a provided character vector of keys, and a numeric,
#'character, logical or integer vector of values (both the same length).
#'
#'@param keys a character vector containing the keys for the trie.
#'
#'@param values an atomic vector of any type, containing the values to pair with
#'\code{keys}. Must be the same length as \code{keys}.
#'
#'@return a `trie` object. Its class additionally records the value type:
#'one of \code{string_trie}, \code{integer_trie}, \code{numeric_trie} or
#'\code{logical_trie}.
#'
#'@seealso \code{\link{trie_add}} and \code{\link{trie_remove}} for adding to and removing
#'from tries after their creation, and \code{\link{longest_match}} and other match functions
#'for matching values against the keys of a created trie.
#'
#'@examples
#'# An integer trie (note the L suffix; a bare 1 is numeric in R and would
#'# produce a numeric trie instead)
#'int_trie <- create_trie(keys = "foo", values = 1L)
#'
#'# A string trie
#'str_trie <- create_trie(keys = "foo", values = "bar")
#'
#'@export
create_trie <- function(keys, values){
  # Keys and values must pair up one-to-one, and keys must be character.
  stopifnot(length(keys) == length(values))
  stopifnot(is.character(keys))
  output <- NULL
  # The NA slot is filled below with the type-specific subclass name.
  output_classes <- c("trie", NA)
  # Dispatch on the value type to the matching radix_create_* constructor;
  # any other type is rejected with an error.
  switch(class(values)[1],
         "character" = {
           output <- radix_create_string(keys, values)
           output_classes[2] <- "string_trie"
         },
         "integer" = {
           output <- radix_create_integer(keys, values)
           output_classes[2] <- "integer_trie"
         },
         "numeric" = {
           output <- radix_create_numeric(keys, values)
           output_classes[2] <- "numeric_trie"
         },
         "logical" = {
           output <- radix_create_logical(keys, values)
           output_classes[2] <- "logical_trie"
         },
         stop("'values' must be a numeric, integer, character or logical vector"))
  # Append the generic "trie" class and the type-specific subclass.
  class(output) <- c(class(output), output_classes)
  return(output)
} | /R/create.R | permissive | okeyes-r7/triebeard | R | false | false | 1,810 | r | #'@title Create a Trie
#'@description \code{create_trie} creates a trie (a key-value store optimised
#'for matching) out of a provided character vector of keys, and a numeric,
#'character, logical or integer vector of values (both the same length).
#'
#'@param keys a character vector containing the keys for the trie.
#'
#'@param values an atomic vector of any type, containing the values to pair with
#'\code{keys}. Must be the same length as \code{keys}.
#'
#'@return a `trie` object.
#'
#'@seealso \code{\link{trie_add}} and \code{\link{trie_remove}} for adding to and removing
#'from tries after their creation, and \code{\link{longest_match}} and other match functions
#'for matching values against the keys of a created trie.
#'
#'@examples
#'# An integer trie
#'int_trie <- create_trie(keys = "foo", values = 1)
#'
#'# A string trie
#'str_trie <- create_trie(keys = "foo", values = "bar")
#'
#'@export
create_trie <- function(keys, values){
stopifnot(length(keys) == length(values))
stopifnot(is.character(keys))
output <- NULL
output_classes <- c("trie", NA)
switch(class(values)[1],
"character" = {
output <- radix_create_string(keys, values)
output_classes[2] <- "string_trie"
},
"integer" = {
output <- radix_create_integer(keys, values)
output_classes[2] <- "integer_trie"
},
"numeric" = {
output <- radix_create_numeric(keys, values)
output_classes[2] <- "numeric_trie"
},
"logical" = {
output <- radix_create_logical(keys, values)
output_classes[2] <- "logical_trie"
},
stop("'values' must be a numeric, integer, character or logical vector"))
class(output) <- c(class(output), output_classes)
return(output)
} |
# Fit a GFA (group factor analysis) model to the CLL multi-omics data and
# repackage the result as a MOFA object so that MOFAtools' downstream
# machinery (variance decomposition, factor subsetting, plotting) can be
# reused for the method comparison.
# devtools::load_all("/Users/ricard/mofa/MOFAtools")
devtools::load_all("/homes/ricard/mofa/MOFAtools")
library(GFA)
library(data.table)
library(purrr)
data("CLL_data")
# Transpose each view before centring; gfa() is fed the transposed data.
CLL_data <- lapply(CLL_data,t)
CLL_data_norm <- normalizeData(CLL_data, type="center")
# Train GFA with default options and record the wall-clock time.
# NOTE(review): K = 50 here, but the result is saved as "gfa20.rds" below --
# confirm which factor number / file name is intended.
opts <- getDefaultOpts()
ptm <- proc.time()
gfa_model_tmp <- gfa(CLL_data_norm$train, opts, K = 50)
gfa_time <- proc.time() - ptm
# Parse W
# Split the concatenated loading matrix back into one block per view, using a
# helper column (tmp) that records which view each feature belongs to.
D <- sapply(CLL_data_norm$train,ncol)
M <- length(CLL_data)
tmp <- rep(NA,sum(D))
for (m in 1:M) {
  if (m==1) {
    tmp[1:D[m]] <- m
  } else {
    tmp[(cumsum(D)[m-1]+1):cumsum(D)[m]] <- m
  }
}
W <- gfa_model_tmp$W
W <- cbind(W,tmp)
W_split <- lapply( split( W[,1:ncol(W)-1], W[,ncol(W)] ), matrix, ncol=ncol(W)-1)
names(W_split) <- names(CLL_data)
# Parse Alpha
# NOTE(review): this splits gfa_model_tmp$Z row-wise and labels the pieces
# with the view names before storing them as "Alpha" -- confirm that Z is
# really the intended source for the Alpha expectations.
Alpha_split <- split(gfa_model_tmp$Z, row(gfa_model_tmp$Z))
names(Alpha_split) <- names(CLL_data)
# Create a MOFA object
# Fill its slots with the GFA results; views are transposed back to
# features x samples where MOFA expects that orientation.
gfa_model <- createMOFAobject(map(CLL_data_norm$train,t))
gfa_model@ModelOpts$learnIntercept <- F
gfa_model@TrainData <- map(CLL_data_norm$train,t)
gfa_model@Expectations <- list(
  "Y"=CLL_data_norm$train,
  "Z"=gfa_model_tmp$X,
  "W"=W_split,
  "Alpha"=Alpha_split
)
gfa_model@Dimensions[["K"]] <- ncol(gfa_model_tmp$X)
viewNames(gfa_model) <- names(CLL_data)
sampleNames(gfa_model) <- as.character(1:nrow(CLL_data[[1]]))
factorNames(gfa_model) <- as.character(1:ncol(gfa_model_tmp$X))
gfa_model@Status <- "trained"
gfa_model@ModelOpts$likelihood <- c("gaussian","gaussian","gaussian","gaussian")
names(gfa_model@ModelOpts$likelihood) <- names(CLL_data)
# Sort by variance explained
# Reorder factors by total R2 (descending), then restore the real sample and
# feature names before saving the model to disk.
r2 <- rowSums(calculateVarianceExplained(gfa_model)$R2PerFactor)
order_factors <- c(names(r2)[order(r2, decreasing = T)])
gfa_model <- subsetFactors(gfa_model,order_factors)
sampleNames(gfa_model) <- rownames(CLL_data[[1]])
factorNames(gfa_model) <- as.character(1:ncol(gfa_model_tmp$X))
featureNames(gfa_model) <- lapply(CLL_data,colnames)
saveRDS(gfa_model, "/homes/ricard/mofa_rebuttal/gfa_comparison/cll/out/gfa20.rds")
# write.table(data.frame(time=gfa_time), file="/homes/ricard/mofa_rebuttal/gfa_comparison/cll/out/gfatime.txt", col.names=F, row.names=F, quote=F)
| /GFA_iCluster_comparisons/CLL/run_gfa.R | no_license | tqh003/MOFA_analysis | R | false | false | 2,199 | r | # devtools::load_all("/Users/ricard/mofa/MOFAtools")
devtools::load_all("/homes/ricard/mofa/MOFAtools")
library(GFA)
library(data.table)
library(purrr)
data("CLL_data")
CLL_data <- lapply(CLL_data,t)
CLL_data_norm <- normalizeData(CLL_data, type="center")
opts <- getDefaultOpts()
ptm <- proc.time()
gfa_model_tmp <- gfa(CLL_data_norm$train, opts, K = 50)
gfa_time <- proc.time() - ptm
# Parse W
D <- sapply(CLL_data_norm$train,ncol)
M <- length(CLL_data)
tmp <- rep(NA,sum(D))
for (m in 1:M) {
if (m==1) {
tmp[1:D[m]] <- m
} else {
tmp[(cumsum(D)[m-1]+1):cumsum(D)[m]] <- m
}
}
W <- gfa_model_tmp$W
W <- cbind(W,tmp)
W_split <- lapply( split( W[,1:ncol(W)-1], W[,ncol(W)] ), matrix, ncol=ncol(W)-1)
names(W_split) <- names(CLL_data)
# Parse Alpha
Alpha_split <- split(gfa_model_tmp$Z, row(gfa_model_tmp$Z))
names(Alpha_split) <- names(CLL_data)
# Create a MOFA object
gfa_model <- createMOFAobject(map(CLL_data_norm$train,t))
gfa_model@ModelOpts$learnIntercept <- F
gfa_model@TrainData <- map(CLL_data_norm$train,t)
gfa_model@Expectations <- list(
"Y"=CLL_data_norm$train,
"Z"=gfa_model_tmp$X,
"W"=W_split,
"Alpha"=Alpha_split
)
gfa_model@Dimensions[["K"]] <- ncol(gfa_model_tmp$X)
viewNames(gfa_model) <- names(CLL_data)
sampleNames(gfa_model) <- as.character(1:nrow(CLL_data[[1]]))
factorNames(gfa_model) <- as.character(1:ncol(gfa_model_tmp$X))
gfa_model@Status <- "trained"
gfa_model@ModelOpts$likelihood <- c("gaussian","gaussian","gaussian","gaussian")
names(gfa_model@ModelOpts$likelihood) <- names(CLL_data)
# Sort by variance explained
r2 <- rowSums(calculateVarianceExplained(gfa_model)$R2PerFactor)
order_factors <- c(names(r2)[order(r2, decreasing = T)])
gfa_model <- subsetFactors(gfa_model,order_factors)
sampleNames(gfa_model) <- rownames(CLL_data[[1]])
factorNames(gfa_model) <- as.character(1:ncol(gfa_model_tmp$X))
featureNames(gfa_model) <- lapply(CLL_data,colnames)
saveRDS(gfa_model, "/homes/ricard/mofa_rebuttal/gfa_comparison/cll/out/gfa20.rds")
# write.table(data.frame(time=gfa_time), file="/homes/ricard/mofa_rebuttal/gfa_comparison/cll/out/gfatime.txt", col.names=F, row.names=F, quote=F)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getTriosDfFromCsv.R
\name{getTriosDfFromCsv}
\alias{getTriosDfFromCsv}
\title{Creates a dataframe with a set of potential parents with one row for each
offspring from a CSV file.}
\usage{
getTriosDfFromCsv(triosFile)
}
\arguments{
\item{triosFile}{Character vector of length one having the path of the
trios file.}
}
\value{
A dataframe with one row for each offspring where the potential dams
and sires are the second and third column respectively. The \code{dam} and
\code{sire} columns contain the animal IDs in a single character string
separated as they were in the original file.
}
\description{
Creates a dataframe with a set of potential parents with one row for each
offspring from a CSV file.
}
| /man/getTriosDfFromCsv.Rd | permissive | rmsharp/parentfindr | R | false | true | 779 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getTriosDfFromCsv.R
\name{getTriosDfFromCsv}
\alias{getTriosDfFromCsv}
\title{Creates a dataframe with a set of potential parents with one row for each
offspring from a CSV file.}
\usage{
getTriosDfFromCsv(triosFile)
}
\arguments{
\item{triosFile}{Character vector of length one having the path of the
trios file.}
}
\value{
A dataframe with one row for each offspring where the potential dams
and sires are the second and third column respectively. The \code{dam} and
\code{sire} columns contain the animal IDs in a single character string
separated as they were in the original file.
}
\description{
Creates a dataframe with a set of potential parents with one row for each
offspring from a CSV file.
}
|
#' @description
#' `new_pillar_shaft_simple()` provides an implementation of the `pillar_shaft`
#' class suitable for output that has a fixed formatting, which will be
#' truncated with a continuation character (ellipsis or `~`) if it doesn't fit
#' the available width.
#' By default, the required width is computed from the natural width of the
#' `formatted` argument.
#'
#' @details
#' The `formatted` argument may also contain ANSI escapes to change color
#' or other attributes of the text, see [crayon].
#'
#' @param formatted An object coercible to [character].
#' @param align Alignment of the column.
#' @param na String to use as `NA` value, defaults to `"NA"` styled with
#' [style_na()] with fallback if color is not available.
#' @param na_indent Indentation of `NA` values.
#' @export
#' @rdname new_pillar_shaft
new_pillar_shaft_simple <- function(formatted, ..., width = NULL, align = "left",
                                    min_width = NULL, na = NULL, na_indent = 0L) {
  # Fill in optional defaults: the NA marker comes from pillar's styling
  # helper, the width from the widest display extent of the formatted cells.
  if (is.null(na)) na <- pillar_na()
  if (is.null(width)) width <- get_max_extent(as.character(formatted))
  # Delegate to the generic constructor, tagging the subclass so that
  # format.pillar_shaft_simple() is dispatched for rendering.
  new_pillar_shaft(
    list(formatted),
    ...,
    class = "pillar_shaft_simple",
    width = width,
    min_width = min_width,
    align = align,
    na = na,
    na_indent = na_indent
  )
}
#' @export
format.pillar_shaft_simple <- function(x, width, ...) {
  # The pre-formatted values are stored as the shaft's single data element.
  out <- as.character(x[[1]])

  # Truncate with a continuation character when the available width is
  # narrower than the shaft's natural width.
  if (width < get_width(x)) {
    out <- str_trunc(out, width)
  }

  # Substitute the (optionally indented) NA string for missing entries.
  na_string <- paste0(strrep(" ", attr(x, "na_indent")), attr(x, "na"))
  out[is.na(out)] <- na_string

  new_ornament(out, width = width, align = attr(x, "align"))
}
| /R/shaft-simple.R | permissive | markfairbanks/pillar | R | false | false | 1,701 | r | #' @description
#' `new_pillar_shaft_simple()` provides an implementation of the `pillar_shaft`
#' class suitable for output that has a fixed formatting, which will be
#' truncated with a continuation character (ellipsis or `~`) if it doesn't fit
#' the available width.
#' By default, the required width is computed from the natural width of the
#' `formatted` argument.
#'
#' @details
#' The `formatted` argument may also contain ANSI escapes to change color
#' or other attributes of the text, see [crayon].
#'
#' @param formatted An object coercible to [character].
#' @param align Alignment of the column.
#' @param na String to use as `NA` value, defaults to `"NA"` styled with
#' [style_na()] with fallback if color is not available.
#' @param na_indent Indentation of `NA` values.
#' @export
#' @rdname new_pillar_shaft
new_pillar_shaft_simple <- function(formatted, ..., width = NULL, align = "left",
  min_width = NULL, na = NULL, na_indent = 0L) {
  # Default the width to the display width of the widest formatted value.
  if (is.null(width)) {
    width <- get_max_extent(as.character(formatted))
  }
  # Default NA string: styled "NA" with a fallback when color is unavailable.
  if (is.null(na)) {
    na <- pillar_na()
  }
  # Wrap the pre-formatted values in a shaft object; truncation and NA
  # substitution are applied later by format.pillar_shaft_simple().
  new_pillar_shaft(
    list(formatted),
    ...,
    width = width,
    min_width = min_width,
    align = align,
    na = na,
    na_indent = na_indent,
    class = "pillar_shaft_simple"
  )
}
#' @export
format.pillar_shaft_simple <- function(x, width, ...) {
  align <- attr(x, "align")
  # Natural width requested when the shaft was constructed.
  desired_width <- get_width(x)
  shaft <- as.character(x[[1]])
  # Truncate with a continuation character when the column is narrower
  # than the shaft's natural width.
  if (width < desired_width) {
    shaft <- str_trunc(shaft, width)
  }
  # Replace missing entries with the (optionally indented) NA string.
  shaft[is.na(shaft)] <- paste0(
    strrep(" ", attr(x, "na_indent")),
    attr(x, "na")
  )
  new_ornament(shaft, width = width, align = align)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/npCBPS.R
\name{npCBPS}
\alias{npCBPS}
\title{Non-Parametric Covariate Balancing Propensity Score (npCBPS) Estimation}
\usage{
npCBPS(formula, data, na.action, corprior = 0.01, print.level = 0, ...)
}
\arguments{
\item{formula}{An object of class \code{formula} (or one that can be coerced
to that class): a symbolic description of the model to be fitted.}
\item{data}{An optional data frame, list or environment (or object coercible
by as.data.frame to a data frame) containing the variables in the model. If
not found in data, the variables are taken from \code{environment(formula)},
typically the environment from which \code{CBPS} is called.}
\item{na.action}{A function which indicates what should happen when the data
contain NAs. The default is set by the na.action setting of options, and is
na.fail if that is unset.}
\item{corprior}{Prior hyperparameter controlling the expected amount of
correlation between each covariate and the treatment. Specifically, the
amount of correlation between the k-dimensional covariates, X, and the
treatment T after weighting is assumed to have prior distribution
MVN(0,sigma^2 I_k). We conceptualize sigma^2 as a tuning parameter to be
used pragmatically. It's default of 0.1 ensures that the balance constraints
are not too harsh, and that a solution is likely to exist. Once the
algorithm works at such a high value of sigma^2, the user may wish to
attempt values closer to 0 to get finer balance.}
\item{print.level}{Controls verbosity of output to the screen while npCBPS
runs. At the default of print.level=0, little output is produced. If
print.level>0, it outputs diagnostics including the log posterior
(log_post), the log empirical likelihood associated with the weights
(log_el), and the log prior probability of the (weighted) correlation of
treatment with the covariates.}
\item{...}{Other parameters to be passed.}
}
\value{
\item{weights}{The optimal weights} \item{y}{The treatment vector
used} \item{x}{The covariate matrix} \item{model}{The model frame}
\item{call}{The matched call} \item{formula}{The formula supplied}
\item{data}{The data argument} \item{log.p.eta}{The log density for the
(weighted) correlation of the covariates with the treatment, given the
choice of prior (\code{corprior})} \item{log.el}{The log empirical
likelihood of the observed data at the chosen set of IPW weights.}
\item{eta}{A vector describing the correlation between the treatment and
each covariate on the weighted data at the solution.} \item{sumw0}{The sum
of weights, provided as a check on convergence. This is always 1 when
convergence occurs unproblematically. If it differs from 1 substantially, no
solution perfectly satisfying the conditions was found, and the user may
consider a larger value of \code{corprior}.}
}
\description{
\code{npCBPS} is a method to estimate weights interpretable as (stabilized)
inverse generalized propensity score weights, w_i = f(T_i)/f(T_i|X), without
actually estimating a model for the treatment to arrive at f(T|X) estimates.
In brief, this works by maximizing the empirical likelihood of observing the
values of treatment and covariates that were observed, while constraining
the weights to be those that (a) ensure balance on the covariates, and (b)
maintain the original means of the treatment and covariates.
In the continuous treatment context, this balance on covariates means zero
correlation of each covariate with the treatment. In binary or categorical
treatment contexts, balance on covariates implies equal means on the
covariates for observations at each level of the treatment. When given a
numeric treatment the software handles it continuously. To handle the
treatment as binary or categorical is must be given as a factor.
Furthermore, we apply a Bayesian variant that allows the correlation of each
covariate with the treatment to be slightly non-zero, as might be expected
in a given finite sample.
Estimates non-parametric covariate balancing propensity score weights.
### @aliases npCBPS npCBPS.fit
}
\examples{
##Generate data
data(LaLonde)
## Restricted to only two covariates so that it will run quickly.
## Performance will remain good if the full LaLonde specification is used
fit <- npCBPS(treat ~ age + educ, data = LaLonde, corprior=.1/nrow(LaLonde))
plot(fit)
}
\references{
Fong, Christian, Chad Hazlett, and Kosuke Imai. ``Parametric
and Nonparametric Covariate Balancing Propensity Score for General Treatment
Regimes.'' Unpublished Manuscript.
\url{http://imai.princeton.edu/research/files/CBGPS.pdf}
}
\author{
Christian Fong, Chad Hazlett, and Kosuke Imai
}
| /man/npCBPS.Rd | no_license | cran/CBPS | R | false | true | 4,780 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/npCBPS.R
\name{npCBPS}
\alias{npCBPS}
\title{Non-Parametric Covariate Balancing Propensity Score (npCBPS) Estimation}
\usage{
npCBPS(formula, data, na.action, corprior = 0.01, print.level = 0, ...)
}
\arguments{
\item{formula}{An object of class \code{formula} (or one that can be coerced
to that class): a symbolic description of the model to be fitted.}
\item{data}{An optional data frame, list or environment (or object coercible
by as.data.frame to a data frame) containing the variables in the model. If
not found in data, the variables are taken from \code{environment(formula)},
typically the environment from which \code{CBPS} is called.}
\item{na.action}{A function which indicates what should happen when the data
contain NAs. The default is set by the na.action setting of options, and is
na.fail if that is unset.}
\item{corprior}{Prior hyperparameter controlling the expected amount of
correlation between each covariate and the treatment. Specifically, the
amount of correlation between the k-dimensional covariates, X, and the
treatment T after weighting is assumed to have prior distribution
MVN(0,sigma^2 I_k). We conceptualize sigma^2 as a tuning parameter to be
used pragmatically. It's default of 0.1 ensures that the balance constraints
are not too harsh, and that a solution is likely to exist. Once the
algorithm works at such a high value of sigma^2, the user may wish to
attempt values closer to 0 to get finer balance.}
\item{print.level}{Controls verbosity of output to the screen while npCBPS
runs. At the default of print.level=0, little output is produced. If
print.level>0, it outputs diagnostics including the log posterior
(log_post), the log empirical likelihood associated with the weights
(log_el), and the log prior probability of the (weighted) correlation of
treatment with the covariates.}
\item{...}{Other parameters to be passed.}
}
\value{
\item{weights}{The optimal weights} \item{y}{The treatment vector
used} \item{x}{The covariate matrix} \item{model}{The model frame}
\item{call}{The matched call} \item{formula}{The formula supplied}
\item{data}{The data argument} \item{log.p.eta}{The log density for the
(weighted) correlation of the covariates with the treatment, given the
choice of prior (\code{corprior})} \item{log.el}{The log empirical
likelihood of the observed data at the chosen set of IPW weights.}
\item{eta}{A vector describing the correlation between the treatment and
each covariate on the weighted data at the solution.} \item{sumw0}{The sum
of weights, provided as a check on convergence. This is always 1 when
convergence occurs unproblematically. If it differs from 1 substantially, no
solution perfectly satisfying the conditions was found, and the user may
consider a larger value of \code{corprior}.}
}
\description{
\code{npCBPS} is a method to estimate weights interpretable as (stabilized)
inverse generalized propensity score weights, w_i = f(T_i)/f(T_i|X), without
actually estimating a model for the treatment to arrive at f(T|X) estimates.
In brief, this works by maximizing the empirical likelihood of observing the
values of treatment and covariates that were observed, while constraining
the weights to be those that (a) ensure balance on the covariates, and (b)
maintain the original means of the treatment and covariates.
In the continuous treatment context, this balance on covariates means zero
correlation of each covariate with the treatment. In binary or categorical
treatment contexts, balance on covariates implies equal means on the
covariates for observations at each level of the treatment. When given a
numeric treatment the software handles it continuously. To handle the
treatment as binary or categorical is must be given as a factor.
Furthermore, we apply a Bayesian variant that allows the correlation of each
covariate with the treatment to be slightly non-zero, as might be expected
in a given finite sample.
Estimates non-parametric covariate balancing propensity score weights.
### @aliases npCBPS npCBPS.fit
}
\examples{
##Generate data
data(LaLonde)
## Restricted to only two covariates so that it will run quickly.
## Performance will remain good if the full LaLonde specification is used
fit <- npCBPS(treat ~ age + educ, data = LaLonde, corprior=.1/nrow(LaLonde))
plot(fit)
}
\references{
Fong, Christian, Chad Hazlett, and Kosuke Imai. ``Parametric
and Nonparametric Covariate Balancing Propensity Score for General Treatment
Regimes.'' Unpublished Manuscript.
\url{http://imai.princeton.edu/research/files/CBGPS.pdf}
}
\author{
Christian Fong, Chad Hazlett, and Kosuke Imai
}
|
library(tximport)
library(DESeq2)
library(tidyverse)

# Target organism; used below to look up annotation resources.
organism <- "ARABIDOPSIS"

# Input/output locations for this GLDS-37 processing run.
work_dir="/path/to/GLDS-37/processing_scripts/04-05-DESeq2_NormCounts_DGE"
counts_dir="/path/to/GLDS-37/03-RSEM_Counts"
norm_output="/path/to/GLDS-37/04-DESeq2_NormCounts"
DGE_output="/path/to/GLDS-37/05-DESeq2_DGE"

setwd(file.path(work_dir))

# Study design: one row per sample (row names = sample IDs), one column
# per experimental factor; factors drive group/contrast setup below.
study <- read.csv(Sys.glob(file.path(work_dir,"*metadata.csv")), header = TRUE, row.names = 1, stringsAsFactors = TRUE)
##### Group Formatting
# Build one condition label per sample from the study factors.
# BUGFIX: the original condition was `if (dim(study) >= 2)`, which compares
# the whole length-2 (nrow, ncol) vector -- an error under R >= 4.2 and
# wrong logic before that. The intent is to test the number of factor
# columns, i.e. ncol(study).
if (ncol(study) >= 2){
  group<-apply(study,1,paste,collapse = " & ") # concatenate multiple factors into one condition per sample
} else{
  group<-study[,1]
}
group_names <- paste0("(",group,")") # human readable group names
group <- make.names(group) # group naming compatible with R models
names(group) <- group_names
rm(group_names)
##### Contrast Formatting
contrasts <- combn(levels(factor(group)),2) # generate matrix of pairwise group combinations for comparison
contrast.names <- combn(levels(factor(names(group))),2)
contrast.names <- c(paste(contrast.names[1,],contrast.names[2,],sep = "v"),paste(contrast.names[2,],contrast.names[1,],sep = "v")) # format combinations for output table files names
# Duplicate each pair in reverse order so both directions of every
# contrast (AvB and BvA) are tested and reported.
contrasts <- cbind(contrasts,contrasts[c(2,1),])
colnames(contrasts) <- contrast.names
rm(contrast.names)
##### Import Data
# RSEM per-gene result files, one per sample; assumes list.files() order
# matches the metadata row order -- TODO confirm for this dataset.
files <- list.files(file.path(counts_dir),pattern = ".genes.results", full.names = TRUE)
names(files) <- rownames(study)
txi.rsem <- tximport(files, type = "rsem", txIn = FALSE, txOut = FALSE)

# add 1 to genes with lengths of zero if necessary
# (zero effective lengths would otherwise break the DESeq2 offset model)
txi.rsem$length[txi.rsem$length == 0] <- 1

# make DESeqDataSet object (one-factor design on the combined condition)
sampleTable <- data.frame(condition=factor(group))
rownames(sampleTable) <- colnames(txi.rsem$counts)
dds <- DESeqDataSetFromTximport(txi.rsem, sampleTable, ~condition)

# Keep genes whose summed count across all samples exceeds 10.
# NOTE(review): this filters on the total across samples, not on
# "less than 10 in all conditions" as the original comment claimed.
keep <- rowSums(counts(dds)) > 10
dds <- dds[keep,]
summary(dds)
#### Perform DESeq analysis
dds_1 <- DESeq(dds)

# export unnormalized and normalized counts
normCounts = as.data.frame(counts(dds_1, normalized=TRUE))
setwd(file.path(norm_output))
write.csv(txi.rsem$counts,file='Unnormalized_Counts.csv')
write.csv(normCounts,file='Normalized_Counts.csv')
write.csv(sampleTable,file='SampleTable.csv')
setwd(file.path(work_dir))

# Pseudo-count of 1 keeps the log2 transforms below finite. Applied
# after the raw normalized counts were written above.
normCounts <- normCounts +1

# Likelihood-ratio test against the intercept-only model; its adjusted
# p-values serve below as an ANOVA-like overall significance column.
dds_1_lrt <- DESeq(dds_1, test = "LRT", reduced = ~ 1)
res_1_lrt <- results(dds_1_lrt)

# Lookup table mapping organism name -> annotation package / taxon id.
organism_table <- read.csv(file.path(work_dir,"organisms.csv"))
##### Generate annotated DGE tables
library(STRINGdb) # for String database annotations
library(PANTHER.db) # for GOSLIM annotations
ann.dbi <- organism_table$annotations[organism_table$name == organism] # Organism specific gene annotation database
ann.dbi=as.character(ann.dbi)
# Install the organism annotation package on the fly if it is not
# already available, then attach it.
if(!require(ann.dbi, character.only=TRUE)) {
  BiocManager::install(ann.dbi, ask = FALSE)
  library(ann.dbi, character.only=TRUE)
}

## for normalized counts
# start output table with normalized sample expression values
output_table_1 <- normCounts
reduced_output_table_1 <- normCounts

##### Iterate through Wald Tests
# One Wald test per ordered pair of groups; append log2 fold-change,
# raw p-value and BH-adjusted p-value columns for each contrast.
for (i in 1:dim(contrasts)[2]){
  res_1 <- results(dds_1, contrast=c("condition",contrasts[1,i],contrasts[2,i]))
  res_1 <- as.data.frame(res_1@listData)[,c(2,5,6)] # log2FoldChange, pvalue, padj
  colnames(res_1)<-c(paste0("Log2fc_",colnames(contrasts)[i]),paste0("P.value_",colnames(contrasts)[i]),paste0("Adj.p.value_",colnames(contrasts)[i]))
  output_table_1<-cbind(output_table_1,res_1)
  reduced_output_table_1 <- cbind(reduced_output_table_1,res_1)
  rm(res_1)
}
# Gene Annotation columns
keytype = "TAIR"   # Arabidopsis gene identifiers (TAIR locus IDs)
annot <- data.frame(rownames(output_table_1), stringsAsFactors = FALSE)
colnames(annot)[1]<-keytype

# Map TAIR IDs to each standard identifier type the annotation package
# supports (first match wins).
# NOTE(review): eval(parse(text = ann.dbi)) fetches the annotation
# package object by its name; get(ann.dbi) would be the cleaner idiom.
if ("SYMBOL" %in% columns(eval(parse(text = ann.dbi),env=.GlobalEnv))){
  annot$SYMBOL<-mapIds(eval(parse(text = ann.dbi),env=.GlobalEnv),keys = rownames(output_table_1),keytype = keytype, column = "SYMBOL", multiVals = "first")
}
if ("GENENAME" %in% columns(eval(parse(text = ann.dbi),env=.GlobalEnv))){
  annot$GENENAME<-mapIds(eval(parse(text = ann.dbi),env=.GlobalEnv),keys = rownames(output_table_1),keytype = keytype, column = "GENENAME", multiVals = "first")
}
if ("ENSEMBL" %in% columns(eval(parse(text = ann.dbi),env=.GlobalEnv))){
  annot$ENSEMBL<-mapIds(eval(parse(text = ann.dbi),env=.GlobalEnv),keys = rownames(output_table_1),keytype = keytype, column = "ENSEMBL", multiVals = "first")
}
if ("REFSEQ" %in% columns(eval(parse(text = ann.dbi),env=.GlobalEnv))){
  annot$REFSEQ<-mapIds(eval(parse(text = ann.dbi),env=.GlobalEnv),keys = rownames(output_table_1),keytype = keytype, column = "REFSEQ", multiVals = "first")
}
if ("ENTREZID" %in% columns(eval(parse(text = ann.dbi),env=.GlobalEnv))){
  annot$ENTREZID<-mapIds(eval(parse(text = ann.dbi),env=.GlobalEnv),keys = rownames(output_table_1),keytype = keytype, column = "ENTREZID", multiVals = "first")
}

# STRING protein network IDs, joined in via gene symbol (only the first
# mapping per symbol is kept to avoid a one-to-many join).
string_db <- STRINGdb$new( version="10", species=organism_table$taxon[organism_table$name == organism],score_threshold=0)
string_map <- string_db$map(annot,"SYMBOL",removeUnmappedRows = FALSE, takeFirst = TRUE)[,c(1,6)]
string_map <- string_map[!duplicated(string_map$SYMBOL),]
annot <- dplyr::left_join(annot,string_map, by = "SYMBOL")

# PANTHER GO-slim IDs keyed by Entrez ID (stored as a list column,
# flattened to strings before the CSV export).
# NOTE(review): na.omit() can shorten `panther` relative to nrow(annot);
# confirm the lengths still line up for this organism.
pthOrganisms(PANTHER.db) <- organism
panther <- mapIds(PANTHER.db,keys = annot$ENTREZID,keytype = "ENTREZ",column = "GOSLIM_ID", multiVals = "list")
panther <- na.omit(panther)
annot$GOSLIM_IDS <- panther
rm(string_db,string_map,panther,keytype)
# add all sample mean column
output_table_1$All.mean <- rowMeans(normCounts, na.rm = TRUE, dims = 1)
reduced_output_table_1$All.mean <- rowMeans(normCounts, na.rm = TRUE, dims = 1)

# add all sample stdev column
# NOTE(review): rowSds() is provided by matrixStats (attached via the
# Bioconductor stack); confirm it is on the search path.
# NOTE(review): the column is named "stdev" here but "All.stdev" in the
# reduced table -- confirm whether this asymmetry is intentional.
output_table_1$stdev <- rowSds(as.matrix(normCounts), na.rm = TRUE, dims = 1)
reduced_output_table_1$All.stdev <- rowSds(as.matrix(normCounts), na.rm = TRUE, dims = 1)

# add F statistic p-value (similar to ANOVA p-value) column
# (BH-adjusted p-values from the intercept-only LRT computed earlier)
output_table_1$LRT.p.value <- res_1_lrt@listData$padj
reduced_output_table_1$LRT.p.value <- res_1_lrt@listData$padj

# add group mean and stdev columns
# Transpose so samples are rows, then aggregate by experimental group.
tcounts <- as.data.frame(t(normCounts))
tcounts$group <- group
group_means <- as.data.frame(t(aggregate(. ~ group,data = tcounts,mean)))
group_means <- group_means[-c(1),]   # drop the "group" label row
colnames(group_means) <- paste0("Group.Mean_",levels(factor(names(group))))
group_stdev <- as.data.frame(t(aggregate(. ~ group,data = tcounts,sd)))
group_stdev <- group_stdev[-c(1),]   # drop the "group" label row
colnames(group_stdev) <- paste0("Group.Stdev_",levels(factor(names(group))))
output_table_1 <- cbind(output_table_1,group_means)
reduced_output_table_1 <- cbind(reduced_output_table_1,group_means)
output_table_1 <- cbind(output_table_1,group_stdev)
reduced_output_table_1 <- cbind(reduced_output_table_1,group_stdev)
rm(group_stdev,group_means,tcounts)
# add updown columns (sign of logfc columns)
# +1 / -1 / 0 indicating direction of each contrast's fold change.
updown_table <- sign(output_table_1[,grep("Log2fc_",colnames(output_table_1))])
colnames(updown_table) <- gsub("Log2fc","Updown",grep("Log2fc_",colnames(output_table_1),value = TRUE))
output_table_1 <- cbind(output_table_1,updown_table)
rm(updown_table)

# add contrast significance columns (raw p-value thresholds 0.1 and 0.05)
sig.1_table <- output_table_1[,grep("P.value_",colnames(output_table_1))]<=.1
colnames(sig.1_table) <- gsub("P.value","Sig.1",grep("P.value_",colnames(output_table_1),value = TRUE))
output_table_1 <- cbind(output_table_1,sig.1_table)
rm(sig.1_table)
sig.05_table <- output_table_1[,grep("P.value_",colnames(output_table_1))]<=.05
colnames(sig.05_table) <- gsub("P.value","Sig.05",grep("P.value_",colnames(output_table_1),value = TRUE))
output_table_1 <- cbind(output_table_1,sig.05_table)
rm(sig.05_table)

# add volcano plot columns
# NOTE(review): log2 (not the conventional -log10) of the p-values is
# stored; the downstream visualization presumably expects this -- confirm.
log_pval_table <- log2(output_table_1[,grep("P.value_",colnames(output_table_1))])
colnames(log_pval_table) <- paste0("Log2_",colnames(log_pval_table))
output_table_1 <- cbind(output_table_1,log_pval_table)
rm(log_pval_table)
log_adj_pval_table <- log2(output_table_1[,grep("Adj.p.value_",colnames(output_table_1))])
colnames(log_adj_pval_table) <- paste0("Log2_",colnames(log_adj_pval_table))
output_table_1 <- cbind(output_table_1,log_adj_pval_table)
rm(log_adj_pval_table)
# add annotations to table
output_table_1 <- cbind(annot,output_table_1)
reduced_output_table_1 <- cbind(annot,reduced_output_table_1)
rownames(output_table_1) <- NULL
rownames(reduced_output_table_1) <- NULL

# Flatten the GO-slim list column into comma-separated strings so the
# tables can be written as CSV.
output_table_1$GOSLIM_IDS <- vapply(output_table_1$GOSLIM_IDS, paste, collapse = ", ", character(1L))
reduced_output_table_1$GOSLIM_IDS <- vapply(reduced_output_table_1$GOSLIM_IDS, paste, collapse = ", ", character(1L))

# Write the full (visualization) and reduced (DGE) tables plus the
# contrast matrix.
write.csv(output_table_1,file.path(DGE_output, "visualization_output_table.csv"), row.names = FALSE)
write.csv(contrasts,file.path(DGE_output, "contrasts.csv"))
write.csv(reduced_output_table_1,file.path(DGE_output, "differential_expression.csv"), row.names = FALSE)

##### Generate PCA
# PCA on log2 normalized counts (the pseudo-count of 1 added earlier
# keeps the log finite); sample scores are written for visualization.
exp_raw <- log2(normCounts)
PCA_raw <- prcomp(t(exp_raw), scale = FALSE)
write.csv(PCA_raw$x,file.path(DGE_output, "visualization_PCA_table.csv"), row.names = TRUE)
rm(exp_raw,PCA_raw)
| /RNAseq/GLDS_Processing_Scripts/GLDS-37/04-05-DESeq2_NormCounts_DGE/deseq2_normcounts_noERCC_DGE_vis.R | no_license | AstrobioMike/GeneLab_Data_Processing | R | false | false | 8,995 | r | library(tximport)
library(DESeq2)
library(tidyverse)
organism <- "ARABIDOPSIS"
work_dir="/path/to/GLDS-37/processing_scripts/04-05-DESeq2_NormCounts_DGE"
counts_dir="/path/to/GLDS-37/03-RSEM_Counts"
norm_output="/path/to/GLDS-37/04-DESeq2_NormCounts"
DGE_output="/path/to/GLDS-37/05-DESeq2_DGE"
setwd(file.path(work_dir))
study <- read.csv(Sys.glob(file.path(work_dir,"*metadata.csv")), header = TRUE, row.names = 1, stringsAsFactors = TRUE)
##### Group Formatting
# Build one condition label per sample from the study factors.
# BUGFIX: the original condition was `if (dim(study) >= 2)`, which compares
# the whole length-2 (nrow, ncol) vector -- an error under R >= 4.2 and
# wrong logic before that. The intent is to test the number of factor
# columns, i.e. ncol(study).
if (ncol(study) >= 2){
  group<-apply(study,1,paste,collapse = " & ") # concatenate multiple factors into one condition per sample
} else{
  group<-study[,1]
}
group_names <- paste0("(",group,")") # human readable group names
group <- make.names(group) # group naming compatible with R models
names(group) <- group_names
rm(group_names)
##### Contrast Formatting
contrasts <- combn(levels(factor(group)),2) # generate matrix of pairwise group combinations for comparison
contrast.names <- combn(levels(factor(names(group))),2)
contrast.names <- c(paste(contrast.names[1,],contrast.names[2,],sep = "v"),paste(contrast.names[2,],contrast.names[1,],sep = "v")) # format combinations for output table files names
contrasts <- cbind(contrasts,contrasts[c(2,1),])
colnames(contrasts) <- contrast.names
rm(contrast.names)
##### Import Data
files <- list.files(file.path(counts_dir),pattern = ".genes.results", full.names = TRUE)
names(files) <- rownames(study)
txi.rsem <- tximport(files, type = "rsem", txIn = FALSE, txOut = FALSE)
# add 1 to genes with lengths of zero if necessary
txi.rsem$length[txi.rsem$length == 0] <- 1
# make DESeqDataSet object
sampleTable <- data.frame(condition=factor(group))
rownames(sampleTable) <- colnames(txi.rsem$counts)
dds <- DESeqDataSetFromTximport(txi.rsem, sampleTable, ~condition)
# filter out genes with counts of less than 10 in all conditions
keep <- rowSums(counts(dds)) > 10
dds <- dds[keep,]
summary(dds)
#### Perform DESeq analysis
dds_1 <- DESeq(dds)
# export unnormalized and normalized counts
normCounts = as.data.frame(counts(dds_1, normalized=TRUE))
setwd(file.path(norm_output))
write.csv(txi.rsem$counts,file='Unnormalized_Counts.csv')
write.csv(normCounts,file='Normalized_Counts.csv')
write.csv(sampleTable,file='SampleTable.csv')
setwd(file.path(work_dir))
normCounts <- normCounts +1
dds_1_lrt <- DESeq(dds_1, test = "LRT", reduced = ~ 1)
res_1_lrt <- results(dds_1_lrt)
organism_table <- read.csv(file.path(work_dir,"organisms.csv"))
##### Generate annotated DGE tables
library(STRINGdb) # for String database annotations
library(PANTHER.db) # for GOSLIM annotations
ann.dbi <- organism_table$annotations[organism_table$name == organism] # Organism specific gene annotation database
ann.dbi=as.character(ann.dbi)
if(!require(ann.dbi, character.only=TRUE)) {
BiocManager::install(ann.dbi, ask = FALSE)
library(ann.dbi, character.only=TRUE)
}
## for normalized counts
# start output table with normalized sample expression values
output_table_1 <- normCounts
reduced_output_table_1 <- normCounts
##### Iterate through Wald Tests
for (i in 1:dim(contrasts)[2]){
res_1 <- results(dds_1, contrast=c("condition",contrasts[1,i],contrasts[2,i]))
res_1 <- as.data.frame(res_1@listData)[,c(2,5,6)]
colnames(res_1)<-c(paste0("Log2fc_",colnames(contrasts)[i]),paste0("P.value_",colnames(contrasts)[i]),paste0("Adj.p.value_",colnames(contrasts)[i]))
output_table_1<-cbind(output_table_1,res_1)
reduced_output_table_1 <- cbind(reduced_output_table_1,res_1)
rm(res_1)
}
# Gene Annotation columns
keytype = "TAIR"
annot <- data.frame(rownames(output_table_1), stringsAsFactors = FALSE)
colnames(annot)[1]<-keytype
if ("SYMBOL" %in% columns(eval(parse(text = ann.dbi),env=.GlobalEnv))){
annot$SYMBOL<-mapIds(eval(parse(text = ann.dbi),env=.GlobalEnv),keys = rownames(output_table_1),keytype = keytype, column = "SYMBOL", multiVals = "first")
}
if ("GENENAME" %in% columns(eval(parse(text = ann.dbi),env=.GlobalEnv))){
annot$GENENAME<-mapIds(eval(parse(text = ann.dbi),env=.GlobalEnv),keys = rownames(output_table_1),keytype = keytype, column = "GENENAME", multiVals = "first")
}
if ("ENSEMBL" %in% columns(eval(parse(text = ann.dbi),env=.GlobalEnv))){
annot$ENSEMBL<-mapIds(eval(parse(text = ann.dbi),env=.GlobalEnv),keys = rownames(output_table_1),keytype = keytype, column = "ENSEMBL", multiVals = "first")
}
if ("REFSEQ" %in% columns(eval(parse(text = ann.dbi),env=.GlobalEnv))){
annot$REFSEQ<-mapIds(eval(parse(text = ann.dbi),env=.GlobalEnv),keys = rownames(output_table_1),keytype = keytype, column = "REFSEQ", multiVals = "first")
}
if ("ENTREZID" %in% columns(eval(parse(text = ann.dbi),env=.GlobalEnv))){
annot$ENTREZID<-mapIds(eval(parse(text = ann.dbi),env=.GlobalEnv),keys = rownames(output_table_1),keytype = keytype, column = "ENTREZID", multiVals = "first")
}
string_db <- STRINGdb$new( version="10", species=organism_table$taxon[organism_table$name == organism],score_threshold=0)
string_map <- string_db$map(annot,"SYMBOL",removeUnmappedRows = FALSE, takeFirst = TRUE)[,c(1,6)]
string_map <- string_map[!duplicated(string_map$SYMBOL),]
annot <- dplyr::left_join(annot,string_map, by = "SYMBOL")
pthOrganisms(PANTHER.db) <- organism
panther <- mapIds(PANTHER.db,keys = annot$ENTREZID,keytype = "ENTREZ",column = "GOSLIM_ID", multiVals = "list")
panther <- na.omit(panther)
annot$GOSLIM_IDS <- panther
rm(string_db,string_map,panther,keytype)
# add all sample mean column
output_table_1$All.mean <- rowMeans(normCounts, na.rm = TRUE, dims = 1)
reduced_output_table_1$All.mean <- rowMeans(normCounts, na.rm = TRUE, dims = 1)
# add all sample stdev column
output_table_1$stdev <- rowSds(as.matrix(normCounts), na.rm = TRUE, dims = 1)
reduced_output_table_1$All.stdev <- rowSds(as.matrix(normCounts), na.rm = TRUE, dims = 1)
# add F statistic p-value (similar to ANOVA p-value) column
output_table_1$LRT.p.value <- res_1_lrt@listData$padj
reduced_output_table_1$LRT.p.value <- res_1_lrt@listData$padj
# add group mean and stdev columns
tcounts <- as.data.frame(t(normCounts))
tcounts$group <- group
group_means <- as.data.frame(t(aggregate(. ~ group,data = tcounts,mean)))
group_means <- group_means[-c(1),]
colnames(group_means) <- paste0("Group.Mean_",levels(factor(names(group))))
group_stdev <- as.data.frame(t(aggregate(. ~ group,data = tcounts,sd)))
group_stdev <- group_stdev[-c(1),]
colnames(group_stdev) <- paste0("Group.Stdev_",levels(factor(names(group))))
output_table_1 <- cbind(output_table_1,group_means)
reduced_output_table_1 <- cbind(reduced_output_table_1,group_means)
output_table_1 <- cbind(output_table_1,group_stdev)
reduced_output_table_1 <- cbind(reduced_output_table_1,group_stdev)
rm(group_stdev,group_means,tcounts)
# add updown columns (sign of logfc columns)
updown_table <- sign(output_table_1[,grep("Log2fc_",colnames(output_table_1))])
colnames(updown_table) <- gsub("Log2fc","Updown",grep("Log2fc_",colnames(output_table_1),value = TRUE))
output_table_1 <- cbind(output_table_1,updown_table)
rm(updown_table)
# add contrast significance columns
sig.1_table <- output_table_1[,grep("P.value_",colnames(output_table_1))]<=.1
colnames(sig.1_table) <- gsub("P.value","Sig.1",grep("P.value_",colnames(output_table_1),value = TRUE))
output_table_1 <- cbind(output_table_1,sig.1_table)
rm(sig.1_table)
sig.05_table <- output_table_1[,grep("P.value_",colnames(output_table_1))]<=.05
colnames(sig.05_table) <- gsub("P.value","Sig.05",grep("P.value_",colnames(output_table_1),value = TRUE))
output_table_1 <- cbind(output_table_1,sig.05_table)
rm(sig.05_table)
# add volcano plot columns
log_pval_table <- log2(output_table_1[,grep("P.value_",colnames(output_table_1))])
colnames(log_pval_table) <- paste0("Log2_",colnames(log_pval_table))
output_table_1 <- cbind(output_table_1,log_pval_table)
rm(log_pval_table)
log_adj_pval_table <- log2(output_table_1[,grep("Adj.p.value_",colnames(output_table_1))])
colnames(log_adj_pval_table) <- paste0("Log2_",colnames(log_adj_pval_table))
output_table_1 <- cbind(output_table_1,log_adj_pval_table)
rm(log_adj_pval_table)
# add annotations to table
output_table_1 <- cbind(annot,output_table_1)
reduced_output_table_1 <- cbind(annot,reduced_output_table_1)
rownames(output_table_1) <- NULL
rownames(reduced_output_table_1) <- NULL
output_table_1$GOSLIM_IDS <- vapply(output_table_1$GOSLIM_IDS, paste, collapse = ", ", character(1L))
reduced_output_table_1$GOSLIM_IDS <- vapply(reduced_output_table_1$GOSLIM_IDS, paste, collapse = ", ", character(1L))
write.csv(output_table_1,file.path(DGE_output, "visualization_output_table.csv"), row.names = FALSE)
write.csv(contrasts,file.path(DGE_output, "contrasts.csv"))
write.csv(reduced_output_table_1,file.path(DGE_output, "differential_expression.csv"), row.names = FALSE)
##### Generate PCA
exp_raw <- log2(normCounts)
PCA_raw <- prcomp(t(exp_raw), scale = FALSE)
write.csv(PCA_raw$x,file.path(DGE_output, "visualization_PCA_table.csv"), row.names = TRUE)
rm(exp_raw,PCA_raw)
|
# My first program in R: greet the world, then compute a simple
# summary statistic.
myString <- "Hello World"
print(myString)

# Arithmetic mean of the integers 1..50 (expected value: 25.5),
# auto-printed at the top level.
mean_values <- mean(seq_len(50))
mean_values
| /HelloWorld.R | no_license | Ram-chundru/RStudioHelloWorld | R | false | false | 116 | r | #My first program in R Programming
myString <- "Hello World"
print(myString)
mean_values <- mean(1:50)
mean_values
|
library(shiny)
library(leaflet)

# UI for the "City Analytics" dashboard: a tabset with a cross-city
# comparator and a per-city insights view; each tab drives its own set of
# sidebar controls via conditionalPanel on input.tabselected.
# NOTE(review): pageWithSidebar()/shinyUI() are legacy shiny constructs;
# kept to preserve the existing layout and server interface.
shinyUI(pageWithSidebar(

    headerPanel("City Analytics"),

    sidebarPanel(

      # Sidebar shown on the "City Comparator" tab (value 1).
      conditionalPanel(condition="input.tabselected==1",
                       h4("City"),
                       # Several cities can be selected for side-by-side comparison.
                       selectInput("MultiCitySelector",label = NULL, multiple=TRUE, choices=c("None"), selected=NULL),
                       h4("Feature"),
                       selectInput("FeatureSelector",label = NULL, choices=c("Price", "Availability/Month", "Revenue/Month"), selected=NULL),
                       h4("Date Range"),
                       dateRangeInput('DateRangeComparator', label = NULL, start = Sys.Date() - 3, end = Sys.Date() + 3, format = "dd/mm/yyyy"),
                       h4("Comparator"),
                       selectInput("Comparator",label = NULL, choices=c("None","Num/Bedrooms", "Room Type"), selected=NULL),
                       h4("Plot Type"),
                       # NOTE(review): "Box Distibution" is misspelled but kept
                       # verbatim because the server code matches on this string.
                       selectInput("PlotSelector",label = NULL, choices=c("Box Distibution", "Bar Mean", "Bar Median"), selected=NULL)),

      # Sidebar shown on the "City Insights" tab (value 2).
      conditionalPanel(condition="input.tabselected==2",
                       h4("City"),
                       selectInput("CitySelector",label = NULL, choices=c("None"), selected=NULL),
                       h4("Plot Type"),
                       selectInput("PlotCondition",label = NULL, choices=c("Proportion", "Distribution", "Average"), selected=NULL),
                       h4("Date Range"),
                       dateRangeInput("DateRangeSelector", label = NULL, start = Sys.Date() - 3, end = Sys.Date() + 3, format = "dd/mm/yyyy"),
                       # The feature selector is irrelevant for proportion plots.
                       conditionalPanel(condition="input.PlotCondition!='Proportion'", h4("Feature"),
                                        selectInput("FeatureCondition",label = NULL, choices=c("Price", "Availability/Month", "Revenue/Month"), selected=NULL)),
                       h4("Comparator"),
                       # Trailing comma after this input removed (it produced an
                       # empty argument to conditionalPanel).
                       selectInput("ComparatorCondition",label = NULL, choices=c("None","Num/Bedrooms", "Room Type", "Neighbourhood"), selected=NULL))),

    mainPanel(
      tabsetPanel(
        # First tab: cross-city comparison plot.
        tabPanel("City Comparator", value=1, plotOutput("comparator")),
        # Second tab: single-city insight plot plus a leaflet map.
        tabPanel("City Insights", value=2, plotOutput("insight"), leafletOutput("map")),
        # id makes the selected tab available as input$tabselected.
        id = "tabselected"
      )
    )
))
)) | /Application/ui.R | no_license | LouisDeveze/R-Project | R | false | false | 2,576 | r | library(shiny)
library(leaflet)
# Define UI for application that draws a histogram
shinyUI(pageWithSidebar(
headerPanel("City Analytics"),
sidebarPanel(
# First Sidebar
conditionalPanel(condition="input.tabselected==1",
h4("City"),
selectInput("MultiCitySelector",label = NULL, multiple=TRUE, choices=c("None"), selected=NULL),
h4("Feature"),
selectInput("FeatureSelector",label = NULL, choices=c("Price", "Availability/Month", "Revenue/Month"), selected=NULL),
h4("Date Range"),
dateRangeInput('DateRangeComparator', label = NULL, start = Sys.Date() - 3, end = Sys.Date() + 3, format = "dd/mm/yyyy"),
h4("Comparator"),
selectInput("Comparator",label = NULL, choices=c("None","Num/Bedrooms", "Room Type"), selected=NULL),
h4("Plot Type"),
selectInput("PlotSelector",label = NULL, choices=c("Box Distibution", "Bar Mean", "Bar Median"), selected=NULL)),
# Second Sidebar
conditionalPanel(condition="input.tabselected==2",
h4("City"),
selectInput("CitySelector",label = NULL, choices=c("None"), selected=NULL),
h4("Plot Type"),
selectInput("PlotCondition",label = NULL, choices=c("Proportion", "Distribution", "Average"), selected=NULL),
h4("Date Range"),
dateRangeInput("DateRangeSelector", label = NULL, start = Sys.Date() - 3, end = Sys.Date() + 3, format = "dd/mm/yyyy"),
conditionalPanel(condition="input.PlotCondition!='Proportion'", h4("Feature"),
selectInput("FeatureCondition",label = NULL, choices=c("Price", "Availability/Month", "Revenue/Month"), selected=NULL)),
h4("Comparator"),
selectInput("ComparatorCondition",label = NULL, choices=c("None","Num/Bedrooms", "Room Type", "Neighbourhood"), selected=NULL),
)),
mainPanel(
tabsetPanel(
#First Tab
tabPanel("City Comparator", value=1, plotOutput("comparator")),
# Second Tab
tabPanel("City Insights", value=2, plotOutput("insight"), leafletOutput("map")),
# Id of the Tabset Panel
id = "tabselected"
)
)
)) |
meta <- function(y, v, x, data, intercept.constraints=NULL, coef.constraints=NULL,
RE.constraints=NULL, RE.startvalues=0.1, RE.lbound=1e-10,
intervals.type=c("z", "LB"), I2="I2q", R2=TRUE,
model.name="Meta analysis with ML",
suppressWarnings=TRUE, silent=TRUE, run=TRUE, ...) {
mf <- match.call()
if (missing(data)) {
data <- sys.frame(sys.parent())
} else {
if (!is.data.frame(data)) data <- data.frame(data)
}
my.y <- mf[[match("y", names(mf))]]
my.v <- mf[[match("v", names(mf))]]
y <- eval(my.y, data, enclos = sys.frame(sys.parent()))
v <- eval(my.v, data, enclos = sys.frame(sys.parent()))
if (is.vector(y)) no.y <- 1 else no.y <- ncol(y)
if (is.vector(v)) no.v <- 1 else no.v <- ncol(v)
if (missing(x)) no.x <- 0 else {
my.x <- mf[[match("x", names(mf))]]
x <- eval(my.x, data, enclos = sys.frame(sys.parent()))
if (is.vector(x)) no.x <- 1 else no.x <- ncol(x)
}
if ( no.v != no.y*(no.y+1)/2 )
stop(paste("The expected no. of columns in v is ", no.y*(no.y+1)/2,
" while the observed no. of columns in v is ", no.v, ".", sep=""))
v.labels <- vech(outer(1:no.y, 1:no.y, function(x, y) paste("v", x,"_", y, sep = "")))
y.labels <- paste("y", 1:no.y, sep="")
x.labels <- paste("x", 1:no.x, sep="")
## If is.na(v), convert y into NA. NA in y will be handled automatically.
## Since NA in v (definition variable) is not allowed. Convert v into 1e10.
## Select variances only
## FIXME: how about NA in covariances?
if (no.y==1) {
y[is.na(v)] <- NA
} else {
index <- matrix(0, nrow=no.y, ncol=no.y)
index[lower.tri(index, diag=TRUE)] <- seq(1, no.y*(no.y+1)/2)
index <- Diag(index)
y[is.na(v[, index])] <- NA
}
v[is.na(v)] <- 1e10
## FIXME: It is better to modify miss.x that includes regression coefficients
if (no.x==0) {
## x <- NULL
input.df <- as.matrix(cbind(y, v))
dimnames(input.df) <- list(NULL, c(y.labels, v.labels))
# No missing value in x
miss.x <- rep(FALSE, nrow(input.df))
} else {
input.df <- as.matrix(cbind(y, v, x))
dimnames(input.df) <- list(NULL, c(y.labels, v.labels, x.labels))
if (no.x==1) miss.x <- is.na(x) else miss.x <- apply(is.na(x), 1, any)
}
## Remove missing data; my.df is used in the actual data analysis
## Missing y is automatically handled by OpenMx
my.df <- input.df[!miss.x, ]
## Fix a bug reported by Noel Card that the number of statistics are incorrect and with negative dfs.
## It is due to my.df is a matrix, whereas OpenMx expects a data frame. ???
my.df <- as.data.frame(my.df)
## Preparing the Beta1 matrix for the intercept vector
## Inter is a 1 by no.y row vector
if (is.null(intercept.constraints)) {
Inter <- matrix( paste("0*Intercept", 1:no.y, sep=""), nrow=1, ncol=no.y )
} else {
## Convert intercept.constraints into a row matrix if it is not a matrix
if (!is.matrix(intercept.constraints))
intercept.constraints <- t(as.matrix(intercept.constraints))
if (!all(dim(intercept.constraints)==c(1, no.y)))
stop("Dimensions of \"intercept.constraints\" are incorrect.")
Inter <- intercept.constraints
}
Inter <- as.mxMatrix(t(Inter), name="Inter")
## Without predictors
## X: a 1 by (1+no.x) row vector
if (no.x==0) {
X <- mxMatrix("Unit", nrow=1, ncol=1, name="X")
## No predictor
Beta1 <- mxAlgebra(Inter, name="Beta1")
## Not used; just make sure Beta is present in mxModel()
Beta <- mxMatrix("Zero", nrow=1, ncol=1, name="Beta")
} else {
if (is.null(coef.constraints)) {
yVar <- paste("y", seq(1,no.y), sep="", collapse="+")
xVar <- paste("x", seq(1,no.x), sep="", collapse="+")
# Use lm() coefficients as starting values
startValues <- tryCatch( eval(parse(text=paste("t(coefficients(lm(cbind(",
yVar, ")~", xVar,", data=my.df)))", sep=""))),
error = function(e) e )
# If error, replace it with 0. Added a column of intercepts
# Fixed a minor bug that no starting value on the last predictor
# when intercept.constraints=0
if ( inherits(startValues, "error") | !is.null(intercept.constraints) )
startValues <- matrix(0, nrow=no.y, ncol=(no.x+1))
A.labels <- outer(1:no.y, 1:no.x, function(y, x) paste("*Slope", y,"_", x, sep = ""))
Beta <- matrix( paste(startValues[,-1], A.labels, sep=""), nrow=no.y, ncol=no.x )
} else {
## Convert coef.constraints into a column matrix if it is not a matrix
if (!is.matrix(coef.constraints))
coef.constraints <- as.matrix(coef.constraints)
coef.dim <- dim(coef.constraints)
if (!coef.dim[1]==no.y | !(coef.dim[2] %in% c(no.x, no.x+no.y)))
stop("Dimensions of \"coef.constraints\" are incorrect.")
Beta <- coef.constraints
}
Beta <- as.mxMatrix(Beta)
Beta1 <- mxAlgebra( cbind(Inter, Beta), name="Beta1")
## X.matrix <- paste("mxMatrix(\"Full\", nrow=1, ncol=(1+no.x), free=FALSE, values=c(1,",
## paste("data.x",1:no.x,sep="", collapse=","), "), name=\"X\")", sep="")
## eval(parse(text = X.matrix))
X <- mxMatrix("Full", nrow=1, ncol=(1+no.x), free=FALSE, values=c(1, rep(NA, no.x)),
labels=c(NA, paste("data.x",1:no.x,sep="")), name="X")
}
expMean <- mxAlgebra( X %*% t(Beta1), name="expMean")
## Fixed a bug in 0.5-0 that lbound is not added into Tau
## when RE.constraints is used.
## lbound in variance component of the random effects
if (is.matrix(RE.lbound)) {
if (!all(dim(RE.lbound)==c(no.y, no.y)))
warning("Dimensions of \"RE.lbound\" are incorrect.")
# FIXME: need to handle unequal dimensions better
lbound <- RE.lbound
## lbound is a matrix
} else {
lbound <- matrix(NA, nrow=no.y, ncol=no.y)
Diag(lbound) <- RE.lbound
## lbound is a matrix
}
## Preparing the S matrix for covariance elements
# No predictor
if (is.null(RE.constraints)) {
# Better to use starting values based on diagonal matrix rather than the UMM
if (is.matrix(RE.startvalues)) {
if (!all(dim(RE.startvalues)==c(no.y, no.y)))
warning("Dimensions of \"RE.startvalues\" are incorrect.")
values <- vech(RE.startvalues)
} else {
values <- vech(Diag(x=RE.startvalues, nrow=no.y, ncol=no.y))
}
Tau.labels <- vech(outer(1:no.y, 1:no.y, function(x,y) { paste("Tau2_",x,"_",y,sep="")}))
Tau <- mxMatrix("Symm", ncol=no.y, nrow=no.y, free=TRUE, labels=Tau.labels,
lbound=vech(lbound), values=values, name="Tau")
} else {
## Convert RE.constraints into a column matrix if it is not a matrix
if (!is.matrix(RE.constraints))
RE.constraints <- as.matrix(RE.constraints)
if (!all(dim(RE.constraints)==c(no.y, no.y)))
stop("Dimensions of \"RE.constraints\" are incorrect.")
## Fixed a bug that reads lbound improperly
## Since as.mxMatrix expects a full matrix, lbound=vech(lbound) is incorrect
Tau <- as.mxMatrix(RE.constraints, lbound=c(lbound), name="Tau")
}
V <- mxMatrix("Symm", ncol=no.y, nrow=no.y, free=FALSE,
labels=paste("data.", v.labels, sep=""), name="V")
expCov <- mxAlgebra(V+Tau, name="expCov")
## Assuming NA first
mx0.fit <- NA
if (no.x==0) {
I2 <- match.arg(I2, c("I2q", "I2hm", "I2am"), several.ok=TRUE)
## Select variances and exclude covariances
v_het <- input.df[, paste("v", 1:no.y, "_", 1:no.y, sep=""), drop=FALSE]
## Calculate I2
## Based on Higgins and Thompson (2002), Eq. 9
sum.w <- apply(v_het, 2, function(x) sum(1/x))
sum.w2 <- apply(v_het, 2, function(x) sum(1/x^2))
## NA in v has been replaced by 1e10
no.studies <- apply(v_het, 2, function(x) sum(x<1e9))
## Typical V based on Q statistic
qV <- matrix((no.studies-1)*sum.w/(sum.w^2-sum.w2), nrow=1)
## Typical V based on harmonic mean
hmV <- matrix(no.studies/sum.w, nrow=1)
## Typical V based on arithmatic mean
amV <- apply(v_het, 2, function(x) mean(x[x<1e9]))
amV <- matrix(amV, nrow=1)
V_het <- rbind(qV, hmV, amV)
## Select the heter.indices
## Before selection: V_het is a c("I2q","I2hm","I2am") by c(y1, y2, y3) matrix
## After selecting: A column vector of I2q(y1, y2, y3), I2hm(y1, y2, y3), I2am(y1, y2, y3)
V_het <- matrix( t( V_het[c("I2q","I2hm","I2am")%in%I2, ] ), ncol=1 )
V_het <- as.mxMatrix(V_het)
One <- mxMatrix("Unit", nrow=length(I2), ncol=1, name="One")
Tau_het <- mxAlgebra( One %x% diag2vec(Tau), name="Tau_het")
I2_values <- mxAlgebra( Tau_het/(Tau_het+V_het), name="I2_values")
## Modified for OpenMx 2.0
mx.model <- mxModel(model=model.name, mxData(observed=my.df, type="raw"),
mxExpectationNormal(covariance="expCov", means="expMean", dimnames=y.labels),
mxFitFunctionML(),
Inter, Beta, Beta1, expMean, X, expCov, Tau, V, One, V_het, Tau_het, I2_values,
mxCI(c("Tau","Inter","I2_values")))
} else {
## no.x > 0
## Modified for OpenMx 2.0
mx.model <- mxModel(model=model.name, mxData(observed=my.df, type="raw"),
mxExpectationNormal(covariance="expCov", means="expMean", dimnames=y.labels),
mxFitFunctionML(),
Inter, Beta, Beta1, expMean, X, expCov, Tau, V, mxCI(c("Tau","Inter","Beta")))
## Calculate R2
if (R2) mx0.fit <- tryCatch( meta(y=y, v=v, data=my.df, model.name="No predictor",
suppressWarnings=TRUE, silent=TRUE), error = function(e) e )
}
## meta <- mxModel(model=model.name, mxData(observed=my.df, type="raw"),
## mxFIMLObjective( covariance="S", means="M", dimnames=y.labels),
## Beta1, M, X, S, Tau, V, mxCI(c("Tau","Beta1")))
## Return mx model without running the analysis
if (run==FALSE) return(mx.model)
intervals.type <- match.arg(intervals.type)
# Default is z
switch(intervals.type,
z = mx.fit <- tryCatch( mxRun(mx.model, intervals=FALSE, suppressWarnings=suppressWarnings,
silent=silent, ...), error = function(e) e ),
LB = mx.fit <- tryCatch( mxRun(mx.model, intervals=TRUE, suppressWarnings=suppressWarnings,
silent=silent, ...), error = function(e) e ) )
if (inherits(mx.fit, "error")) {
cat("Error in running mxModel:\n")
warning(print(mx.fit))
return(mx.fit)
} else {
out <- list(call=mf, data=input.df, no.y=no.y, no.x=no.x, miss.x=miss.x, mx.model=mx.model,
I2=I2, R2=R2, mx.fit=mx.fit, mx0.fit=mx0.fit, intervals.type=intervals.type)
class(out) <- "meta"
}
return(out)
}
| /R/meta.R | no_license | mikewlcheung/metasem | R | false | false | 10,992 | r | meta <- function(y, v, x, data, intercept.constraints=NULL, coef.constraints=NULL,
RE.constraints=NULL, RE.startvalues=0.1, RE.lbound=1e-10,
intervals.type=c("z", "LB"), I2="I2q", R2=TRUE,
model.name="Meta analysis with ML",
suppressWarnings=TRUE, silent=TRUE, run=TRUE, ...) {
mf <- match.call()
if (missing(data)) {
data <- sys.frame(sys.parent())
} else {
if (!is.data.frame(data)) data <- data.frame(data)
}
my.y <- mf[[match("y", names(mf))]]
my.v <- mf[[match("v", names(mf))]]
y <- eval(my.y, data, enclos = sys.frame(sys.parent()))
v <- eval(my.v, data, enclos = sys.frame(sys.parent()))
if (is.vector(y)) no.y <- 1 else no.y <- ncol(y)
if (is.vector(v)) no.v <- 1 else no.v <- ncol(v)
if (missing(x)) no.x <- 0 else {
my.x <- mf[[match("x", names(mf))]]
x <- eval(my.x, data, enclos = sys.frame(sys.parent()))
if (is.vector(x)) no.x <- 1 else no.x <- ncol(x)
}
if ( no.v != no.y*(no.y+1)/2 )
stop(paste("The expected no. of columns in v is ", no.y*(no.y+1)/2,
" while the observed no. of columns in v is ", no.v, ".", sep=""))
v.labels <- vech(outer(1:no.y, 1:no.y, function(x, y) paste("v", x,"_", y, sep = "")))
y.labels <- paste("y", 1:no.y, sep="")
x.labels <- paste("x", 1:no.x, sep="")
## If is.na(v), convert y into NA. NA in y will be handled automatically.
## Since NA in v (definition variable) is not allowed. Convert v into 1e10.
## Select variances only
## FIXME: how about NA in covariances?
if (no.y==1) {
y[is.na(v)] <- NA
} else {
index <- matrix(0, nrow=no.y, ncol=no.y)
index[lower.tri(index, diag=TRUE)] <- seq(1, no.y*(no.y+1)/2)
index <- Diag(index)
y[is.na(v[, index])] <- NA
}
v[is.na(v)] <- 1e10
## FIXME: It is better to modify miss.x that includes regression coefficients
if (no.x==0) {
## x <- NULL
input.df <- as.matrix(cbind(y, v))
dimnames(input.df) <- list(NULL, c(y.labels, v.labels))
# No missing value in x
miss.x <- rep(FALSE, nrow(input.df))
} else {
input.df <- as.matrix(cbind(y, v, x))
dimnames(input.df) <- list(NULL, c(y.labels, v.labels, x.labels))
if (no.x==1) miss.x <- is.na(x) else miss.x <- apply(is.na(x), 1, any)
}
## Remove missing data; my.df is used in the actual data analysis
## Missing y is automatically handled by OpenMx
my.df <- input.df[!miss.x, ]
## Fix a bug reported by Noel Card that the number of statistics are incorrect and with negative dfs.
## It is due to my.df is a matrix, whereas OpenMx expects a data frame. ???
my.df <- as.data.frame(my.df)
## Preparing the Beta1 matrix for the intercept vector
## Inter is a 1 by no.y row vector
if (is.null(intercept.constraints)) {
Inter <- matrix( paste("0*Intercept", 1:no.y, sep=""), nrow=1, ncol=no.y )
} else {
## Convert intercept.constraints into a row matrix if it is not a matrix
if (!is.matrix(intercept.constraints))
intercept.constraints <- t(as.matrix(intercept.constraints))
if (!all(dim(intercept.constraints)==c(1, no.y)))
stop("Dimensions of \"intercept.constraints\" are incorrect.")
Inter <- intercept.constraints
}
Inter <- as.mxMatrix(t(Inter), name="Inter")
## Without predictors
## X: a 1 by (1+no.x) row vector
if (no.x==0) {
X <- mxMatrix("Unit", nrow=1, ncol=1, name="X")
## No predictor
Beta1 <- mxAlgebra(Inter, name="Beta1")
## Not used; just make sure Beta is present in mxModel()
Beta <- mxMatrix("Zero", nrow=1, ncol=1, name="Beta")
} else {
if (is.null(coef.constraints)) {
yVar <- paste("y", seq(1,no.y), sep="", collapse="+")
xVar <- paste("x", seq(1,no.x), sep="", collapse="+")
# Use lm() coefficients as starting values
startValues <- tryCatch( eval(parse(text=paste("t(coefficients(lm(cbind(",
yVar, ")~", xVar,", data=my.df)))", sep=""))),
error = function(e) e )
# If error, replace it with 0. Added a column of intercepts
# Fixed a minor bug that no starting value on the last predictor
# when intercept.constraints=0
if ( inherits(startValues, "error") | !is.null(intercept.constraints) )
startValues <- matrix(0, nrow=no.y, ncol=(no.x+1))
A.labels <- outer(1:no.y, 1:no.x, function(y, x) paste("*Slope", y,"_", x, sep = ""))
Beta <- matrix( paste(startValues[,-1], A.labels, sep=""), nrow=no.y, ncol=no.x )
} else {
## Convert coef.constraints into a column matrix if it is not a matrix
if (!is.matrix(coef.constraints))
coef.constraints <- as.matrix(coef.constraints)
coef.dim <- dim(coef.constraints)
if (!coef.dim[1]==no.y | !(coef.dim[2] %in% c(no.x, no.x+no.y)))
stop("Dimensions of \"coef.constraints\" are incorrect.")
Beta <- coef.constraints
}
Beta <- as.mxMatrix(Beta)
Beta1 <- mxAlgebra( cbind(Inter, Beta), name="Beta1")
## X.matrix <- paste("mxMatrix(\"Full\", nrow=1, ncol=(1+no.x), free=FALSE, values=c(1,",
## paste("data.x",1:no.x,sep="", collapse=","), "), name=\"X\")", sep="")
## eval(parse(text = X.matrix))
X <- mxMatrix("Full", nrow=1, ncol=(1+no.x), free=FALSE, values=c(1, rep(NA, no.x)),
labels=c(NA, paste("data.x",1:no.x,sep="")), name="X")
}
expMean <- mxAlgebra( X %*% t(Beta1), name="expMean")
## Fixed a bug in 0.5-0 that lbound is not added into Tau
## when RE.constraints is used.
## lbound in variance component of the random effects
if (is.matrix(RE.lbound)) {
if (!all(dim(RE.lbound)==c(no.y, no.y)))
warning("Dimensions of \"RE.lbound\" are incorrect.")
# FIXME: need to handle unequal dimensions better
lbound <- RE.lbound
## lbound is a matrix
} else {
lbound <- matrix(NA, nrow=no.y, ncol=no.y)
Diag(lbound) <- RE.lbound
## lbound is a matrix
}
## Preparing the S matrix for covariance elements
# No predictor
if (is.null(RE.constraints)) {
# Better to use starting values based on diagonal matrix rather than the UMM
if (is.matrix(RE.startvalues)) {
if (!all(dim(RE.startvalues)==c(no.y, no.y)))
warning("Dimensions of \"RE.startvalues\" are incorrect.")
values <- vech(RE.startvalues)
} else {
values <- vech(Diag(x=RE.startvalues, nrow=no.y, ncol=no.y))
}
Tau.labels <- vech(outer(1:no.y, 1:no.y, function(x,y) { paste("Tau2_",x,"_",y,sep="")}))
Tau <- mxMatrix("Symm", ncol=no.y, nrow=no.y, free=TRUE, labels=Tau.labels,
lbound=vech(lbound), values=values, name="Tau")
} else {
## Convert RE.constraints into a column matrix if it is not a matrix
if (!is.matrix(RE.constraints))
RE.constraints <- as.matrix(RE.constraints)
if (!all(dim(RE.constraints)==c(no.y, no.y)))
stop("Dimensions of \"RE.constraints\" are incorrect.")
## Fixed a bug that reads lbound improperly
## Since as.mxMatrix expects a full matrix, lbound=vech(lbound) is incorrect
Tau <- as.mxMatrix(RE.constraints, lbound=c(lbound), name="Tau")
}
V <- mxMatrix("Symm", ncol=no.y, nrow=no.y, free=FALSE,
labels=paste("data.", v.labels, sep=""), name="V")
expCov <- mxAlgebra(V+Tau, name="expCov")
## Assuming NA first
mx0.fit <- NA
if (no.x==0) {
I2 <- match.arg(I2, c("I2q", "I2hm", "I2am"), several.ok=TRUE)
## Select variances and exclude covariances
v_het <- input.df[, paste("v", 1:no.y, "_", 1:no.y, sep=""), drop=FALSE]
## Calculate I2
## Based on Higgins and Thompson (2002), Eq. 9
sum.w <- apply(v_het, 2, function(x) sum(1/x))
sum.w2 <- apply(v_het, 2, function(x) sum(1/x^2))
## NA in v has been replaced by 1e10
no.studies <- apply(v_het, 2, function(x) sum(x<1e9))
## Typical V based on Q statistic
qV <- matrix((no.studies-1)*sum.w/(sum.w^2-sum.w2), nrow=1)
## Typical V based on harmonic mean
hmV <- matrix(no.studies/sum.w, nrow=1)
## Typical V based on arithmatic mean
amV <- apply(v_het, 2, function(x) mean(x[x<1e9]))
amV <- matrix(amV, nrow=1)
V_het <- rbind(qV, hmV, amV)
## Select the heter.indices
## Before selection: V_het is a c("I2q","I2hm","I2am") by c(y1, y2, y3) matrix
## After selecting: A column vector of I2q(y1, y2, y3), I2hm(y1, y2, y3), I2am(y1, y2, y3)
V_het <- matrix( t( V_het[c("I2q","I2hm","I2am")%in%I2, ] ), ncol=1 )
V_het <- as.mxMatrix(V_het)
One <- mxMatrix("Unit", nrow=length(I2), ncol=1, name="One")
Tau_het <- mxAlgebra( One %x% diag2vec(Tau), name="Tau_het")
I2_values <- mxAlgebra( Tau_het/(Tau_het+V_het), name="I2_values")
## Modified for OpenMx 2.0
mx.model <- mxModel(model=model.name, mxData(observed=my.df, type="raw"),
mxExpectationNormal(covariance="expCov", means="expMean", dimnames=y.labels),
mxFitFunctionML(),
Inter, Beta, Beta1, expMean, X, expCov, Tau, V, One, V_het, Tau_het, I2_values,
mxCI(c("Tau","Inter","I2_values")))
} else {
## no.x > 0
## Modified for OpenMx 2.0
mx.model <- mxModel(model=model.name, mxData(observed=my.df, type="raw"),
mxExpectationNormal(covariance="expCov", means="expMean", dimnames=y.labels),
mxFitFunctionML(),
Inter, Beta, Beta1, expMean, X, expCov, Tau, V, mxCI(c("Tau","Inter","Beta")))
## Calculate R2
if (R2) mx0.fit <- tryCatch( meta(y=y, v=v, data=my.df, model.name="No predictor",
suppressWarnings=TRUE, silent=TRUE), error = function(e) e )
}
## meta <- mxModel(model=model.name, mxData(observed=my.df, type="raw"),
## mxFIMLObjective( covariance="S", means="M", dimnames=y.labels),
## Beta1, M, X, S, Tau, V, mxCI(c("Tau","Beta1")))
## Return mx model without running the analysis
if (run==FALSE) return(mx.model)
intervals.type <- match.arg(intervals.type)
# Default is z
switch(intervals.type,
z = mx.fit <- tryCatch( mxRun(mx.model, intervals=FALSE, suppressWarnings=suppressWarnings,
silent=silent, ...), error = function(e) e ),
LB = mx.fit <- tryCatch( mxRun(mx.model, intervals=TRUE, suppressWarnings=suppressWarnings,
silent=silent, ...), error = function(e) e ) )
if (inherits(mx.fit, "error")) {
cat("Error in running mxModel:\n")
warning(print(mx.fit))
return(mx.fit)
} else {
out <- list(call=mf, data=input.df, no.y=no.y, no.x=no.x, miss.x=miss.x, mx.model=mx.model,
I2=I2, R2=R2, mx.fit=mx.fit, mx0.fit=mx0.fit, intervals.type=intervals.type)
class(out) <- "meta"
}
return(out)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_functions.R
\name{get_perm_source_types}
\alias{get_perm_source_types}
\title{extract source types}
\usage{
get_perm_source_types(pv)
}
\arguments{
\item{pv}{[\code{character(1)}]\cr
the name ID of the pollyvote object, defaults to 'pollyvote'.}
}
\value{
character vector containing all source types stored in \code{pv}.
}
\description{
This function extract source types from a pollyvote container.
}
\examples{
pv = create_pollyvote(perm_source_types = "poll")
get_perm_source_types(pv)
}
| /man/get_perm_source_types.Rd | no_license | pollyvote/pollyvoter | R | false | true | 575 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_functions.R
\name{get_perm_source_types}
\alias{get_perm_source_types}
\title{extract source types}
\usage{
get_perm_source_types(pv)
}
\arguments{
\item{pv}{[\code{character(1)}]\cr
the name ID of the pollyvote object, defaults to 'pollyvote'.}
}
\value{
character vector containing all source types stored in \code{pv}.
}
\description{
This function extract source types from a pollyvote container.
}
\examples{
pv = create_pollyvote(perm_source_types = "poll")
get_perm_source_types(pv)
}
|
\name{mixed_ks_c_cdf}
\alias{mixed_ks_c_cdf}
\title{
Computes the complementary cumulative distribution function of the two-sided Kolmogorov-Smirnov statistic when the cdf under the null hypothesis is mixed
}
\description{
Computes the complementary cdf, \eqn{P(D_{n} \ge q)} at a fixed \eqn{q}, \eqn{q\in[0, 1]}, of the one-sample two-sided Kolmogorov-Smirnov statistic, when the cdf \eqn{F(x)} under the null hypothesis is mixed, using the Exact-KS-FFT method expressing the p-value as a double-boundary non-crossing probability for a homogeneous Poisson process, which is then efficiently computed using FFT (see Dimitrova, Kaishev, Tan (2020)).
}
\usage{
mixed_ks_c_cdf(q, n, jump_points, Mixed_dist, ..., tol = 1e-10)
}
\arguments{
\item{q}{
numeric value between 0 and 1, at which the complementary cdf \eqn{P(D_{n} \ge q)} is computed
}
\item{n}{
the sample size
}
\item{jump_points}{
a numeric vector containing the points of (jump) discontinuity, i.e. where the underlying cdf \eqn{F(x)} has jump(s)
}
\item{Mixed_dist}{
a pre-specified (user-defined) mixed cdf, \eqn{F(x)}, under the null hypothesis.
}
\item{\dots}{
values of the parameters of the cdf, \eqn{F(x)} specified (as a character string) by \code{Mixed_dist}.
}
\item{tol}{
the value of \eqn{\epsilon} that is used to compute the values of \eqn{A_{i}} and \eqn{B_{i}}, \eqn{i = 1, ..., n}, as detailed in Step 1 of Section 2.1 in Dimitrova, Kaishev and Tan (2020) (see also (ii) in the Procedure Exact-KS-FFT therein). By default, \code{tol = 1e-10}. Note that a value of \code{NA} or \code{0} will lead to an error!
}
}
\details{
Given a random sample \eqn{\{X_{1}, ..., X_{n}\}} of size \code{n} with an empirical cdf \eqn{F_{n}(x)}, the Kolmogorov-Smirnov goodness-of-fit statistic is defined as \eqn{D_{n} = \sup | F_{n}(x) - F(x) | }, where \eqn{F(x)} is the cdf of a prespecified theoretical distribution under the null hypothesis \eqn{H_{0}}, that \eqn{\{X_{1}, ..., X_{n}\}} comes from \eqn{F(x)}.
The function \code{\link{mixed_ks_c_cdf}} implements the Exact-KS-FFT method, proposed by Dimitrova, Kaishev, Tan (2020) to compute the complementary cdf \eqn{P(D_{n} \ge q)} at a value \eqn{q}, when \eqn{F(x)} is mixed.
This algorithm ensures a total worst-case run-time of order \eqn{O(n^{2}log(n))}.
We have not been able to identify alternative, fast and accurate, method (software) that has been developed/implemented when the hypothesized \eqn{F(x)} is mixed.
}
\value{
Numeric value corresponding to \eqn{P(D_{n} \ge q)}.
}
\references{
Dimitrina S. Dimitrova, Vladimir K. Kaishev, Senren Tan. (2020) "Computing the Kolmogorov-Smirnov Distribution When the Underlying CDF is Purely Discrete, Mixed or Continuous". Journal of Statistical Software, \bold{95}(10): 1-42. doi:10.18637/jss.v095.i10.
}
\examples{
# Compute the complementary cdf of D_{n}
# when the underlying distribution is a mixed distribution
# with two jumps at 0 and log(2.5),
# as in Example 3.1 of Dimitrova, Kaishev, Tan (2020)
## Defining the mixed distribution
Mixed_cdf_example <- function(x)
{
result <- 0
if (x < 0){
result <- 0
}
else if (x == 0){
result <- 0.5
}
else if (x < log(2.5)){
result <- 1 - 0.5 * exp(-x)
}
else{
result <- 1
}
return (result)
}
KSgeneral::mixed_ks_c_cdf(0.1, 25, c(0, log(2.5)), Mixed_cdf_example)
\dontrun{
## Compute P(D_{n} >= q) for n = 5,
## q = 1/5000, 2/5000, ..., 5000/5000
## when the underlying distribution is a mixed distribution
## with four jumps at 0, 0.2, 0.8, 1.0,
## as in Example 2.8 of Dimitrova, Kaishev, Tan (2020)
n <- 5
q <- 1:5000/5000
Mixed_cdf_example <- function(x)
{
result <- 0
if (x < 0){
result <- 0
}
else if (x == 0){
result <- 0.2
}
else if (x < 0.2){
result <- 0.2 + x
}
else if (x < 0.8){
result <- 0.5
}
else if (x < 1){
result <- x - 0.1
}
else{
result <- 1
}
return (result)
}
plot(q, sapply(q, function(x) KSgeneral::mixed_ks_c_cdf(x, n,
c(0, 0.2, 0.8, 1.0), Mixed_cdf_example)), type='l')
}
}
| /KSgeneral/man/mixed_ks_c_cdf.Rd | no_license | akhikolla/InformationHouse | R | false | false | 4,123 | rd | \name{mixed_ks_c_cdf}
\alias{mixed_ks_c_cdf}
\title{
Computes the complementary cumulative distribution function of the two-sided Kolmogorov-Smirnov statistic when the cdf under the null hypothesis is mixed
}
\description{
Computes the complementary cdf, \eqn{P(D_{n} \ge q)} at a fixed \eqn{q}, \eqn{q\in[0, 1]}, of the one-sample two-sided Kolmogorov-Smirnov statistic, when the cdf \eqn{F(x)} under the null hypothesis is mixed, using the Exact-KS-FFT method expressing the p-value as a double-boundary non-crossing probability for a homogeneous Poisson process, which is then efficiently computed using FFT (see Dimitrova, Kaishev, Tan (2020)).
}
\usage{
mixed_ks_c_cdf(q, n, jump_points, Mixed_dist, ..., tol = 1e-10)
}
\arguments{
\item{q}{
numeric value between 0 and 1, at which the complementary cdf \eqn{P(D_{n} \ge q)} is computed
}
\item{n}{
the sample size
}
\item{jump_points}{
a numeric vector containing the points of (jump) discontinuity, i.e. where the underlying cdf \eqn{F(x)} has jump(s)
}
\item{Mixed_dist}{
a pre-specified (user-defined) mixed cdf, \eqn{F(x)}, under the null hypothesis.
}
\item{\dots}{
values of the parameters of the cdf, \eqn{F(x)} specified (as a character string) by \code{Mixed_dist}.
}
\item{tol}{
the value of \eqn{\epsilon} that is used to compute the values of \eqn{A_{i}} and \eqn{B_{i}}, \eqn{i = 1, ..., n}, as detailed in Step 1 of Section 2.1 in Dimitrova, Kaishev and Tan (2020) (see also (ii) in the Procedure Exact-KS-FFT therein). By default, \code{tol = 1e-10}. Note that a value of \code{NA} or \code{0} will lead to an error!
}
}
\details{
Given a random sample \eqn{\{X_{1}, ..., X_{n}\}} of size \code{n} with an empirical cdf \eqn{F_{n}(x)}, the Kolmogorov-Smirnov goodness-of-fit statistic is defined as \eqn{D_{n} = \sup | F_{n}(x) - F(x) | }, where \eqn{F(x)} is the cdf of a prespecified theoretical distribution under the null hypothesis \eqn{H_{0}}, that \eqn{\{X_{1}, ..., X_{n}\}} comes from \eqn{F(x)}.
The function \code{\link{mixed_ks_c_cdf}} implements the Exact-KS-FFT method, proposed by Dimitrova, Kaishev, Tan (2020) to compute the complementary cdf \eqn{P(D_{n} \ge q)} at a value \eqn{q}, when \eqn{F(x)} is mixed.
This algorithm ensures a total worst-case run-time of order \eqn{O(n^{2}log(n))}.
We have not been able to identify alternative, fast and accurate, method (software) that has been developed/implemented when the hypothesized \eqn{F(x)} is mixed.
}
\value{
Numeric value corresponding to \eqn{P(D_{n} \ge q)}.
}
\references{
Dimitrina S. Dimitrova, Vladimir K. Kaishev, Senren Tan. (2020) "Computing the Kolmogorov-Smirnov Distribution When the Underlying CDF is Purely Discrete, Mixed or Continuous". Journal of Statistical Software, \bold{95}(10): 1-42. doi:10.18637/jss.v095.i10.
}
\examples{
# Compute the complementary cdf of D_{n}
# when the underlying distribution is a mixed distribution
# with two jumps at 0 and log(2.5),
# as in Example 3.1 of Dimitrova, Kaishev, Tan (2020)
## Defining the mixed distribution
Mixed_cdf_example <- function(x)
{
result <- 0
if (x < 0){
result <- 0
}
else if (x == 0){
result <- 0.5
}
else if (x < log(2.5)){
result <- 1 - 0.5 * exp(-x)
}
else{
result <- 1
}
return (result)
}
KSgeneral::mixed_ks_c_cdf(0.1, 25, c(0, log(2.5)), Mixed_cdf_example)
\dontrun{
## Compute P(D_{n} >= q) for n = 5,
## q = 1/5000, 2/5000, ..., 5000/5000
## when the underlying distribution is a mixed distribution
## with four jumps at 0, 0.2, 0.8, 1.0,
## as in Example 2.8 of Dimitrova, Kaishev, Tan (2020)
n <- 5
q <- 1:5000/5000
Mixed_cdf_example <- function(x)
{
result <- 0
if (x < 0){
result <- 0
}
else if (x == 0){
result <- 0.2
}
else if (x < 0.2){
result <- 0.2 + x
}
else if (x < 0.8){
result <- 0.5
}
else if (x < 1){
result <- x - 0.1
}
else{
result <- 1
}
return (result)
}
plot(q, sapply(q, function(x) KSgeneral::mixed_ks_c_cdf(x, n,
c(0, 0.2, 0.8, 1.0), Mixed_cdf_example)), type='l')
}
}
|
# BSD_2_clause
shinyUI(fluidPage(
useShinyjs(),
tags$style(
"div.outer {
position: fixed;
background-color: #bfbfbf;
top: 0px;
left: 0;
right: 0;
bottom: 0;
overflow: hidden;
padding: 0;
}"
),
div(class = "outer",
leafletOutput("sp_map", height = "100%", width = "100%")
)
))
| /ui.R | no_license | jacob-ogre/gbif_map_comp | R | false | false | 347 | r | # BSD_2_clause
shinyUI(fluidPage(
useShinyjs(),
tags$style(
"div.outer {
position: fixed;
background-color: #bfbfbf;
top: 0px;
left: 0;
right: 0;
bottom: 0;
overflow: hidden;
padding: 0;
}"
),
div(class = "outer",
leafletOutput("sp_map", height = "100%", width = "100%")
)
))
|
library(plyr)
spottings <- list.files(path="spottings")
spottings <- strsplit(spottings, "\\.")
spottings <- laply(spottings, function(x) x[1])
get_id <- function(spotting) {
keys <- strsplit(spotting, '')[[1]]
# Components
keysize <- length(keys)
major <- as.numeric(paste(keys[1:(keysize-6)], collapse=""))
minor <- as.numeric(paste(keys[(keysize-5):(keysize-4)], collapse=""))
magic <- as.numeric(paste(keys[(keysize-3):keysize], collapse=""))
# Wrap things up and return
data.frame(M = major, m = minor, k = magic)
}
ids <- adply(spottings, 1, get_id)
| /successes.r | permissive | tpoisot/noah-scrapper | R | false | false | 576 | r | library(plyr)
spottings <- list.files(path="spottings")
spottings <- strsplit(spottings, "\\.")
spottings <- laply(spottings, function(x) x[1])
get_id <- function(spotting) {
  # Decompose a spotting key into its three numeric components:
  # everything before the last six characters is the major id, the next
  # two characters the minor id, and the final four the "magic" id.
  n <- nchar(spotting)
  major <- as.numeric(substr(spotting, 1, n - 6))
  minor <- as.numeric(substr(spotting, n - 5, n - 4))
  magic <- as.numeric(substr(spotting, n - 3, n))
  # One-row data frame so adply() can stack results across spottings.
  data.frame(M = major, m = minor, k = magic)
}
ids <- adply(spottings, 1, get_id)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dynamac.R
\name{dynardl.auto.correlated}
\alias{dynardl.auto.correlated}
\title{Run a variety of autocorrelation tests on the residuals from a \code{\link{dynardl}} model}
\usage{
dynardl.auto.correlated(
x,
bg.type = "Chisq",
digits = 3,
order = NULL,
object.out = FALSE
)
}
\arguments{
\item{x}{a \code{dynardl} model}
\item{bg.type}{a character string for the type of Breusch-Godfrey test to run. The default is \code{Chisq}: the Chisq test statistic. The other option is \code{F}: the F-test statistic}
\item{digits}{the number of digits to round to when showing output. The default is \code{3}}
\item{order}{the maximum order of serial autocorrelation to test when executing the Breusch-Godfrey test}
\item{object.out}{if \code{TRUE}, and \code{dynardl.auto.correlated} is assigned to an object, the AIC, BIC, and results will be stored for the user's convenience}
}
\value{
The results of autocorrelation tests
}
\description{
Run a variety of autocorrelation tests on the residuals from a \code{\link{dynardl}} model
}
\details{
This is a simple and convenient way to test whether the residuals from the \code{dynardl} model are white noise. As an aside, this is also why \code{dynardl} has a \code{simulate = FALSE} argument: users can ensure the model has white noise residuals before estimating a potentially time-intensive simulation. The output also reminds the user of the null hypotheses for the autocorrelation tests.
}
\examples{
# Using the ineq data from dynamac
ardl.model <- dynardl(concern ~ incshare10 + urate, data = ineq,
lags = list("concern" = 1, "incshare10" = 1),
diffs = c("incshare10", "urate"),
lagdiffs = list("concern" = 1),
ec = TRUE, simulate = FALSE)
dynardl.auto.correlated(ardl.model)
}
\author{
Soren Jordan and Andrew Q. Philips
}
\keyword{utilities}
| /man/dynardl.auto.correlated.Rd | no_license | cran/dynamac | R | false | true | 1,912 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dynamac.R
\name{dynardl.auto.correlated}
\alias{dynardl.auto.correlated}
\title{Run a variety of autocorrelation tests on the residuals from a \code{\link{dynardl}} model}
\usage{
dynardl.auto.correlated(
x,
bg.type = "Chisq",
digits = 3,
order = NULL,
object.out = FALSE
)
}
\arguments{
\item{x}{a \code{dynardl} model}
\item{bg.type}{a character string for the type of Breusch-Godfrey test to run. The default is \code{Chisq}: the Chisq test statistic. The other option is \code{F}: the F-test statistic}
\item{digits}{the number of digits to round to when showing output. The default is \code{3}}
\item{order}{the maximum order of serial autocorrelation to test when executing the Breusch-Godfrey test}
\item{object.out}{if \code{TRUE}, and \code{dynardl.auto.correlated} is assigned to an object, the AIC, BIC, and results will be stored for the user's convenience}
}
\value{
The results of autocorrelation tests
}
\description{
Run a variety of autocorrelation tests on the residuals from a \code{\link{dynardl}} model
}
\details{
This is a simple and convenient way to test whether the residuals from the \code{dynardl} model are white noise. As an aside, this is also why \code{dynardl} has a \code{simulate = FALSE} argument: users can ensure the model has white noise residuals before estimating a potentially time-intensive simulation. The output also reminds the user of the null hypotheses for the autocorrelation tests.
}
\examples{
# Using the ineq data from dynamac
ardl.model <- dynardl(concern ~ incshare10 + urate, data = ineq,
lags = list("concern" = 1, "incshare10" = 1),
diffs = c("incshare10", "urate"),
lagdiffs = list("concern" = 1),
ec = TRUE, simulate = FALSE)
dynardl.auto.correlated(ardl.model)
}
\author{
Soren Jordan and Andrew Q. Philips
}
\keyword{utilities}
|
#' Returns the levels for each grouping factor in the fitted object
#'
#' @inherit lme4::ngrps params return
#' @export
#'
ngrps.mixed <- function(object, ...) {
  # One entry per grouping factor: the number of levels it carries.
  vapply(.flist(object), nlevels, 1)
}
#' Terms method for epimodel objects
#' @export
#' @param x,fixed.only,random.only,... See \code{\link[lme4]{terms.merMod}}
#'
terms.epimodel <- function (x, fixed.only=TRUE, random.only=FALSE, ...) {
  # Non-mixed fits: defer to the next terms() method in the dispatch chain.
  if (!is.mixed(x))
    return(NextMethod("terms"))
  # Mixed fits: the lme4 model frame stored on the fit carries the terms.
  fr <- x$glmod$fr
  # Asking only for random terms implicitly turns off the fixed-only default.
  if (missing(fixed.only) && random.only)
    fixed.only <- FALSE
  if (fixed.only && random.only)
    stop("can't specify 'only fixed' and 'only random' terms")
  tt <- attr(fr, "terms")
  if (fixed.only) {
    # Rebuild terms from the fixed-effects-only formula, carrying over the
    # fixed-part predvars recorded on the frame's terms.
    tt <- terms.formula(formula(x, fixed.only = TRUE))
    attr(tt, "predvars") <- attr(terms(fr), "predvars.fixed")
  }
  if (random.only) {
    # subbars() rewrites lme4's (g | f) bars so terms.formula can parse it.
    tt <- terms.formula(lme4::subbars(formula(x, random.only = TRUE)))
    attr(tt, "predvars") <- attr(terms(fr), "predvars.random")
  }
  return(tt)
}
#' model.frame method for epimodel objects. Please see \code{\link[stats]{model.frame}}
#' for more details.
#'
#' @export
#' @templateVar epimodelArg formula
#' @template args-epimodel-object
#' @param ... See \code{\link[stats]{model.frame}}.
#' @param fixed.only See \code{\link[lme4]{model.frame.merMod}}.
#'
model.frame.epimodel <- function(formula, fixed.only = FALSE, ...) {
  # Non-mixed fits: rebuild the frame from the formula's RHS and the data
  # stored on the fitted object.
  if (!is.mixed(formula)) {
    return(model.frame(formula = rhs(formula), data = formula$data,
                       drop.unused.levels = TRUE))
  }
  # Mixed fits: lme4 already built the frame; it is stored on the fit.
  fr <- formula$glmod$fr
  if (fixed.only) {
    # Keep only the columns used by the fixed part of the model.
    keep <- all.vars(delete.response(terms(formula, fixed.only = TRUE)))
    fr <- fr[keep]
  }
  fr
}
#' formula method for epimodel objects
#'
#' @export
#' @param x An epimodel object.
#' @param ... Can contain \code{fixed.only} and \code{random.only} arguments
#' that both default to \code{FALSE}.
#'
formula.epimodel <- function(x, ...) {
  # The model formula is stored on the fit; forward any extra arguments
  # (e.g. fixed.only/random.only) to the formula method for that object.
  formula(x$formula, ...)
}
#' Extract X or Z from an epimodel object
#'
#' @export
#' @templateVar epimodelArg object
#' @template args-epimodel-object
#' @param ... Other arguments passed to methods.
#' @return A matrix.
#' @export
get_x <- function(object, ...) UseMethod("get_x")
#' @rdname get_x
#' @export
get_z <- function(object, ...) UseMethod("get_z")
#' @export
get_x.default <- function(object, ...) {
  # Prefer a stored design matrix; otherwise rebuild it via model.matrix().
  object[["x"]] %ORifNULL% model.matrix(object)
}
#' @export
get_x.mixed <- function(object, ...) {
  # Mixed fits keep the fixed-effects design matrix in glmod$X.
  object$glmod$X %ORifNULL% stop("X not found")
}
#' @export
get_z.mixed <- function(object, ...) {
  # reTrms stores the transposed random-effects design matrix (Zt);
  # transpose it back before returning.
  Zt <- object$glmod$reTrms$Zt %ORifNULL% stop("Z not found")
  Matrix::t(Zt)
}
VarCorr.epimodel <- function(x, sigma = 1, ...) {
  # Reconstruct per-grouping-factor covariance matrices of the random
  # effects from posterior draws, in the structure lme4::VarCorr returns.
  cnms <- .cnms(x)
  mat <- as.matrix(x)
  # Residual scale is present only when the draws include a "sigma" column.
  useSc <- "sigma" %in% colnames(mat)
  if (useSc) sc <- mat[,"sigma"] else sc <- 1
  # Posterior means of all sampled Sigma[...] parameters.
  Sigma <- colMeans(mat[,grepl("^Sigma\\[", colnames(mat)), drop = FALSE])
  nc <- vapply(cnms, FUN = length, FUN.VALUE = 1L)
  nms <- names(cnms)
  ncseq <- seq_along(nc)
  if (length(Sigma) == sum(nc * nc)) { # stanfit contains all Sigma entries
    # Full matrices were sampled: refill each nc[i] x nc[i] block directly.
    spt <- split(Sigma, rep.int(ncseq, nc * nc))
    ans <- lapply(ncseq, function(i) {
      Sigma <- matrix(0, nc[i], nc[i])
      Sigma[,] <- spt[[i]]
      rownames(Sigma) <- colnames(Sigma) <- cnms[[i]]
      stddev <- sqrt(diag(Sigma))
      corr <- cov2cor(Sigma)
      structure(Sigma, stddev = stddev, correlation = corr)
    })
  } else { # stanfit contains lower tri Sigma entries
    # Only the lower triangle was sampled: unpack it, then symmetrize.
    # Adding the transpose double-counts the diagonal, hence the halving.
    spt <- split(Sigma, rep.int(ncseq, (nc * (nc + 1)) / 2))
    ans <- lapply(ncseq, function(i) {
      Sigma <- matrix(0, nc[i], nc[i])
      Sigma[lower.tri(Sigma, diag = TRUE)] <- spt[[i]]
      Sigma <- Sigma + t(Sigma)
      diag(Sigma) <- diag(Sigma) / 2
      rownames(Sigma) <- colnames(Sigma) <- cnms[[i]]
      stddev <- sqrt(diag(Sigma))
      corr <- cov2cor(Sigma)
      structure(Sigma, stddev = stddev, correlation = corr)
    })
  }
  names(ans) <- nms
  # Classed as VarCorr.merMod so lme4's print method can be reused.
  structure(ans, sc = mean(sc), useSc = useSc, class = "VarCorr.merMod")
}
# Guard used by the internal accessors below: they only make sense for
# mixed-effects fits.
.mixed_check <- function(object) {
  if (!is.mixed(object))
    stop("This method is for mixed effects models only.", call.=FALSE)
}
# Component names per grouping factor (from lme4's reTrms structure).
.cnms <- function(object, ...) UseMethod(".cnms")
.cnms.epimodel <- function(object, ...) {
  .mixed_check(object)
  object$glmod$reTrms$cnms
}
# List of grouping factors (from lme4's reTrms structure).
.flist <- function(object, ...) UseMethod(".flist")
.flist.epimodel <- function(object, ...) {
  .mixed_check(object)
  as.list(object$glmod$reTrms$flist)
}
| /R/epimodel-methods.R | no_license | gkarthik/epidemia | R | false | false | 4,472 | r | #' Returns the levels for each grouping factor in the fitted object
#'
#' @inherit lme4::ngrps params return
#' @export
#'
ngrps.mixed <- function(object, ...) vapply(.flist(object), nlevels, 1)
#' Terms method for epimodel objects
#' @export
#' @param x,fixed.only,random.only,... See \code{\link[lme4]{terms.merMod}}
#'
terms.epimodel <- function (x, fixed.only=TRUE, random.only=FALSE, ...) {
if (!is.mixed(x))
return(NextMethod("terms"))
fr <- x$glmod$fr
if (missing(fixed.only) && random.only)
fixed.only <- FALSE
if (fixed.only && random.only)
stop("can't specify 'only fixed' and 'only random' terms")
tt <- attr(fr, "terms")
if (fixed.only) {
tt <- terms.formula(formula(x, fixed.only = TRUE))
attr(tt, "predvars") <- attr(terms(fr), "predvars.fixed")
}
if (random.only) {
tt <- terms.formula(lme4::subbars(formula(x, random.only = TRUE)))
attr(tt, "predvars") <- attr(terms(fr), "predvars.random")
}
return(tt)
}
#' model.frame method for epimodel objects. Please see \code{\link[stats]{model.frame}}
#' for more details.
#'
#' @export
#' @templateVar epimodelArg formula
#' @template args-epimodel-object
#' @param ... See \code{\link[stats]{model.frame}}.
#' @param fixed.only See \code{\link[lme4]{model.frame.merMod}}.
#'
model.frame.epimodel <- function(formula, fixed.only=FALSE, ...) {
if (is.mixed(formula)) {
fr <- formula$glmod$fr
if (fixed.only) {
trms <- delete.response(terms(formula, fixed.only=TRUE))
vars <- all.vars(trms)
fr <- fr[vars]
}
} else {
form <- rhs(formula)
fr <- model.frame(formula=form, data=formula$data, drop.unused.levels=TRUE)
}
return(fr)
}
#' formula method for epimodel objects
#'
#' @export
#' @param x An epimodel object.
#' @param ... Can contain \code{fixed.only} and \code{random.only} arguments
#' that both default to \code{FALSE}.
#'
formula.epimodel <- function(x, ...) {
return(formula(x$formula, ...))
}
#' Extract X or Z from an epimodel object
#'
#' @export
#' @templateVar epimodelArg object
#' @template args-epimodel-object
#' @param ... Other arguments passed to methods.
#' @return A matrix.
#' @export
get_x <- function(object, ...) UseMethod("get_x")
#' @rdname get_x
#' @export
get_z <- function(object, ...) UseMethod("get_z")
#' @export
get_x.default <- function(object, ...) {
object[["x"]] %ORifNULL% model.matrix(object)
}
#' @export
get_x.mixed <- function(object, ...) {
object$glmod$X %ORifNULL% stop("X not found")
}
#' @export
get_z.mixed <- function(object, ...) {
Zt <- object$glmod$reTrms$Zt %ORifNULL% stop("Z not found")
Matrix::t(Zt)
}
VarCorr.epimodel <- function(x, sigma = 1, ...) {
cnms <- .cnms(x)
mat <- as.matrix(x)
useSc <- "sigma" %in% colnames(mat)
if (useSc) sc <- mat[,"sigma"] else sc <- 1
Sigma <- colMeans(mat[,grepl("^Sigma\\[", colnames(mat)), drop = FALSE])
nc <- vapply(cnms, FUN = length, FUN.VALUE = 1L)
nms <- names(cnms)
ncseq <- seq_along(nc)
if (length(Sigma) == sum(nc * nc)) { # stanfit contains all Sigma entries
spt <- split(Sigma, rep.int(ncseq, nc * nc))
ans <- lapply(ncseq, function(i) {
Sigma <- matrix(0, nc[i], nc[i])
Sigma[,] <- spt[[i]]
rownames(Sigma) <- colnames(Sigma) <- cnms[[i]]
stddev <- sqrt(diag(Sigma))
corr <- cov2cor(Sigma)
structure(Sigma, stddev = stddev, correlation = corr)
})
} else { # stanfit contains lower tri Sigma entries
spt <- split(Sigma, rep.int(ncseq, (nc * (nc + 1)) / 2))
ans <- lapply(ncseq, function(i) {
Sigma <- matrix(0, nc[i], nc[i])
Sigma[lower.tri(Sigma, diag = TRUE)] <- spt[[i]]
Sigma <- Sigma + t(Sigma)
diag(Sigma) <- diag(Sigma) / 2
rownames(Sigma) <- colnames(Sigma) <- cnms[[i]]
stddev <- sqrt(diag(Sigma))
corr <- cov2cor(Sigma)
structure(Sigma, stddev = stddev, correlation = corr)
})
}
names(ans) <- nms
structure(ans, sc = mean(sc), useSc = useSc, class = "VarCorr.merMod")
}
.mixed_check <- function(object) {
if (!is.mixed(object))
stop("This method is for mixed effects models only.", call.=FALSE)
}
.cnms <- function(object, ...) UseMethod(".cnms")
.cnms.epimodel <- function(object, ...) {
.mixed_check(object)
object$glmod$reTrms$cnms
}
.flist <- function(object, ...) UseMethod(".flist")
.flist.epimodel <- function(object, ...) {
.mixed_check(object)
as.list(object$glmod$reTrms$flist)
}
|
library(twitteR)
#getUser("minsaude")
#load("~/Dropbox/credentials/my_oauth")
#options(httr_oauth_cache=TRUE) #This will enable the use of a local file to cache OAuth access credentials between R sessions.
#setup_twitter_oauth(consumer_key = "Z2ylc2PCCT1RHC3bULRlFZe6e",
# consumer_secret = "tTGnPZiPJhRhMAS0AG5nYDbPQ1sxLTr838ibuQ7QDqdCVcWYTH",
# access_token = "616715766-l8bzWPYKARP6DBQnvOAcJH6TEFBlSWyoXOGsPjCK",
# access_secret = "FhRd94eTycnvJp5tgUAGfR7O8AZTwv6iQ7ecDQ8tRRMVt")
library(smappR)
# Pull the follower id list for @minsaude, then hydrate each follower's
# profile (both calls authenticate via tokens stored in the oauth folder).
minsaude_followers <- smappR::getFollowers("minsaude", oauth_folder = "~/Dropbox/credentials")
user_data <- smappR::getUsersBatch(id = minsaude_followers, oauth_folder = "~/Dropbox/credentials")
#save(list = "user_data", file = "data/user_data.RData")
# Treat empty location strings as missing so they drop out of the
# geocoding step below.
user_data$location[user_data$location == ""] <- NA
### from lucas puente
#Install key package helpers:
source("https://raw.githubusercontent.com/LucasPuente/geocoding/master/geocode_helpers.R")
#Install modified version of the geocode function
#(that now includes the api_key parameter):
source("https://raw.githubusercontent.com/LucasPuente/geocoding/master/modified_geocode.R")
geocode_apply <- function(x) {
  # Geocode a single location string via the Google geocoding API,
  # returning the full parsed response ("all" output).
  #
  # SECURITY NOTE(review): the fallback API key below is committed in
  # plain text; it should be rotated and supplied only via the
  # GOOGLE_GEOCODING_API_KEY environment variable.
  key <- Sys.getenv("GOOGLE_GEOCODING_API_KEY",
                    unset = "AIzaSyBnR6m37FrIR7SkDj7ZPyg5pmbrL68PWFI")
  geocode(x, source = "google", output = "all", api_key = key)
}
# api key calling will take time, I think
library(jsonlite)
library(dplyr)
library(plyr)
#####
#foo <- user_data$location[1:500]
#foo <- foo[!is.na(foo)]
#geocode_results <- sapply(foo, geocode_apply, simplify = FALSE)
#####
# Geocode every non-missing free-text location.
locs <- user_data$location[!is.na(user_data$location)]
geocode_results <- sapply(locs, geocode_apply, simplify = FALSE)
# Keep only responses the API answered with status "OK".
condition_a <- sapply(geocode_results, function(x) x["status"]=="OK")
geocode_results<-geocode_results[condition_a]
# Keep only unambiguous responses: exactly one geocoding result.
condition_b <- lapply(geocode_results, lapply, length)
condition_b2<-sapply(condition_b, function(x) x["results"]=="1")
geocode_results<-geocode_results[condition_b2]
length(geocode_results)
source("https://raw.githubusercontent.com/LucasPuente/geocoding/master/cleaning_geocoded_results.R")
# Flatten each response to (formatted address, lat, lng): row 1 of
# results.geometry.location is taken as latitude, row 2 as longitude.
results_b<-lapply(geocode_results, as.data.frame)
results_c<-lapply(results_b,function(x) subset(x, select=c("results.formatted_address",
                                                           "results.geometry.location")))
results_d<-lapply(results_c,function(x) data.frame(Location=x[1,"results.formatted_address"],
                                                   lat=x[1,"results.geometry.location"],
                                                   lng=x[2,"results.geometry.location"]))
# Stack into one data.table, keeping the original free-text location
# (the list names) alongside the geocoded fields.
results_e <- data.table::rbindlist(results_d)
results_f<-results_e[,Original_Location:=names(results_d)]
ipak <- function(pkg) {
  # Install any packages in `pkg` that are not yet installed, then attach
  # them all.
  #
  # Args:
  #   pkg: character vector of package names.
  #
  # Returns:
  #   A named logical vector indicating whether each package was attached.
  new.pkg <- pkg[!(pkg %in% installed.packages()[, "Package"])]
  # Explicit length check (avoid relying on numeric truthiness).
  if (length(new.pkg) > 0) {
    # NOTE(review): plain-http CRAN mirror kept for compatibility;
    # consider switching to https.
    install.packages(new.pkg, dependencies = TRUE,
                     repos = "http://cran.rstudio.com/")
  }
  # vapply instead of sapply so the return type is always logical.
  vapply(pkg, require, logical(1), character.only = TRUE)
}
packages <- c("maps", "mapproj")
ipak(packages)
| /R/mapping-followers.R | permissive | fboehm/brazil | R | false | false | 2,976 | r | library(twitteR)
#getUser("minsaude")
#load("~/Dropbox/credentials/my_oauth")
#options(httr_oauth_cache=TRUE) #This will enable the use of a local file to cache OAuth access credentials between R sessions.
#setup_twitter_oauth(consumer_key = "Z2ylc2PCCT1RHC3bULRlFZe6e",
# consumer_secret = "tTGnPZiPJhRhMAS0AG5nYDbPQ1sxLTr838ibuQ7QDqdCVcWYTH",
# access_token = "616715766-l8bzWPYKARP6DBQnvOAcJH6TEFBlSWyoXOGsPjCK",
# access_secret = "FhRd94eTycnvJp5tgUAGfR7O8AZTwv6iQ7ecDQ8tRRMVt")
library(smappR)
minsaude_followers <- smappR::getFollowers("minsaude", oauth_folder = "~/Dropbox/credentials")
user_data <- smappR::getUsersBatch(id = minsaude_followers, oauth_folder = "~/Dropbox/credentials")
#save(list = "user_data", file = "data/user_data.RData")
user_data$location[user_data$location == ""] <- NA
### from lucas puente
#Install key package helpers:
source("https://raw.githubusercontent.com/LucasPuente/geocoding/master/geocode_helpers.R")
#Install modified version of the geocode function
#(that now includes the api_key parameter):
source("https://raw.githubusercontent.com/LucasPuente/geocoding/master/modified_geocode.R")
geocode_apply<-function(x){
geocode(x, source = "google", output = "all", api_key="AIzaSyBnR6m37FrIR7SkDj7ZPyg5pmbrL68PWFI")
}
# api key calling will take time, I think
library(jsonlite)
library(dplyr)
library(plyr)
#####
#foo <- user_data$location[1:500]
#foo <- foo[!is.na(foo)]
#geocode_results <- sapply(foo, geocode_apply, simplify = FALSE)
#####
locs <- user_data$location[!is.na(user_data$location)]
geocode_results <- sapply(locs, geocode_apply, simplify = FALSE)
condition_a <- sapply(geocode_results, function(x) x["status"]=="OK")
geocode_results<-geocode_results[condition_a]
condition_b <- lapply(geocode_results, lapply, length)
condition_b2<-sapply(condition_b, function(x) x["results"]=="1")
geocode_results<-geocode_results[condition_b2]
length(geocode_results)
source("https://raw.githubusercontent.com/LucasPuente/geocoding/master/cleaning_geocoded_results.R")
results_b<-lapply(geocode_results, as.data.frame)
results_c<-lapply(results_b,function(x) subset(x, select=c("results.formatted_address",
"results.geometry.location")))
results_d<-lapply(results_c,function(x) data.frame(Location=x[1,"results.formatted_address"],
lat=x[1,"results.geometry.location"],
lng=x[2,"results.geometry.location"]))
results_e <- data.table::rbindlist(results_d)
results_f<-results_e[,Original_Location:=names(results_d)]
ipak <- function(pkg){
new.pkg <- pkg[!(pkg %in% installed.packages()[, "Package"])]
if (length(new.pkg))
install.packages(new.pkg, dependencies = TRUE, repos="http://cran.rstudio.com/")
sapply(pkg, require, character.only = TRUE)
}
packages <- c("maps", "mapproj")
ipak(packages)
|
complete <- function(directory, id = 1:332) {
  # Count, for each monitor ID, the number of rows with no missing values.
  #
  # Args:
  #   directory: path to the folder holding the monitor CSV files, which
  #              are named by zero-padded ID ("001.csv", "002.csv", ...).
  #   id:        integer vector of monitor IDs to process.
  #
  # Returns:
  #   A data frame with columns `id` and `nobs` (complete-case count).
  nobs <- vapply(
    id,
    function(i) {
      # Build each path directly by ID instead of changing the working
      # directory (setwd is a side effect) or relying on list.files()
      # position matching the ID.
      path <- file.path(directory, sprintf("%03d.csv", i))
      dat <- read.csv(path, header = TRUE)
      # complete.cases() flags rows with no NA in any column.
      sum(complete.cases(dat))
    },
    integer(1)
  )
  data.frame(id = id, nobs = nobs)
}
| /2_R_Programming/Assignment1/complete.R | no_license | figochin/datasciencecoursera | R | false | false | 949 | r | complete <- function(directory, id = 1:332) {
# Create empty vectors to store ID's and Row number
idlist <- c()
rowlist <- c()
idx <- 1
# Obtain the list of .csv files to be read
temp <- list.files(directory)[id]
# Enter directory with .csv files
setwd(directory)
for (i in id) {
# Read .csv files
DF <- read.csv(temp[idx], header = TRUE)
# Extract rows that does not have missing values
DFnotna <- DF[complete.cases(DF), ]
# Concatenate the ID's and Row number vectors
idlist <- c(idlist, i)
rowlist <- c(rowlist, nrow(DFnotna))
# Increment of index by 1
idx <- idx + 1
}
# Exit to previous directory
setwd('..')
# Return data frame of file name and number of complete cases
data.frame(id = idlist, nobs = rowlist)
}
|
#############################################
## CausalMPE project
# This script combines the results from multiple simulation scripts run
# on the O2 cluster.
# This set of simulations concerned a scenario where there are two subtypes,
# one with a null effect.
#########################################
rm(list = ls())
#library(Daniel)
library(dplyr)

# Simulation settings: two-digit patterns whose digits are recoded to
# letters (1 -> A, ..., 9 -> I) to match the saved file names.
x <- c(11:16, 21:26, 31:36, 41:46, 51:56, 61:66)
all.patts <- chartr("123456789", "ABCDEFGHI", x)

# Each results file holds 1000 replications; 25 summary columns are kept
# per replication.
n_reps <- 1000
all.res <- matrix(nrow = length(all.patts) * n_reps, ncol = 25)
keep <- list(c("keep", "all.patts", "all.res", "ii"))

# Read each pattern's .RData (which loads AllY, pop.never.s*, sace.*,
# ace.*, ci*, betaE, betaU and or.approx* into the workspace) and copy
# the quantities into this pattern's block of rows.
res_dir <- "/home/dn84/CausalMPE/Results/Scenario 28a"
for (ii in seq_along(all.patts))
{
  temp.patt <- all.patts[ii]
  load(file.path(res_dir, paste0("CMPEn50krareScen28a", temp.patt, ".RData")))
  # Rows reserved for this pattern's replications.
  rows <- ((ii - 1) * n_reps + 1):(ii * n_reps)
  all.res[rows, 1:3] <- AllY
  all.res[rows, 4] <- pop.never.s1
  all.res[rows, 5] <- pop.never.s2
  all.res[rows, 6] <- sace.diff1
  all.res[rows, 7] <- sace.diff2
  all.res[rows, 8] <- ace.diff1
  all.res[rows, 9] <- ace.diff2
  all.res[rows, 10:11] <- ci1
  all.res[rows, 12:13] <- ci2
  all.res[rows, 14] <- ace.or1
  all.res[rows, 15] <- ace.or2
  all.res[rows, 16] <- sace.or1
  all.res[rows, 17] <- sace.or2
  all.res[rows, 18] <- exp(betaE[1])
  all.res[rows, 19] <- exp(betaE[2])
  all.res[rows, 20] <- exp(betaU[1])
  all.res[rows, 21] <- exp(betaU[2])
  all.res[rows, 22] <- or.approx1
  all.res[rows, 23] <- or.approx2
  all.res[rows, 24] <- or.approx.true1
  all.res[rows, 25] <- or.approx.true2
  #rm(list = setdiff(ls(), keep))
}
colnames(all.res) <- c(paste0("AllY", 1:3), "pop.never.s1", "pop.never.s2", "sace.diff1",
                       "sace.diff2", "ace.diff1", "ace.diff2", "ci1.L", "ci1.H",
                       "ci2.L", "ci2.H", "ace.or1", "ace.or2", "sace.or1", "sace.or2",
                       "betaE1", "betaE2", "betaU1", "betaU2",
                       "or.approx1", "or.approx2", "or.approx.true1", "or.approx.true2")
# Write the combined table next to the project root (explicit path instead
# of a setwd side effect).
write.csv(all.res, "/home/dn84/CausalMPE/all.res.scen28a.csv", row.names = FALSE)
| /Simulations/Scripts/Summaries/SummarizeOnlineScenario28a.R | no_license | yadevi/CausalMPE | R | false | false | 2,639 | r | #############################################
## CausalMPE project
# This script combines the results from multiple simualation scripts carried online on the O2 cluster
# This set of simulations concerned a scenario where there are two subtypes, one with a null effect.
#########################################
rm(list = ls())
#library(Daniel)
library(dplyr)
x <- c(11:16, 21:26, 31:36, 41:46, 51:56, 61:66)
all.patts <- chartr("123456789", "ABCDEFGHI", x)
all.res <- matrix(nr = length(all.patts)*1000, nc = 25)
keep <- list(c("keep", "all.patts","all.res", "ii"))
#j <- 1
setwd("/home/dn84/CausalMPE/Results/Scenario 28a")
for (ii in 1:length(all.patts))
{
# j <- j + 1
temp.patt <- all.patts[ii]
load(paste0("CMPEn50krareScen28a",temp.patt,".RData"))
all.res[((ii - 1) * 1000 + 1):(ii * 1000) , 1:3] <- AllY
all.res[((ii - 1) * 1000 + 1):(ii * 1000) , 4] <- pop.never.s1
all.res[((ii - 1) * 1000 + 1):(ii * 1000) , 5] <- pop.never.s2
all.res[((ii - 1) * 1000 + 1):(ii * 1000) , 6] <- sace.diff1
all.res[((ii - 1) * 1000 + 1):(ii * 1000) , 7] <- sace.diff2
all.res[((ii - 1) * 1000 + 1):(ii * 1000) , 8] <- ace.diff1
all.res[((ii - 1) * 1000 + 1):(ii * 1000) , 9] <- ace.diff2
all.res[((ii - 1) * 1000 + 1):(ii * 1000) , 10:11] <- ci1
all.res[((ii - 1) * 1000 + 1):(ii * 1000) , 12:13] <- ci2
all.res[((ii - 1) * 1000 + 1):(ii * 1000) , 14] <- ace.or1
all.res[((ii - 1) * 1000 + 1):(ii * 1000) , 15] <- ace.or2
all.res[((ii - 1) * 1000 + 1):(ii * 1000) , 16] <- sace.or1
all.res[((ii - 1) * 1000 + 1):(ii * 1000) , 17] <- sace.or2
all.res[((ii - 1) * 1000 + 1):(ii * 1000) , 18] <- exp(betaE[1])
all.res[((ii - 1) * 1000 + 1):(ii * 1000) , 19] <- exp(betaE[2])
all.res[((ii - 1) * 1000 + 1):(ii * 1000) , 20] <- exp(betaU[1])
all.res[((ii - 1) * 1000 + 1):(ii * 1000) , 21] <- exp(betaU[2])
all.res[((ii - 1) * 1000 + 1):(ii * 1000) , 22] <- or.approx1
all.res[((ii - 1) * 1000 + 1):(ii * 1000) , 23] <- or.approx2
all.res[((ii - 1) * 1000 + 1):(ii * 1000) , 24] <- or.approx.true1
all.res[((ii - 1) * 1000 + 1):(ii * 1000) , 25] <- or.approx.true2
#rm(list = setdiff(ls(),keep))
}
colnames(all.res) <- c(paste0("AllY",1:3), "pop.never.s1", "pop.never.s2", "sace.diff1",
"sace.diff2", "ace.diff1", "ace.diff2", "ci1.L", "ci1.H",
"ci2.L", "ci2.H", "ace.or1", "ace.or2", "sace.or1", "sace.or2",
"betaE1", "betaE2", "betaU1", "betaU2",
"or.approx1", "or.approx2", "or.approx.true1","or.approx.true2")
setwd("/home/dn84/CausalMPE/")
write.csv(all.res, "all.res.scen28a.csv", row.names = F)
|
\name{get_t_outlier_cat}
\alias{get_t_outlier_cat}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
Calculate median price category based upon Tukey's Fences
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
Determines whether the median price lies within Tukey's outer fences (Q1 - 3*IQR, Q3 + 3*IQR); points outside this interval are "far out" in Tukey's terminology. Creates a new variable, "bundle_group", in the table and assigns it one of the values "above avg", "avg", or "below avg".
}
\usage{
get_t_outlier_cat(df)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{df}{
%% ~~Describe \code{x} here~~
An APCD dataframe with tags.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
dataframe
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
Alexander Nielson
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/get_t_outlier_cat.Rd | no_license | utah-osa/hcctools | R | false | false | 1,423 | rd | \name{get_t_outlier_cat}
\alias{get_t_outlier_cat}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
Calculate median price category based upon Tukey's Fences
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
Determines whether the median price lies within Tukey's outer fences (Q1 - 3*IQR, Q3 + 3*IQR); points outside this interval are "far out" in Tukey's terminology. Creates a new variable, "bundle_group", in the table and assigns it one of the values "above avg", "avg", or "below avg".
}
\usage{
get_t_outlier_cat(df)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{df}{
%% ~~Describe \code{x} here~~
An APCD dataframe with tags.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
dataframe
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
Alexander Nielson
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
library(acct)
library(readxl)
library(tidyverse)
# Crosswalk mapping ACT high-school codes to system/school numbers.
hs_crosswalk <- read_csv("N:/ORP_accountability/projects/Jessica/Data Returns/Helpful Documents/ACT_xwalkAug2018rev.csv")
# Current Year ACT Junior Day File
# One record per student for their highest ACT math score: convert key
# columns to numeric, keep grade-11 students with a state ID whose test
# location is not "M", attach system/school via the crosswalk, drop
# system 99855, then retain only each student's highest math score.
act_highest_math <- read_excel("N:/Assessment_Data Returns/ACT/2018-19/2019 Spring/Spring 2019 Final File/Spring 2019 Final School Day ACT.xlsx") %>%
  janitor::clean_names() %>%
  mutate_at(
    .vars = c("act_h_s_code_number", "act_scale_score_composite", "act_scale_score_mathematics", "state_assigned_student_id_number"),
    .f = as.numeric
  ) %>%
  filter(not_na(state_assigned_student_id_number), test_location != "M", grade_level == 11) %>%
  inner_join(hs_crosswalk, by = c("act_h_s_code_number" = "acthscode")) %>%
  filter(system != 99855) %>%
  # Per-student max, then keep every row tied at that max (a student can
  # retain multiple rows if the top score appears more than once).
  group_by(state_assigned_student_id_number) %>%
  mutate(highest = max(act_scale_score_mathematics)) %>%
  ungroup() %>%
  filter(act_scale_score_mathematics == highest) %>%
  transmute(
    system,
    school,
    first_name = student_first_name,
    last_name = student_last_name,
    grade = grade_level,
    subject = "ACT Math",
    state_student_id = state_assigned_student_id_number,
    act_subscore = act_scale_score_mathematics
  )
student_level <- read_csv("N:/ORP_accountability/projects/2019_student_level_file/2019_student_level_file.csv")
# Student ACT Substitution File
substitution_student <- student_level %>%
filter(original_subject %in% c("Algebra I", "Algebra II", "Geometry", "Integrated Math I", "Integrated Math II", "Integrated Math III")) %>%
anti_join(act_highest_math, ., by = "state_student_id") %>%
arrange(system, school, state_student_id)
write_csv(substitution_student, "N:/ORP_accountability/data/2019_final_accountability_files/act_substitution_student.csv", na = "")
# Split Student Level File
district_numbers <- sort(unique(substitution_student$system))
substitution_student %>%
split(., .$system) %>%
walk2(
.x = .,
.y = district_numbers,
.f = ~ write_csv(.x, path = paste0("N:/ORP_accountability/data/2019_final_accountability_files/split/", .y, "_ACTSubstitutionStudentLevelFile_20Jun2019.csv"), na = "")
)
school_names <- read_csv("N:/ORP_accountability/data/2019_final_accountability_files/names.csv")
# School ACT Substitution File
# Aggregate the student-level substitutions to school level; a math
# subscore of 22 or above counts as meeting the benchmark (the same
# cutoff used by the district and state aggregations below).
substitution_school <- substitution_student %>%
  filter(not_na(school)) %>%
  mutate(
    valid_tests = 1,
    # as.integer() for consistency with the district/state aggregations,
    # which build these counts the same way.
    n_met_benchmark = as.integer(act_subscore >= 22),
    n_not_met_benchmark = as.integer(act_subscore < 22)
  ) %>%
  group_by(system, school, subject) %>%
  summarise_at(c("valid_tests", "n_met_benchmark", "n_not_met_benchmark"), sum, na.rm = TRUE) %>%
  ungroup() %>%
  mutate(
    pct_met_benchmark = round5(100 * n_met_benchmark/valid_tests, 1),
    pct_not_met_benchmark = 100 - pct_met_benchmark
  ) %>%
  left_join(school_names, by = c("system", "school")) %>%
  select(system, system_name, school, school_name, everything())
district_names <- read_csv("N:/ORP_accountability/data/2019_final_accountability_files/names.csv") %>%
select(system, system_name) %>%
distinct()
# District ACT Substitution File
substitution_district <- substitution_student %>%
mutate(
valid_tests = 1,
n_met_benchmark = as.integer(act_subscore >= 22),
n_not_met_benchmark = as.integer(act_subscore < 22)
) %>%
group_by(system, subject) %>%
summarise_at(c("valid_tests", "n_met_benchmark", "n_not_met_benchmark"), sum, na.rm = TRUE) %>%
ungroup() %>%
mutate(
pct_met_benchmark = round5(100 * n_met_benchmark/valid_tests, 1),
pct_not_met_benchmark = 100 - pct_met_benchmark
) %>%
left_join(district_names, by = "system") %>%
select(system, system_name, everything())
write_csv(substitution_district, "N:/ORP_accountability/data/2019_final_accountability_files/act_substitution_district.csv", na = "")
# State ACT Substitution File
substitution_state <- substitution_student %>%
mutate(
valid_tests = 1,
n_met_benchmark = as.integer(act_subscore >= 22),
n_not_met_benchmark = as.integer(act_subscore < 22)
) %>%
group_by(subject) %>%
summarise_at(c("valid_tests", "n_met_benchmark", "n_not_met_benchmark"), sum, na.rm = TRUE) %>%
ungroup() %>%
mutate(
system = 0,
system_name = "State of Tennessee",
pct_met_benchmark = round5(100 * n_met_benchmark/valid_tests, 1),
pct_not_met_benchmark = 100 - pct_met_benchmark
) %>%
select(system, system_name, everything())
write_csv(substitution_state, "N:/ORP_accountability/data/2019_final_accountability_files/act_substitution_state.csv", na = "")
| /2019-accountability/act_substitution.R | no_license | tnedu/accountability | R | false | false | 4,869 | r | library(acct)
library(readxl)
library(tidyverse)
hs_crosswalk <- read_csv("N:/ORP_accountability/projects/Jessica/Data Returns/Helpful Documents/ACT_xwalkAug2018rev.csv")
# Current Year ACT Junior Day File
act_highest_math <- read_excel("N:/Assessment_Data Returns/ACT/2018-19/2019 Spring/Spring 2019 Final File/Spring 2019 Final School Day ACT.xlsx") %>%
janitor::clean_names() %>%
mutate_at(
.vars = c("act_h_s_code_number", "act_scale_score_composite", "act_scale_score_mathematics", "state_assigned_student_id_number"),
.f = as.numeric
) %>%
filter(not_na(state_assigned_student_id_number), test_location != "M", grade_level == 11) %>%
inner_join(hs_crosswalk, by = c("act_h_s_code_number" = "acthscode")) %>%
filter(system != 99855) %>%
group_by(state_assigned_student_id_number) %>%
mutate(highest = max(act_scale_score_mathematics)) %>%
ungroup() %>%
filter(act_scale_score_mathematics == highest) %>%
transmute(
system,
school,
first_name = student_first_name,
last_name = student_last_name,
grade = grade_level,
subject = "ACT Math",
state_student_id = state_assigned_student_id_number,
act_subscore = act_scale_score_mathematics
)
student_level <- read_csv("N:/ORP_accountability/projects/2019_student_level_file/2019_student_level_file.csv")
# Student ACT Substitution File
substitution_student <- student_level %>%
filter(original_subject %in% c("Algebra I", "Algebra II", "Geometry", "Integrated Math I", "Integrated Math II", "Integrated Math III")) %>%
anti_join(act_highest_math, ., by = "state_student_id") %>%
arrange(system, school, state_student_id)
write_csv(substitution_student, "N:/ORP_accountability/data/2019_final_accountability_files/act_substitution_student.csv", na = "")
# Split Student Level File
district_numbers <- sort(unique(substitution_student$system))
substitution_student %>%
split(., .$system) %>%
walk2(
.x = .,
.y = district_numbers,
.f = ~ write_csv(.x, path = paste0("N:/ORP_accountability/data/2019_final_accountability_files/split/", .y, "_ACTSubstitutionStudentLevelFile_20Jun2019.csv"), na = "")
)
school_names <- read_csv("N:/ORP_accountability/data/2019_final_accountability_files/names.csv")
# School ACT Substitution File
substitution_school <- substitution_student %>%
filter(not_na(school)) %>%
mutate(
valid_tests = 1,
n_met_benchmark = act_subscore >= 22,
n_not_met_benchmark = act_subscore < 22
) %>%
group_by(system, school, subject) %>%
summarise_at(c("valid_tests", "n_met_benchmark", "n_not_met_benchmark"), sum, na.rm = TRUE) %>%
ungroup() %>%
mutate(
pct_met_benchmark = round5(100 * n_met_benchmark/valid_tests, 1),
pct_not_met_benchmark = 100 - pct_met_benchmark
) %>%
left_join(school_names, by = c("system", "school")) %>%
select(system, system_name, school, school_name, everything())
write_csv(substitution_school, "N:/ORP_accountability/data/2019_final_accountability_files/act_substitution_school.csv", na = "")
district_names <- read_csv("N:/ORP_accountability/data/2019_final_accountability_files/names.csv") %>%
select(system, system_name) %>%
distinct()
# District ACT Substitution File
substitution_district <- substitution_student %>%
mutate(
valid_tests = 1,
n_met_benchmark = as.integer(act_subscore >= 22),
n_not_met_benchmark = as.integer(act_subscore < 22)
) %>%
group_by(system, subject) %>%
summarise_at(c("valid_tests", "n_met_benchmark", "n_not_met_benchmark"), sum, na.rm = TRUE) %>%
ungroup() %>%
mutate(
pct_met_benchmark = round5(100 * n_met_benchmark/valid_tests, 1),
pct_not_met_benchmark = 100 - pct_met_benchmark
) %>%
left_join(district_names, by = "system") %>%
select(system, system_name, everything())
write_csv(substitution_district, "N:/ORP_accountability/data/2019_final_accountability_files/act_substitution_district.csv", na = "")
# State ACT Substitution File
substitution_state <- substitution_student %>%
mutate(
valid_tests = 1,
n_met_benchmark = as.integer(act_subscore >= 22),
n_not_met_benchmark = as.integer(act_subscore < 22)
) %>%
group_by(subject) %>%
summarise_at(c("valid_tests", "n_met_benchmark", "n_not_met_benchmark"), sum, na.rm = TRUE) %>%
ungroup() %>%
mutate(
system = 0,
system_name = "State of Tennessee",
pct_met_benchmark = round5(100 * n_met_benchmark/valid_tests, 1),
pct_not_met_benchmark = 100 - pct_met_benchmark
) %>%
select(system, system_name, everything())
write_csv(substitution_state, "N:/ORP_accountability/data/2019_final_accountability_files/act_substitution_state.csv", na = "")
|
#' @include internal.R
NULL
#' Deprecation notice
#'
#' The functions listed here are deprecated.
#' This means that they once existed in earlier versions of the
#' of the \pkg{prioritizr} package, but they have since been removed
#' entirely, replaced by other functions, or renamed as other functions
#' in newer versions.
#' To help make it easier to transition to new versions of the \pkg{prioritizr}
#' package, we have listed alternatives for deprecated the functions
#' (where applicable).
#' If a function is described as being renamed, then this means
#' that only the name of the function has changed
#' (i.e. the inputs, outputs, and underlying code remain the same).
#'
#' @param ... not used.
#'
#' @details
#' The following functions have been deprecated:
#'
#' \describe{
#'
#' \item{`add_connected_constraints()`}{renamed
#' as the [add_contiguity_constraints()] function.}
#'
#' \item{`add_corridor_constraints()`}{replaced by the
#' [add_feature_contiguity_constraints()] function.}
#'
#' \item{`set_number_of_threads()`}{no longer needed used with the
#' implementation of superior data extraction.}
#'
#' \item{`get_number_of_threads()`}{no longer needed used with the
#' implementation of superior data extraction.}
#'
#' \item{`is.parallel()`}{no longer needed used with the
#' implementation of superior data extraction.}
#'
#' \item{`add_pool_portfolio()`}{replaced by the
#' [add_extra_portfolio()] and [add_top_portfolio()].}
#'
#' \item{`connected_matrix()`}{renamed as
#' the [adjacency_matrix()] function.}
#'
#' \item{`feature_representation()`}{replaced by
#' the [eval_feature_representation_summary()] function for consistency with
#' other functions.}
#'
#' \item{`replacement_cost()`}{renamed as
#' the [eval_replacement_importance()] function for consistency with
#' other functions for evaluating solutions.}
#'
#' \item{`rarity_weighted_richness()`}{renamed as
#' the [eval_rare_richness_importance()] function for consistency with
#' other functions for evaluating solutions.}
#'
#' \item{`ferrier_score()`}{renamed as
#' the [eval_ferrier_importance()] function for consistency with
#' other functions for evaluating solutions.}
#'
#' }
#'
#' @keywords deprecated
#'
#' @name prioritizr-deprecated
NULL
#' @rdname prioritizr-deprecated
#' @export
add_connected_constraints <- function(...) {
.Deprecated(
old = "add_contiguity_constraints",
new = "add_contiguity_constraints")
}
#' @rdname prioritizr-deprecated
#' @export
add_corridor_constraints <- function(...) {
.Deprecated(
old = "add_corridor_constraints",
new = "add_feature_contiguity_constraints")
}
#' @rdname prioritizr-deprecated
#' @export
set_number_of_threads <- function(...) {
.Deprecated(
old = "set_number_of_threads")
}
#' @rdname prioritizr-deprecated
#' @export
get_number_of_threads <- function(...) {
.Deprecated(
old = "get_number_of_threads")
}
#' @rdname prioritizr-deprecated
#' @export
is.parallel <- function(...) {
.Deprecated(
old = "is.parallel")
}
#' @rdname prioritizr-deprecated
#' @export
add_pool_portfolio <- function(...) {
.Deprecated(
old = "add_pool_portfolio",
new = "add_top_portfolio()")
}
#' @rdname prioritizr-deprecated
#' @export
connected_matrix <- function(...) {
.Deprecated(
old = "connected_matrix",
new = "adjacency_matrix")
}
#' @rdname prioritizr-deprecated
#' @export
feature_representation <- function(...) {
.Deprecated(
old = "feature_representation",
new = "eval_feature_representation_summary")
}
#' @rdname prioritizr-deprecated
#' @export
replacement_cost <- function(...) {
.Deprecated(
old = "replacement_cost",
new = "eval_replacement_importance")
}
#' @rdname prioritizr-deprecated
#' @export
rarity_weighted_richness <- function(...) {
.Deprecated(
old = "replacement_cost",
new = "eval_rwr_importance")
}
#' @rdname prioritizr-deprecated
#' @export
ferrier_score <- function(...) {
.Deprecated(
old = "ferrier_score",
new = "eval_ferrier_importance")
}
| /R/deprecated.R | no_license | BrandonEdwards/prioritizr | R | false | false | 4,048 | r | #' @include internal.R
NULL
#' Deprecation notice
#'
#' The functions listed here are deprecated.
#' This means that they once existed in earlier versions of the
#' of the \pkg{prioritizr} package, but they have since been removed
#' entirely, replaced by other functions, or renamed as other functions
#' in newer versions.
#' To help make it easier to transition to new versions of the \pkg{prioritizr}
#' package, we have listed alternatives for deprecated the functions
#' (where applicable).
#' If a function is described as being renamed, then this means
#' that only the name of the function has changed
#' (i.e. the inputs, outputs, and underlying code remain the same).
#'
#' @param ... not used.
#'
#' @details
#' The following functions have been deprecated:
#'
#' \describe{
#'
#' \item{`add_connected_constraints()`}{renamed
#' as the [add_contiguity_constraints()] function.}
#'
#' \item{`add_corridor_constraints()`}{replaced by the
#' [add_feature_contiguity_constraints()] function.}
#'
#' \item{`set_number_of_threads()`}{no longer needed used with the
#' implementation of superior data extraction.}
#'
#' \item{`get_number_of_threads()`}{no longer needed used with the
#' implementation of superior data extraction.}
#'
#' \item{`is.parallel()`}{no longer needed used with the
#' implementation of superior data extraction.}
#'
#' \item{`add_pool_portfolio()`}{replaced by the
#' [add_extra_portfolio()] and [add_top_portfolio()].}
#'
#' \item{`connected_matrix()`}{renamed as
#' the [adjacency_matrix()] function.}
#'
#' \item{`feature_representation()`}{replaced by
#' the [eval_feature_representation_summary()] function for consistency with
#' other functions.}
#'
#' \item{`replacement_cost()`}{renamed as
#' the [eval_replacement_importance()] function for consistency with
#' other functions for evaluating solutions.}
#'
#' \item{`rarity_weighted_richness()`}{renamed as
#' the [eval_rare_richness_importance()] function for consistency with
#' other functions for evaluating solutions.}
#'
#' \item{`ferrier_score()`}{renamed as
#' the [eval_ferrier_importance()] function for consistency with
#' other functions for evaluating solutions.}
#'
#' }
#'
#' @keywords deprecated
#'
#' @name prioritizr-deprecated
NULL
#' @rdname prioritizr-deprecated
#' @export
add_connected_constraints <- function(...) {
.Deprecated(
old = "add_contiguity_constraints",
new = "add_contiguity_constraints")
}
#' @rdname prioritizr-deprecated
#' @export
add_corridor_constraints <- function(...) {
.Deprecated(
old = "add_corridor_constraints",
new = "add_feature_contiguity_constraints")
}
#' @rdname prioritizr-deprecated
#' @export
set_number_of_threads <- function(...) {
.Deprecated(
old = "set_number_of_threads")
}
#' @rdname prioritizr-deprecated
#' @export
get_number_of_threads <- function(...) {
.Deprecated(
old = "get_number_of_threads")
}
#' @rdname prioritizr-deprecated
#' @export
is.parallel <- function(...) {
.Deprecated(
old = "is.parallel")
}
#' @rdname prioritizr-deprecated
#' @export
add_pool_portfolio <- function(...) {
.Deprecated(
old = "add_pool_portfolio",
new = "add_top_portfolio()")
}
#' @rdname prioritizr-deprecated
#' @export
connected_matrix <- function(...) {
.Deprecated(
old = "connected_matrix",
new = "adjacency_matrix")
}
#' @rdname prioritizr-deprecated
#' @export
feature_representation <- function(...) {
.Deprecated(
old = "feature_representation",
new = "eval_feature_representation_summary")
}
#' @rdname prioritizr-deprecated
#' @export
replacement_cost <- function(...) {
.Deprecated(
old = "replacement_cost",
new = "eval_replacement_importance")
}
#' @rdname prioritizr-deprecated
#' @export
rarity_weighted_richness <- function(...) {
.Deprecated(
old = "replacement_cost",
new = "eval_rwr_importance")
}
#' @rdname prioritizr-deprecated
#' @export
ferrier_score <- function(...) {
.Deprecated(
old = "ferrier_score",
new = "eval_ferrier_importance")
}
|
### where different genotypes can exploid different (limited) resources
# input: compet, coordalive, haspartner, popgenome, G, K
# output : popsurvival
# each patch can hold at max K individuals, as it has K units of each resource type (ie for each gene)
competition <- function( compet, coordalive, haspartner, popgenome, G , K){
if(compet=="_ded_"){
occupiedpatches <- sort( unique( coordalive ) )
# prepare for computing the number of copies of each allele present in each patch
patch_compet <- matrix( nrow = length(occupiedpatches) , ncol= 2*G )
rownames( patch_compet ) = occupiedpatches # /!/ names are sorted
for (g in 1:G){
Tgenepatch <- table( factor( popgenome[,g], levels = 0:2), coordalive ) # table with allelic sums per patch for gene g
patch_compet[,(g*2-1)] <- Tgenepatch[1,]*2 + Tgenepatch[2,] # vector with number of copies of the 0 allele of gene g on the patch
patch_compet[,(g*2)] <- Tgenepatch[3,]*2 + Tgenepatch[2,] # vector with number of copies of the 1 allele of gene g on the patch
}
# table with reward of possessing each allele (col: al0Gen1 al1Gen1 al0Gen2 al1Gen2...) on each patch (row)
patch_compet <- K / patch_compet # total resource of each type available in the patch, divided by number of corresponding alleles in the patch
patch_compet[ patch_compet > 1 ] <- 1 # on patches with excess food, individual share is bounded at 1; Inf values also become 1 but that doesn't matter as they stand for alleles no-one has
popgenome_haspartner <- popgenome[haspartner==T,]
reward_haspartner <- patch_compet[ coordalive[ haspartner == T ], ] # matrix of the reward got by each individual (row) from each of his alleles (g1a0, g1a1, g2a0 etc)
popfitness_haspartner <- vector( length = sum(haspartner) )
for( g in 1:G ){
fitness_g <- popgenome_haspartner[,g] * reward_haspartner[, 2*g] + ( 2- popgenome_haspartner[,g] ) * reward_haspartner[, 2*g-1]
popfitness_haspartner <- popfitness_haspartner + fitness_g
}
popfitness_haspartner <- popfitness_haspartner / (2*G) # the survival probability resulting from resource acquisition, of each individual with reproductive prospects
# each individual of the population now stochastically survives (1) or not (0). Without reproductive prospects, the individual dies.
if( sum(haspartner) == 0) {
popsurvival <- "extinction"
} else {
popsurvival <- numeric( length(haspartner) )
popsurvival[ haspartner==T ] <- mapply( FUN = rbinom, prob = popfitness_haspartner, size = 1, n = 1)
}
return(popsurvival)
}
if(compet=='_fc_'){
if( sum(haspartner) == 0) {
popsurvival <- "extinction"
} else {
pop_patch <- table( coordalive )
popfitness <- K / pop_patch[ coordalive ]
popfitness[ popfitness > 1 ] <- 1
condition <- !is.na( popfitness )
popsurvival <- rep( 0, length(coordalive) )
popsurvival[ condition ] <- mapply( FUN = rbinom, prob = popfitness[ condition ], size = 1, n = 1)
}
return( popsurvival )
}
}
library(compiler)
competition <- cmpfun(competition)
| /competition.R | no_license | Redsiana/GPG | R | false | false | 3,211 | r | ### where different genotypes can exploid different (limited) resources
# input: compet, coordalive, haspartner, popgenome, G, K
# output : popsurvival
# each patch can hold at max K individuals, as it has K units of each resource type (ie for each gene)
competition <- function( compet, coordalive, haspartner, popgenome, G , K){
if(compet=="_ded_"){
occupiedpatches <- sort( unique( coordalive ) )
# prepare for computing the number of copies of each allele present in each patch
patch_compet <- matrix( nrow = length(occupiedpatches) , ncol= 2*G )
rownames( patch_compet ) = occupiedpatches # /!/ names are sorted
for (g in 1:G){
Tgenepatch <- table( factor( popgenome[,g], levels = 0:2), coordalive ) # table with allelic sums per patch for gene g
patch_compet[,(g*2-1)] <- Tgenepatch[1,]*2 + Tgenepatch[2,] # vector with number of copies of the 0 allele of gene g on the patch
patch_compet[,(g*2)] <- Tgenepatch[3,]*2 + Tgenepatch[2,] # vector with number of copies of the 1 allele of gene g on the patch
}
# table with reward of possessing each allele (col: al0Gen1 al1Gen1 al0Gen2 al1Gen2...) on each patch (row)
patch_compet <- K / patch_compet # total resource of each type available in the patch, divided by number of corresponding alleles in the patch
patch_compet[ patch_compet > 1 ] <- 1 # on patches with excess food, individual share is bounded at 1; Inf values also become 1 but that doesn't matter as they stand for alleles no-one has
popgenome_haspartner <- popgenome[haspartner==T,]
reward_haspartner <- patch_compet[ coordalive[ haspartner == T ], ] # matrix of the reward got by each individual (row) from each of his alleles (g1a0, g1a1, g2a0 etc)
popfitness_haspartner <- vector( length = sum(haspartner) )
for( g in 1:G ){
fitness_g <- popgenome_haspartner[,g] * reward_haspartner[, 2*g] + ( 2- popgenome_haspartner[,g] ) * reward_haspartner[, 2*g-1]
popfitness_haspartner <- popfitness_haspartner + fitness_g
}
popfitness_haspartner <- popfitness_haspartner / (2*G) # the survival probability resulting from resource acquisition, of each individual with reproductive prospects
# each individual of the population now stochastically survives (1) or not (0). Without reproductive prospects, the individual dies.
if( sum(haspartner) == 0) {
popsurvival <- "extinction"
} else {
popsurvival <- numeric( length(haspartner) )
popsurvival[ haspartner==T ] <- mapply( FUN = rbinom, prob = popfitness_haspartner, size = 1, n = 1)
}
return(popsurvival)
}
if(compet=='_fc_'){
if( sum(haspartner) == 0) {
popsurvival <- "extinction"
} else {
pop_patch <- table( coordalive )
popfitness <- K / pop_patch[ coordalive ]
popfitness[ popfitness > 1 ] <- 1
condition <- !is.na( popfitness )
popsurvival <- rep( 0, length(coordalive) )
popsurvival[ condition ] <- mapply( FUN = rbinom, prob = popfitness[ condition ], size = 1, n = 1)
}
return( popsurvival )
}
}
library(compiler)
competition <- cmpfun(competition)
|
# Minimize_X { (1/2)||X - A||_F^2 + lam||P*X||_1} s.t. X >= del * I
# ...using ADMM
#' Solving penalized Frobenius problem.
#'
#' This function solves the optimization problem
#'
#' Minimize_X (1/2)||X - A||_F^2 + lam||P*X||_1 s.t. X >= del * I.
#'
#' This is the prox function for the generalized gradient descent of Bien &
#' Tibshirani 2011 (see full reference below).
#'
#' This is the R implementation of the algorithm in Appendix 3 of Bien, J., and
#' Tibshirani, R. (2011), "Sparse Estimation of a Covariance Matrix,"
#' Biometrika. 98(4). 807--820. It uses an ADMM approach to solve the problem
#'
#' Minimize_X (1/2)||X - A||_F^2 + lam||P*X||_1 s.t. X >= del * I.
#'
#' Here, the multiplication between P and X is elementwise. The inequality in
#' the constraint is a lower bound on the minimum eigenvalue of the matrix X.
#'
#' Note that there are two variables X and Z that are outputted. Both are
#' estimates of the optimal X. However, Z has exact zeros whereas X has
#' eigenvalues at least del. Running the ADMM algorithm long enough, these two
#' are guaranteed to converge.
#'
#' @param A A symmetric matrix.
#' @param del A non-negative scalar. Lower bound on eigenvalues.
#' @param lam A non-negative scalar. L1 penalty parameter.
#' @param P Matrix with non-negative elements and dimension of A. Allows for
#' differing L1 penalty parameters.
#' @param rho ADMM parameter. Can affect rate of convergence a lot.
#' @param tol Convergence threshold.
#' @param maxiters Maximum number of iterations.
#' @param verb Controls whether to be verbose.
#' @return \item{X}{Estimate of optimal X.} \item{Z}{Estimate of optimal X.}
#' \item{obj}{Objective values.}
#' @author Jacob Bien and Rob Tibshirani
#' @seealso spcov
#' @references Bien, J., and Tibshirani, R. (2011), "Sparse Estimation of a
#' Covariance Matrix," Biometrika. 98(4). 807--820.
#' @keywords multivariate
#' @export
#' @examples
#'
#' set.seed(1)
#' n <- 100
#' p <- 200
#' # generate a covariance matrix:
#' model <- GenerateCliquesCovariance(ncliques=4, cliquesize=p / 4, 1)
#'
#' # generate data matrix with x[i, ] ~ N(0, model$Sigma):
#' x <- matrix(rnorm(n * p), ncol=p) %*% model$A
#' S <- var(x)
#'
#' # compute sparse, positive covariance estimator:
#' P <- matrix(1, p, p)
#' diag(P) <- 0
#' lam <- 0.1
#' aa <- ProxADMM(S, 0.01, lam, P)
#'
ProxADMM <- function(A, del, lam, P, rho=.1, tol=1e-6, maxiters=100, verb=FALSE) {
# Minimize_X { (1/2)||X - A||_F^2 + lam||P*X||_1} s.t. X >= del * I
#
# ADMM approach
# first, check if simple soft-thesholding works... if so, skip the ADMM!
soft <- SoftThreshold(A, lam * P)
minev <- min(eigen(soft, symmetric=T, only.values=T)$val)
if (minev >= del) {
return(list(X=soft, Z=soft, obj=ComputeProxObjective(soft, A, lam, P)))
}
p <- nrow(A)
obj <- NULL
# initialize Z, Y
Z <- soft
Y <- matrix(0, p, p)
# main loop
for (i in seq(maxiters)) {
# update X:
B <- (A + rho * Z - Y) / (1 + rho)
if (min(eigen(B, symmetric=T, only.values=T)$val) < del) {
# note: even though eigen is called twice, only.values=T is
# much faster, making this worthwhile.
eig <- eigen(B, symmetric=T)
X <- eig$vec %*% diag(pmax(eig$val, del)) %*% t(eig$vec)
}
else {
X <- B
}
# check for convergence:
obj <- c(obj, ComputeProxObjective(X, A, lam, P))
if (verb)
cat(" ", obj[i], fill=T)
if (i > 1)
if (obj[i] > obj[i - 1] - tol) {
if (verb)
cat(" ADMM converged after ", i, " steps.", fill=T)
break
}
# update Z:
Z <- SoftThreshold(X + Y / rho, lam * P / rho)
# update Y:
Y <- Y + rho * (X - Z)
}
list(X=X, Z=Z, obj=obj)
}
SoftThreshold <- function(x, lam) {
# note: this works also if lam is a matrix of the same size as x.
sign(x) * (abs(x) - lam) * (abs(x) > lam)
}
ComputeProxObjective <- function(X, A, lam, P) {
sum((X-A)^2) / 2 + lam * sum(abs(P*X))
}
| /R/prox.R | no_license | cran/spcov | R | false | false | 3,988 | r | # Minimize_X { (1/2)||X - A||_F^2 + lam||P*X||_1} s.t. X >= del * I
# ...using ADMM
#' Solving penalized Frobenius problem.
#'
#' This function solves the optimization problem
#'
#' Minimize_X (1/2)||X - A||_F^2 + lam||P*X||_1 s.t. X >= del * I.
#'
#' This is the prox function for the generalized gradient descent of Bien &
#' Tibshirani 2011 (see full reference below).
#'
#' This is the R implementation of the algorithm in Appendix 3 of Bien, J., and
#' Tibshirani, R. (2011), "Sparse Estimation of a Covariance Matrix,"
#' Biometrika. 98(4). 807--820. It uses an ADMM approach to solve the problem
#'
#' Minimize_X (1/2)||X - A||_F^2 + lam||P*X||_1 s.t. X >= del * I.
#'
#' Here, the multiplication between P and X is elementwise. The inequality in
#' the constraint is a lower bound on the minimum eigenvalue of the matrix X.
#'
#' Note that there are two variables X and Z that are outputted. Both are
#' estimates of the optimal X. However, Z has exact zeros whereas X has
#' eigenvalues at least del. Running the ADMM algorithm long enough, these two
#' are guaranteed to converge.
#'
#' @param A A symmetric matrix.
#' @param del A non-negative scalar. Lower bound on eigenvalues.
#' @param lam A non-negative scalar. L1 penalty parameter.
#' @param P Matrix with non-negative elements and dimension of A. Allows for
#' differing L1 penalty parameters.
#' @param rho ADMM parameter. Can affect rate of convergence a lot.
#' @param tol Convergence threshold.
#' @param maxiters Maximum number of iterations.
#' @param verb Controls whether to be verbose.
#' @return \item{X}{Estimate of optimal X.} \item{Z}{Estimate of optimal X.}
#' \item{obj}{Objective values.}
#' @author Jacob Bien and Rob Tibshirani
#' @seealso spcov
#' @references Bien, J., and Tibshirani, R. (2011), "Sparse Estimation of a
#' Covariance Matrix," Biometrika. 98(4). 807--820.
#' @keywords multivariate
#' @export
#' @examples
#'
#' set.seed(1)
#' n <- 100
#' p <- 200
#' # generate a covariance matrix:
#' model <- GenerateCliquesCovariance(ncliques=4, cliquesize=p / 4, 1)
#'
#' # generate data matrix with x[i, ] ~ N(0, model$Sigma):
#' x <- matrix(rnorm(n * p), ncol=p) %*% model$A
#' S <- var(x)
#'
#' # compute sparse, positive covariance estimator:
#' P <- matrix(1, p, p)
#' diag(P) <- 0
#' lam <- 0.1
#' aa <- ProxADMM(S, 0.01, lam, P)
#'
ProxADMM <- function(A, del, lam, P, rho=.1, tol=1e-6, maxiters=100, verb=FALSE) {
# Minimize_X { (1/2)||X - A||_F^2 + lam||P*X||_1} s.t. X >= del * I
#
# ADMM approach
# first, check if simple soft-thesholding works... if so, skip the ADMM!
soft <- SoftThreshold(A, lam * P)
minev <- min(eigen(soft, symmetric=T, only.values=T)$val)
if (minev >= del) {
return(list(X=soft, Z=soft, obj=ComputeProxObjective(soft, A, lam, P)))
}
p <- nrow(A)
obj <- NULL
# initialize Z, Y
Z <- soft
Y <- matrix(0, p, p)
# main loop
for (i in seq(maxiters)) {
# update X:
B <- (A + rho * Z - Y) / (1 + rho)
if (min(eigen(B, symmetric=T, only.values=T)$val) < del) {
# note: even though eigen is called twice, only.values=T is
# much faster, making this worthwhile.
eig <- eigen(B, symmetric=T)
X <- eig$vec %*% diag(pmax(eig$val, del)) %*% t(eig$vec)
}
else {
X <- B
}
# check for convergence:
obj <- c(obj, ComputeProxObjective(X, A, lam, P))
if (verb)
cat(" ", obj[i], fill=T)
if (i > 1)
if (obj[i] > obj[i - 1] - tol) {
if (verb)
cat(" ADMM converged after ", i, " steps.", fill=T)
break
}
# update Z:
Z <- SoftThreshold(X + Y / rho, lam * P / rho)
# update Y:
Y <- Y + rho * (X - Z)
}
list(X=X, Z=Z, obj=obj)
}
SoftThreshold <- function(x, lam) {
# note: this works also if lam is a matrix of the same size as x.
sign(x) * (abs(x) - lam) * (abs(x) > lam)
}
ComputeProxObjective <- function(X, A, lam, P) {
sum((X-A)^2) / 2 + lam * sum(abs(P*X))
}
|
##################################################
#
# Plotting Probability Distributions and
# Calculating Probabilities with an Exponential Curve
#
# Student name(s): Daniel Camacho
#
############## Setup ##############
# We will use tibbles, pipes, and ggplot
require(tidyverse)
######## Excercices - Part B ####################
######## Computer Life ##########################
# Question (4):
# a) On the average, a certain computer part
# lasts ten years. The length of time the computer
# part lasts is exponentially distributed. What is the
# probability that the part will last between 7 and 11
# years?
# b) Eighty percent of these computer parts last
# at most how long? (Hint: what R function is the
# inverse of pexp?)
# Set the parameters for the distribution
lambda <- 10 # lambda = average "lifetime"
rate <- 1/lambda # parameter for "dexp" is rate = 1/lambda
lower <- 7
upper <- 11 # We have to stop the graph somewhere
percentile <- 80
# Generate 100 x-values between 0 and 4*lambda
# These will be used to generate points on the
# exponential curve
# for ggplot to work we need the data in a tibble
data <- tibble(x = seq(0, 4*lambda,length = 100))
# plot the exponential distribution and
# shade the region
# Note that this calculates the y values
# on the fly using "dexp"
myPlot <- data %>%
ggplot(aes(x = x)) +
stat_function(fun = dexp,
args = list(rate = rate), # parameters for "dexp"
xlim = c(lower,upper),
geom = "area", # shade the region
fill = "lightgreen") +
stat_function(fun = dexp, # draw the curve
args = list(rate = rate)) + # parameters for "dexp"
xlab("Life of Computer (Years)") +
ylab("")
# we saved the plot so that we could add the title to it later
# to look at it now, we need to execute this line
myPlot
# Now calculate the probability
# P( x < upper)
# That is, find the area shaded in green
# For this we use the pexp function
# Note that
area <- pexp(upper, rate = 1/lambda) - pexp(lower, rate = 1/lambda)
time <- -plnorm(percentile, rate, lower = FALSE, log = TRUE)
# add probability to the plot formatted properly
result <- paste("P(",lower," < Life of Computer <",upper,") =",
signif(area, digits=3))
# Eighty percent of these computer parts last almost 11.6 years
result2 <- paste("80th Percentile:", "Almost",
signif(time, digits=3), "years")
myPlot + labs(title = paste("Exponential distribution with lambda = ",
lambda),
subtitle = result,
caption = result2) + theme(
plot.caption = element_text(hjust = 0, size = 12, face = "italic") # move caption to the left
)
##################################################
########## Chi-Square Random Variable########################################
# Question (5):
# What is the probability that a chi-square random
# variable with 10 degrees of freedom is greater
# than 15.99? (This curve can be graphed on the
# interval [0,25].)
# Set the parameters for the distribution
dfchi <- 10
lower <- 15.99
upper <- 25
# Generate 100 x-values between 0 and 4*lambda
# These will be used to generate points on the
# exponential curve
# for ggplot to work we need the data in a tibble
data <- tibble(x = seq(0, upper,length = 100))
# plot the chi-aquare distribution and
# shade the region
# Note that this calculates the y values
# on the fly using "dexp"
myPlot <- data %>%
ggplot(aes(x = x)) +
stat_function(fun = dchisq,
xlim = c(lower,upper),
args = list(df = dfchi),
geom = "area", # shade the region
fill = "lightgreen") +
stat_function(fun = dchisq, # draw the curve
args = list(df = dfchi)) + # parameters for "dexp"
xlab("Values") +
ylab("")
# we saved the plot so that we could add the title to it later
# to look at it now, we need to execute this line
myPlot
# Now calculate the probability
# P( x < upper)
# That is, find the area shaded in green
# For this we use the pexp function
# Note that
area <- 1 - pchisq(lower, df)
# add probability to the plot formatted properly
result <- paste("P(Value >",lower,") =",
signif(area, digits=3))
myPlot + ggtitle(paste("Chi-Square Distribution with df = ",
dfchi),
subtitle = result)
##################################################
######### F- Statistics Distribution ################
# Question (6):
# Find the probability that an F-statistics is
# less than 0.595 if the degrees of freedom are
# df = 11 and df = 6. (This curve can be graphed on the
# interval [0,5].)
# Set the parameters for the distribution
dfstat1 <- 11
dfstat2 <- 6
lower <- 0
upper <- 0.595 # We have to stop the graph somewhere
# Generate 100 x-values between 0 and 4*lambda
# These will be used to generate points on the
# exponential curve
# for ggplot to work we need the data in a tibble
data <- tibble(x = seq(0, 5,length = 100))
# plot the exponential distribution and
# shade the region
# Note that this calculates the y values
# on the fly using "dexp"
myPlot <- data %>%
ggplot(aes(x = x)) +
stat_function(fun = df,
args = list(df1 = dfstat1, df2 = dfstat2), # parameters for "df"
xlim = c(lower,upper),
geom = "area", # shade the region
fill = "lightgreen") +
stat_function(fun = df, # draw the curve
args = list(df1 = dfstat1, df2 = dfstat2)) + # parameters for "df"
xlab("Value") +
ylab("")
# we saved the plot so that we could add the title to it later
# to look at it now, we need to execute this line
myPlot
# Now calculate the probability
# P( x < upper)
# That is, find the area shaded in green
# For this we use the pexp function
# Note that
area <- pf(upper, df1 = dfstat1, df2 = dfstat2)
# add probability to the plot formatted properly
result <- paste("P(Value <", upper,") =",
signif(area, digits=3))
myPlot + ggtitle(paste("F Distribution with df1 = ",
dfstat1,"and df2 =", dfstat2),
subtitle = result)
| /Ch 1/Lab 2/Lab 2 Probs with an exp curve (Daniel Camacho).R | no_license | daniel1197cama/MATH-308-Adv-Data-Modeling | R | false | false | 6,484 | r | ##################################################
#
# Plotting Probability Distributions and
# Calculating Probabilities with an Exponential Curve
#
# Student name(s): Daniel Camacho
#
############## Setup ##############
# We will use tibbles, pipes, and ggplot
require(tidyverse)
######## Exercises - Part B ####################
######## Computer Life ##########################
# Question (4):
# a) On the average, a certain computer part
# lasts ten years. The length of time the computer
# part lasts is exponentially distributed. What is the
# probability that the part will last between 7 and 11
# years?
# b) Eighty percent of these computer parts last
# at most how long? (Hint: qexp is the inverse of pexp.)
# Set the parameters for the distribution
lambda <- 10 # lambda = average "lifetime" (mean of the exponential)
rate <- 1/lambda # parameter for "dexp"/"pexp"/"qexp" is rate = 1/lambda
lower <- 7 # lower bound for the probability in part (a)
upper <- 11 # upper bound for the probability in part (a)
percentile <- 80 # percentile requested in part (b)
# Generate 100 x-values between 0 and 4*lambda
# These will be used to generate points on the
# exponential curve
# for ggplot to work we need the data in a tibble
data <- tibble(x = seq(0, 4*lambda,length = 100))
# plot the exponential distribution and
# shade the region between lower and upper
# Note that this calculates the y values
# on the fly using "dexp"
myPlot <- data %>%
  ggplot(aes(x = x)) +
  stat_function(fun = dexp,
                args = list(rate = rate), # parameters for "dexp"
                xlim = c(lower,upper),
                geom = "area", # shade the region
                fill = "lightgreen") +
  stat_function(fun = dexp, # draw the curve
                args = list(rate = rate)) + # parameters for "dexp"
  xlab("Life of Computer (Years)") +
  ylab("")
# we saved the plot so that we could add the title to it later
# to look at it now, we need to execute this line
myPlot
# Part (a): P(lower < x < upper), the area shaded in green,
# from the exponential CDF "pexp"
area <- pexp(upper, rate = rate) - pexp(lower, rate = rate)
# Part (b): the 80th percentile is the quantile function (inverse
# CDF) of the exponential evaluated at 0.80.
# (Fixed: the original used plnorm -- the lognormal CDF with a
# log = TRUE trick -- which produced a meaningless value; qexp is
# the correct inverse of pexp.)
time <- qexp(percentile / 100, rate = rate)
# add probability to the plot formatted properly
result <- paste("P(",lower," < Life of Computer <",upper,") =",
                signif(area, digits=3))
# Eighty percent of these computer parts last about 16.1 years
result2 <- paste("80th Percentile:", "Almost",
                 signif(time, digits=3), "years")
myPlot + labs(title = paste("Exponential distribution with lambda = ",
                            lambda),
              subtitle = result,
              caption = result2) + theme(
                plot.caption = element_text(hjust = 0, size = 12, face = "italic") # move caption to the left
              )
##################################################
########## Chi-Square Random Variable########################################
# Question (5):
# What is the probability that a chi-square random
# variable with 10 degrees of freedom is greater
# than 15.99? (This curve can be graphed on the
# interval [0,25].)
# Set the parameters for the distribution
dfchi <- 10 # degrees of freedom of the chi-square distribution
lower <- 15.99 # cutoff: we want P(Value > lower)
upper <- 25 # right edge of the plotted interval
# Generate 100 x-values between 0 and upper
# These will be used to generate points on the
# chi-square density curve
# for ggplot to work we need the data in a tibble
data <- tibble(x = seq(0, upper,length = 100))
# plot the chi-square distribution and
# shade the upper-tail region
# Note that this calculates the y values
# on the fly using "dchisq"
myPlot <- data %>%
  ggplot(aes(x = x)) +
  stat_function(fun = dchisq,
                xlim = c(lower,upper),
                args = list(df = dfchi),
                geom = "area", # shade the region
                fill = "lightgreen") +
  stat_function(fun = dchisq, # draw the curve
                args = list(df = dfchi)) + # parameters for "dchisq"
  xlab("Values") +
  ylab("")
# we saved the plot so that we could add the title to it later
# to look at it now, we need to execute this line
myPlot
# Now calculate the probability P(Value > lower): the upper-tail
# area, i.e. 1 minus the chi-square CDF "pchisq" at lower.
# (Fixed: the original called pchisq(lower, df), passing the F
# density *function* stats::df as the degrees of freedom; the
# number dfchi is what is required.)
area <- 1 - pchisq(lower, df = dfchi)
# add probability to the plot formatted properly
result <- paste("P(Value >",lower,") =",
                signif(area, digits=3))
myPlot + ggtitle(paste("Chi-Square Distribution with df = ",
                       dfchi),
                 subtitle = result)
##################################################
######### F- Statistics Distribution ################
# Question (6):
# Find the probability that an F-statistic is
# less than 0.595 if the degrees of freedom are
# df1 = 11 and df2 = 6. (This curve can be graphed on the
# interval [0,5].)
# Set the parameters for the F distribution
dfstat1 <- 11 # numerator degrees of freedom
dfstat2 <- 6 # denominator degrees of freedom
lower <- 0 # left edge of the shaded region
upper <- 0.595 # right edge of the shaded region (the question's cutoff)
# Generate 100 x-values between 0 and 5
# These will be used to generate points on the
# F density curve
# for ggplot to work we need the data in a tibble
data <- tibble(x = seq(0, 5,length = 100))
# plot the F distribution and
# shade the region
# Note that this calculates the y values
# on the fly using "df" (the F probability density)
myPlot <- data %>%
  ggplot(aes(x = x)) +
  stat_function(fun = df,
                args = list(df1 = dfstat1, df2 = dfstat2), # parameters for "df"
                xlim = c(lower,upper),
                geom = "area", # shade the region
                fill = "lightgreen") +
  stat_function(fun = df, # draw the curve
                args = list(df1 = dfstat1, df2 = dfstat2)) + # parameters for "df"
  xlab("Value") +
  ylab("")
# we saved the plot so that we could add the title to it later
# to look at it now, we need to execute this line
myPlot
# Now calculate the probability
# P( x < upper)
# That is, find the area shaded in green
# For this we use the pf function (the F CDF)
area <- pf(upper, df1 = dfstat1, df2 = dfstat2)
# add probability to the plot formatted properly
result <- paste("P(Value <", upper,") =",
                signif(area, digits=3))
myPlot + ggtitle(paste("F Distribution with df1 = ",
                       dfstat1,"and df2 =", dfstat2),
                 subtitle = result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/r2sd.R
\name{r2sd}
\alias{r2sd}
\title{Scale a vector (or vectors) by two standard deviations}
\usage{
r2sd(x, na = TRUE)
}
\arguments{
\item{x}{a vector, likely in your data frame}
\item{na}{what to do with NAs in the vector. Defaults to TRUE (i.e. passes over the missing observations)}
}
\value{
The function returns a numeric vector rescaled with a mean of 0 and a
standard deviation of .5.
}
\description{
\code{r2sd} allows you to rescale a numeric vector such that the
ensuing output has a mean of 0 and a standard deviation of .5. \code{r2sd_at} is a wrapper for
\code{mutate_at} and \code{rename_at} from \pkg{dplyr}. It both rescales the supplied vectors to
new vectors and renames the vectors to each have a prefix of \code{z_}.
}
\details{
By default, \code{na.rm} is set to TRUE. If you have missing data, the function will just pass
over them.
Gelman (2008) argues that rescaling by two standard deviations puts regression inputs
on roughly the same scale no matter their original scale. This allows for some honest, if preliminary,
assessment of relative effect sizes from the regression output. This does that, but
without requiring the \code{rescale} function from \pkg{arm}.
I'm trying to reduce the packages on which my workflow relies.
Importantly, I tend to rescale only the ordinal and interval inputs and leave the binary inputs as 0/1.
So, my \code{r2sd} function doesn't have any of the fancier if-else statements that Gelman's \code{rescale}
function has.
}
\examples{
x <- rnorm(100)
r2sd(x)
r2sd_at(mtcars, c("mpg", "hp", "disp"))
}
\references{
Gelman, Andrew. 2008. "Scaling Regression Inputs by Dividing by Two Standard Deviations." \emph{Statistics in Medicine} 27: 2865--2873.
}
| /man/r2sd.Rd | no_license | svmiller/stevemisc | R | false | true | 1,795 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/r2sd.R
\name{r2sd}
\alias{r2sd}
\title{Scale a vector (or vectors) by two standard deviations}
\usage{
r2sd(x, na = TRUE)
}
\arguments{
\item{x}{a vector, likely in your data frame}
\item{na}{what to do with NAs in the vector. Defaults to TRUE (i.e. passes over the missing observations)}
}
\value{
The function returns a numeric vector rescaled with a mean of 0 and a
standard deviation of .5.
}
\description{
\code{r2sd} allows you to rescale a numeric vector such that the
ensuing output has a mean of 0 and a standard deviation of .5. \code{r2sd_at} is a wrapper for
\code{mutate_at} and \code{rename_at} from \pkg{dplyr}. It both rescales the supplied vectors to
new vectors and renames the vectors to each have a prefix of \code{z_}.
}
\details{
By default, \code{na.rm} is set to TRUE. If you have missing data, the function will just pass
over them.
Gelman (2008) argues that rescaling by two standard deviations puts regression inputs
on roughly the same scale no matter their original scale. This allows for some honest, if preliminary,
assessment of relative effect sizes from the regression output. This does that, but
without requiring the \code{rescale} function from \pkg{arm}.
I'm trying to reduce the packages on which my workflow relies.
Importantly, I tend to rescale only the ordinal and interval inputs and leave the binary inputs as 0/1.
So, my \code{r2sd} function doesn't have any of the fancier if-else statements that Gelman's \code{rescale}
function has.
}
\examples{
x <- rnorm(100)
r2sd(x)
r2sd_at(mtcars, c("mpg", "hp", "disp"))
}
\references{
Gelman, Andrew. 2008. "Scaling Regression Inputs by Dividing by Two Standard Deviations." \emph{Statistics in Medicine} 27: 2865--2873.
}
|
library(dplyr)
library(readr)
# Record the start time so total runtime can be inspected (paired with
# `stop = Sys.time()` at the end of the script).
start <- Sys.time()
message(" \n Begin Cube iteration source file \n ")
# NOTE(review): setwd() inside a sourced script mutates global session state;
# `input.yaml` is assumed to be defined by the calling script -- confirm.
setwd(input.yaml$sim_dir)
# Load the pairwise similarity matrix (IDs label both rows and columns --
# presumably patients, given the *_scn2a naming; confirm upstream).
sim_score = read_csv(paste0(input.yaml$sim_dir,"sim_analyses/cube_sim_scn2a.csv"))
# Replace any Inf/NA similarity values with 0, column by column, and
# rebuild the result as a plain data.frame.
sim_score<- do.call(data.frame, lapply(sim_score, function(x) {
  replace(x, is.infinite(x) | is.na(x), 0)
})
)
# Zero the diagonal: self-similarity is excluded from downstream scoring.
diag(sim_score) <- 0
# Use the column names as both row and column labels so the matrix can be
# subset symmetrically by ID.
cor_names = names(sim_score)
names(sim_score) = cor_names
rownames(sim_score) = cor_names
###########################################################
###########################################################
# Estimate the mode of a numeric vector as the x-location of the peak of
# its kernel density estimate (default stats::density settings).
estimate_mode <- function(x){
  kde <- density(x)
  peak_idx <- which.max(kde$y)
  kde$x[[peak_idx]]
}
# Repeatedly draw random subsets of IDs and summarize their pairwise
# similarity scores.
#
# sim_score: square similarity data frame with the same IDs as both the
#            column names and row names
# num_pats:  number of IDs drawn per iteration
#
# Performs 100,000 draws; for each draw the num_pats x num_pats similarity
# submatrix is extracted, the diagonal (self-similarity) is excluded, and
# the median/mean/mode of the remaining entries are recorded.
# Returns a 100,000 x 3 data frame with columns median, mean, mode.
sim_pat_draw = function(sim_score, num_pats) {
  r_100k = as.data.frame(matrix(nrow = 100000, ncol = 3))
  names(r_100k) = c("median","mean", "mode")
  pat_vect = names(sim_score)
  for(n in 1: nrow(r_100k)){
    IDs = sample(pat_vect, num_pats)
    sub_sim = sim_score[(rownames(sim_score) %in% IDs), (names(sim_score) %in% IDs)]
    # Mark the diagonal with a sentinel so self-similarity entries can be
    # dropped after flattening. NOTE(review): a genuine similarity score
    # equal to 12345 would also be dropped -- assumed impossible; confirm.
    diag(sub_sim) = 12345
    vect_scores = unlist(sub_sim)
    vect_scores = vect_scores[-which(vect_scores == 12345)] # drop sentinel diagonal entries
    r_100k$median[n] = median(vect_scores)
    r_100k$mean[n] = mean(vect_scores)
    r_100k$mode[n] = estimate_mode(vect_scores) # estimate_mode defined above
  }
  return(r_100k)
}
message("\n The Cube iteration source filescript ran successfully. \n ")
# Record the end time (runtime = stop - start).
# NOTE(review): `stop` shadows base::stop() in the global environment;
# consider renaming (e.g. end_time) in a future change.
stop = Sys.time()
| /sim_analyses/cube/pat_100k_draw_npat_cube.R | no_license | helbig-lab/SCN2A | R | false | false | 1,355 | r | library(dplyr)
library(readr)
# Record the start time so total runtime can be inspected (paired with
# `stop = Sys.time()` at the end of the script).
start <- Sys.time()
message(" \n Begin Cube iteration source file \n ")
# NOTE(review): setwd() inside a sourced script mutates global session state;
# `input.yaml` is assumed to be defined by the calling script -- confirm.
setwd(input.yaml$sim_dir)
# Load the pairwise similarity matrix (IDs label both rows and columns --
# presumably patients, given the *_scn2a naming; confirm upstream).
sim_score = read_csv(paste0(input.yaml$sim_dir,"sim_analyses/cube_sim_scn2a.csv"))
# Replace any Inf/NA similarity values with 0, column by column, and
# rebuild the result as a plain data.frame.
sim_score<- do.call(data.frame, lapply(sim_score, function(x) {
  replace(x, is.infinite(x) | is.na(x), 0)
})
)
# Zero the diagonal: self-similarity is excluded from downstream scoring.
diag(sim_score) <- 0
# Use the column names as both row and column labels so the matrix can be
# subset symmetrically by ID.
cor_names = names(sim_score)
names(sim_score) = cor_names
rownames(sim_score) = cor_names
###########################################################
###########################################################
# Estimate the mode of a numeric vector as the x-location of the peak of
# its kernel density estimate (default stats::density settings).
estimate_mode <- function(x){
  kde <- density(x)
  peak_idx <- which.max(kde$y)
  kde$x[[peak_idx]]
}
# Repeatedly draw random subsets of IDs and summarize their pairwise
# similarity scores.
#
# sim_score: square similarity data frame with the same IDs as both the
#            column names and row names
# num_pats:  number of IDs drawn per iteration
#
# Performs 100,000 draws; for each draw the num_pats x num_pats similarity
# submatrix is extracted, the diagonal (self-similarity) is excluded, and
# the median/mean/mode of the remaining entries are recorded.
# Returns a 100,000 x 3 data frame with columns median, mean, mode.
sim_pat_draw = function(sim_score, num_pats) {
  r_100k = as.data.frame(matrix(nrow = 100000, ncol = 3))
  names(r_100k) = c("median","mean", "mode")
  pat_vect = names(sim_score)
  for(n in 1: nrow(r_100k)){
    IDs = sample(pat_vect, num_pats)
    sub_sim = sim_score[(rownames(sim_score) %in% IDs), (names(sim_score) %in% IDs)]
    # Mark the diagonal with a sentinel so self-similarity entries can be
    # dropped after flattening. NOTE(review): a genuine similarity score
    # equal to 12345 would also be dropped -- assumed impossible; confirm.
    diag(sub_sim) = 12345
    vect_scores = unlist(sub_sim)
    vect_scores = vect_scores[-which(vect_scores == 12345)] # drop sentinel diagonal entries
    r_100k$median[n] = median(vect_scores)
    r_100k$mean[n] = mean(vect_scores)
    r_100k$mode[n] = estimate_mode(vect_scores) # estimate_mode defined above
  }
  return(r_100k)
}
message("\n The Cube iteration source filescript ran successfully. \n ")
# Record the end time (runtime = stop - start).
# NOTE(review): `stop` shadows base::stop() in the global environment;
# consider renaming (e.g. end_time) in a future change.
stop = Sys.time()
|
# Chinook salmon life cycle model functions
library(dplyr)
library(tidyr)
## Functions ------------------------------------------------------------------------------------------------------------
# Draw one simulated set of environmental/management covariates for a run
# of n.yr years under the given flow scenario.
#
# Loads srfc_data.RData inside the function body so that parallel workers
# each get their own copies of the data objects it provides (flow.full,
# hat.release, weighted.gam, catch.esc, npgo.full), and attaches mgcv so
# predict() dispatches correctly on the fitted GAM.
#
# Returns a data.frame with one row per simulated year:
#   w    - annual flow from flow.sim()
#   ht   - hatchery releases (mean of historical totals, repeated)
#   hd   - hatchery release-distance effect simulated from the flow GAM,
#          rescaled via 2 + 7*(hd/445) (max ~ Coleman-to-bay distance;
#          Sturrock et al. 2019)
#   xt   - hatchery proportion of escapement (bootstrap of historical)
#   npgo - NPGO index (historical series repeated twice;
#          NOTE(review): its length is 2*nrow(npgo.full), which must equal
#          n.yr for the final data.frame() call to succeed -- confirm)
set.vars <- function(n.yr, scenario){
  load('srfc_data.RData') # load data, need to load in here for parallel computing
  library(mgcv)
  # Simulate climate/flow scenario
  w <- flow.sim(n.yr, scenario, flow.full) # flow
  # Hatchery releases - nonparametric bootstrap
  ht <- rep(mean(hat.release$total.release), n.yr)
  # Hatchery release site distance - function of flow
  newx <- data.frame(log.flow = log(w))
  sig2 <- weighted.gam$sig2 # estimated residual variance from the model
  newx <- transform(newx, newy = predict(weighted.gam, newx, type = 'response')) # predict distance
  newx <- transform(newx, ysim = rnorm(n.yr, mean = newy, sd = sqrt(sig2))) # simulate values
  hd <- newx$ysim
  hd <- 2 + 7 * (hd/445) # survival increase with trucking, maintain the same maximum value (ie slope), max represents distance from Coleman to the bay (Sturrock et al. 2019)
  # Hatchery escapement proportion - nonparametric bootstrap
  xt <- sample(catch.esc$hatchery/catch.esc$total.esc, n.yr, replace = TRUE)
  # NPGO - concatenate years
  npgo <- rep(npgo.full$npgo, times = 2)
  return(data.frame(w, ht, hd, xt, npgo))
}
# Simulate an annual flow series of length n.yr under a drought scenario.
#
# n.yr:      number of years to simulate. (Fixed: the original hard-coded a
#            100-year horizon and ignored this argument, so any caller with
#            n.yr != 100 got a wrong-length vector; behavior is unchanged
#            for n.yr = 100.)
# scenario:  'base', 'longer duration', 'more frequent', or 'more intense';
#            sets drought duration, inter-drought interval (Poisson lambda),
#            and the sampling weights on historical drought-year flows.
# flow.full: historical flows with columns `year` and `discharge`.
#
# Drought years sample from historical drought-year flows, other years from
# non-drought flows, with at most two consecutive non-drought years allowed
# below the historical mean threshold (10712). Returns a numeric vector of
# n.yr simulated annual flows.
flow.sim <- function(n.yr, scenario, flow.full){
  # # Save non-drought and drought values to sample with replacement
  nondrought.vals <- flow.full %>% filter((year>1992 & year<2007) | (year>2009 & year<2012) | (year>2016 & year<2020)) %>% mutate(flow = round(discharge)) %>% pull(flow)
  drought.vals <- flow.full %>% filter((year<1993) | (year>2006 & year<2010) | (year>2011 & year<2017)) %>% mutate(flow = round(discharge)) %>% pull(flow)
  drought.dur <- function(){return(sample(2:4, 1))} # draw number for drought duration (2-4 represents 3-5year droughts)
  if(scenario == 'base'){
    drought.fre <- 12 # frequency; interval between initial drought years Pois(lambda = 12 years)
    drought.pro <- rep(1, length(drought.vals))/length(drought.vals) # same probability of drought values
  } else if(scenario == 'longer duration'){
    drought.dur <- function(){return(sample(2:6, 1))} # overwrite drought duration function to select droughts 3-7 years
    drought.fre <- 12 # same as base
    drought.pro <- rep(1, length(drought.vals))/length(drought.vals) # same as base
  } else if(scenario == 'more frequent'){
    drought.fre <- 6 # Pois(lambda = 6 years)
    drought.pro <- rep(1, length(drought.vals))/length(drought.vals) # same as base
  } else if(scenario == 'more intense'){
    drought.fre <- 12 # same as base
    drought.pro <- c(rep(0.0705, 11), 0.154, 0.0705) # higher probability of drawing lowest value
  }
  # Define first drought event - draw number from a uniform distribution over the time interval (1, 12)
  tmp.drought <- round(runif(1, min=1, max=12))
  flow.array <- array(data = 0, dim = c(n.yr, 2)) # col 1 = flow, col 2 = drought flag
  tmp.drought.dur <- drought.dur()
  drought.record <- tmp.drought.dur
  # flag the first drought, truncated to the simulation horizon
  flow.array[(tmp.drought:min(tmp.drought + tmp.drought.dur, n.yr)), 2] <- 1
  # Simulate climate scenario
  for(i in seq_len(n.yr)){
    if((flow.array[i, 2] == 1 && i == 1) | (i > 1 && flow.array[i, 2] == 1 && flow.array[i-1, 2] == 0)){ # first year of a drought: schedule the next one and draw a drought value
      tmp.drought <- rpois(1, lambda = drought.fre) # draw from Poisson distribution
      tmp.drought.dur <- drought.dur() # draw duration of next drought
      drought.record <- c(drought.record, tmp.drought.dur)
      while(tmp.drought < drought.record[length(drought.record)-1]+2){ # enforce a gap after the current drought
        tmp.drought <- rpois(1, lambda = drought.fre)
      }
      if(between(i + tmp.drought, 0, n.yr)){ # check to make sure the next drought is within the bounds
        if(!between(i + tmp.drought + tmp.drought.dur, 0, n.yr)){ # truncate drought length to be within bounds
          flow.array[(i + tmp.drought):n.yr, 2] <- 1
        } else {
          flow.array[((i + tmp.drought):(i + tmp.drought + tmp.drought.dur)), 2] <- 1
        }
      }
      flow.array[i, 1] <- sample(drought.vals, size = 1, prob = drought.pro) # probability of drawing drought values
    } else if(flow.array[i, 2] == 1) { # drought
      flow.array[i, 1] <- sample(drought.vals, size = 1, prob = drought.pro) # probability of drawing drought values
    } else {
      # non drought
      flow.array[i, 1] <- sample(nondrought.vals, size = 1)
      # make sure not more than two consecutive years below historic mean threshold
      if(i > 2 && (flow.array[i-1, 1] < 10712 & flow.array[i-1, 2] == 0) && (flow.array[i-2, 1] < 10712 & flow.array[i-2, 2] == 0)){
        while(flow.array[i, 1] < 10712){
          flow.array[i, 1] <- sample(nondrought.vals, size = 1)
        }
      } else if((i < n.yr && flow.array[i+1, 2] == 1) | (i > 1 && flow.array[i-1, 2] == 1)) { # years adjacent to a drought must be above threshold
        while(flow.array[i, 1] < 10712){
          flow.array[i, 1] <- sample(nondrought.vals, size = 1)
        }
      }
    }
  }
  return(flow.array[,1])
}
# Spawner-recruit relationship: recruits = theta[1]*g / (1 + theta[2]*g*x).
# theta: length-2 numeric parameter vector; g: input abundance;
# x: density-dependence covariate. Vectorized over g and x.
srr <- function(theta, g, x){
  numerator <- theta[1] * g
  denominator <- 1 + theta[2] * g * x
  numerator / denominator
}
# Apply fishery impact (harvest) rates to ocean abundance.
#
# O:  vector of ocean abundances
# i:  scalar impact (exploitation) rate
# nu: vector of vulnerability multipliers; duplicated so it indexes
#     across both halves of O
#
# Returns a vector the same length as O with per-element harvest
# I.out[x] = O[x] * i * nu[x]. Prints a diagnostic when the effective
# harvest rate falls outside [0, 1].
fishery.impact <- function(O, i, nu){
  # Only used in parameter optimization
  tmp.nu <- c(nu, nu)
  I.out <- rep(NA, length(O))
  for(x in seq_along(I.out)){  # seq_along (not 1:length) is safe for empty O
    rate <- i * tmp.nu[x]      # note: (1 - (1 - i)) in the original simplifies to i
    if(rate > 1){
      print('Harvest rate greater than 1')
    } else if (rate < 0){
      print('Harvest rate less than 0')  # fixed: message previously said "less than 1"
    }
    I.out[x] <- O[x] * i * tmp.nu[x]
  }
  return(I.out)
}
# Stochastic juvenile in-river survival as a function of flow w.
#
# Mean survival follows the stepwise flow-survival relationship of
# Michel et al. (2021): flat survival "steps" over flow bands, with linear
# interpolation across the transition bands between adjacent steps. The
# band-specific sd is applied as Normal noise on the logit scale
# (transition bands use the sum of the two neighboring sds -- presumably a
# conservative variance choice; confirm against Michel et al. 2021).
#
# Returns one simulated survival probability in (0, 1), or NA when w is NA.
juv.survival <- function(w){
  if(is.na(w)){
    return(NA)
  }
  if(w < (4045)){ #1: 4045 is 95% of the upper limit of this step in Michel et al. (2021)
    surv <- 0.03 # flat step from Michel et al. 2021
    sd <- 0.276
  } else if(w >= 4045 & w < 4795) { #2: transition band between steps 1 and 3
    surv <- (((0.189 - 0.03) / (4795 - 4045)) * w) + (0.189 - (((0.189 - 0.03) / (4795 - 4045)) * 4795)) # linear model to interpolate steps
    sd <- sum(c(0.276, 0.094))
  } else if(w >= 4795 & w < 10175) { #3
    surv <- 0.189 # flat step from Michel et al. 2021
    sd <- 0.094
  } else if(w >= 10175 & w < 11856) { #4 (0.508 - 0.189)
    surv <- (((0.508 - 0.189) / (11856 - 10175)) * w) + (0.508 - (((0.508 - 0.189) / (11856 - 10175)) * 11856)) # linear model to interpolate steps
    sd <- sum(c(0.094, 0.082))
  } else if(w >= 11856 & w < 21727) { #5
    surv <- 0.508 # flat step from Michel et al. 2021
    sd <- 0.082
  } else if(w >= 21727 & w < 24016){ # transition band between steps 5 and 7
    surv <- (((0.353 - 0.508) / (24016 - 21727)) * w) + (0.353 - (((0.353 - 0.508) / (24016 - 21727)) * 24016)) # linear model to interpolate steps
    sd <- sum(c(0.082, 0.088))
  } else if(w >= 24016){
    surv <- 0.353 # flat step from Michel et al. 2021 0.353
    sd <- 0.088 # 0.088
  }
  river.surv <- rnorm(n = 1, mean = log(surv/(1-surv)), sd = sd) # logit function for mean
  return(exp(river.surv)/(1 + exp(river.surv))) # return inverse logit
}
# Harvest control rule: map a scalar abundance index SI to an allowable
# exploitation rate ER.
#
# Fixed reference points:
#   Fabc - maximum allowable exploitation rate (0.70)
#   Smsy - escapement at maximum sustainable yield (122,000)
#   MSST - minimum stock size threshold (91,500)
# The breakpoints A < MSST < B < C < D define a piecewise-linear rule.
#
# Returns a scalar ER in [0, Fabc].
# (Fixed: a negative, NA, or non-scalar SI previously fell through every
# branch and raised the obscure error "object 'ER' not found"; inputs are
# now validated up front.)
control.rule <- function(SI){
  stopifnot(length(SI) == 1, is.numeric(SI), !is.na(SI), SI >= 0)
  # fixed
  Fabc <- 0.70
  Smsy <- 122000
  MSST <- 91500
  A <- MSST / 2
  B <- (MSST + Smsy) / 2
  C <- Smsy / (1 - 0.25)
  D <- Smsy / (1 - Fabc)
  if(SI >= 0 & SI <= A){
    ER <- 0.10 * (SI / A)  # ramp from 0 up to 10%
  } else if(SI > A & SI <= MSST){
    ER <- 0.10
  } else if(SI > MSST & SI <= B){
    ER <- 0.10 + (0.15*((SI - MSST)/(B - MSST)))  # linear ramp 10% -> 25%
  } else if(SI > B & SI <= C){
    ER <- 0.25
  } else if(SI > C & SI <= D){
    ER <- (SI - Smsy) / SI  # harvest the surplus above Smsy
  } else if(SI > D){
    ER <- Fabc
  }
  return(ER)
}
| /operating_model_functions.R | no_license | CVFC-MSE/age_climate_model | R | false | false | 7,504 | r | # Chinook salmon life cycle model functions
library(dplyr)
library(tidyr)
## Functions ------------------------------------------------------------------------------------------------------------
# Draw one simulated set of environmental/management covariates for a run
# of n.yr years under the given flow scenario.
#
# Loads srfc_data.RData inside the function body so that parallel workers
# each get their own copies of the data objects it provides (flow.full,
# hat.release, weighted.gam, catch.esc, npgo.full), and attaches mgcv so
# predict() dispatches correctly on the fitted GAM.
#
# Returns a data.frame with one row per simulated year:
#   w    - annual flow from flow.sim()
#   ht   - hatchery releases (mean of historical totals, repeated)
#   hd   - hatchery release-distance effect simulated from the flow GAM,
#          rescaled via 2 + 7*(hd/445) (max ~ Coleman-to-bay distance;
#          Sturrock et al. 2019)
#   xt   - hatchery proportion of escapement (bootstrap of historical)
#   npgo - NPGO index (historical series repeated twice;
#          NOTE(review): its length is 2*nrow(npgo.full), which must equal
#          n.yr for the final data.frame() call to succeed -- confirm)
set.vars <- function(n.yr, scenario){
  load('srfc_data.RData') # load data, need to load in here for parallel computing
  library(mgcv)
  # Simulate climate/flow scenario
  w <- flow.sim(n.yr, scenario, flow.full) # flow
  # Hatchery releases - nonparametric bootstrap
  ht <- rep(mean(hat.release$total.release), n.yr)
  # Hatchery release site distance - function of flow
  newx <- data.frame(log.flow = log(w))
  sig2 <- weighted.gam$sig2 # estimated residual variance from the model
  newx <- transform(newx, newy = predict(weighted.gam, newx, type = 'response')) # predict distance
  newx <- transform(newx, ysim = rnorm(n.yr, mean = newy, sd = sqrt(sig2))) # simulate values
  hd <- newx$ysim
  hd <- 2 + 7 * (hd/445) # survival increase with trucking, maintain the same maximum value (ie slope), max represents distance from Coleman to the bay (Sturrock et al. 2019)
  # Hatchery escapement proportion - nonparametric bootstrap
  xt <- sample(catch.esc$hatchery/catch.esc$total.esc, n.yr, replace = TRUE)
  # NPGO - concatenate years
  npgo <- rep(npgo.full$npgo, times = 2)
  return(data.frame(w, ht, hd, xt, npgo))
}
# Simulate an annual flow series of length n.yr under a drought scenario.
#
# n.yr:      number of years to simulate. (Fixed: the original hard-coded a
#            100-year horizon and ignored this argument, so any caller with
#            n.yr != 100 got a wrong-length vector; behavior is unchanged
#            for n.yr = 100.)
# scenario:  'base', 'longer duration', 'more frequent', or 'more intense';
#            sets drought duration, inter-drought interval (Poisson lambda),
#            and the sampling weights on historical drought-year flows.
# flow.full: historical flows with columns `year` and `discharge`.
#
# Drought years sample from historical drought-year flows, other years from
# non-drought flows, with at most two consecutive non-drought years allowed
# below the historical mean threshold (10712). Returns a numeric vector of
# n.yr simulated annual flows.
flow.sim <- function(n.yr, scenario, flow.full){
  # # Save non-drought and drought values to sample with replacement
  nondrought.vals <- flow.full %>% filter((year>1992 & year<2007) | (year>2009 & year<2012) | (year>2016 & year<2020)) %>% mutate(flow = round(discharge)) %>% pull(flow)
  drought.vals <- flow.full %>% filter((year<1993) | (year>2006 & year<2010) | (year>2011 & year<2017)) %>% mutate(flow = round(discharge)) %>% pull(flow)
  drought.dur <- function(){return(sample(2:4, 1))} # draw number for drought duration (2-4 represents 3-5year droughts)
  if(scenario == 'base'){
    drought.fre <- 12 # frequency; interval between initial drought years Pois(lambda = 12 years)
    drought.pro <- rep(1, length(drought.vals))/length(drought.vals) # same probability of drought values
  } else if(scenario == 'longer duration'){
    drought.dur <- function(){return(sample(2:6, 1))} # overwrite drought duration function to select droughts 3-7 years
    drought.fre <- 12 # same as base
    drought.pro <- rep(1, length(drought.vals))/length(drought.vals) # same as base
  } else if(scenario == 'more frequent'){
    drought.fre <- 6 # Pois(lambda = 6 years)
    drought.pro <- rep(1, length(drought.vals))/length(drought.vals) # same as base
  } else if(scenario == 'more intense'){
    drought.fre <- 12 # same as base
    drought.pro <- c(rep(0.0705, 11), 0.154, 0.0705) # higher probability of drawing lowest value
  }
  # Define first drought event - draw number from a uniform distribution over the time interval (1, 12)
  tmp.drought <- round(runif(1, min=1, max=12))
  flow.array <- array(data = 0, dim = c(n.yr, 2)) # col 1 = flow, col 2 = drought flag
  tmp.drought.dur <- drought.dur()
  drought.record <- tmp.drought.dur
  # flag the first drought, truncated to the simulation horizon
  flow.array[(tmp.drought:min(tmp.drought + tmp.drought.dur, n.yr)), 2] <- 1
  # Simulate climate scenario
  for(i in seq_len(n.yr)){
    if((flow.array[i, 2] == 1 && i == 1) | (i > 1 && flow.array[i, 2] == 1 && flow.array[i-1, 2] == 0)){ # first year of a drought: schedule the next one and draw a drought value
      tmp.drought <- rpois(1, lambda = drought.fre) # draw from Poisson distribution
      tmp.drought.dur <- drought.dur() # draw duration of next drought
      drought.record <- c(drought.record, tmp.drought.dur)
      while(tmp.drought < drought.record[length(drought.record)-1]+2){ # enforce a gap after the current drought
        tmp.drought <- rpois(1, lambda = drought.fre)
      }
      if(between(i + tmp.drought, 0, n.yr)){ # check to make sure the next drought is within the bounds
        if(!between(i + tmp.drought + tmp.drought.dur, 0, n.yr)){ # truncate drought length to be within bounds
          flow.array[(i + tmp.drought):n.yr, 2] <- 1
        } else {
          flow.array[((i + tmp.drought):(i + tmp.drought + tmp.drought.dur)), 2] <- 1
        }
      }
      flow.array[i, 1] <- sample(drought.vals, size = 1, prob = drought.pro) # probability of drawing drought values
    } else if(flow.array[i, 2] == 1) { # drought
      flow.array[i, 1] <- sample(drought.vals, size = 1, prob = drought.pro) # probability of drawing drought values
    } else {
      # non drought
      flow.array[i, 1] <- sample(nondrought.vals, size = 1)
      # make sure not more than two consecutive years below historic mean threshold
      if(i > 2 && (flow.array[i-1, 1] < 10712 & flow.array[i-1, 2] == 0) && (flow.array[i-2, 1] < 10712 & flow.array[i-2, 2] == 0)){
        while(flow.array[i, 1] < 10712){
          flow.array[i, 1] <- sample(nondrought.vals, size = 1)
        }
      } else if((i < n.yr && flow.array[i+1, 2] == 1) | (i > 1 && flow.array[i-1, 2] == 1)) { # years adjacent to a drought must be above threshold
        while(flow.array[i, 1] < 10712){
          flow.array[i, 1] <- sample(nondrought.vals, size = 1)
        }
      }
    }
  }
  return(flow.array[,1])
}
# Spawner-recruit relationship: recruits = theta[1]*g / (1 + theta[2]*g*x).
# theta: length-2 numeric parameter vector; g: input abundance;
# x: density-dependence covariate. Vectorized over g and x.
srr <- function(theta, g, x){
  numerator <- theta[1] * g
  denominator <- 1 + theta[2] * g * x
  numerator / denominator
}
# Apply fishery impact (harvest) rates to ocean abundance.
#
# O:  vector of ocean abundances
# i:  scalar impact (exploitation) rate
# nu: vector of vulnerability multipliers; duplicated so it indexes
#     across both halves of O
#
# Returns a vector the same length as O with per-element harvest
# I.out[x] = O[x] * i * nu[x]. Prints a diagnostic when the effective
# harvest rate falls outside [0, 1].
fishery.impact <- function(O, i, nu){
  # Only used in parameter optimization
  tmp.nu <- c(nu, nu)
  I.out <- rep(NA, length(O))
  for(x in seq_along(I.out)){  # seq_along (not 1:length) is safe for empty O
    rate <- i * tmp.nu[x]      # note: (1 - (1 - i)) in the original simplifies to i
    if(rate > 1){
      print('Harvest rate greater than 1')
    } else if (rate < 0){
      print('Harvest rate less than 0')  # fixed: message previously said "less than 1"
    }
    I.out[x] <- O[x] * i * tmp.nu[x]
  }
  return(I.out)
}
# Stochastic juvenile in-river survival as a function of flow w.
#
# Mean survival follows the stepwise flow-survival relationship of
# Michel et al. (2021): flat survival "steps" over flow bands, with linear
# interpolation across the transition bands between adjacent steps. The
# band-specific sd is applied as Normal noise on the logit scale
# (transition bands use the sum of the two neighboring sds -- presumably a
# conservative variance choice; confirm against Michel et al. 2021).
#
# Returns one simulated survival probability in (0, 1), or NA when w is NA.
juv.survival <- function(w){
  if(is.na(w)){
    return(NA)
  }
  if(w < (4045)){ #1: 4045 is 95% of the upper limit of this step in Michel et al. (2021)
    surv <- 0.03 # flat step from Michel et al. 2021
    sd <- 0.276
  } else if(w >= 4045 & w < 4795) { #2: transition band between steps 1 and 3
    surv <- (((0.189 - 0.03) / (4795 - 4045)) * w) + (0.189 - (((0.189 - 0.03) / (4795 - 4045)) * 4795)) # linear model to interpolate steps
    sd <- sum(c(0.276, 0.094))
  } else if(w >= 4795 & w < 10175) { #3
    surv <- 0.189 # flat step from Michel et al. 2021
    sd <- 0.094
  } else if(w >= 10175 & w < 11856) { #4 (0.508 - 0.189)
    surv <- (((0.508 - 0.189) / (11856 - 10175)) * w) + (0.508 - (((0.508 - 0.189) / (11856 - 10175)) * 11856)) # linear model to interpolate steps
    sd <- sum(c(0.094, 0.082))
  } else if(w >= 11856 & w < 21727) { #5
    surv <- 0.508 # flat step from Michel et al. 2021
    sd <- 0.082
  } else if(w >= 21727 & w < 24016){ # transition band between steps 5 and 7
    surv <- (((0.353 - 0.508) / (24016 - 21727)) * w) + (0.353 - (((0.353 - 0.508) / (24016 - 21727)) * 24016)) # linear model to interpolate steps
    sd <- sum(c(0.082, 0.088))
  } else if(w >= 24016){
    surv <- 0.353 # flat step from Michel et al. 2021 0.353
    sd <- 0.088 # 0.088
  }
  river.surv <- rnorm(n = 1, mean = log(surv/(1-surv)), sd = sd) # logit function for mean
  return(exp(river.surv)/(1 + exp(river.surv))) # return inverse logit
}
# Harvest control rule: map a scalar abundance index SI to an allowable
# exploitation rate ER.
#
# Fixed reference points:
#   Fabc - maximum allowable exploitation rate (0.70)
#   Smsy - escapement at maximum sustainable yield (122,000)
#   MSST - minimum stock size threshold (91,500)
# The breakpoints A < MSST < B < C < D define a piecewise-linear rule.
#
# Returns a scalar ER in [0, Fabc].
# (Fixed: a negative, NA, or non-scalar SI previously fell through every
# branch and raised the obscure error "object 'ER' not found"; inputs are
# now validated up front.)
control.rule <- function(SI){
  stopifnot(length(SI) == 1, is.numeric(SI), !is.na(SI), SI >= 0)
  # fixed
  Fabc <- 0.70
  Smsy <- 122000
  MSST <- 91500
  A <- MSST / 2
  B <- (MSST + Smsy) / 2
  C <- Smsy / (1 - 0.25)
  D <- Smsy / (1 - Fabc)
  if(SI >= 0 & SI <= A){
    ER <- 0.10 * (SI / A)  # ramp from 0 up to 10%
  } else if(SI > A & SI <= MSST){
    ER <- 0.10
  } else if(SI > MSST & SI <= B){
    ER <- 0.10 + (0.15*((SI - MSST)/(B - MSST)))  # linear ramp 10% -> 25%
  } else if(SI > B & SI <= C){
    ER <- 0.25
  } else if(SI > C & SI <= D){
    ER <- (SI - Smsy) / SI  # harvest the surplus above Smsy
  } else if(SI > D){
    ER <- Fabc
  }
  return(ER)
}
|
## This function gets and sets the value of a matrix and gets and sets the value of the inverse of that matrix
## Create a special "matrix" object: a list of closures that hold a matrix
## together with a cached copy of its inverse. Setting a new matrix clears
## the cache; setsolve/getsolve write and read the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    ## replace the stored matrix and invalidate any cached inverse
    x <<- new_matrix
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setsolve <- function(inverse) {
    ## store a freshly computed inverse in the enclosing environment
    cached_inverse <<- inverse
  }
  getsolve <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setsolve = setsolve,
       getsolve = getsolve)
}
## This function returns a matrix that is the inverse of 'x'
## Return the inverse of the special "matrix" created by makeCacheMatrix.
## On a cache hit the stored inverse is returned (after a message); on a
## miss the inverse is computed with solve(), cached, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()
  if (is.null(cached)) {
    ## cache miss: compute, store, and return the inverse
    inv <- solve(x$get(), ...)
    x$setsolve(inv)
    inv
  } else {
    message("getting cached data")
    cached
  }
}
| /cachematrix.R | no_license | dsantibanez/ProgrammingAssignment2 | R | false | false | 1,516 | r | ## This function gets and sets the value of a matrix and gets and sets the value of the inverse of that matrix
## Create a special "matrix" object: a list of closures that hold a matrix
## together with a cached copy of its inverse. Setting a new matrix clears
## the cache; setsolve/getsolve write and read the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    ## replace the stored matrix and invalidate any cached inverse
    x <<- new_matrix
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setsolve <- function(inverse) {
    ## store a freshly computed inverse in the enclosing environment
    cached_inverse <<- inverse
  }
  getsolve <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setsolve = setsolve,
       getsolve = getsolve)
}
## This function returns a matrix that is the inverse of 'x'
## Return the inverse of the special "matrix" created by makeCacheMatrix.
## On a cache hit the stored inverse is returned (after a message); on a
## miss the inverse is computed with solve(), cached, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()
  if (is.null(cached)) {
    ## cache miss: compute, store, and return the inverse
    inv <- solve(x$get(), ...)
    x$setsolve(inv)
    inv
  } else {
    message("getting cached data")
    cached
  }
}
|
#' Generate a stratified subsample for a vector given a grouping
#'
#' Use this function to compute LISI scores of one or more labels.
#'
#' @param indexer A vector containing cell barcodes/labels to subsample
#' @param grouping A vector containg a groups to stratify by ( same size as indexer)
#' @param sample_proportion proportion to sample data (default: .1)
#' @param min_count Minimum number of samples in a group to keep
#' @param seed seed value for set.seed
#'
#' @return A subsampled vector generated from indexer
#' @export
# Draw a stratified subsample of `indexer`, sampling sample_proportion of
# the entries within each level of `grouping`. Groups with strictly more
# than min_count members are kept; set.seed(seed) makes the draw
# reproducible (note: this reseeds the session's global RNG stream).
stratified_sample <- function(indexer, grouping, sample_proportion = .1, min_count = 0, seed = 424242){
  df <- data.frame(indexer = indexer, label = grouping)
  dfl <- split(df, df$label)
  group_sizes <- vapply(dfl, nrow, integer(1))  # renamed: `lengths` shadowed base::lengths
  dfl <- dfl[group_sizes > min_count]           # keep groups strictly larger than min_count
  set.seed(seed)
  # lapply (not sapply): sapply silently simplifies to a matrix when every
  # group yields the same sample size, changing the intermediate structure.
  indices <- lapply(dfl, function(x) {
    size <- as.integer(nrow(x) * sample_proportion)  # floors to 0 for tiny groups
    sample(x$indexer, size)
  })
  names(indices) <- NULL
  idx <- unlist(indices)
  return(idx)
}
# Emit `msg` via message() unless `quiet` is TRUE; returns NULL
# (invisibly) in either case.
qmessage <- function(msg, quiet){
  if (quiet) {
    invisible(NULL)
  } else {
    message(msg)
  }
}
#' Running All Metrics
#' @param reduction A matrix of reduced dimensions
#' @param metadata A data.frame containing information like batch, cell type, etc
#' @param batch_key Name of column in metadata corresponding to batch
#' @param label1_key Name of column in metadata corresponding to primary cell label, eg Cell type
#' @param label2_key Name of column in metadata corresponding to secondary cell label, eg cluster identity
#' @param run_name (optional) Name associated with this run; a random string is generated when NULL.
#' @param sil_width_prop (optional) proportion of data to use for silhouette width
#' @param sil_width_group_key (optional) which column in metadata to use for stratified sampling of data
#'
#' @return A one-row data.frame of metric scores (ARI, NMI, LISI, silhouette width) for this run
#' @export
# Run label- and batch-based integration metrics on a reduction: mean LISI,
# silhouette width (optionally on a stratified subsample), and ARI/NMI over
# the supplied keys. Returns a single one-row data.frame tagged with
# run_name.
run_all_metrics <- function(reduction, metadata, batch_key, label1_key, label2_key, run_name=NULL,
                            sil_width_prop=1, sil_width_group_key=NULL, quietly=FALSE){
  # Fixed: the default was sample(letters, 12, replace = TRUE), a length-12
  # character vector, which recycled the scores data.frame to 12 rows and
  # broke the final cbind; collapse it into a single random string.
  if(is.null(run_name)) run_name <- paste(sample(letters, 12, replace = TRUE), collapse = "")
  metadata <- as.data.frame(metadata)
  keys <- c(batch_key, label1_key, label2_key)
  qmessage('Calculating LISI...', quietly)
  lisi <- lisi(reduction, metadata, keys)
  lisi <- lapply(lisi, mean)              # mean LISI per key
  names(lisi) <- paste0('lisi_', keys)
  lisi <- as.data.frame(lisi)
  qmessage('Done\nCalculating Silhouette width...', quietly)  # typo "Silhoette" fixed
  if(sil_width_prop < 1){
    # stratify the subsample by label1 unless a grouping key was given
    if(is.null(sil_width_group_key)) sil_width_group_key <- label1_key
    # Fixed: sil_width_prop is now actually forwarded to the subsampler
    # (previously the stratified_sample default of 0.1 was always used).
    idx <- stratified_sample(rownames(reduction), metadata[[sil_width_group_key]],
                             sample_proportion = sil_width_prop)
    rd_ds <- reduction[idx, ]
    md_ds <- metadata[idx, ]
    sw <- silhouette_width(rd_ds, md_ds, keys)
  } else{
    sw <- silhouette_width(reduction, metadata, keys)
  }
  names(sw) <- paste0('silWidth_', keys)
  sw <- as.data.frame(t(sw))
  qmessage('Done\nCalculating ARI...', quietly)
  ari_batch <- ari(metadata[[batch_key]], metadata[[label1_key]])
  ari_label <- ari(metadata[[label1_key]], metadata[[label2_key]])
  qmessage('Done\nCalculating NMI...', quietly)
  nmi_batch <- nmi(metadata[[batch_key]], metadata[[label1_key]])
  nmi_label <- nmi(metadata[[label1_key]], metadata[[label2_key]])
  qmessage('Done', quietly)
  scores <- data.frame(run = run_name,
                       #ari_batch= ari_batch,
                       ari_label = ari_label,
                       #nmi_batch= nmi_batch,
                       nmi_label = nmi_label)
  scores <- do.call(cbind, list(scores, lisi, sw))
  return(scores)
}
| /R/helpers.R | no_license | Varix/scPOP | R | false | false | 3,554 | r | #' Generate a stratified subsample for a vector given a grouping
#'
#' Use this function to compute LISI scores of one or more labels.
#'
#' @param indexer A vector containing cell barcodes/labels to subsample
#' @param grouping A vector containg a groups to stratify by ( same size as indexer)
#' @param sample_proportion proportion to sample data (default: .1)
#' @param min_count Minimum number of samples in a group to keep
#' @param seed seed value for set.seed
#'
#' @return A subsampled vector generated from indexer
#' @export
stratified_sample <- function(indexer, grouping, sample_proportion = .1, min_count = 0, seed = 424242) {
  # Partition the items into one data.frame per stratum.
  strata <- split(data.frame(indexer = indexer, label = grouping), grouping)
  # Keep only strata with more than `min_count` members.
  strata <- strata[vapply(strata, nrow, integer(1)) > min_count]
  # Fix the RNG so repeated calls reproduce the same subsample.
  set.seed(seed)
  # Draw the requested proportion (rounded down) from each stratum.
  picked <- lapply(strata, function(stratum) {
    n_take <- as.integer(nrow(stratum) * sample_proportion)
    sample(stratum$indexer, n_take)
  })
  # Flatten to a single unnamed vector of selected items.
  unlist(picked, use.names = FALSE)
}
# Conditionally emit a status message: prints `msg` via message() unless
# `quiet` is TRUE, in which case nothing is emitted and NULL is returned.
qmessage <- function(msg, quiet) {
  if (quiet) {
    invisible(NULL)
  } else {
    message(msg)
  }
}
#' Running All Metrics
#' @param reduction A matrix of reduced dimensions
#' @param metadata A data.frame containing information like batch, cell type, etc
#' @param batch_key Name of column in metadata corresponding to batch
#' @param label1_key Name of column in metadata corresponding to primary cell label, eg Cell type
#' @param label2_key Name of column in metadata corresponding to secondary cell label, eg cluster identity
#' @param run_name (optional) Name associated with this run of the metrics.
#' @param sil_width_prop (optional) proportion of data to use for silhouette width
#' @param sil_width_group_key (optional) which column in metadata to use for stratified sampling of data
#'
#' @return A one-row data.frame of metric scores (ARI, NMI, LISI, silhouette width) for this run
#' @export
run_all_metrics <- function(reduction, metadata, batch_key, label1_key, label2_key, run_name=NULL,
                            sil_width_prop=1, sil_width_group_key=NULL, quietly=FALSE){
  # Default run name: collapse to a single 12-letter string.  The previous
  # sample(letters, 12, ...) alone returned a length-12 vector, which
  # silently recycled every metric into 12 identical rows of the result.
  if(is.null(run_name)) run_name <- paste0(sample(letters, 12, replace = TRUE), collapse = "")
  metadata <- as.data.frame(metadata)
  keys <- c(batch_key, label1_key, label2_key)

  # LISI: per-key mean over all cells.
  qmessage('Calculating LISI...', quietly)
  lisi <- lisi(reduction, metadata, keys)
  lisi <- lapply(lisi, mean)
  names(lisi) <- paste0('lisi_', keys)
  lisi <- as.data.frame(lisi)

  # Silhouette width, optionally on a stratified subsample for speed.
  qmessage('Done\nCalculating Silhoette width...', quietly)
  if(sil_width_prop < 1){
    if(is.null(sil_width_group_key)) sil_width_group_key <- label1_key
    # Forward the requested proportion; it was previously dropped, so the
    # stratified_sample() default of 0.1 was always used regardless of input.
    idx <- stratified_sample(rownames(reduction), metadata[[sil_width_group_key]],
                             sample_proportion = sil_width_prop)
    rd_ds <- reduction[idx, ]
    md_ds <- metadata[idx, ]
    sw <- silhouette_width(rd_ds, md_ds, keys)
  } else{
    sw <- silhouette_width(reduction, metadata, keys)
  }
  names(sw) <- paste0('silWidth_', keys)
  sw <- as.data.frame(t(sw))

  # Clustering agreement metrics.  The *_batch variants are computed but
  # deliberately excluded from the returned scores (see commented lines).
  qmessage('Done\nCalculating ARI...', quietly)
  ari_batch <- ari(metadata[[batch_key]], metadata[[label1_key]])
  ari_label <- ari(metadata[[label1_key]], metadata[[label2_key]])
  qmessage('Done\nCalculating NMI...', quietly)
  nmi_batch <- nmi(metadata[[batch_key]], metadata[[label1_key]])
  nmi_label <- nmi(metadata[[label1_key]], metadata[[label2_key]])
  qmessage('Done', quietly)

  scores <- data.frame(run = run_name,
                       #ari_batch= ari_batch,
                       ari_label = ari_label,
                       #nmi_batch= nmi_batch,
                       nmi_label = nmi_label)
  scores <- do.call(cbind, list(scores, lisi, sw))
  return(scores)
}
|
# Header ------------------------------------------------------------------
# Created: 1/23/2016
# Author: Joshua Slocum
# Purpose: Shiny app to view the model log (header text presumably left over from a logging script)
# Initialize --------------------------------------------------------------
library(shiny)
# Read the model log once at app startup; rows added later need a restart.
log_data <- read.csv("./model_log.csv")
# UI ----------------------------------------------------------------------
# Single-page layout: empty sidebar placeholder plus the log table.
ui <- fluidPage(
   # Application title
   titlePanel("Model Log Viewer"),
   # Sidebar kept as an empty placeholder for future filter controls
   sidebarLayout(
      sidebarPanel(
      ),
      # Main panel: interactive (sortable/searchable) view of the log
      mainPanel(
         DT::dataTableOutput("log_table")
      )
   )
)
# Server ------------------------------------------------------------------
# Server: expose the preloaded log data as a DT table.
server <- function(input, output) {
   output$log_table <- DT::renderDataTable(log_data)
}
# Run the application
shinyApp(ui = ui, server = server)
| /app/model-log-viewer/app.R | permissive | JoshuaSlocum/model-log | R | false | false | 1,036 | r | # Header ------------------------------------------------------------------
# Created: 1/23/2016
# Author: Joshua Slocum
# Purpose: Shiny app to view the model log (header text presumably left over from a logging script)
# Initialize --------------------------------------------------------------
library(shiny)
# Read the model log once at app startup; rows added later need a restart.
log_data <- read.csv("./model_log.csv")
# UI ----------------------------------------------------------------------
# Single-page layout: empty sidebar placeholder plus the log table.
ui <- fluidPage(
   # Application title
   titlePanel("Model Log Viewer"),
   # Sidebar kept as an empty placeholder for future filter controls
   sidebarLayout(
      sidebarPanel(
      ),
      # Main panel: interactive (sortable/searchable) view of the log
      mainPanel(
         DT::dataTableOutput("log_table")
      )
   )
)
# Server ------------------------------------------------------------------
# Server: expose the preloaded log data as a DT table.
server <- function(input, output) {
   output$log_table <- DT::renderDataTable(log_data)
}
# Run the application
shinyApp(ui = ui, server = server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sfcr_baseline.R
\name{.sfcr_eqs_check}
\alias{.sfcr_eqs_check}
\title{Check for missing endogenous variables}
\usage{
.sfcr_eqs_check(m, equations)
}
\arguments{
\item{m}{The initialized matrix obtained with \code{.make_matrix()}.}
\item{equations}{Prepared equations with \code{.prep_equations()}.}
}
\description{
Check for missing endogenous variables
}
\author{
João Macalós
}
\keyword{internal}
| /man/dot-sfcr_eqs_check.Rd | permissive | markushlang/sfcr | R | false | true | 481 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sfcr_baseline.R
\name{.sfcr_eqs_check}
\alias{.sfcr_eqs_check}
\title{Check for missing endogenous variables}
\usage{
.sfcr_eqs_check(m, equations)
}
\arguments{
\item{m}{The initialized matrix obtained with \code{.make_matrix()}.}
\item{equations}{Prepared equations with \code{.prep_equations()}.}
}
\description{
Check for missing endogenous variables
}
\author{
João Macalós
}
\keyword{internal}
|
library("RColorBrewer")
library("DESeq2")
library("dendsort")
library("ComplexHeatmap")
library("circlize")
source("nmi.r")
setwd("../")
args = commandArgs(trailingOnly=TRUE)
resultdir = "result.rsem.TET.instance/"
print (args)
if (length(args)>0) {
resultdir = args[1]
if (substr(resultdir, nchar(resultdir), nchar(resultdir)) != "/") {
resultdir = paste0(resultdir, "/")
}
b_noovp = args[2]
}
print(resultdir)
setwd(resultdir)
print(b_noovp)
load(file = "ddsNew.RData")
load(file = "VSTcnts.DEGES.RData")
# filter ESCA STAD
cdata <- colData(ddsnew)
sampleidx = rep(TRUE, ncol(VSTcnts))
ESCASTADidx = (cdata$tissue == "ESCA") | (cdata$tissue == "STAD")
sampleidx = !ESCASTADidx
VSTcnts = VSTcnts[,sampleidx]
coldataNew = colData(ddsnew)
coldataNew = coldataNew[sampleidx,]
print(dim(VSTcnts))
TEs <- grepl(":", rownames(VSTcnts))
geneVSTcnts <- VSTcnts[!TEs,]
TEcnts <- VSTcnts[TEs,]
if (grepl("instance", resultdir) && (! is.na(b_noovp)) && (b_noovp == "noovp")) {
filters = read.table("gene.noovp.100000.TEs.bed", sep="\t")
filternames = as.character(filters[,4])
print(dim(TEcnts))
TEnames <- rownames(TEcnts)
TElocus = matrix(unlist(strsplit(TEnames, ":")), ncol=5, byrow=TRUE)[,1]
rownames(TEcnts) = TElocus
names(TEnames) = TElocus
filteredTEcnts <- TEcnts[filternames,]
print(dim(filteredTEcnts))
filteredTEnames <- TEnames[filternames]
rownames(filteredTEcnts) = filteredTEnames
#rm(VSTcnts)
#VSTcnts = rbind(geneVSTcnts, filteredTEcnts)
} else {
filteredTEcnts = TEcnts
}
lineVSTcnts <- filteredTEcnts[grepl("LINE", rownames(filteredTEcnts)), ]
print(dim(lineVSTcnts))
sineVSTcnts <- filteredTEcnts[grepl("SINE", rownames(filteredTEcnts)), ]
print(dim(sineVSTcnts))
dnaVSTcnts <- filteredTEcnts[grepl("DNA", rownames(filteredTEcnts)), ]
print(dim(dnaVSTcnts))
ltrVSTcnts <- filteredTEcnts[grepl("LTR", rownames(filteredTEcnts)), ]
print(dim(ltrVSTcnts))
hervVSTcnts <- filteredTEcnts[grepl("HERV", rownames(filteredTEcnts)), ]
print(dim(hervVSTcnts))
youngL1idx <- grepl("L1HS", rownames(filteredTEcnts))
youngL1idx <- youngL1idx | grepl("L1PA2_", rownames(filteredTEcnts))
youngL1idx <- youngL1idx | grepl("L1PA3_", rownames(filteredTEcnts))
youngL1VSTcnts <- filteredTEcnts[youngL1idx, ]
print(dim(youngL1VSTcnts))
print (rownames(youngL1VSTcnts))
# annotation
annotation_data <- as.data.frame(coldataNew$tissue)
rownames(annotation_data) <- colnames(geneVSTcnts)
colnames(annotation_data) <- "tissue"
broadertype = as.character(annotation_data$tissue)
broadertype[broadertype=="BLCA"] = "BLCAUCEC"
broadertype[broadertype=="UCEC"] = "BLCAUCEC"
broadertype[broadertype=="COAD"] = "COADREAD"
broadertype[broadertype=="READ"] = "COADREAD"
broadertype[broadertype=="ESCA"] = "ESCASTAD"
broadertype[broadertype=="STAD"] = "ESCASTAD"
broadertype[broadertype=="KICH"] = "KICHKIRCKIRP"
broadertype[broadertype=="KIRC"] = "KICHKIRCKIRP"
broadertype[broadertype=="KIRP"] = "KICHKIRCKIRP"
broadertype[broadertype=="LUAD"] = "LUADLUSC"
broadertype[broadertype=="LUSC"] = "LUADLUSC"
annotation_data <- cbind.data.frame(annotation_data,broadertype)
#callback = function(hc, ...){dendsort(hc)}
#colors
#qual_col_pals = brewer.pal.info[brewer.pal.info$category == 'qual',]
#col_vector = unlist(mapply(brewer.pal, qual_col_pals$maxcolors, rownames(qual_col_pals)))
col_vector = c("#e6194b", "#3cb44b", "#ffe119", "#0082c8", "#f58231", "#911eb4", "#46f0f0", "#f032e6", "#d2f53c", "#fabebe", "#008080", "#e6beff", "#aa6e28", "#800000", "#fffac8", "#aaffc3", "#808000", "#ffd8b1", "#000080", "#808080", "#FFFFFF", "#000000")
col_vector2 = c("#e6194b", "#3cb44b", "#ffe119", "#0082c8", "#f58231", "#911eb4", "#d2f53c", "#fabebe", "#e6beff", "#800000", "#ffd8b1", "#000080", "#808080", "#FFFFFF", "#000000")
mycolors=list(tissue = col_vector[1:length(levels(annotation_data$tissue))],
broadertype = col_vector2[1:length(levels(annotation_data$broadertype))])
names(mycolors$tissue) <- levels(annotation_data$tissue)
names(mycolors$broadertype) <- levels(annotation_data$broadertype)
# Null distribution of the normalized mutual information: shuffle the
# broader-type labels 100 times and average the nMI between two random
# labelings.  BUG FIX: `for (i in 100)` iterated exactly once (over the
# single value 100), leaving 99 zeros in rnMI and deflating the mean;
# seq_len(100) runs the intended 100 replicates.
rnMI <- rep(0, 100)
for (i in seq_len(100)) {
  randomtype1 <- sample(annotation_data$broadertype, length(annotation_data$broadertype), replace = FALSE)
  randomtype2 <- sample(annotation_data$broadertype, length(annotation_data$broadertype), replace = FALSE)
  nMI <- normalizedMI(randomtype1, randomtype2)
  rnMI[i] <- nMI
}
print("nMI random")
print(mean(rnMI))
#heatmap
# Draw a clustered ComplexHeatmap of the `top` most-variable rows of a
# VST-transformed count matrix and save it as a PDF at `filename`.
# Also hierarchically clusters the samples (columns) and prints the
# normalized mutual information between the cluster assignment and the
# broader tissue type.  Relies on globals defined earlier in this script:
# annotation_data, mycolors, and normalizedMI() (sourced from nmi.r).
plotheatmap <- function(VSTcnts, filename, top) {
  # Rank rows by variance and keep the `top` most variable ones.
  varGenes <- rowVars(VSTcnts)
  topVarianceGenes <- head(order(varGenes, decreasing=T),top)
  mat <- VSTcnts[ topVarianceGenes, ]
  # Center each row so colors show deviation from the row mean.
  mat <- mat - rowMeans(mat)
  # NOTE(review): hclust/cutree are deterministic, so all 10 iterations
  # print the same nMI -- presumably a leftover from a randomized
  # variant; confirm intent.
  for (run in 1:10) {
  # Average-linkage clustering of samples in the centered expression space.
  hc=hclust(d = dist(t(mat)), method = "ave")
  memb <- cutree(hc, k=length(levels(annotation_data$tissue)))
  cid <- as.data.frame(sort(memb))
  clusterid <- merge(annotation_data, cid, by="row.names")
  colnames(clusterid)[4] = "clusterid"
  cluster2gtype = rep(0, nrow(clusterid))
  clusterid$clusterid <- factor(clusterid$clusterid)
  for (i in 1:length(levels(clusterid$clusterid))) {
    gtype <- summary(clusterid[clusterid$clusterid==i,"broadertype"])
    typeidx = which(gtype==max(gtype))
    # Break ties by relative (rather than absolute) type frequency.
    if (length(typeidx) > 1) {
      gtype <- summary(clusterid[clusterid$clusterid==i,"broadertype"])/summary(clusterid[,"broadertype"])
      typeidx = which(gtype==max(gtype))
    }
    cluster2gtype[clusterid$clusterid==i] = names(typeidx) # assign most frequent broadertype to the cluster
  }
  clusterid <- cbind.data.frame(clusterid, cluster2gtype)
  clusterid$cluster2gtype <- factor(clusterid$cluster2gtype, levels=levels(clusterid$broadertype))
  # Agreement between the true broader type and the cluster-majority label.
  nMI <- normalizedMI(clusterid$broadertype, clusterid$cluster2gtype)
  print(paste0("normalized Mutual Information ", filename, ": "))
  print(nMI)
  }
  # Sorted dendrograms for columns (samples) and rows (genes).
  dend_col = dendsort(as.dendrogram(hc))
  dend_row = dendsort(as.dendrogram(hclust(dist(mat))))
  # select the 'contrast' you want
  pdf(file=filename, width=20, height=12)
  par(ps=3)
  ha = HeatmapAnnotation(annotation_data, col=mycolors,
                         #annotation_legend_param = list(type = list(grid_height=10)),
                         )
  #pushViewport(viewport(layout.pos.row = 1, layout.pos.col = 3))
  ht = Heatmap(mat, cluster_rows = dend_row, row_dend_reorder = FALSE,
          name = "ht", cluster_columns = dend_col, column_dend_reorder = FALSE,
          top_annotation = ha,
          top_annotation_height = unit(8, "cm"),
          show_row_names = FALSE, show_column_names = FALSE,
          #col = colorRampPalette(rev(brewer.pal(n = 7, name = "RdYlBu")))(100),
          col = colorRamp2(c(-5, 0, 5), c("blue", "#ffffbf", "red")),
          column_title = "")
  #upViewport(2)
  draw(ht, newpage = FALSE)
  dev.off()
}
plotheatmap(geneVSTcnts, paste0("geneheatmapTop150Var", b_noovp, "100k.pdf"), 150)
plotheatmap(lineVSTcnts, paste0("LINEheatmapTop150Var", b_noovp, "100k.pdf"), 150)
plotheatmap(sineVSTcnts, paste0("SINEheatmapTop150Var", b_noovp, "100k.pdf"), 150)
plotheatmap(dnaVSTcnts, paste0("DNAheatmapTop150Var", b_noovp, "100k.pdf"), 150)
plotheatmap(ltrVSTcnts, paste0("LTRheatmapTop150Var", b_noovp, "100k.pdf"), 150)
plotheatmap(hervVSTcnts, paste0("HERVheatmapTop150Var", b_noovp, "100k.pdf"), 150)
plotheatmap(youngL1VSTcnts, paste0("youngL1heatmapTop150Var", b_noovp, "100k.pdf"), 150)
| /src/makeHeatmap.r | no_license | HanLabUNLV/TEcoex | R | false | false | 7,439 | r | library("RColorBrewer")
library("DESeq2")
library("dendsort")
library("ComplexHeatmap")
library("circlize")
source("nmi.r")
setwd("../")
args = commandArgs(trailingOnly=TRUE)
resultdir = "result.rsem.TET.instance/"
print (args)
if (length(args)>0) {
resultdir = args[1]
if (substr(resultdir, nchar(resultdir), nchar(resultdir)) != "/") {
resultdir = paste0(resultdir, "/")
}
b_noovp = args[2]
}
print(resultdir)
setwd(resultdir)
print(b_noovp)
load(file = "ddsNew.RData")
load(file = "VSTcnts.DEGES.RData")
# filter ESCA STAD
cdata <- colData(ddsnew)
sampleidx = rep(TRUE, ncol(VSTcnts))
ESCASTADidx = (cdata$tissue == "ESCA") | (cdata$tissue == "STAD")
sampleidx = !ESCASTADidx
VSTcnts = VSTcnts[,sampleidx]
coldataNew = colData(ddsnew)
coldataNew = coldataNew[sampleidx,]
print(dim(VSTcnts))
TEs <- grepl(":", rownames(VSTcnts))
geneVSTcnts <- VSTcnts[!TEs,]
TEcnts <- VSTcnts[TEs,]
if (grepl("instance", resultdir) && (! is.na(b_noovp)) && (b_noovp == "noovp")) {
filters = read.table("gene.noovp.100000.TEs.bed", sep="\t")
filternames = as.character(filters[,4])
print(dim(TEcnts))
TEnames <- rownames(TEcnts)
TElocus = matrix(unlist(strsplit(TEnames, ":")), ncol=5, byrow=TRUE)[,1]
rownames(TEcnts) = TElocus
names(TEnames) = TElocus
filteredTEcnts <- TEcnts[filternames,]
print(dim(filteredTEcnts))
filteredTEnames <- TEnames[filternames]
rownames(filteredTEcnts) = filteredTEnames
#rm(VSTcnts)
#VSTcnts = rbind(geneVSTcnts, filteredTEcnts)
} else {
filteredTEcnts = TEcnts
}
lineVSTcnts <- filteredTEcnts[grepl("LINE", rownames(filteredTEcnts)), ]
print(dim(lineVSTcnts))
sineVSTcnts <- filteredTEcnts[grepl("SINE", rownames(filteredTEcnts)), ]
print(dim(sineVSTcnts))
dnaVSTcnts <- filteredTEcnts[grepl("DNA", rownames(filteredTEcnts)), ]
print(dim(dnaVSTcnts))
ltrVSTcnts <- filteredTEcnts[grepl("LTR", rownames(filteredTEcnts)), ]
print(dim(ltrVSTcnts))
hervVSTcnts <- filteredTEcnts[grepl("HERV", rownames(filteredTEcnts)), ]
print(dim(hervVSTcnts))
youngL1idx <- grepl("L1HS", rownames(filteredTEcnts))
youngL1idx <- youngL1idx | grepl("L1PA2_", rownames(filteredTEcnts))
youngL1idx <- youngL1idx | grepl("L1PA3_", rownames(filteredTEcnts))
youngL1VSTcnts <- filteredTEcnts[youngL1idx, ]
print(dim(youngL1VSTcnts))
print (rownames(youngL1VSTcnts))
# annotation
annotation_data <- as.data.frame(coldataNew$tissue)
rownames(annotation_data) <- colnames(geneVSTcnts)
colnames(annotation_data) <- "tissue"
broadertype = as.character(annotation_data$tissue)
broadertype[broadertype=="BLCA"] = "BLCAUCEC"
broadertype[broadertype=="UCEC"] = "BLCAUCEC"
broadertype[broadertype=="COAD"] = "COADREAD"
broadertype[broadertype=="READ"] = "COADREAD"
broadertype[broadertype=="ESCA"] = "ESCASTAD"
broadertype[broadertype=="STAD"] = "ESCASTAD"
broadertype[broadertype=="KICH"] = "KICHKIRCKIRP"
broadertype[broadertype=="KIRC"] = "KICHKIRCKIRP"
broadertype[broadertype=="KIRP"] = "KICHKIRCKIRP"
broadertype[broadertype=="LUAD"] = "LUADLUSC"
broadertype[broadertype=="LUSC"] = "LUADLUSC"
annotation_data <- cbind.data.frame(annotation_data,broadertype)
#callback = function(hc, ...){dendsort(hc)}
#colors
#qual_col_pals = brewer.pal.info[brewer.pal.info$category == 'qual',]
#col_vector = unlist(mapply(brewer.pal, qual_col_pals$maxcolors, rownames(qual_col_pals)))
col_vector = c("#e6194b", "#3cb44b", "#ffe119", "#0082c8", "#f58231", "#911eb4", "#46f0f0", "#f032e6", "#d2f53c", "#fabebe", "#008080", "#e6beff", "#aa6e28", "#800000", "#fffac8", "#aaffc3", "#808000", "#ffd8b1", "#000080", "#808080", "#FFFFFF", "#000000")
col_vector2 = c("#e6194b", "#3cb44b", "#ffe119", "#0082c8", "#f58231", "#911eb4", "#d2f53c", "#fabebe", "#e6beff", "#800000", "#ffd8b1", "#000080", "#808080", "#FFFFFF", "#000000")
mycolors=list(tissue = col_vector[1:length(levels(annotation_data$tissue))],
broadertype = col_vector2[1:length(levels(annotation_data$broadertype))])
names(mycolors$tissue) <- levels(annotation_data$tissue)
names(mycolors$broadertype) <- levels(annotation_data$broadertype)
# Null distribution of the normalized mutual information: shuffle the
# broader-type labels 100 times and average the nMI between two random
# labelings.  BUG FIX: `for (i in 100)` iterated exactly once (over the
# single value 100), leaving 99 zeros in rnMI and deflating the mean;
# seq_len(100) runs the intended 100 replicates.
rnMI <- rep(0, 100)
for (i in seq_len(100)) {
  randomtype1 <- sample(annotation_data$broadertype, length(annotation_data$broadertype), replace = FALSE)
  randomtype2 <- sample(annotation_data$broadertype, length(annotation_data$broadertype), replace = FALSE)
  nMI <- normalizedMI(randomtype1, randomtype2)
  rnMI[i] <- nMI
}
print("nMI random")
print(mean(rnMI))
#heatmap
# Draw a clustered ComplexHeatmap of the `top` most-variable rows of a
# VST-transformed count matrix and save it as a PDF at `filename`.
# Also hierarchically clusters the samples (columns) and prints the
# normalized mutual information between the cluster assignment and the
# broader tissue type.  Relies on globals defined earlier in this script:
# annotation_data, mycolors, and normalizedMI() (sourced from nmi.r).
plotheatmap <- function(VSTcnts, filename, top) {
  # Rank rows by variance and keep the `top` most variable ones.
  varGenes <- rowVars(VSTcnts)
  topVarianceGenes <- head(order(varGenes, decreasing=T),top)
  mat <- VSTcnts[ topVarianceGenes, ]
  # Center each row so colors show deviation from the row mean.
  mat <- mat - rowMeans(mat)
  # NOTE(review): hclust/cutree are deterministic, so all 10 iterations
  # print the same nMI -- presumably a leftover from a randomized
  # variant; confirm intent.
  for (run in 1:10) {
  # Average-linkage clustering of samples in the centered expression space.
  hc=hclust(d = dist(t(mat)), method = "ave")
  memb <- cutree(hc, k=length(levels(annotation_data$tissue)))
  cid <- as.data.frame(sort(memb))
  clusterid <- merge(annotation_data, cid, by="row.names")
  colnames(clusterid)[4] = "clusterid"
  cluster2gtype = rep(0, nrow(clusterid))
  clusterid$clusterid <- factor(clusterid$clusterid)
  for (i in 1:length(levels(clusterid$clusterid))) {
    gtype <- summary(clusterid[clusterid$clusterid==i,"broadertype"])
    typeidx = which(gtype==max(gtype))
    # Break ties by relative (rather than absolute) type frequency.
    if (length(typeidx) > 1) {
      gtype <- summary(clusterid[clusterid$clusterid==i,"broadertype"])/summary(clusterid[,"broadertype"])
      typeidx = which(gtype==max(gtype))
    }
    cluster2gtype[clusterid$clusterid==i] = names(typeidx) # assign most frequent broadertype to the cluster
  }
  clusterid <- cbind.data.frame(clusterid, cluster2gtype)
  clusterid$cluster2gtype <- factor(clusterid$cluster2gtype, levels=levels(clusterid$broadertype))
  # Agreement between the true broader type and the cluster-majority label.
  nMI <- normalizedMI(clusterid$broadertype, clusterid$cluster2gtype)
  print(paste0("normalized Mutual Information ", filename, ": "))
  print(nMI)
  }
  # Sorted dendrograms for columns (samples) and rows (genes).
  dend_col = dendsort(as.dendrogram(hc))
  dend_row = dendsort(as.dendrogram(hclust(dist(mat))))
  # select the 'contrast' you want
  pdf(file=filename, width=20, height=12)
  par(ps=3)
  ha = HeatmapAnnotation(annotation_data, col=mycolors,
                         #annotation_legend_param = list(type = list(grid_height=10)),
                         )
  #pushViewport(viewport(layout.pos.row = 1, layout.pos.col = 3))
  ht = Heatmap(mat, cluster_rows = dend_row, row_dend_reorder = FALSE,
          name = "ht", cluster_columns = dend_col, column_dend_reorder = FALSE,
          top_annotation = ha,
          top_annotation_height = unit(8, "cm"),
          show_row_names = FALSE, show_column_names = FALSE,
          #col = colorRampPalette(rev(brewer.pal(n = 7, name = "RdYlBu")))(100),
          col = colorRamp2(c(-5, 0, 5), c("blue", "#ffffbf", "red")),
          column_title = "")
  #upViewport(2)
  draw(ht, newpage = FALSE)
  dev.off()
}
plotheatmap(geneVSTcnts, paste0("geneheatmapTop150Var", b_noovp, "100k.pdf"), 150)
plotheatmap(lineVSTcnts, paste0("LINEheatmapTop150Var", b_noovp, "100k.pdf"), 150)
plotheatmap(sineVSTcnts, paste0("SINEheatmapTop150Var", b_noovp, "100k.pdf"), 150)
plotheatmap(dnaVSTcnts, paste0("DNAheatmapTop150Var", b_noovp, "100k.pdf"), 150)
plotheatmap(ltrVSTcnts, paste0("LTRheatmapTop150Var", b_noovp, "100k.pdf"), 150)
plotheatmap(hervVSTcnts, paste0("HERVheatmapTop150Var", b_noovp, "100k.pdf"), 150)
plotheatmap(youngL1VSTcnts, paste0("youngL1heatmapTop150Var", b_noovp, "100k.pdf"), 150)
|
# Grid search over rpart hyperparameters (cp, maxdepth, minsplit) for CAD
# classification.  For each configuration the script logs accuracy,
# sensitivity, specificity, MCC, F1 and ROC AUC into `sapdata` and tracks
# the best-accuracy setting in (cp_op, max_dep, min_split, accu).
rm(list = ls())  # NOTE: clears the whole workspace (kept for parity)
library(rpart)
library(rpart.plot)
library(pROC)

training <- read.csv("F:/Thesis/DataMing+MachieLeaning/CAD/ATLEAST3/CAD Dataset Train70p.csv")
testing <- read.csv("F:/Thesis/DataMing+MachieLeaning/CAD/ATLEAST3/CAD Dataset Test30p.csv")
sapdata <- read.csv("F:/Thesis/DataMing+MachieLeaning/CAD/ATLEAST3/Sap_DT_Selected.csv")

# Reference trees grown without any complexity penalty.
base_model <- rpart(as.factor(training$Cath) ~ ., data = training, method = "class", control = rpart.control(cp = 0))
postpruned_model <- rpart(as.factor(Cath) ~ ., data = training, method = "class", control = rpart.control(cp = 0))

# Trackers for the best configuration seen so far, and the results row index.
max_dep <- 0
min_split <- 0
accu <- 0
cp_op <- 0
k <- 1
cp_array <- c(0, 0.01, 0.084, 0.05, 0.1, 0.5, 1)

for (cp_value in seq_along(cp_array)) {
  for (i in seq_len(20)) {        # maxdepth candidates
    for (j in seq_len(32)) {      # minsplit candidates
      print(i)
      print(j)
      prepruned_model <- rpart(as.factor(Cath) ~ ., data = training, method = "class",
                               control = rpart.control(cp = cp_array[cp_value], maxdepth = i, minsplit = j))
      # Column 20 is the outcome (Cath); the rest are predictors.
      testing$pred <- predict(object = prepruned_model, newdata = testing[, -20], type = "class")
      #printcp()
      #plotcp()
      cm <- table(testing$pred, testing$Cath, dnn = c("Prediction", "Actual"))
      acc <- ((sum(diag(cm)) / sum(cm)))
      tp <- cm[2, 2]
      tn <- cm[1, 1]
      fn <- cm[1, 2]
      fp <- cm[2, 1]
      sen <- tp / (tp + fn)
      spe <- tn / (tn + fp)
      mcc <- ((tp * tn) - (fp * fn)) / (sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)))
      f1 <- 2 * tp / ((2 * tp) + fp + fn)
      roc_obj <- roc(testing[, 20], as.numeric(testing$pred))
      rocauc <- auc(roc_obj)
      # Record this configuration's scores as row k of the results sheet.
      sapdata[k, 1] <- cp_array[cp_value]
      sapdata[k, 2] <- i
      sapdata[k, 3] <- j
      sapdata[k, 4] <- acc
      sapdata[k, 5] <- sen
      sapdata[k, 6] <- spe
      sapdata[k, 7] <- mcc
      sapdata[k, 8] <- f1
      sapdata[k, 9] <- rocauc
      k <- k + 1
      print('Accuracy')
      print(acc)
      print('sensitivity')
      print(sen)
      print('Specificity')
      print(spe)
      print('MCC')
      print(mcc)
      print('F1')
      print(f1)
      print('AUC')
      print(rocauc)
      # Keep the best-accuracy configuration.
      if (acc > accu) {
        max_dep <- i
        min_split <- j
        accu <- acc
        cp_op <- cp_array[cp_value]
      }
    }
  }
}

write.csv(sapdata, "F:/Thesis/DataMing+MachieLeaning/CAD/ATLEAST3/Sap_DT_Selected.csv")
print(cp_op)
print(max_dep)
print(min_split)
print(accu)
rpart.plot(prepruned_model) | /ATLEAST3/DT.R | permissive | UtshaDas/CAD-Classification | R | false | false | 2,345 | r | rm(list=ls())
library(rpart)
library(rpart.plot)
library(pROC)
training<-read.csv("F:/Thesis/DataMing+MachieLeaning/CAD/ATLEAST3/CAD Dataset Train70p.csv")
testing<-read.csv("F:/Thesis/DataMing+MachieLeaning/CAD/ATLEAST3/CAD Dataset Test30p.csv")
sapdata<-read.csv("F:/Thesis/DataMing+MachieLeaning/CAD/ATLEAST3/Sap_DT_Selected.csv")
base_model <- rpart(as.factor(training$Cath) ~., data = training,method="class",control = rpart.control(cp = 0))
postpruned_model <- rpart(as.factor(Cath) ~., data = training,method="class",control = rpart.control(cp = 0))
max_dep=0
min_split=0
accu=0
cp_op=0
k=1
cp_array=c(0,0.01,0.084,0.05,0.1,0.5,1)
for(cp_value in 1:length(cp_array)){
for (i in 1:20) {
for (j in 1:32) {
print(i)
print(j)
prepruned_model <- rpart(as.factor(Cath) ~., data = training,method="class",control = rpart.control(cp = cp_array[cp_value], maxdepth = i,minsplit =j))
testing$pred <- predict(object = prepruned_model, newdata = testing[,-20], type = "class")
#printcp()
#plotcp()
cm=table(testing$pred,testing$Cath,dnn=c("Prediction","Actual"))
acc=((sum(diag(cm))/sum(cm)))
tp<-cm[2,2]
tn<-cm[1,1]
fn<-cm[1,2]
fp<-cm[2,1]
sen=tp/(tp+fn)
spe=tn/(tn+fp)
mcc=((tp*tn) - (fp*fn))/(sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn)))
f1=2*tp/((2*tp)+fp+fn)
roc_obj<-roc(testing[,20],as.numeric(testing$pred))
rocauc<-auc(roc_obj)
sapdata[k,1]=cp_array[cp_value]
sapdata[k,2]=i
sapdata[k,3]=j
sapdata[k,4]=acc
sapdata[k,5]=sen
sapdata[k,6]=spe
sapdata[k,7]=mcc
sapdata[k,8]=f1
sapdata[k,9]=rocauc
k=k+1
print('Accuracy')
print(acc)
print('sensitivity')
print(sen)
print('Specificity')
print(spe)
print('MCC')
print(mcc)
print('F1')
print(f1)
print('AUC')
print(rocauc)
if(acc>accu){
max_dep=i
min_split=j
accu=acc
cp_op=cp_array[cp_value]
}
}
}
}
write.csv(sapdata,"F:/Thesis/DataMing+MachieLeaning/CAD/ATLEAST3/Sap_DT_Selected.csv")
print(cp_op)
print(max_dep)
print(min_split)
print(accu)
rpart.plot(prepruned_model) |
# Q2.
# Have total emissions from PM2.5 decreased in the Baltimore City, Maryland
# (fips == "24510")
# from 1999 to 2008? Use the base plotting system to make a plot answering
# this question.
#Read the data into R:
# NEI: National Emmissions Inventory (the data for years 1999,2002,2005,2008)
# SCC: Source Classification Code
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
baltimore <- subset(NEI,NEI$fips=="24510")
emissionsBalt <-baltimore$Emissions
print(mean(is.na(emissionsBalt))) #make sure there are no NAs
emByYrBalt <- sapply(split(emissionsBalt,baltimore$year), sum)
years <- unique(baltimore$year)
png(filename = "plot2.png")
bp <- barplot(emByYrBalt, col = rgb( 0,0.5,0.5,0.2),
xlab = "Year",
ylab = "tons",
main = "Total PM2.5 emission in Baltimore \n from all sources by year",
sub = "Total PM2.5 in Baltimore decreased from 1999 to 2008.")
text(bp,emByYrBalt*0.9,labels = round(emByYrBalt,digits = 0))
dev.off()
| /tasks2.R | no_license | purvinis/Ex_Data_CourseProject2 | R | false | false | 1,038 | r | # Q2.
# Have total emissions from PM2.5 decreased in the Baltimore City, Maryland
# (fips == "24510")
# from 1999 to 2008? Use the base plotting system to make a plot answering
# this question.
#Read the data into R:
# NEI: National Emmissions Inventory (the data for years 1999,2002,2005,2008)
# SCC: Source Classification Code
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
baltimore <- subset(NEI,NEI$fips=="24510")
emissionsBalt <-baltimore$Emissions
print(mean(is.na(emissionsBalt))) #make sure there are no NAs
emByYrBalt <- sapply(split(emissionsBalt,baltimore$year), sum)
years <- unique(baltimore$year)
png(filename = "plot2.png")
bp <- barplot(emByYrBalt, col = rgb( 0,0.5,0.5,0.2),
xlab = "Year",
ylab = "tons",
main = "Total PM2.5 emission in Baltimore \n from all sources by year",
sub = "Total PM2.5 in Baltimore decreased from 1999 to 2008.")
text(bp,emByYrBalt*0.9,labels = round(emByYrBalt,digits = 0))
dev.off()
|
library(magi)
# set up configuration if not already exist ------------------------------------
if(!exists("config")){
config <- list(
nobs = 15,
noise = rep(0.01, 5), # 0.001 = low noise, 0.01 = high noise
kernel = "generalMatern",
loglikflag = "withmeanBand",
bandsize = 40,
hmcSteps = 100,
n.iter = 20001,
burninRatio = 0.50,
stepSizeFactor = 0.01,
linfillspace = 0.5, # discretization interval width (instead of fill level, since unevenly spaced observations)
t.end = 100,
modelName = "PTrans",
temperPrior = TRUE,
useFrequencyBasedPrior = TRUE,
useScalerSigma = FALSE,
useFixedSigma = FALSE,
linearizexInit = TRUE,
useExoSigma = FALSE,
useMean = TRUE,
useBand = TRUE,
max.epoch = 1
)
}
# Use this to replicate same seeds used in the paper
args <- commandArgs(trailingOnly = TRUE)
args <- as.numeric(args)[1]
seedlist <- scan("ptrans-noise001-seeds.txt")
config$seed <- seedlist[args]
show(config$seed)
config$ndis <- config$t.end / config$linfillspace + 1;
# initialize global parameters, true x, simulated x ----------------------------
pram.true <- list(
theta=c(0.07, 0.6,0.05,0.3,0.017,0.3),
x0 = c(1,0,1,0,0),
sigma=config$noise
)
times <- seq(0,100,length=1001)
# ODE right-hand side in the (t, state, parameters) signature that
# deSolve::ode() expects; returns a one-element list of derivatives by
# delegating to the magi package's protein-transduction model.
modelODE <- function(t, state, parameters) {
  list(as.vector(magi:::ptransmodelODE(parameters, t(state))))
}
xtrue <- deSolve::ode(y = pram.true$x0, times = times, func = modelODE, parms = pram.true$theta)
xtrue <- data.frame(xtrue)
#matplot(xtrue[, "time"], xtrue[, -1], type="l", lty=1)
xtrueFunc <- lapply(2:ncol(xtrue), function(j)
approxfun(xtrue[, "time"], xtrue[, j]))
xsim <- data.frame(time = c(0,1,2,4,5,7,10,15,20,30,40,50,60,80,100))
xsim <- cbind(xsim, sapply(xtrueFunc, function(f) f(xsim$time)))
set.seed(config$seed)
for(j in 1:(ncol(xsim)-1)){
xsim[,1+j] <- xsim[,1+j]+rnorm(nrow(xsim), sd=config$noise[j])
}
xsim.obs <- xsim[seq(1,nrow(xsim), length=config$nobs),]
#matplot(xsim.obs$time, xsim.obs[,-1], type="p", col=1:(ncol(xsim)-1), pch=20, add = TRUE)
#matplot(xsim.obs$time, xsim.obs[,-1], type="p", col=1:(ncol(xsim)-1), pch=20)
## Linearly interpolate using fixed interval widths
fillC <- seq(0, config$t.end, by = config$linfillspace)
xsim <- data.frame(time = fillC)
xsim <- cbind(xsim, matrix(NaN, nrow = length(fillC), ncol = ncol(xsim.obs)-1 ))
for (i in 1:length(fillC)) {
loc <- match( fillC[i], xsim.obs[, "time"])
if (!is.na(loc))
xsim[i,2:ncol(xsim)] = xsim.obs[loc,2:ncol(xsim)];
}
if (config$linearizexInit) {
exoxInit <- sapply(2:ncol(xsim.obs), function(j)
approx(xsim.obs[, "time"], xsim.obs[, j], xsim[, "time"])$y)
} else {
exoxInit <- matrix(nrow=0,ncol=0)
}
# cpp inference ----------------------------
ptransmodel <- list(
name= config$modelName,
fOde=magi:::ptransmodelODE,
fOdeDx=magi:::ptransmodelDx,
fOdeDtheta=magi:::ptransmodelDtheta,
thetaLowerBound=rep(0,6),
thetaUpperBound=rep(4,6)
)
outDir <- "../results/ptrans-highnoise/"
dir.create(outDir, showWarnings = FALSE, recursive = TRUE)
config$priorTemperature <- config$ndis / config$nobs
config$priorTemperatureObs <- 1
OursStartTime <- proc.time()[3]
### Optimize phi first using equally spaced intervals of 1, i.e., 0,1...,100.
samplesCpp <- magi:::solveMagiRcpp(
yFull = exoxInit[xsim$time %in% 0:100,],
odeModel = ptransmodel,
tvecFull = 0:100,
sigmaExogenous = matrix(numeric(0)),
phiExogenous = matrix(numeric(0)),
xInitExogenous = matrix(numeric(0)),
thetaInitExogenous = matrix(numeric(0)),
muExogenous = matrix(numeric(0)),
dotmuExogenous = matrix(numeric(0)),
priorTemperatureLevel = config$priorTemperature,
priorTemperatureDeriv = config$priorTemperature,
priorTemperatureObs = config$priorTemperatureObs,
kernel = config$kernel,
nstepsHmc = config$hmcSteps,
burninRatioHmc = config$burninRatio,
niterHmc = 2,
stepSizeFactorHmc = config$stepSizeFactor,
nEpoch = config$max.epoch,
bandSize = config$bandsize,
useFrequencyBasedPrior = config$useFrequencyBasedPrior,
useBand = config$useBand,
useMean = config$useMean,
useScalerSigma = config$useScalerSigma,
useFixedSigma = config$useFixedSigma,
verbose = TRUE)
phiUsed <- samplesCpp$phi
samplesCpp <- samplesCpp$llikxthetasigmaSamples
samplesCpp <- samplesCpp[,,1]
out <- samplesCpp[-1,1,drop=FALSE]
sigmaUsed <- tail(out[, 1], ncol(xsim[,-1]))
show(sigmaUsed)
## stabilize phi estimate
samplesCpp <- magi:::solveMagiRcpp(
yFull = exoxInit[xsim$time %in% 0:100,],
odeModel = ptransmodel,
tvecFull = 0:100,
sigmaExogenous = sigmaUsed,
phiExogenous = matrix(numeric(0)),
xInitExogenous = matrix(numeric(0)),
thetaInitExogenous = matrix(numeric(0)),
muExogenous = matrix(numeric(0)),
dotmuExogenous = matrix(numeric(0)),
priorTemperatureLevel = config$priorTemperature,
priorTemperatureDeriv = config$priorTemperature,
priorTemperatureObs = config$priorTemperatureObs,
kernel = config$kernel,
nstepsHmc = config$hmcSteps,
burninRatioHmc = config$burninRatio,
niterHmc = 2,
stepSizeFactorHmc = config$stepSizeFactor,
nEpoch = config$max.epoch,
bandSize = config$bandsize,
useFrequencyBasedPrior = config$useFrequencyBasedPrior,
useBand = config$useBand,
useMean = config$useMean,
useScalerSigma = config$useScalerSigma,
useFixedSigma = config$useFixedSigma,
verbose = TRUE)
# Extract the warm-started GP hyperparameters (phi) and observation-noise sd
# estimates (sigma) from the short stabilization run; both are held fixed in
# the final inference run below.
phiUsed <- samplesCpp$phi
samplesCpp <- samplesCpp$llikxthetasigmaSamples
samplesCpp <- samplesCpp[,,1]
out <- samplesCpp[-1,1,drop=FALSE]
sigmaUsed <- tail(out[, 1], ncol(xsim[,-1]))
show(sigmaUsed)
# Final inference run: full-length HMC (config$n.iter iterations) on the fine
# inference grid, with sigma and phi supplied exogenously and the latent
# trajectories initialized from the linear interpolation of the observations.
samplesCpp <- magi:::solveMagiRcpp(
  yFull = data.matrix(xsim[,-1]),
  odeModel = ptransmodel,
  tvecFull = xsim$time,
  sigmaExogenous = sigmaUsed,
  phiExogenous = phiUsed,
  xInitExogenous = exoxInit,
  thetaInitExogenous = matrix(numeric(0)),
  muExogenous = matrix(numeric(0)),
  dotmuExogenous = matrix(numeric(0)),
  priorTemperatureLevel = config$priorTemperature,
  priorTemperatureDeriv = config$priorTemperature,
  priorTemperatureObs = config$priorTemperatureObs,
  kernel = config$kernel,
  nstepsHmc = config$hmcSteps,
  burninRatioHmc = config$burninRatio,
  niterHmc = config$n.iter,
  stepSizeFactorHmc = config$stepSizeFactor,
  nEpoch = config$max.epoch,
  bandSize = config$bandsize,
  useFrequencyBasedPrior = config$useFrequencyBasedPrior,
  useBand = config$useBand,
  useMean = config$useMean,
  useScalerSigma = config$useScalerSigma,
  useFixedSigma = config$useFixedSigma,
  verbose = TRUE)
OursTimeUsed <- proc.time()[3] - OursStartTime  # wall-clock time of the MAGI runs
phiUsed <- samplesCpp$phi
samplesCpp <- samplesCpp$llikxthetasigmaSamples
samplesCpp <- samplesCpp[,,1]
out <- samplesCpp[-1,1,drop=FALSE]
# Each sample column stacks (log-likelihood, latent x, theta, sigma); see the
# llikId/xId/thetaId/sigmaId index ranges below for the exact layout.
xCpp <- matrix(out[1:length(data.matrix(xsim[,-1])), 1], ncol=ncol(xsim[,-1]))
thetaCpp <- out[(length(xCpp)+1):(length(xCpp) + length(ptransmodel$thetaLowerBound)), 1]
sigmaCpp <- tail(out[, 1], ncol(xsim[,-1]))
#matplot(xsim$time, xCpp, type="l", add=TRUE)
llikId <- 1
xId <- (max(llikId)+1):(max(llikId)+length(data.matrix(xsim[,-1])))
thetaId <- (max(xId)+1):(max(xId)+length(ptransmodel$thetaLowerBound))
sigmaId <- (max(thetaId)+1):(max(thetaId)+ncol(xsim[,-1]))
burnin <- as.integer(config$n.iter*config$burninRatio)
# Re-assemble the post-burn-in samples into named components; xsampled has
# dimensions (iteration, time point, state component).
gpode <- list(theta=t(samplesCpp[thetaId, -(1:burnin)]),
              xsampled=array(t(samplesCpp[xId, -(1:burnin)]),
                             dim=c(config$n.iter-burnin, nrow(xsim), ncol(xsim)-1)),
              lglik=samplesCpp[llikId,-(1:burnin)],
              sigma = t(samplesCpp[sigmaId, -(1:burnin), drop=FALSE]))
# ODE derivatives implied by each posterior sample (used for diagnostics).
gpode$fode <- sapply(1:length(gpode$lglik), function(t)
  with(gpode, magi:::ptransmodelODE(theta[t,], xsampled[t,,])), simplify = "array")
gpode$fode <- aperm(gpode$fode, c(3,1,2))
dotxtrue = magi:::ptransmodelODE(pram.true$theta, data.matrix(xtrue[,-1]))
odemodel <- list(times=times, modelODE=modelODE, xtrue=xtrue)
# Diagnostic plots plus a full .rda archive of inputs/outputs for this
# seed / noise / discretization setting.
magi:::plotPostSamplesFlex(
  paste0(outDir, config$modelName,"-",config$seed,"-noise", config$noise[1], ".pdf"),
  xtrue, dotxtrue, xsim, gpode, pram.true, config, odemodel)
save(xtrue, dotxtrue, xsim, gpode, pram.true, config, odemodel, OursTimeUsed, phiUsed, file= paste0(outDir, config$modelName,"-",config$seed,"-noise", config$noise[1],"-fill", config$linfillspace, ".rda"))
| /magi/replication/run-ptrans-highnoise.R | no_license | nick-jhlee/plant-circadian | R | false | false | 8,238 | r | library(magi)
# set up configuration if not already exist ------------------------------------
if(!exists("config")){
config <- list(
nobs = 15,
noise = rep(0.01, 5), # 0.001 = low noise, 0.01 = high noise
kernel = "generalMatern",
loglikflag = "withmeanBand",
bandsize = 40,
hmcSteps = 100,
n.iter = 20001,
burninRatio = 0.50,
stepSizeFactor = 0.01,
linfillspace = 0.5, # discretization interval width (instead of fill level, since unevenly spaced observations)
t.end = 100,
modelName = "PTrans",
temperPrior = TRUE,
useFrequencyBasedPrior = TRUE,
useScalerSigma = FALSE,
useFixedSigma = FALSE,
linearizexInit = TRUE,
useExoSigma = FALSE,
useMean = TRUE,
useBand = TRUE,
max.epoch = 1
)
}
# Use this to replicate same seeds used in the paper
args <- commandArgs(trailingOnly = TRUE)
args <- as.numeric(args)[1]
seedlist <- scan("ptrans-noise001-seeds.txt")
config$seed <- seedlist[args]
show(config$seed)
config$ndis <- config$t.end / config$linfillspace + 1;
# initialize global parameters, true x, simulated x ----------------------------
# Ground-truth ODE parameters, initial condition, and observation noise used
# to generate the synthetic data set.
pram.true <- list(
  theta=c(0.07, 0.6,0.05,0.3,0.017,0.3),
  x0 = c(1,0,1,0,0),
  sigma=config$noise
)
times <- seq(0,100,length=1001)
# Wrapper exposing the protein-transduction ODE in the signature expected by
# deSolve::ode (returns a list holding the derivative vector).
modelODE <- function(t, state, parameters) {
  list(as.vector(magi:::ptransmodelODE(parameters, t(state))))
}
# Numerically integrate the true trajectory on a dense time grid.
xtrue <- deSolve::ode(y = pram.true$x0, times = times, func = modelODE, parms = pram.true$theta)
xtrue <- data.frame(xtrue)
#matplot(xtrue[, "time"], xtrue[, -1], type="l", lty=1)
# One linear interpolator per state component, used to read the truth off at
# the (unevenly spaced) observation times below.
xtrueFunc <- lapply(2:ncol(xtrue), function(j)
  approxfun(xtrue[, "time"], xtrue[, j]))
xsim <- data.frame(time = c(0,1,2,4,5,7,10,15,20,30,40,50,60,80,100))
xsim <- cbind(xsim, sapply(xtrueFunc, function(f) f(xsim$time)))
# Add i.i.d. Gaussian observation noise (component-specific sd) to the truth.
set.seed(config$seed)
for(j in 1:(ncol(xsim)-1)){
  xsim[,1+j] <- xsim[,1+j]+rnorm(nrow(xsim), sd=config$noise[j])
}
# Subsample `nobs` observation rows (with nobs = 15 this keeps all rows).
xsim.obs <- xsim[seq(1,nrow(xsim), length=config$nobs),]
#matplot(xsim.obs$time, xsim.obs[,-1], type="p", col=1:(ncol(xsim)-1), pch=20)
## Linearly interpolate using fixed interval widths
fillC <- seq(0, config$t.end, by = config$linfillspace)
xsim <- data.frame(time = fillC)
xsim <- cbind(xsim, matrix(NaN, nrow = length(fillC), ncol = ncol(xsim.obs)-1 ))
for (i in 1:length(fillC)) {
loc <- match( fillC[i], xsim.obs[, "time"])
if (!is.na(loc))
xsim[i,2:ncol(xsim)] = xsim.obs[loc,2:ncol(xsim)];
}
if (config$linearizexInit) {
exoxInit <- sapply(2:ncol(xsim.obs), function(j)
approx(xsim.obs[, "time"], xsim.obs[, j], xsim[, "time"])$y)
} else {
exoxInit <- matrix(nrow=0,ncol=0)
}
# cpp inference ----------------------------
# Model specification handed to the C++ MAGI solver: the ODE right-hand side,
# its gradients with respect to state and parameters, and box constraints on
# the six-dimensional parameter vector theta.
ptransmodel <- list(
  name= config$modelName,
  fOde=magi:::ptransmodelODE,
  fOdeDx=magi:::ptransmodelDx,
  fOdeDtheta=magi:::ptransmodelDtheta,
  thetaLowerBound=rep(0,6),
  thetaUpperBound=rep(4,6)
)
outDir <- "../results/ptrans-highnoise/"
dir.create(outDir, showWarnings = FALSE, recursive = TRUE)
# Temper the GP prior by the ratio of discretization points to observations so
# the prior's influence does not grow as the grid is refined.
config$priorTemperature <- config$ndis / config$nobs
config$priorTemperatureObs <- 1
OursStartTime <- proc.time()[3]  # start of the timed MAGI runs
### Optimize phi first using equally spaced intervals of 1, i.e., 0,1...,100.
# Warm-up run 1: a 2-iteration MAGI call on the coarse (unit-spaced) grid
# whose only purpose is to produce GP hyperparameters (phi) and noise-sd
# estimates (sigma); its HMC samples are otherwise discarded.
samplesCpp <- magi:::solveMagiRcpp(
  yFull = exoxInit[xsim$time %in% 0:100,],
  odeModel = ptransmodel,
  tvecFull = 0:100,
  sigmaExogenous = matrix(numeric(0)),
  phiExogenous = matrix(numeric(0)),
  xInitExogenous = matrix(numeric(0)),
  thetaInitExogenous = matrix(numeric(0)),
  muExogenous = matrix(numeric(0)),
  dotmuExogenous = matrix(numeric(0)),
  priorTemperatureLevel = config$priorTemperature,
  priorTemperatureDeriv = config$priorTemperature,
  priorTemperatureObs = config$priorTemperatureObs,
  kernel = config$kernel,
  nstepsHmc = config$hmcSteps,
  burninRatioHmc = config$burninRatio,
  niterHmc = 2,
  stepSizeFactorHmc = config$stepSizeFactor,
  nEpoch = config$max.epoch,
  bandSize = config$bandsize,
  useFrequencyBasedPrior = config$useFrequencyBasedPrior,
  useBand = config$useBand,
  useMean = config$useMean,
  useScalerSigma = config$useScalerSigma,
  useFixedSigma = config$useFixedSigma,
  verbose = TRUE)
phiUsed <- samplesCpp$phi
samplesCpp <- samplesCpp$llikxthetasigmaSamples
samplesCpp <- samplesCpp[,,1]
out <- samplesCpp[-1,1,drop=FALSE]
# sigma estimates occupy the last ncol(x) entries of the stacked vector.
sigmaUsed <- tail(out[, 1], ncol(xsim[,-1]))
show(sigmaUsed)
## stabilize phi estimate
# Warm-up run 2: repeat the short run with sigma fixed to the values obtained
# above, yielding a stabilized phi estimate for the final inference.
samplesCpp <- magi:::solveMagiRcpp(
  yFull = exoxInit[xsim$time %in% 0:100,],
  odeModel = ptransmodel,
  tvecFull = 0:100,
  sigmaExogenous = sigmaUsed,
  phiExogenous = matrix(numeric(0)),
  xInitExogenous = matrix(numeric(0)),
  thetaInitExogenous = matrix(numeric(0)),
  muExogenous = matrix(numeric(0)),
  dotmuExogenous = matrix(numeric(0)),
  priorTemperatureLevel = config$priorTemperature,
  priorTemperatureDeriv = config$priorTemperature,
  priorTemperatureObs = config$priorTemperatureObs,
  kernel = config$kernel,
  nstepsHmc = config$hmcSteps,
  burninRatioHmc = config$burninRatio,
  niterHmc = 2,
  stepSizeFactorHmc = config$stepSizeFactor,
  nEpoch = config$max.epoch,
  bandSize = config$bandsize,
  useFrequencyBasedPrior = config$useFrequencyBasedPrior,
  useBand = config$useBand,
  useMean = config$useMean,
  useScalerSigma = config$useScalerSigma,
  useFixedSigma = config$useFixedSigma,
  verbose = TRUE)
phiUsed <- samplesCpp$phi
samplesCpp <- samplesCpp$llikxthetasigmaSamples
samplesCpp <- samplesCpp[,,1]
out <- samplesCpp[-1,1,drop=FALSE]
sigmaUsed <- tail(out[, 1], ncol(xsim[,-1]))
show(sigmaUsed)
# Final inference run: full-length HMC (config$n.iter iterations) on the fine
# (linfillspace-spaced) grid, with the warm-started sigma and phi supplied
# exogenously and the latent x initialized from the interpolated observations.
samplesCpp <- magi:::solveMagiRcpp(
  yFull = data.matrix(xsim[,-1]),
  odeModel = ptransmodel,
  tvecFull = xsim$time,
  sigmaExogenous = sigmaUsed,
  phiExogenous = phiUsed,
  xInitExogenous = exoxInit,
  thetaInitExogenous = matrix(numeric(0)),
  muExogenous = matrix(numeric(0)),
  dotmuExogenous = matrix(numeric(0)),
  priorTemperatureLevel = config$priorTemperature,
  priorTemperatureDeriv = config$priorTemperature,
  priorTemperatureObs = config$priorTemperatureObs,
  kernel = config$kernel,
  nstepsHmc = config$hmcSteps,
  burninRatioHmc = config$burninRatio,
  niterHmc = config$n.iter,
  stepSizeFactorHmc = config$stepSizeFactor,
  nEpoch = config$max.epoch,
  bandSize = config$bandsize,
  useFrequencyBasedPrior = config$useFrequencyBasedPrior,
  useBand = config$useBand,
  useMean = config$useMean,
  useScalerSigma = config$useScalerSigma,
  useFixedSigma = config$useFixedSigma,
  verbose = TRUE)
OursTimeUsed <- proc.time()[3] - OursStartTime  # wall-clock time of the MAGI runs
phiUsed <- samplesCpp$phi
samplesCpp <- samplesCpp$llikxthetasigmaSamples
samplesCpp <- samplesCpp[,,1]
out <- samplesCpp[-1,1,drop=FALSE]
# Each sample column stacks (log-likelihood, latent x, theta, sigma); the
# llikId/xId/thetaId/sigmaId ranges below encode that layout.
xCpp <- matrix(out[1:length(data.matrix(xsim[,-1])), 1], ncol=ncol(xsim[,-1]))
thetaCpp <- out[(length(xCpp)+1):(length(xCpp) + length(ptransmodel$thetaLowerBound)), 1]
sigmaCpp <- tail(out[, 1], ncol(xsim[,-1]))
#matplot(xsim$time, xCpp, type="l", add=TRUE)
llikId <- 1
xId <- (max(llikId)+1):(max(llikId)+length(data.matrix(xsim[,-1])))
thetaId <- (max(xId)+1):(max(xId)+length(ptransmodel$thetaLowerBound))
sigmaId <- (max(thetaId)+1):(max(thetaId)+ncol(xsim[,-1]))
burnin <- as.integer(config$n.iter*config$burninRatio)
# Re-assemble the post-burn-in samples into named components; xsampled has
# dimensions (iteration, time point, state component).
gpode <- list(theta=t(samplesCpp[thetaId, -(1:burnin)]),
              xsampled=array(t(samplesCpp[xId, -(1:burnin)]),
                             dim=c(config$n.iter-burnin, nrow(xsim), ncol(xsim)-1)),
              lglik=samplesCpp[llikId,-(1:burnin)],
              sigma = t(samplesCpp[sigmaId, -(1:burnin), drop=FALSE]))
# ODE derivatives implied by each posterior sample (used for diagnostics).
gpode$fode <- sapply(1:length(gpode$lglik), function(t)
  with(gpode, magi:::ptransmodelODE(theta[t,], xsampled[t,,])), simplify = "array")
gpode$fode <- aperm(gpode$fode, c(3,1,2))
dotxtrue = magi:::ptransmodelODE(pram.true$theta, data.matrix(xtrue[,-1]))
odemodel <- list(times=times, modelODE=modelODE, xtrue=xtrue)
# Diagnostic plots plus a full .rda archive of inputs/outputs for this
# seed / noise / discretization setting.
magi:::plotPostSamplesFlex(
  paste0(outDir, config$modelName,"-",config$seed,"-noise", config$noise[1], ".pdf"),
  xtrue, dotxtrue, xsim, gpode, pram.true, config, odemodel)
save(xtrue, dotxtrue, xsim, gpode, pram.true, config, odemodel, OursTimeUsed, phiUsed, file= paste0(outDir, config$modelName,"-",config$seed,"-noise", config$noise[1],"-fill", config$linfillspace, ".rda"))
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ledger-functions.R
\name{monthly}
\alias{monthly}
\title{Calculate 30 days average of the given vector}
\usage{
monthly(x)
}
\arguments{
\item{x}{a numeric vector}
}
\description{
Example of a function that can be applied to the
transaction vectors.
}
| /ledgerplots/man/monthly.Rd | permissive | paul-jewell/ledger-plots | R | false | true | 338 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ledger-functions.R
\name{monthly}
\alias{monthly}
\title{Calculate 30 days average of the given vector}
\usage{
monthly(x)
}
\arguments{
\item{x}{a numeric vector}
}
\description{
Example of a function that can be applied to the
transaction vectors.
}
|
## Coursera R Programming Assignment 2
## Author: Scott Elliott
## Completed April 26, 2015
## SUMMARY
## These functions are used to take a square invertible matrix object,
## calculate the inverse, then store the inverted matrix in a cache
## for quick retreival without having to recalculate the inverse.
## makeCacheMatrix function accepts a matrix object as an argument and
## uses it to create a special matrix object that can cache its inverse.
## Create a matrix wrapper that can cache its inverse.
##
## Returns a list of four accessor functions closed over the matrix `x` and
## its lazily computed inverse `inv`:
##   set(y)        -- replace the stored matrix and drop any cached inverse
##   get()         -- return the stored matrix
##   setinverse(v) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse, or NULL if not yet cached
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL means "not computed yet"
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # a new matrix invalidates the cached inverse
    },
    get = function() x,
    setinverse = function(inverse) inv <<- inverse,
    getinverse = function() inv
  )
}
## The cacheSolve function will first check to see if the inverse of the special
## matrix returned by makeCacheMatrix has been cached, if so simply returning
## that value.
## If it has not yet been cached, it will compute the inverse, cache it and
## return it.
## Return the inverse of the special matrix created by makeCacheMatrix,
## computing it with solve() on the first call and serving the cached copy
## (after a "getting cached data" message) on subsequent calls. Extra
## arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)  # compute the inverse from the stored matrix
  x$setinverse(inv)           # cache it for future calls
  inv
}
| /cachematrix.R | no_license | sdelliott/ProgrammingAssignment2 | R | false | false | 2,971 | r | ## Coursera R Programming Assignment 2
## Author: Scott Elliott
## Completed April 26, 2015
## SUMMARY
## These functions are used to take a square invertible matrix object,
## calculate the inverse, then store the inverted matrix in a cache
## for quick retreival without having to recalculate the inverse.
## makeCacheMatrix function accepts a matrix object as an argument and
## uses it to create a special matrix object that can cache its inverse.
## Constructor for a "cache matrix": a plain matrix augmented with a slot for
## memoizing its inverse. Returns a list of accessors:
##   set(y)        -- store a new matrix and invalidate the cached inverse
##   get()         -- retrieve the stored matrix
##   setinverse(v) -- cache a computed inverse
##   getinverse()  -- retrieve the cached inverse (NULL until set)
makeCacheMatrix <- function(x = matrix()) {
        i <- NULL  # cached inverse; NULL means "not computed yet"
        set <- function(y) {
                x <<- y
                i <<- NULL  # replacing the matrix invalidates the cache
        }
        get <- function() x
        setinverse <- function(inverse) i <<- inverse
        getinverse <- function() i
        ## Expose the four accessors by name.
        list(set = set, get = get,
             setinverse = setinverse,
             getinverse = getinverse)
}
## The cacheSolve function will first check to see if the inverse of the special
## matrix returned by makeCacheMatrix has been cached, if so simply returning
## that value.
## If it has not yet been cached, it will compute the inverse, cache it and
## return it.
## Compute (or retrieve) the inverse of a "cache matrix" built by
## makeCacheMatrix. If the inverse is already cached it is returned
## immediately (after a message); otherwise it is computed with solve(),
## cached for future calls, and returned. Extra arguments in ... are
## forwarded to solve().
cacheSolve <- function(x, ...) {
        i <- x$getinverse()  # previously cached inverse, if any
        if(!is.null(i)) {
                message("getting cached data")
                return(i)
        }
        data <- x$get()
        i <- solve(data, ...)  # compute the inverse of the stored matrix
        x$setinverse(i)        # cache for subsequent calls
        i                      # return the freshly computed inverse
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vector.R
\name{vec}
\alias{vec}
\alias{is_vec}
\alias{as_vec}
\title{Vector of vectors}
\usage{
vec(..., default_dim = 2)
is_vec(x)
as_vec(x)
}
\arguments{
\item{...}{Various input. See the Constructor section.}
\item{default_dim}{The dimensionality when constructing an empty vector}
\item{x}{A vector of vectors or an object to convert to it}
}
\value{
An \code{euclid_vector} vector
}
\description{
A geometrical vector is somewhat different from the concept of a vector in
programming hence the slightly confusing terminology. In geometry a vector is
a direction and a magnitude most often defined by a point in space where the
direction is defined as the direction from the origin to the point and the
magnitude is defined as the distance from the origin to the point.
}
\section{Constructors}{
\strong{2 dimensional vectors}
\itemize{
\item Providing a point will construct vectors pointing from the origin to
the points.
\item Providing two exact numeric vectors will construct vectors pointing to the
point defined by the coordinates given.
\item Providing a ray will construct vectors pointing in the same direction as
the ray
}
\strong{3 dimensional vectors}
\itemize{
\item Providing a point will construct vectors pointing from the origin to
the points.
\item Providing three exact numeric vectors will construct vectors pointing to
the point defined by the coordinates given.
\item Providing a ray will construct vectors pointing in the same direction as
the ray
}
}
\examples{
# Create vectors from points:
v1 <- vec(x = 1:5, y = 4:8)
# Vectors can be added and subtracted
v1[1] + v1[2]
v1[5] - v1[3]
# You can invert a vector by taking its negative
-v1
# As vectors can be added you can also use sum() and cumsum()
sum(v1)
cumsum(v1)
# Multiplying and dividing a vector by a numeric changes its magnitude
v1 * 10
v1 / 2.5
# Multiplying two vectors gives the inner product of the two
v1[1:2] * v1[3:4]
# Vectors can be converted to points, directions and transformation matrices
as_point(v1)
as_direction(v1)
as_affine_transformation(v1)
}
| /man/vec.Rd | permissive | KevCaz/euclid | R | false | true | 2,166 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vector.R
\name{vec}
\alias{vec}
\alias{is_vec}
\alias{as_vec}
\title{Vector of vectors}
\usage{
vec(..., default_dim = 2)
is_vec(x)
as_vec(x)
}
\arguments{
\item{...}{Various input. See the Constructor section.}
\item{default_dim}{The dimensionality when constructing an empty vector}
\item{x}{A vector of vectors or an object to convert to it}
}
\value{
An \code{euclid_vector} vector
}
\description{
A geometrical vector is somewhat different from the concept of a vector in
programming hence the slightly confusing terminology. In geometry a vector is
a direction and a magnitude most often defined by a point in space where the
direction is defined as the direction from the origin to the point and the
magnitude is defined as the distance from the origin to the point.
}
\section{Constructors}{
\strong{2 dimensional vectors}
\itemize{
\item Providing a point will construct vectors pointing from the origin to
the points.
\item Providing two exact numeric vectors will construct vectors pointing to the
point defined by the coordinates given.
\item Providing a ray will construct vectors pointing in the same direction as
the ray
}
\strong{3 dimensional vectors}
\itemize{
\item Providing a point will construct vectors pointing from the origin to
the points.
\item Providing three exact numeric vectors will construct vectors pointing to
the point defined by the coordinates given.
\item Providing a ray will construct vectors pointing in the same direction as
the ray
}
}
\examples{
# Create vectors from points:
v1 <- vec(x = 1:5, y = 4:8)
# Vectors can be added and subtracted
v1[1] + v1[2]
v1[5] - v1[3]
# You can invert a vector by taking its negative
-v1
# As vectors can be added you can also use sum() and cumsum()
sum(v1)
cumsum(v1)
# Multiplying and dividing a vector by a numeric changes its magnitude
v1 * 10
v1 / 2.5
# Multiplying two vectors gives the inner product of the two
v1[1:2] * v1[3:4]
# Vectors can be converted to points, directions and transformation matrices
as_point(v1)
as_direction(v1)
as_affine_transformation(v1)
}
|
#' Set and check parameter settings of estimate_R
#'
#' This function defines settings for estimate_R It takes a list of named
#' items as input, set defaults where arguments are
#' missing, and return a list of settings.
#'
#' @param ... Acceptable arguments for ... are:
#'
#' \describe{
#'
#' \item{t_start}{Vector of positive integers giving the starting times of each
#' window over which the reproduction number will be estimated. These must be in
#' ascending order, and so that for all \code{i}, \code{t_start[i]<=t_end[i]}.
#' t_start[1] should be strictly after the first day with non null incidence.}
#'
#' \item{t_end}{Vector of positive integers giving the ending times of each
#' window over which the reproduction number will be estimated. These must be
#' in ascending order, and so that for all \code{i},
#' \code{t_start[i]<=t_end[i]}.}
#'
#' \item{n1}{For method "uncertain_si" and "si_from_data"; positive integer
#' giving the size of the sample of SI distributions to be drawn (see details).}
#'
#' \item{n2}{For methods "uncertain_si", "si_from_data" and "si_from_sample";
#' positive integer giving the size of the sample drawn from the posterior
#' distribution of R for each serial interval distribution considered (see
#' details).}
#'
#' \item{mean_si}{For method "parametric_si" and "uncertain_si" ; positive real
#' giving the mean serial interval (method "parametric_si") or the average mean
#' serial interval (method "uncertain_si", see details).}
#'
#' \item{std_si}{For method "parametric_si" and "uncertain_si" ; non negative
#' real giving the standard deviation of the serial interval
#' (method "parametric_si") or the average standard deviation of the serial
#' interval (method "uncertain_si", see details).}
#'
#' \item{std_mean_si}{For method "uncertain_si" ; standard deviation of the
#' distribution from which mean serial intervals are drawn (see details).}
#'
#' \item{min_mean_si}{For method "uncertain_si" ; lower bound of the
#' distribution from which mean serial intervals are drawn (see details).}
#'
#' \item{max_mean_si}{For method "uncertain_si" ; upper bound of the
#' distribution from which mean serial intervals are drawn (see details).}
#'
#' \item{std_std_si}{For method "uncertain_si" ; standard deviation of the
#' distribution from which standard deviations of the serial interval are drawn
#' (see details).}
#'
#' \item{min_std_si}{For method "uncertain_si" ; lower bound of the distribution
#' from which standard deviations of the serial interval are drawn (see
#' details).}
#'
#' \item{max_std_si}{For method "uncertain_si" ; upper bound of the distribution
#' from which standard deviations of the serial interval are drawn (see
#' details).}
#'
#' \item{si_distr}{For method "non_parametric_si" ; vector of probabilities
#' giving the discrete distribution of the serial interval, starting with
#' \code{si_distr[1]} (probability that the serial interval is zero), which
#' should be zero.}
#'
#' \item{si_parametric_distr}{For method "si_from_data" ; the parametric
#' distribution to use when estimating the serial interval from data on dates of
#' symptoms of pairs of infector/infected individuals (see details).
#' Should be one of "G" (Gamma), "W" (Weibull), "L" (Lognormal), "off1G" (Gamma
#' shifted by 1), "off1W" (Weibull shifted by 1), or "off1L" (Lognormal shifted
#' by 1).}
#'
#' \item{mcmc_control}{An object of class \code{estimate_R_mcmc_control}, as
#' returned by function \code{make_mcmc_control}. }
#'
#' \item{seed}{An optional integer used as the seed for the random number
#' generator at the start of the function (then potentially reset within the
#' MCMC for method \code{si_from_data}); useful to get reproducible results.}
#'
#' \item{mean_prior}{A positive number giving the mean of the common prior
#' distribution for all reproduction numbers (see details).}
#'
#' \item{std_prior}{A positive number giving the standard deviation of the
#' common prior distribution for all reproduction numbers (see details).}
#'
#' \item{cv_posterior}{A positive number giving the aimed posterior coefficient
#' of variation (see details).}
#'
#' }
#' @param incid As in function\code{estimate_R}.
#' @param method As in function\code{estimate_R}.
#'
#' @details
#' Analytical estimates of the reproduction number for an epidemic over
#' predefined time windows can be obtained using function \code{estimate_R},
#' for a given discrete distribution of the serial interval. \code{make_config}
#' allows to generate a configuration specifying the way the estimation will
#' be performed.
#'
#' The more incident cases are observed over a time window, the smallest the
#' posterior coefficient of variation (CV, ratio of standard deviation over
#' mean) of the reproduction number.
#' An aimed CV can be specified in the argument \code{cv_posterior}
#' (default is \code{0.3}), and a warning will be produced if the incidence
#' within one of the time windows considered is too low to get this CV.
#'
#' The methods vary in the way the serial interval distribution is specified.
#'
#' In short there are five methods to specify the serial interval distribution
#' (see below for details on each method).
#' In the first two methods, a unique serial interval distribution is
#' considered, whereas in the last three, a range of serial interval
#' distributions are integrated over:
#' \itemize{
#' \item{In method "non_parametric_si" the user specifies the discrete
#' distribution of the serial interval}
#' \item{In method "parametric_si" the user specifies the mean and sd of the
#' serial interval}
#' \item{In method "uncertain_si" the mean and sd of the serial interval are
#' each drawn from truncated normal distributions, with parameters specified by
#' the user}
#' \item{In method "si_from_data", the serial interval distribution is directly
#' estimated, using MCMC, from interval censored exposure data, with data
#' provided by the user together with a choice of parametric distribution for
#' the serial interval}
#' \item{In method "si_from_sample", the user directly provides the sample of
#' serial interval distribution to use for estimation of R. This can be a useful
#' alternative to the previous method, where the MCMC estimation of the serial
#' interval distribution could be run once, and the same estimated SI
#' distribution then used in estimate_R in different contexts, e.g. with
#' different time windows, hence avoiding to rerun the MCMC everytime
#' estimate_R is called.}
#' }
#'
#' ----------------------- \code{method "non_parametric_si"} -------------------
#'
#' The discrete distribution of the serial interval is directly specified in the
#' argument \code{si_distr}.
#'
#' ----------------------- \code{method "parametric_si"} -----------------------
#'
#' The mean and standard deviation of the continuous distribution of the serial
#' interval are given in the arguments \code{mean_si} and \code{std_si}.
#' The discrete distribution of the serial interval is derived automatically
#' using \code{\link{discr_si}}.
#'
#' ----------------------- \code{method "uncertain_si"} -----------------------
#'
#' \code{Method "uncertain_si"} allows accounting for uncertainty on the serial
#' interval distribution as described in Cori et al. AJE 2013.
#' We allow the mean \eqn{\mu} and standard deviation \eqn{\sigma} of the serial
#' interval to vary according to truncated normal distributions.
#' We sample \code{n1} pairs of mean and standard deviations,
#' \eqn{(\mu^{(1)},\sigma^{(1)}),...,(\mu^{(n_2)},\sigma^{(n_2)})}, by first
#' sampling the mean \eqn{\mu^{(k)}}
#' from its truncated normal distribution (with mean \code{mean_si}, standard
#' deviation \code{std_mean_si}, minimum \code{min_mean_si} and maximum
#' \code{max_mean_si}),
#' and then sampling the standard deviation \eqn{\sigma^{(k)}} from its
#' truncated normal distribution
#' (with mean \code{std_si}, standard deviation \code{std_std_si}, minimum
#' \code{min_std_si} and maximum \code{max_std_si}), but imposing that
#' \eqn{\sigma^{(k)}<\mu^{(k)}}.
#' This constraint ensures that the Gamma probability density function of the
#' serial interval is null at \eqn{t=0}.
#' Warnings are produced when the truncated normal distributions are not
#' symmetric around the mean.
#' For each pair \eqn{(\mu^{(k)},\sigma^{(k)})}, we then draw a sample of size
#' \code{n2} in the posterior distribution of the reproduction number over each
#' time window, conditionally on this serial interval distribution.
#' After pooling, a sample of size \eqn{\code{n1}\times\code{n2}} of the joint
#' posterior distribution of the reproduction number over each time window is
#' obtained.
#' The posterior mean, standard deviation, and 0.025, 0.05, 0.25, 0.5, 0.75,
#' 0.95, 0.975 quantiles of the reproduction number for each time window are
#' obtained from this sample.
#'
#' ----------------------- \code{method "si_from_data"} -----------------------
#'
#' \code{Method "si_from_data"} allows accounting for uncertainty on the serial
#' interval distribution.
#' Unlike method "uncertain_si", where we arbitrarily vary the mean and std of
#' the SI in truncated normal distributions,
#' here, the scope of serial interval distributions considered is directly
#' informed by data
#' on the (potentially censored) dates of symptoms of pairs of infector/infected
#' individuals.
#' This data, specified in argument \code{si_data}, should be a dataframe with 5
#' columns:
#' \itemize{
#' \item{EL: the lower bound of the symptom onset date of the infector (given as
#' an integer)}
#' \item{ER: the upper bound of the symptom onset date of the infector (given as
#' an integer). Should be such that ER>=EL. If the dates are known exactly use
#' ER = EL}
#' \item{SL: the lower bound of the symptom onset date of the infected
#' individual (given as an integer)}
#' \item{SR: the upper bound of the symptom onset date of the infected
#' individual (given as an integer). Should be such that SR>=SL.
#' If the dates are known exactly use SR = SL}
#' \item{type (optional): can have entries 0, 1, or 2, corresponding to doubly
#' interval-censored, single interval-censored or exact observations,
#' respectively, see Reich et al. Statist. Med. 2009. If not specified, this
#' will be automatically computed from the dates}
#' }
#' Assuming a given parametric distribution for the serial interval distribution
#' (specified in si_parametric_distr),
#' the posterior distribution of the serial interval is estimated directly from
#' these data using MCMC methods implemented in the package
#' \code{coarsedatatools}.
#' The argument \code{mcmc_control} is a list of characteristics which control
#' the MCMC.
#' The MCMC is run for a total number of iterations of
#' \code{mcmc_control$burnin + n1*mcmc_control$thin};
#' but the output is only recorded after the burnin, and only 1 in every
#' \code{mcmc_control$thin} iterations,
#' so that the posterior sample size is \code{n1}.
#' For each element in the posterior sample of serial interval distribution,
#' we then draw a sample of size \code{n2} in the posterior distribution of the
#' reproduction number over each time window,
#' conditionally on this serial interval distribution.
#' After pooling, a sample of size \eqn{\code{n1}\times\code{n2}} of the joint
#' posterior distribution of
#' the reproduction number over each time window is obtained.
#' The posterior mean, standard deviation, and 0.025, 0.05, 0.25, 0.5, 0.75,
#' 0.95, 0.975 quantiles of the reproduction number for each time window are
#' obtained from this sample.
#'
#' ----------------------- \code{method "si_from_sample"} ----------------------
#'
#' \code{Method "si_from_sample"} also allows accounting for uncertainty on the
#' serial interval distribution.
#' Unlike methods "uncertain_si" and "si_from_data", the user directly provides
#' (in argument \code{si_sample}) a sample of serial interval distribution to be
#' explored.
#'
#'
#' @return An object of class \code{estimate_R_config} with components
#' t_start, t_end, n1, n2, mean_si, std_si,
#' std_mean_si, min_mean_si, max_mean_si, std_std_si, min_std_si, max_std_si,
#' si_distr, si_parametric_distr, mcmc_control, seed, mean_prior, std_prior,
#' cv_posterior, which can be used as an argument of function \code{estimate_R}.
#' @export
#'
#' @examples
#' \dontrun{
#' ## Note the following examples use an MCMC routine
#' ## to estimate the serial interval distribution from data,
#' ## so they may take a few minutes to run
#'
#' ## load data on rotavirus
#' data("MockRotavirus")
#'
#' ## estimate the reproduction number (method "si_from_data")
#' ## we are not specifying the time windows, so by defaults this will estimate
#' ## R on sliding weekly windows
#' incid <- MockRotavirus$incidence
#' method <- "si_from_data"
#' config <- make_config(incid = incid,
#' method = method,
#' list(si_parametric_distr = "G",
#' mcmc_control = make_mcmc_control(burnin = 1000,
#' thin = 10, seed = 1),
#' n1 = 500,
#' n2 = 50,
#' seed = 2))
#'
#' R_si_from_data <- estimate_R(incid,
#' method = method,
#' si_data = MockRotavirus$si_data,
#' config = config)
#' plot(R_si_from_data)
#'
#' ## you can also create the config straight within the estimate_R call,
#' ## in that case incid and method are automatically used from the estimate_R
#' ## arguments:
#' R_si_from_data <- estimate_R(incid,
#' method = method,
#' si_data = MockRotavirus$si_data,
#' config = make_config(
#' list(si_parametric_distr = "G",
#' mcmc_control = make_mcmc_control(burnin = 1000,
#' thin = 10, seed = 1),
#' n1 = 500,
#' n2 = 50,
#' seed = 2)))
#' plot(R_si_from_data)
#' }
make_config <- function(..., incid = NULL,
                        method = c("non_parametric_si", "parametric_si",
                                   "uncertain_si", "si_from_data",
                                   "si_from_sample")) {
  ## Gather the user-supplied settings; a single list argument is also
  ## accepted, i.e. make_config(list(...)) behaves like make_config(...).
  ## NOTE(review): `method` is accepted for interface compatibility but is
  ## not used inside this function body.
  config <- list(...)
  if (length(config) == 1L && is.list(config[[1]])) {
    config <- config[[1]]
  }
  ## SET DEFAULTS
  defaults <- list(t_start = NULL,
                   t_end = NULL,
                   n1 = 500,
                   n2 = 50,
                   mean_si = NULL,
                   std_si = NULL,
                   std_mean_si = NULL,
                   min_mean_si = NULL,
                   max_mean_si = NULL,
                   std_std_si = NULL,
                   min_std_si = NULL,
                   max_std_si = NULL,
                   si_distr = NULL,
                   si_parametric_distr = NULL,
                   mcmc_control = make_mcmc_control(),
                   seed = NULL,
                   mean_prior = 5,
                   std_prior = 5,
                   cv_posterior = 0.3)
  ## MODIFY CONFIG WITH ARGUMENTS ##
  config <- modify_defaults(defaults, config)
  ## checking and processing incid
  if (!is.null(incid)) {
    incid <- process_I(incid)
    ## number of time steps in the incidence data; renamed from `T`, which
    ## masked the built-in shorthand for TRUE
    n_dates <- nrow(incid)
    ## filling in / checking t_start and t_end
    if (is.null(config$t_start) || is.null(config$t_end)) {
      msg <- "Default config will estimate R on weekly sliding windows.
    To change this change the t_start and t_end arguments. "
      message(msg)
      config$t_start <- seq(2, n_dates - 6)
      config$t_end <- seq(8, n_dates)
    } else {
      check_times(config$t_start, config$t_end, n_dates)
    }
  }
  class(config) <- "estimate_R_config"
  config
}
| /R/make_config.R | no_license | mrc-ide/EpiEstim | R | false | false | 15,929 | r | #' Set and check parameter settings of estimate_R
#'
#' This function defines settings for estimate_R It takes a list of named
#' items as input, set defaults where arguments are
#' missing, and return a list of settings.
#'
#' @param ... Acceptable arguments for ... are:
#'
#' \describe{
#'
#' \item{t_start}{Vector of positive integers giving the starting times of each
#' window over which the reproduction number will be estimated. These must be in
#' ascending order, and so that for all \code{i}, \code{t_start[i]<=t_end[i]}.
#' t_start[1] should be strictly after the first day with non null incidence.}
#'
#' \item{t_end}{Vector of positive integers giving the ending times of each
#' window over which the reproduction number will be estimated. These must be
#' in ascending order, and so that for all \code{i},
#' \code{t_start[i]<=t_end[i]}.}
#'
#' \item{n1}{For method "uncertain_si" and "si_from_data"; positive integer
#' giving the size of the sample of SI distributions to be drawn (see details).}
#'
#' \item{n2}{For methods "uncertain_si", "si_from_data" and "si_from_sample";
#' positive integer giving the size of the sample drawn from the posterior
#' distribution of R for each serial interval distribution considered (see
#' details).}
#'
#' \item{mean_si}{For method "parametric_si" and "uncertain_si" ; positive real
#' giving the mean serial interval (method "parametric_si") or the average mean
#' serial interval (method "uncertain_si", see details).}
#'
#' \item{std_si}{For method "parametric_si" and "uncertain_si" ; non negative
#' real giving the standard deviation of the serial interval
#' (method "parametric_si") or the average standard deviation of the serial
#' interval (method "uncertain_si", see details).}
#'
#' \item{std_mean_si}{For method "uncertain_si" ; standard deviation of the
#' distribution from which mean serial intervals are drawn (see details).}
#'
#' \item{min_mean_si}{For method "uncertain_si" ; lower bound of the
#' distribution from which mean serial intervals are drawn (see details).}
#'
#' \item{max_mean_si}{For method "uncertain_si" ; upper bound of the
#' distribution from which mean serial intervals are drawn (see details).}
#'
#' \item{std_std_si}{For method "uncertain_si" ; standard deviation of the
#' distribution from which standard deviations of the serial interval are drawn
#' (see details).}
#'
#' \item{min_std_si}{For method "uncertain_si" ; lower bound of the distribution
#' from which standard deviations of the serial interval are drawn (see
#' details).}
#'
#' \item{max_std_si}{For method "uncertain_si" ; upper bound of the distribution
#' from which standard deviations of the serial interval are drawn (see
#' details).}
#'
#' \item{si_distr}{For method "non_parametric_si" ; vector of probabilities
#' giving the discrete distribution of the serial interval, starting with
#' \code{si_distr[1]} (probability that the serial interval is zero), which
#' should be zero.}
#'
#' \item{si_parametric_distr}{For method "si_from_data" ; the parametric
#' distribution to use when estimating the serial interval from data on dates of
#' symptoms of pairs of infector/infected individuals (see details).
#' Should be one of "G" (Gamma), "W" (Weibull), "L" (Lognormal), "off1G" (Gamma
#' shifted by 1), "off1W" (Weibull shifted by 1), or "off1L" (Lognormal shifted
#' by 1).}
#'
#' \item{mcmc_control}{An object of class \code{estimate_R_mcmc_control}, as
#' returned by function \code{make_mcmc_control}. }
#'
#' \item{seed}{An optional integer used as the seed for the random number
#' generator at the start of the function (then potentially reset within the
#' MCMC for method \code{si_from_data}); useful to get reproducible results.}
#'
#' \item{mean_prior}{A positive number giving the mean of the common prior
#' distribution for all reproduction numbers (see details).}
#'
#' \item{std_prior}{A positive number giving the standard deviation of the
#' common prior distribution for all reproduction numbers (see details).}
#'
#' \item{cv_posterior}{A positive number giving the aimed posterior coefficient
#' of variation (see details).}
#'
#' }
#' @param incid As in function\code{estimate_R}.
#' @param method As in function\code{estimate_R}.
#'
#' @details
#' Analytical estimates of the reproduction number for an epidemic over
#' predefined time windows can be obtained using function \code{estimate_R},
#' for a given discrete distribution of the serial interval. \code{make_config}
#' allows to generate a configuration specifying the way the estimation will
#' be performed.
#'
#' The more incident cases are observed over a time window, the smallest the
#' posterior coefficient of variation (CV, ratio of standard deviation over
#' mean) of the reproduction number.
#' An aimed CV can be specified in the argument \code{cv_posterior}
#' (default is \code{0.3}), and a warning will be produced if the incidence
#' within one of the time windows considered is too low to get this CV.
#'
#' The methods vary in the way the serial interval distribution is specified.
#'
#' In short there are five methods to specify the serial interval distribution
#' (see below for details on each method).
#' In the first two methods, a unique serial interval distribution is
#' considered, whereas in the last three, a range of serial interval
#' distributions are integrated over:
#' \itemize{
#' \item{In method "non_parametric_si" the user specifies the discrete
#' distribution of the serial interval}
#' \item{In method "parametric_si" the user specifies the mean and sd of the
#' serial interval}
#' \item{In method "uncertain_si" the mean and sd of the serial interval are
#' each drawn from truncated normal distributions, with parameters specified by
#' the user}
#' \item{In method "si_from_data", the serial interval distribution is directly
#' estimated, using MCMC, from interval censored exposure data, with data
#' provided by the user together with a choice of parametric distribution for
#' the serial interval}
#' \item{In method "si_from_sample", the user directly provides the sample of
#' serial interval distribution to use for estimation of R. This can be a useful
#' alternative to the previous method, where the MCMC estimation of the serial
#' interval distribution could be run once, and the same estimated SI
#' distribution then used in estimate_R in different contexts, e.g. with
#' different time windows, hence avoiding rerunning the MCMC every time
#' estimate_R is called.}
#' }
#'
#' ----------------------- \code{method "non_parametric_si"} -------------------
#'
#' The discrete distribution of the serial interval is directly specified in the
#' argument \code{si_distr}.
#'
#' ----------------------- \code{method "parametric_si"} -----------------------
#'
#' The mean and standard deviation of the continuous distribution of the serial
#' interval are given in the arguments \code{mean_si} and \code{std_si}.
#' The discrete distribution of the serial interval is derived automatically
#' using \code{\link{discr_si}}.
#'
#' ----------------------- \code{method "uncertain_si"} -----------------------
#'
#' \code{Method "uncertain_si"} allows accounting for uncertainty on the serial
#' interval distribution as described in Cori et al. AJE 2013.
#' We allow the mean \eqn{\mu} and standard deviation \eqn{\sigma} of the serial
#' interval to vary according to truncated normal distributions.
#' We sample \code{n1} pairs of mean and standard deviations,
#' \eqn{(\mu^{(1)},\sigma^{(1)}),...,(\mu^{(n_2)},\sigma^{(n_2)})}, by first
#' sampling the mean \eqn{\mu^{(k)}}
#' from its truncated normal distribution (with mean \code{mean_si}, standard
#' deviation \code{std_mean_si}, minimum \code{min_mean_si} and maximum
#' \code{max_mean_si}),
#' and then sampling the standard deviation \eqn{\sigma^{(k)}} from its
#' truncated normal distribution
#' (with mean \code{std_si}, standard deviation \code{std_std_si}, minimum
#' \code{min_std_si} and maximum \code{max_std_si}), but imposing that
#' \eqn{\sigma^{(k)}<\mu^{(k)}}.
#' This constraint ensures that the Gamma probability density function of the
#' serial interval is null at \eqn{t=0}.
#' Warnings are produced when the truncated normal distributions are not
#' symmetric around the mean.
#' For each pair \eqn{(\mu^{(k)},\sigma^{(k)})}, we then draw a sample of size
#' \code{n2} in the posterior distribution of the reproduction number over each
#' time window, conditionally on this serial interval distribution.
#' After pooling, a sample of size \eqn{\code{n1}\times\code{n2}} of the joint
#' posterior distribution of the reproduction number over each time window is
#' obtained.
#' The posterior mean, standard deviation, and 0.025, 0.05, 0.25, 0.5, 0.75,
#' 0.95, 0.975 quantiles of the reproduction number for each time window are
#' obtained from this sample.
#'
#' ----------------------- \code{method "si_from_data"} -----------------------
#'
#' \code{Method "si_from_data"} allows accounting for uncertainty on the serial
#' interval distribution.
#' Unlike method "uncertain_si", where we arbitrarily vary the mean and std of
#' the SI in truncated normal distributions,
#' here, the scope of serial interval distributions considered is directly
#' informed by data
#' on the (potentially censored) dates of symptoms of pairs of infector/infected
#' individuals.
#' This data, specified in argument \code{si_data}, should be a dataframe with 5
#' columns:
#' \itemize{
#' \item{EL: the lower bound of the symptom onset date of the infector (given as
#' an integer)}
#' \item{ER: the upper bound of the symptom onset date of the infector (given as
#' an integer). Should be such that ER>=EL. If the dates are known exactly use
#' ER = EL}
#' \item{SL: the lower bound of the symptom onset date of the infected
#' individual (given as an integer)}
#' \item{SR: the upper bound of the symptom onset date of the infected
#' individual (given as an integer). Should be such that SR>=SL.
#' If the dates are known exactly use SR = SL}
#' \item{type (optional): can have entries 0, 1, or 2, corresponding to doubly
#' interval-censored, single interval-censored or exact observations,
#' respectively, see Reich et al. Statist. Med. 2009. If not specified, this
#' will be automatically computed from the dates}
#' }
#' Assuming a given parametric distribution for the serial interval distribution
#' (specified in si_parametric_distr),
#' the posterior distribution of the serial interval is estimated directly from
#' these data using MCMC methods implemented in the package
#' \code{coarsedatatools}.
#' The argument \code{mcmc_control} is a list of characteristics which control
#' the MCMC.
#' The MCMC is run for a total number of iterations of
#' \code{mcmc_control$burnin + n1*mcmc_control$thin};
#' but the output is only recorded after the burnin, and only 1 in every
#' \code{mcmc_control$thin} iterations,
#' so that the posterior sample size is \code{n1}.
#' For each element in the posterior sample of serial interval distribution,
#' we then draw a sample of size \code{n2} in the posterior distribution of the
#' reproduction number over each time window,
#' conditionally on this serial interval distribution.
#' After pooling, a sample of size \eqn{\code{n1}\times\code{n2}} of the joint
#' posterior distribution of
#' the reproduction number over each time window is obtained.
#' The posterior mean, standard deviation, and 0.025, 0.05, 0.25, 0.5, 0.75,
#' 0.95, 0.975 quantiles of the reproduction number for each time window are
#' obtained from this sample.
#'
#' ----------------------- \code{method "si_from_sample"} ----------------------
#'
#' \code{Method "si_from_sample"} also allows accounting for uncertainty on the
#' serial interval distribution.
#' Unlike methods "uncertain_si" and "si_from_data", the user directly provides
#' (in argument \code{si_sample}) a sample of serial interval distribution to be
#' explored.
#'
#'
#' @return An object of class \code{estimate_R_config} with components
#' t_start, t_end, n1, n2, mean_si, std_si,
#' std_mean_si, min_mean_si, max_mean_si, std_std_si, min_std_si, max_std_si,
#' si_distr, si_parametric_distr, mcmc_control, seed, mean_prior, std_prior,
#' cv_posterior, which can be used as an argument of function \code{estimate_R}.
#' @export
#'
#' @examples
#' \dontrun{
#' ## Note the following examples use an MCMC routine
#' ## to estimate the serial interval distribution from data,
#' ## so they may take a few minutes to run
#'
#' ## load data on rotavirus
#' data("MockRotavirus")
#'
#' ## estimate the reproduction number (method "si_from_data")
#' ## we are not specifying the time windows, so by default this will estimate
#' ## R on sliding weekly windows
#' incid <- MockRotavirus$incidence
#' method <- "si_from_data"
#' config <- make_config(incid = incid,
#' method = method,
#' list(si_parametric_distr = "G",
#' mcmc_control = make_mcmc_control(burnin = 1000,
#' thin = 10, seed = 1),
#' n1 = 500,
#' n2 = 50,
#' seed = 2))
#'
#' R_si_from_data <- estimate_R(incid,
#' method = method,
#' si_data = MockRotavirus$si_data,
#' config = config)
#' plot(R_si_from_data)
#'
#' ## you can also create the config straight within the estimate_R call,
#' ## in that case incid and method are automatically used from the estimate_R
#' ## arguments:
#' R_si_from_data <- estimate_R(incid,
#' method = method,
#' si_data = MockRotavirus$si_data,
#' config = make_config(
#' list(si_parametric_distr = "G",
#' mcmc_control = make_mcmc_control(burnin = 1000,
#' thin = 10, seed = 1),
#' n1 = 500,
#' n2 = 50,
#' seed = 2)))
#' plot(R_si_from_data)
#' }
make_config <- function(..., incid = NULL,
                        method = c("non_parametric_si", "parametric_si",
                                   "uncertain_si", "si_from_data",
                                   "si_from_sample")) {
  ## Collect user settings; both make_config(a = 1, ...) and
  ## make_config(list(a = 1, ...)) are supported.
  ## NOTE(review): `method` is part of the documented interface but is not
  ## referenced in this function body.
  config <- list(...)
  if (length(config) == 1L && is.list(config[[1]])) {
    config <- config[[1]]
  }
  ## SET DEFAULTS
  defaults <- list(t_start = NULL,
                   t_end = NULL,
                   n1 = 500,
                   n2 = 50,
                   mean_si = NULL,
                   std_si = NULL,
                   std_mean_si = NULL,
                   min_mean_si = NULL,
                   max_mean_si = NULL,
                   std_std_si = NULL,
                   min_std_si = NULL,
                   max_std_si = NULL,
                   si_distr = NULL,
                   si_parametric_distr = NULL,
                   mcmc_control = make_mcmc_control(),
                   seed = NULL,
                   mean_prior = 5,
                   std_prior = 5,
                   cv_posterior = 0.3)
  ## MODIFY CONFIG WITH ARGUMENTS ##
  config <- modify_defaults(defaults, config)
  ## checking and processing incid
  if (!is.null(incid)) {
    incid <- process_I(incid)
    ## length of the incidence time series; renamed from `T`, which shadows
    ## the TRUE shorthand and is an R anti-pattern
    n_time_steps <- nrow(incid)
    ## filling in / checking t_start and t_end
    if (is.null(config$t_start) || is.null(config$t_end)) {
      msg <- "Default config will estimate R on weekly sliding windows.
    To change this change the t_start and t_end arguments. "
      message(msg)
      config$t_start <- seq(2, n_time_steps - 6)
      config$t_end <- seq(8, n_time_steps)
    } else {
      check_times(config$t_start, config$t_end, n_time_steps)
    }
  }
  class(config) <- "estimate_R_config"
  return(config)
}
|
#############################################
# Script to process MNgage data             #
# from Minnesota Climate office             #
# Kirk R. Wythers                           #
# 2017.01.12                                #
#############################################
# Process MNgage precipitation data from one-column-per-day wide format
# to long format.
# NOTE(review): rm(list = ls()) clears the user's workspace and is
# discouraged in scripts; kept here to preserve the original behaviour.
rm(list=ls())
library(tidyverse)        # read_csv, dplyr verbs, tidyr::unite
library(data.table)       # melt() and := by-reference updates
library(splitstackshape)  # cSplit() for splitting delimited columns
library(lubridate)        # ymd() for building Date values
# Input path is hard-coded; assumes the raw file lives under
# ~/projects/mngage -- TODO confirm before running elsewhere.
mn_precip <- read_csv("~/projects/mngage/mnprecip1970-2016.csv")
# convert to data.table (a row index for joining is added further below)
mnp <- as.data.table(mn_precip)
# Reshape the daily precipitation values from wide (one column per day of
# the month, named "1".."31") to long format. The 31 measure columns are
# generated with as.character(1:31) instead of being listed by hand, which
# is equivalent to the original c("1", "2", ..., "31").
mnp.m1 <- melt(mnp,
               id.vars = c("county_township_range_section_station_owner",
                           "latitude",
                           "longitude",
                           "easting",
                           "northing",
                           "sponsor",
                           "time",
                           "yyyy_mo"),
               measure.vars = as.character(1:31))
# Reshape the per-day quality-flag columns ("f1".."f31") to long format,
# mirroring the value melt above. paste0("f", 1:31) generates the same
# column names as the original hand-written c("f1", ..., "f31").
mnp.m2 <- melt(mnp,
               id.vars = c("county_township_range_section_station_owner",
                           "latitude",
                           "longitude",
                           "easting",
                           "northing",
                           "sponsor",
                           "time",
                           "yyyy_mo"),
               measure.vars = paste0("f", 1:31))
# Add a row-number-based index to each table so the value rows and flag
# rows can be re-joined one-to-one.
mnp.m1[,index:=.GRP, by = row.names(mnp.m1)]
mnp.m2[,index:=.GRP, by = row.names(mnp.m2)]
# rename columns from first data table; positions 9/10 are the melted
# variable/value columns (position-dependent -- keep in sync with the
# melt calls above)
names(mnp.m1) # view the column names
names(mnp.m1)[9]<-"day"
names(mnp.m1)[10]<-"precip"
# rename columns from second data table
names(mnp.m2) # view the column names
names(mnp.m2)[10]<-"flag"
# remove un-needed/duplicate columns from second data table
mnp.m2[ , c("county_township_range_section_station_owner", "latitude",
            "longitude", "easting", "northing", "sponsor",
            "time", "yyyy_mo", "variable") := NULL]
# join value rows to their matching flag rows on the row index
mnp.m3 <- mnp.m1[mnp.m2, on = "index"]
# split "county township range section station owner" column on white space
mnp.m4 <- cSplit(mnp.m3, "county_township_range_section_station_owner", sep = " ", direction = "wide", fixed = TRUE,
                 drop = TRUE, stripWhite = TRUE, makeEqual = NULL, type.convert = TRUE)
names(mnp.m4) # look at column names
# positional renames: assumes cSplit produced the new columns at
# positions 12-17 -- TODO confirm against the actual input data
names(mnp.m4)[12] <- "county"
names(mnp.m4)[13] <- "township"
names(mnp.m4)[14] <- "range"
names(mnp.m4)[15] <- "section"
names(mnp.m4)[16] <- "station"
names(mnp.m4)[17] <- "owner"
# drop column 18 -- presumably a spare fragment from the split; TODO confirm
mnp.m4[, 18] <- NULL
# add new column based on yyyy_mo, split into yyyy and mo, concatenate
# yyyy, mo, and day into a date column, remove extraneous columns, re-order
mnp.m4[, "year_month" := yyyy_mo]
mnp.m5 <- cSplit(mnp.m4, "year_month", sep = "_", direction = "wide", fixed = TRUE,
                 drop = TRUE, stripWhite = TRUE, makeEqual = NULL, type.convert = TRUE)
mnp.m5$date <- with(mnp.m5, ymd(sprintf('%04d%02d%02d', year_month_1, year_month_2, day)))
# mnp.m5[ , c("year_month_1", "year_month_2") := NULL]
names(mnp.m5)[18] <- "year"
names(mnp.m5)[19] <- "month"
# NOTE(review): na.omit() drops elements from a vector, so this assignment
# only succeeds if owner contains no NAs (otherwise the lengths differ) --
# TODO confirm whether the intent was to drop rows or replace NAs.
mnp.m5$owner <- mnp.m5$owner %>% na.omit()
mnp.m6 <- unite(mnp.m5, "station_owner", station, owner, sep = " ")
# keep/reorder columns by position -- fragile; depends on every step above
mnp.m7 <- mnp.m6[, c(10, 19, 17:18, 8, 16, 1:2, 5:6, 9, 11)]
# write unique stations (keyed on coordinates) to an object with "distinct"
mnp.m8 <- distinct(mnp.m7, latitude, longitude, .keep_all = TRUE)
stations <- mnp.m8[, c(6:9)]
# change longitude sign (presumably to make western longitudes negative --
# TODO confirm against the source data convention)
stations <- transmute(stations, station_owner = station_owner,
                      latitude = latitude,
                      longitude = 0 - longitude,
                      sponsor = sponsor)
# write output to .csv
write_csv(mnp.m7,"~/projects/mngage/mnprecipfinal1970-2016.csv")
write_csv(stations, "~/projects/mngage/stations.csv")
| /MNgage.r | no_license | kwythers/climate | R | false | false | 8,543 | r | #############################################
# Script to process MNgage data            #
# from Minnesota Climate office            #
# Kirk R. Wythers                          #
# 2017.01.12                               #
#############################################
# Process MNgage precipitation data from one-column-per-day wide format
# to long format.
# NOTE(review): rm(list = ls()) clears the user's workspace and is
# discouraged in scripts; kept here to preserve the original behaviour.
rm(list=ls())
library(tidyverse)        # read_csv, dplyr verbs, tidyr::unite
library(data.table)       # melt() and := by-reference updates
library(splitstackshape)  # cSplit() for splitting delimited columns
library(lubridate)        # ymd() for building Date values
# Input path is hard-coded; assumes the raw file lives under
# ~/projects/mngage -- TODO confirm before running elsewhere.
mn_precip <- read_csv("~/projects/mngage/mnprecip1970-2016.csv")
# convert to data.table (a row index for joining is added further below)
mnp <- as.data.table(mn_precip)
# Reshape the daily precipitation values from wide (one column per day of
# the month, named "1".."31") to long format. The 31 measure columns are
# generated with as.character(1:31) instead of being listed by hand, which
# is equivalent to the original c("1", "2", ..., "31").
mnp.m1 <- melt(mnp,
               id.vars = c("county_township_range_section_station_owner",
                           "latitude",
                           "longitude",
                           "easting",
                           "northing",
                           "sponsor",
                           "time",
                           "yyyy_mo"),
               measure.vars = as.character(1:31))
# Reshape the per-day quality-flag columns ("f1".."f31") to long format,
# mirroring the value melt above. paste0("f", 1:31) generates the same
# column names as the original hand-written c("f1", ..., "f31").
mnp.m2 <- melt(mnp,
               id.vars = c("county_township_range_section_station_owner",
                           "latitude",
                           "longitude",
                           "easting",
                           "northing",
                           "sponsor",
                           "time",
                           "yyyy_mo"),
               measure.vars = paste0("f", 1:31))
# Add a row-number-based index to each table so the value rows and flag
# rows can be re-joined one-to-one.
mnp.m1[,index:=.GRP, by = row.names(mnp.m1)]
mnp.m2[,index:=.GRP, by = row.names(mnp.m2)]
# rename columns from first data table; positions 9/10 are the melted
# variable/value columns (position-dependent -- keep in sync with the
# melt calls above)
names(mnp.m1) # view the column names
names(mnp.m1)[9]<-"day"
names(mnp.m1)[10]<-"precip"
# rename columns from second data table
names(mnp.m2) # view the column names
names(mnp.m2)[10]<-"flag"
# remove un-needed/duplicate columns from second data table
mnp.m2[ , c("county_township_range_section_station_owner", "latitude",
            "longitude", "easting", "northing", "sponsor",
            "time", "yyyy_mo", "variable") := NULL]
# join value rows to their matching flag rows on the row index
mnp.m3 <- mnp.m1[mnp.m2, on = "index"]
# split "county township range section station owner" column on white space
mnp.m4 <- cSplit(mnp.m3, "county_township_range_section_station_owner", sep = " ", direction = "wide", fixed = TRUE,
                 drop = TRUE, stripWhite = TRUE, makeEqual = NULL, type.convert = TRUE)
names(mnp.m4) # look at column names
# positional renames: assumes cSplit produced the new columns at
# positions 12-17 -- TODO confirm against the actual input data
names(mnp.m4)[12] <- "county"
names(mnp.m4)[13] <- "township"
names(mnp.m4)[14] <- "range"
names(mnp.m4)[15] <- "section"
names(mnp.m4)[16] <- "station"
names(mnp.m4)[17] <- "owner"
# drop column 18 -- presumably a spare fragment from the split; TODO confirm
mnp.m4[, 18] <- NULL
# add new column based on yyyy_mo, split into yyyy and mo, concatenate
# yyyy, mo, and day into a date column, remove extraneous columns, re-order
mnp.m4[, "year_month" := yyyy_mo]
mnp.m5 <- cSplit(mnp.m4, "year_month", sep = "_", direction = "wide", fixed = TRUE,
                 drop = TRUE, stripWhite = TRUE, makeEqual = NULL, type.convert = TRUE)
mnp.m5$date <- with(mnp.m5, ymd(sprintf('%04d%02d%02d', year_month_1, year_month_2, day)))
# mnp.m5[ , c("year_month_1", "year_month_2") := NULL]
names(mnp.m5)[18] <- "year"
names(mnp.m5)[19] <- "month"
# NOTE(review): na.omit() drops elements from a vector, so this assignment
# only succeeds if owner contains no NAs (otherwise the lengths differ) --
# TODO confirm whether the intent was to drop rows or replace NAs.
mnp.m5$owner <- mnp.m5$owner %>% na.omit()
mnp.m6 <- unite(mnp.m5, "station_owner", station, owner, sep = " ")
# keep/reorder columns by position -- fragile; depends on every step above
mnp.m7 <- mnp.m6[, c(10, 19, 17:18, 8, 16, 1:2, 5:6, 9, 11)]
# write unique stations (keyed on coordinates) to an object with "distinct"
mnp.m8 <- distinct(mnp.m7, latitude, longitude, .keep_all = TRUE)
stations <- mnp.m8[, c(6:9)]
# change longitude sign (presumably to make western longitudes negative --
# TODO confirm against the source data convention)
stations <- transmute(stations, station_owner = station_owner,
                      latitude = latitude,
                      longitude = 0 - longitude,
                      sponsor = sponsor)
# write output to .csv
write_csv(mnp.m7,"~/projects/mngage/mnprecipfinal1970-2016.csv")
write_csv(stations, "~/projects/mngage/stations.csv")
|
\name{plotCat}
\alias{plotCat}
\title{Plotting correspondence at the top curves}
\description{
  This function plots correspondence at the top (CAT) curves
using overlap proportions computed by \code{computeCat}.
A number of arguments can be used for a pretty display, and for
annotating the plot, and adding the legend
}
\usage{
plotCat(catData, whichToPlot = 1:length(catData),
preComputedPI, size=500, main="CAT-plot",
minYlim=0, maxYlim=1, col, pch, lty, cex=1, lwd=1,
spacePts=10, cexPts=1, legend=TRUE, legendText,
where="center", legCex=1,
plotLayout=layout(matrix(1:2, ncol = 2, byrow = TRUE), widths = c(0.7, 0.3)), ...)
}
\arguments{
  \item{catData}{The output list obtained from \code{computeCat},
containing the overlapping proportions among pairs of
ordered vectors. Names in \code{catData} are used for
annotating the legend if \code{legendText} is not provided
(see below).}
\item{whichToPlot}{numeric vector. Indexes corresponding
to the elements of \code{catData} to be selected for
displaying in the plot.}
\item{preComputedPI}{numeric matrix. Probability intervals
computed using the \code{calcHypPI} function.
It is used to add grey shades to the plot corresponding
to overlapping proportion probabilities based on the
    hypergeometric distribution. If missing, no PI will be
added to the plot.}
\item{size}{numeric. The number of top ranking features
to be displayed in the plot.}
\item{main}{character. The title of the plot, if not provided,
\code{main} default is "CAT-plot".}
\item{minYlim}{numeric. The lower numeric value of the y axis,
to be displayed in the plot.}
\item{maxYlim}{numeric. The upper numeric value of the y axis,
to be displayed in the plot.}
\item{col}{character or numeric. Vector specifying colors
for CAT curves plotting. \code{col} default uses
\code{rainbow} function to generate a color vector
for all CAT curves in \code{catData}.
When provided by the user, it will be recycled if needed.}
\item{pch}{graphical parameter. \code{pch} specifies point
types for annotating the CAT curves.
If not provided, \code{pch} is created by default, and recycled
if needed. See \code{par} for details.}
\item{lty}{graphical parameter. The type of line for the plot.
If not provided generated by default, recycled id needed.
See \code{par} if needed.}
\item{cex}{numeric. Standard graphical parameter useful
for controlling axes and title annotation size.
See \code{par}.}
\item{lwd}{numeric. Standard graphical parameter useful
for controlling line size. See \code{par}.}
\item{spacePts}{numeric. Specifies the interval to be used for
adding point labels on the CAT curves (evenly spaced along
the x axis dimention).}
\item{cexPts}{numeric. Graphical parameter useful for controlling
points size used for annotating CAT-plot lines.}
  \item{legend}{logical. Whether a legend should be added to the plot.}
\item{legendText}{character. A vector used for legend creation.
\code{legendText} default correspond to \code{catData} names.}
\item{where}{character. The position of the plot where the legend
will be created; \code{where} default is \code{'center'},
see \code{legend}.}
\item{legCex}{numeric. Graphical parameter setting
the font size for the legend text.}
\item{plotLayout}{A layout matrix to arrange the plot
and the legend. For further details see \code{layout}.}
\item{\dots}{Other graphical parameters, currently passed
only to \code{legend} (e.g. the number of columns to be used
in the legend, or the legend background).}
}
\details{
This function uses outputs from \code{computeCat}
and \code{calcHypPI} to plot the CAT curves and
add grey shades corresponding to probability intervals.
The default plot uses a pre-specified layout
with separate areas for the plot and the legend.
If not specified by the user, different points, colors and line
types are used for the different CAT curves.
  If the CAT curves were computed using equal ranks
(e.g. "equalRank" was passed to the \code{method}
argument of the \code{computeCat} function),
the user has the option of adding probability intervals
to the plot. Such intervals must be pre-computed
using the \code{calcHypPI} function.
}
\value{
Produces an annotated CAT plot.
}
\seealso{
See \code{\link{computeCat}}, \code{\link{calcHypPI}},
\code{\link[grDevices]{rainbow}}, \code{\link[graphics]{par}},
\code{\link[graphics]{legend}}, and \code{\link[graphics]{layout}}.
}
\note{
In order to make the "best looking" plot for your needs
you must play around with graphical parameters
}
\references{
Irizarry, R. A.; Warren, D.; Spencer, F.; Kim, I. F.; Biswal, S.;
Frank, B. C.; Gabrielson, E.; Garcia, J. G. N.; Geoghegan, J.;
Germino, G.; Griffin, C.; Hilmer, S. C.; Hoffman, E.;
Jedlicka, A. E.; Kawasaki, E.; Martinez-Murillo, F.;
Morsberger, L.; Lee, H.; Petersen, D.; Quackenbush, J.;
Scott, A.; Wilson, M.; Yang, Y.; Ye, S. Q.
and Yu, W. Multiple-laboratory comparison of microarray platforms.
Nat Methods, 2005, 2, 345-350
Ross, A. E.; Marchionni, L.; Vuica-Ross, M.; Cheadle, C.;
Fan, J.; Berman, D. M.; and Schaeffer E. M.
Gene Expression Pathways of High Grade Localized Prostate Cancer.
Prostate, 2011, 71, 1568-1578
Benassi, B.; Flavin, R.; Marchionni, L.; Zanata, S.; Pan, Y.;
Chowdhury, D.; Marani, M.; Strano, S.; Muti, P.; and Blandino, G.
c-Myc is activated via USP2a-mediated modulation of microRNAs
in prostate cancer. Cancer Discovery, 2012, March, 2, 236-247
}
\author{ Luigi Marchionni \email{marchion@jhu.edu}}
\examples{
###load data
data(matchBoxExpression)
###the column name for the identifiers and the ranking statistics
idCol <- "SYMBOL"
byCol <- "t"
####filter the redundant features using SYMBOL and t-statistics
matchBoxExpression <- lapply(matchBoxExpression, filterRedundant, idCol=idCol, byCol=byCol)
###select and merge into a matrix
mat <- mergeData(matchBoxExpression, idCol=idCol, byCol=byCol)
###COMPUTE CAT
cpH2L <- computeCat(mat, idCol=1, size=round(nrow(mat)/1),
decreasing=TRUE, method="equalRank")
###CATplot without probability intervals
par(mar=c(3,3,2,1))
plotCat(cpH2L, main="CAT-plot, decreasing t-statistics",
cex=1, lwd=2, cexPts=1.5, spacePts=15,
legend=TRUE, where="center",
legCex=1, ncol=1)
###compute probability intervals
confInt <- calcHypPI(data=mat)
###CATplot with probability intervals
par(mar=c(3,3,2,1))
plotCat(cpH2L, main="CAT-plot, decreasing t-statistics, probability intervals",
cex=1, lwd=2, cexPts=1.5, spacePts=15,
legend=TRUE, where="center",
legCex=1, ncol=1)
}
\keyword{ manip}
| /man/plotCat.Rd | no_license | c1au6i0/matchBox | R | false | false | 6,777 | rd | \name{plotCat}
\alias{plotCat}
\title{Plotting correspondence at the top curves}
\description{
This function plots corresponding at the top (CAT) curves
using overlap proportions computed by \code{computeCat}.
A number of arguments can be used for a pretty display, and for
annotating the plot, and adding the legend
}
\usage{
plotCat(catData, whichToPlot = 1:length(catData),
preComputedPI, size=500, main="CAT-plot",
minYlim=0, maxYlim=1, col, pch, lty, cex=1, lwd=1,
spacePts=10, cexPts=1, legend=TRUE, legendText,
where="center", legCex=1,
plotLayout=layout(matrix(1:2, ncol = 2, byrow = TRUE), widths = c(0.7, 0.3)), ...)
}
\arguments{
\item{catData}{The ouput list obtained from \code{computeCat},
containing the overlapping proportions among pairs of
ordered vectors. Names in \code{catData} are used for
annotating the legend if \code{legendText} is not provided
(see below).}
\item{whichToPlot}{numeric vector. Indexes corresponding
to the elements of \code{catData} to be selected for
displaying in the plot.}
\item{preComputedPI}{numeric matrix. Probability intervals
computed using the \code{calcHypPI} function.
It is used to add grey shades to the plot corresponding
to overlapping proportion probabilities based on the
    hypergeometric distribution. If missing, no PI will be
added to the plot.}
\item{size}{numeric. The number of top ranking features
to be displayed in the plot.}
\item{main}{character. The title of the plot, if not provided,
\code{main} default is "CAT-plot".}
\item{minYlim}{numeric. The lower numeric value of the y axis,
to be displayed in the plot.}
\item{maxYlim}{numeric. The upper numeric value of the y axis,
to be displayed in the plot.}
\item{col}{character or numeric. Vector specifying colors
for CAT curves plotting. \code{col} default uses
\code{rainbow} function to generate a color vector
for all CAT curves in \code{catData}.
When provided by the user, it will be recycled if needed.}
\item{pch}{graphical parameter. \code{pch} specifies point
types for annotating the CAT curves.
If not provided, \code{pch} is created by default, and recycled
if needed. See \code{par} for details.}
\item{lty}{graphical parameter. The type of line for the plot.
If not provided generated by default, recycled id needed.
See \code{par} if needed.}
\item{cex}{numeric. Standard graphical parameter useful
for controlling axes and title annotation size.
See \code{par}.}
\item{lwd}{numeric. Standard graphical parameter useful
for controlling line size. See \code{par}.}
\item{spacePts}{numeric. Specifies the interval to be used for
adding point labels on the CAT curves (evenly spaced along
the x axis dimention).}
\item{cexPts}{numeric. Graphical parameter useful for controlling
points size used for annotating CAT-plot lines.}
  \item{legend}{logical. Whether a legend should be added to the plot.}
\item{legendText}{character. A vector used for legend creation.
    \code{legendText} default corresponds to \code{catData} names.}
\item{where}{character. The position of the plot where the legend
will be created; \code{where} default is \code{'center'},
see \code{legend}.}
\item{legCex}{numeric. Graphical parameter setting
the font size for the legend text.}
\item{plotLayout}{A layout matrix to arrange the plot
and the legend. For further details see \code{layout}.}
\item{\dots}{Other graphical parameters, currently passed
only to \code{legend} (e.g. the number of columns to be used
in the legend, or the legend background).}
}
\details{
This function uses outputs from \code{computeCat}
and \code{calcHypPI} to plot the CAT curves and
add grey shades corresponding to probability intervals.
The default plot uses a pre-specified layout
with separate areas for the plot and the legend.
If not specified by the user, different points, colors and line
types are used for the different CAT curves.
If the CAT curves where computed using equal ranks
(e.g. "equalRank" was passed to the \code{method}
argument of the \code{computeCat} function),
the user has the option of adding probability intervals
to the plot. Such intervals must be pre-computed
using the \code{calcHypPI} function.
}
\value{
Produces an annotated CAT plot.
}
\seealso{
See \code{\link{computeCat}}, \code{\link{calcHypPI}},
\code{\link[grDevices]{rainbow}}, \code{\link[graphics]{par}},
\code{\link[graphics]{legend}}, and \code{\link[graphics]{layout}}.
}
\note{
In order to make the "best looking" plot for your needs
you must play around with graphical parameters
}
\references{
Irizarry, R. A.; Warren, D.; Spencer, F.; Kim, I. F.; Biswal, S.;
Frank, B. C.; Gabrielson, E.; Garcia, J. G. N.; Geoghegan, J.;
Germino, G.; Griffin, C.; Hilmer, S. C.; Hoffman, E.;
Jedlicka, A. E.; Kawasaki, E.; Martinez-Murillo, F.;
Morsberger, L.; Lee, H.; Petersen, D.; Quackenbush, J.;
Scott, A.; Wilson, M.; Yang, Y.; Ye, S. Q.
and Yu, W. Multiple-laboratory comparison of microarray platforms.
Nat Methods, 2005, 2, 345-350
Ross, A. E.; Marchionni, L.; Vuica-Ross, M.; Cheadle, C.;
Fan, J.; Berman, D. M.; and Schaeffer E. M.
Gene Expression Pathways of High Grade Localized Prostate Cancer.
Prostate, 2011, 71, 1568-1578
Benassi, B.; Flavin, R.; Marchionni, L.; Zanata, S.; Pan, Y.;
Chowdhury, D.; Marani, M.; Strano, S.; Muti, P.; and Blandino, G.
c-Myc is activated via USP2a-mediated modulation of microRNAs
in prostate cancer. Cancer Discovery, 2012, March, 2, 236-247
}
\author{ Luigi Marchionni \email{marchion@jhu.edu}}
\examples{
###load data
data(matchBoxExpression)
###the column name for the identifiers and the ranking statistics
idCol <- "SYMBOL"
byCol <- "t"
####filter the redundant features using SYMBOL and t-statistics
matchBoxExpression <- lapply(matchBoxExpression, filterRedundant, idCol=idCol, byCol=byCol)
###select and merge into a matrix
mat <- mergeData(matchBoxExpression, idCol=idCol, byCol=byCol)
###COMPUTE CAT
cpH2L <- computeCat(mat, idCol=1, size=round(nrow(mat)/1),
decreasing=TRUE, method="equalRank")
###CATplot without probability intervals
par(mar=c(3,3,2,1))
plotCat(cpH2L, main="CAT-plot, decreasing t-statistics",
cex=1, lwd=2, cexPts=1.5, spacePts=15,
legend=TRUE, where="center",
legCex=1, ncol=1)
###compute probability intervals
confInt <- calcHypPI(data=mat)
###CATplot with probability intervals
par(mar=c(3,3,2,1))
plotCat(cpH2L, main="CAT-plot, decreasing t-statistics, probability intervals",
cex=1, lwd=2, cexPts=1.5, spacePts=15,
legend=TRUE, where="center",
legCex=1, ncol=1)
}
\keyword{ manip}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/remix.r
\name{cross_one}
\alias{cross_one}
\title{Cross one x and one y}
\usage{
cross_one(x, y = NULL, funs = NULL, ..., margin = 0:2,
total = FALSE, digits = 2, showNA = c("no", "ifany", "always"),
method = c("pearson", "kendall", "spearman"), times = NULL,
followup = FALSE, test = FALSE, test.tabular = test.tabular.auto,
test.summarize = test.summarize.auto,
test.survival = test.survival.logrank, show.test = display.test,
plim = 4, show.method = TRUE, effect = FALSE,
effect.summarize = effect.diff.mean.auto,
effect.tabular = effect.or.row.by.col,
effect.survival = effect.survival.coxph, conf.level = 0.95,
label = FALSE)
}
\arguments{
\item{x}{x}
\item{y}{y}
\item{funs}{funs}
\item{...}{\dots}
\item{margin}{margin}
\item{total}{total}
\item{digits}{digits}
\item{showNA}{showNA}
\item{method}{method}
\item{times}{times}
\item{followup}{followup}
\item{test}{test}
\item{test.tabular}{test.tabular}
\item{test.summarize}{test.summarize}
\item{test.survival}{test.survival}
\item{show.test}{show.test}
\item{plim}{plim}
\item{show.method}{show.method}
\item{effect}{effect}
\item{effect.summarize}{effect.summarize}
\item{effect.tabular}{effect.tabular}
\item{effect.survival}{effect.survival}
\item{conf.level}{conf.level}
\item{label}{label}
}
\description{
Cross one x and one y
}
\author{
David Hajage
}
\keyword{internal}
| /man/cross_one.Rd | no_license | eusebe/biostat2 | R | false | true | 1,459 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/remix.r
\name{cross_one}
\alias{cross_one}
\title{Cross one x and one y}
\usage{
cross_one(x, y = NULL, funs = NULL, ..., margin = 0:2,
total = FALSE, digits = 2, showNA = c("no", "ifany", "always"),
method = c("pearson", "kendall", "spearman"), times = NULL,
followup = FALSE, test = FALSE, test.tabular = test.tabular.auto,
test.summarize = test.summarize.auto,
test.survival = test.survival.logrank, show.test = display.test,
plim = 4, show.method = TRUE, effect = FALSE,
effect.summarize = effect.diff.mean.auto,
effect.tabular = effect.or.row.by.col,
effect.survival = effect.survival.coxph, conf.level = 0.95,
label = FALSE)
}
\arguments{
\item{x}{x}
\item{y}{y}
\item{funs}{funs}
\item{...}{\dots}
\item{margin}{margin}
\item{total}{total}
\item{digits}{digits}
\item{showNA}{showNA}
\item{method}{method}
\item{times}{times}
\item{followup}{followup}
\item{test}{test}
\item{test.tabular}{test.tabular}
\item{test.summarize}{test.summarize}
\item{test.survival}{test.survival}
\item{show.test}{show.test}
\item{plim}{plim}
\item{show.method}{show.method}
\item{effect}{effect}
\item{effect.summarize}{effect.summarize}
\item{effect.tabular}{effect.tabular}
\item{effect.survival}{effect.survival}
\item{conf.level}{conf.level}
\item{label}{label}
}
\description{
Cross one x and one y
}
\author{
David Hajage
}
\keyword{internal}
|
##simulate CB data ----
# Model settings (beta, N, effprop, i0, reporting, numobs, seed, N0, zerohack)
# are expected to be defined by paramsCB.R; simCB() comes from CBsimulator.R.
source('paramsCB.R')
source("CBsimulator.R")
# Draw one simulated epidemic trajectory using the sourced parameter values.
sim <- simCB(beta=beta,N=N,effprop=effprop,i0=i0,reporting=reporting,
             numobs=numobs,seed=seed)
# NOTE(review): a bare `sim` auto-prints only when run interactively, not when
# this file is source()d -- confirm that is intended.
sim
# Bundle the observed incidence and fixed constants for the fitting code.
data <- list(obs=sim$Iobs,
             N=N,
             i0=i0,
             numobs=nrow(sim),
             zerohack=zerohack)
##initial values -----
# One inner list per chain: latent infection counts plus starting parameters.
inits <- list(list(I = sim$I,
                   effprop=effprop,
                   beta = beta,
                   N0=N0,
                   reporting = reporting))
# Names of the parameters to monitor/estimate downstream.
params = c('beta',
           'effprop',
           'reporting')
# Persist the whole workspace (sim, data, inits, params, ...) for later runs;
# this is why local variable names must not be changed.
save.image(file="simdata.RData")
save.image(file="simdata.RData") | /simulateCB.R | no_license | morgankain/NIMBLE-Pres | R | false | false | 599 | r | ##simulate CB data
source('paramsCB.R')
source("CBsimulator.R")
sim <- simCB(beta=beta,N=N,effprop=effprop,i0=i0,reporting=reporting,
numobs=numobs,seed=seed)
sim
data <- list(obs=sim$Iobs,
N=N,
i0=i0,
numobs=nrow(sim),
zerohack=zerohack)
##initial values -----
inits <- list(list(I = sim$I,
effprop=effprop,
beta = beta,
N0=N0,
reporting = reporting))
params = c('beta',
'effprop',
'reporting')
save.image(file="simdata.RData") |
\name{SegSpatial-class}
\docType{class}
\alias{SegSpatial-class}
\alias{coerce,SegSpatial,SpatialPoints-method}
\alias{coerce,SegSpatial,SpatialPointsDataFrame-method}
\alias{coerce,SegSpatial,SpatialPixelsDataFrame-method}
\alias{coerce,SegSpatial,list-method}
\alias{as.list.SegSpatial-method}
\alias{show,SegSpatial-method}
\alias{print.SegSpatial-method}
\alias{spplot,SegSpatial-method}
\title{Class SegSpatial}
\description{A class to hold results from \code{\link{spatseg}}.}
\section{Objects from the Class}{
Objects can be created by calls to \code{\link{spseg}}, or the constructor \code{\link{SegSpatial}}.
}
\section{Slots}{
\describe{
\item{d}{an object of class \code{numeric} containing the spatial dissimilarity index value.}
\item{r}{an object of class \code{numeric} containing the spatial diversity index value.}
\item{h}{an object of class \code{numeric} containing the spatial information theory index value.}
\item{p}{an object of class \code{matrix} that has the spatial exposure/isolation of all population groups.}
\item{coords, data, env, proj4string}{see \code{\link{SegLocal-class}}.}
}
}
\section{Methods}{
\describe{
\item{coerce}{\code{signature(from = "SegSpatial", to = "SpatialPoints")}: coerce an object of class \code{SegSpatial} to an object of class \code{SpatialPoints}. The points have no attribute data.}
\item{coerce}{\code{signature(from = "SegSpatial", to = "SpatialPointsDataFrame")}: coerce an object of class \code{SegSpatial} to an object of class \code{SpatialPointsDataFrame}. The values in the slot \sQuote{data} will be used as the attribute data.}
\item{coerce}{\code{signature(from = "SegSpatial", to = "SpatialPixelsDataFrame")}: coerce an object of class \code{SegSpatial} to an object of class \code{SpatialPixelsDataFrame}. The values in the slot \sQuote{data} will be used as the attribute data. May not work when the points are irregularly spaced.}
\item{coerce}{\code{signature(from = "SegSpatial", to = "list")}: retrieve the segregation index values and return it as a \code{list} object.}
\item{as.list}{\code{signature(x = "SegSpatial")}: same as the above.}
\item{show}{\code{signature(object = "SegSpatial")}: show the segregation index values.}
\item{print}{\code{signature(x = "SegSpatial")}: same as \code{show}.}
\item{spplot}{\code{signature(obj = "SegSpatial")}: coerce an object of class \code{SegSpatial} to an object of class \code{SpatialPixelsDataFrame} or \code{SpatialPointsDataFrame} and display it. See \code{help(spplot)} for more details about the graphical parameter arguments.}
}
}
\section{Extends}{
\code{\link{SegLocal-class}}.
}
\author{Seong-Yun Hong}
\seealso{
\code{\link{SegSpatial}}, \code{\link{spseg}}
}
\examples{
# creates 100 regularly-spaced data points and 3 population groups
xy <- expand.grid(1:10, 1:10)
colnames(xy) <- c("x", "y")
pop <- matrix(runif(300), ncol = 3)
env <- matrix(runif(300), ncol = 3)
colnames(pop) <- LETTERS[1:3]
colnames(env) <- LETTERS[4:6]
# constructs an object of class 'SegSpatial'
v <- SegSpatial(d = numeric(), r = numeric(), h = numeric(),
p = matrix(0, 0, 0),
coords = as.matrix(xy), data = pop, env = env)
print(v)
# changes the spatial dissimilarity index value
slot(v, "d") <- runif(1)
# retrieves the index values
as.list(v)
# displays the values in the slot 'data'
spplot(v, col.regions = heat.colors(20))
# displays the values in the slot 'env'
w <- as(v, "SegLocal")
spplot(w, col.regions = heat.colors(20))
}
\keyword{classes}
| /man/SegSpatial-class.Rd | no_license | syunhong/seg | R | false | false | 3,645 | rd | \name{SegSpatial-class}
\docType{class}
\alias{SegSpatial-class}
\alias{coerce,SegSpatial,SpatialPoints-method}
\alias{coerce,SegSpatial,SpatialPointsDataFrame-method}
\alias{coerce,SegSpatial,SpatialPixelsDataFrame-method}
\alias{coerce,SegSpatial,list-method}
\alias{as.list.SegSpatial-method}
\alias{show,SegSpatial-method}
\alias{print.SegSpatial-method}
\alias{spplot,SegSpatial-method}
\title{Class SegSpatial}
\description{A class to hold results from \code{\link{spatseg}}.}
\section{Objects from the Class}{
Objects can be created by calls to \code{\link{spseg}}, or the constructor \code{\link{SegSpatial}}.
}
\section{Slots}{
\describe{
\item{d}{an object of class \code{numeric} containing the spatial dissimilarity index value.}
\item{r}{an object of class \code{numeric} containing the spatial diversity index value.}
\item{h}{an object of class \code{numeric} containing the spatial information theory index value.}
\item{p}{an object of class \code{matrix} that has the spatial exposure/isolation of all population groups.}
\item{coords, data, env, proj4string}{see \code{\link{SegLocal-class}}.}
}
}
\section{Methods}{
\describe{
\item{coerce}{\code{signature(from = "SegSpatial", to = "SpatialPoints")}: coerce an object of class \code{SegSpatial} to an object of class \code{SpatialPoints}. The points have no attribute data.}
\item{coerce}{\code{signature(from = "SegSpatial", to = "SpatialPointsDataFrame")}: coerce an object of class \code{SegSpatial} to an object of class \code{SpatialPointsDataFrame}. The values in the slot \sQuote{data} will be used as the attribute data.}
\item{coerce}{\code{signature(from = "SegSpatial", to = "SpatialPixelsDataFrame")}: coerce an object of class \code{SegSpatial} to an object of class \code{SpatialPixelsDataFrame}. The values in the slot \sQuote{data} will be used as the attribute data. May not work when the points are irregularly spaced.}
\item{coerce}{\code{signature(from = "SegSpatial", to = "list")}: retrieve the segregation index values and return it as a \code{list} object.}
\item{as.list}{\code{signature(x = "SegSpatial")}: same as the above.}
\item{show}{\code{signature(object = "SegSpatial")}: show the segregation index values.}
\item{print}{\code{signature(x = "SegSpatial")}: same as \code{show}.}
\item{spplot}{\code{signature(obj = "SegSpatial")}: coerce an object of class \code{SegSpatial} to an object of class \code{SpatialPixelsDataFrame} or \code{SpatialPointsDataFrame} and display it. See \code{help(spplot)} for more details about the graphical parameter arguments.}
}
}
\section{Extends}{
\code{\link{SegLocal-class}}.
}
\author{Seong-Yun Hong}
\seealso{
\code{\link{SegSpatial}}, \code{\link{spseg}}
}
\examples{
# creates 100 regularly-spaced data points and 3 population groups
xy <- expand.grid(1:10, 1:10)
colnames(xy) <- c("x", "y")
pop <- matrix(runif(300), ncol = 3)
env <- matrix(runif(300), ncol = 3)
colnames(pop) <- LETTERS[1:3]
colnames(env) <- LETTERS[4:6]
# constructs an object of class 'SegSpatial'
v <- SegSpatial(d = numeric(), r = numeric(), h = numeric(),
p = matrix(0, 0, 0),
coords = as.matrix(xy), data = pop, env = env)
print(v)
# changes the spatial dissimilarity index value
slot(v, "d") <- runif(1)
# retrieves the index values
as.list(v)
# displays the values in the slot 'data'
spplot(v, col.regions = heat.colors(20))
# displays the values in the slot 'env'
w <- as(v, "SegLocal")
spplot(w, col.regions = heat.colors(20))
}
\keyword{classes}
|
## ----options, echo=FALSE, warning=FALSE, message=FALSE----------------------------------------------------------------
# Global chunk options for the rendered document (this file is purled knitr
# output; edits ideally belong in the Rmd/Rnw source).
# NOTE(review): `opts_chunk` is exported by knitr, which is never attached in
# this script -- running the purled file standalone assumes knitr is loaded.
options(width=120)
opts_chunk$set(comment=NA,
               fig.width=6,
               fig.height=4.4,
               size='tiny',
               out.width='\\textwidth',
               fig.align='center',
               echo=FALSE,
               message=FALSE)
## ----libraries, message=FALSE, warning=FALSE, echo=FALSE--------------------------------------------------------------
library("tidyverse")
library("Sleuth3")
## ----set_seed, echo=FALSE---------------------------------------------------------------------------------------------
set.seed(2)
## ----echo=FALSE-------------------------------------------------------------------------------------------------------
set.seed(20190410)
d <- expand.grid(x = seq(-5, 5, length=101),
mean = round(sort(rnorm(6)),2)) %>%
mutate(density = dnorm(x, mean),
mean = paste0("mean = ", mean))
ggplot(d, aes(x, density, color=mean, linetype=mean)) +
geom_line() +
theme_bw()
## ----echo=FALSE-------------------------------------------------------------------------------------------------------
ggplot(Sleuth3::case0501, aes(x=Diet, y=Lifetime)) +
geom_jitter() +
# geom_hline(data=sm2, aes(yintercept=mean[7]),
# col='red', size=2) +
# geom_errorbar(data=sm, aes(y=mean, ymin=mean, ymax=mean),
# col='blue', size=2) +
theme_bw()
## ----F-distribution, echo=FALSE, fig.height = 3.5---------------------------------------------------------------------
df = c(5,300)
ggplot(data.frame(x = c(0,4)), aes(x=x)) +
stat_function(fun = "df", args = list(df1 = df[1], df2 = df[2]),
xlim = c(2,4), geom = "area", fill = "magenta") +
stat_function(fun = "df", args = list(df1 = df[1], df2 = df[2])) +
labs(title = paste0("F(",df[1],", ",df[2],")"),
x = "F", y = "density") +
theme_bw()
## ----echo=FALSE, warning=FALSE----------------------------------------------------------------------------------------
sm <- Sleuth3::case0501 %>%
group_by(Diet) %>%
summarize(n = n(),
mean = mean(Lifetime),
sd = sd(Lifetime))
total <- Sleuth3::case0501 %>%
summarize(n = n(),
mean = mean(Lifetime),
sd = sd(Lifetime)) %>%
mutate(Diet = "Total")
(sm2 <- bind_rows(sm,total))
## ----echo=FALSE-------------------------------------------------------------------------------------------------------
ggplot(Sleuth3::case0501, aes(x = Diet)) +
geom_jitter(aes(y=Lifetime), size=3) +
geom_hline(data=sm2, aes(yintercept=mean[7]),
col='red', size=2) +
geom_errorbar(data=sm, aes(x=Diet, ymin=mean, ymax=mean),
col='blue', size=2) +
theme_bw()
## ----echo=TRUE--------------------------------------------------------------------------------------------------------
m <- lm(Lifetime~Diet, case0501)
anova(m)
## ----echo=FALSE-------------------------------------------------------------------------------------------------------
sm3 = sm
sm3$mean[-which(sm3$Diet=="NP")] = mean(case0501$Lifetime[case0501$Diet!="NP"])
ggplot(case0501, aes(x=Diet)) +
geom_jitter(aes(y=Lifetime), size=3) +
geom_errorbar(data=sm, aes(ymin=mean, ymax=mean),
col='blue', size=2) +
geom_errorbar(data=sm3, aes(ymin=mean, ymax=mean),
col='red', size=2) +
theme_bw()
## ----echo = TRUE------------------------------------------------------------------------------------------------------
# Extra-sum-of-squares F-test: does mouse lifetime differ among the six diets
# beyond a simple NP vs non-NP split?
case0501$NP = factor(case0501$Diet == "NP")
modR = lm(Lifetime~NP, case0501) # (R)educed model
modF = lm(Lifetime~Diet, case0501) # (F)ull model
# anova(reduced, full) compares the two nested models with an F-test.
anova(modR,modF)
## ----echo=FALSE-------------------------------------------------------------------------------------------------------
ggplot(Sleuth3::ex0816, aes(factor(Time), pH))+
geom_boxplot(color="gray")+
geom_point()+
labs(x="Time", y="pH",
title="pH vs Time in Steer Carcasses") +
theme_bw()
## ----echo=FALSE-------------------------------------------------------------------------------------------------------
ggplot(Sleuth3::ex0816, aes(Time, pH))+
geom_point() +
stat_smooth(method="lm") +
labs(x="Time", y="pH",
title="pH vs Time in Steer Carcasses") +
theme_bw()
## ----echo = TRUE------------------------------------------------------------------------------------------------------
# Use as.factor to turn a continuous variable into a categorical variable
m_anova = lm(pH ~ as.factor(Time), Sleuth3::ex0816)
m_reg = lm(pH ~ Time , Sleuth3::ex0816)
anova(m_reg, m_anova)
| /courses/stat587Eng/slides/Regression/R06-ANOVA_F-tests/R06-ANOVA_F-tests.R | no_license | jarad/jarad.github.com | R | false | false | 4,682 | r | ## ----options, echo=FALSE, warning=FALSE, message=FALSE----------------------------------------------------------------
options(width=120)
opts_chunk$set(comment=NA,
fig.width=6,
fig.height=4.4,
size='tiny',
out.width='\\textwidth',
fig.align='center',
echo=FALSE,
message=FALSE)
## ----libraries, message=FALSE, warning=FALSE, echo=FALSE--------------------------------------------------------------
library("tidyverse")
library("Sleuth3")
## ----set_seed, echo=FALSE---------------------------------------------------------------------------------------------
set.seed(2)
## ----echo=FALSE-------------------------------------------------------------------------------------------------------
set.seed(20190410)
d <- expand.grid(x = seq(-5, 5, length=101),
mean = round(sort(rnorm(6)),2)) %>%
mutate(density = dnorm(x, mean),
mean = paste0("mean = ", mean))
ggplot(d, aes(x, density, color=mean, linetype=mean)) +
geom_line() +
theme_bw()
## ----echo=FALSE-------------------------------------------------------------------------------------------------------
ggplot(Sleuth3::case0501, aes(x=Diet, y=Lifetime)) +
geom_jitter() +
# geom_hline(data=sm2, aes(yintercept=mean[7]),
# col='red', size=2) +
# geom_errorbar(data=sm, aes(y=mean, ymin=mean, ymax=mean),
# col='blue', size=2) +
theme_bw()
## ----F-distribution, echo=FALSE, fig.height = 3.5---------------------------------------------------------------------
df = c(5,300)
ggplot(data.frame(x = c(0,4)), aes(x=x)) +
stat_function(fun = "df", args = list(df1 = df[1], df2 = df[2]),
xlim = c(2,4), geom = "area", fill = "magenta") +
stat_function(fun = "df", args = list(df1 = df[1], df2 = df[2])) +
labs(title = paste0("F(",df[1],", ",df[2],")"),
x = "F", y = "density") +
theme_bw()
## ----echo=FALSE, warning=FALSE----------------------------------------------------------------------------------------
sm <- Sleuth3::case0501 %>%
group_by(Diet) %>%
summarize(n = n(),
mean = mean(Lifetime),
sd = sd(Lifetime))
total <- Sleuth3::case0501 %>%
summarize(n = n(),
mean = mean(Lifetime),
sd = sd(Lifetime)) %>%
mutate(Diet = "Total")
(sm2 <- bind_rows(sm,total))
## ----echo=FALSE-------------------------------------------------------------------------------------------------------
ggplot(Sleuth3::case0501, aes(x = Diet)) +
geom_jitter(aes(y=Lifetime), size=3) +
geom_hline(data=sm2, aes(yintercept=mean[7]),
col='red', size=2) +
geom_errorbar(data=sm, aes(x=Diet, ymin=mean, ymax=mean),
col='blue', size=2) +
theme_bw()
## ----echo=TRUE--------------------------------------------------------------------------------------------------------
m <- lm(Lifetime~Diet, case0501)
anova(m)
## ----echo=FALSE-------------------------------------------------------------------------------------------------------
sm3 = sm
sm3$mean[-which(sm3$Diet=="NP")] = mean(case0501$Lifetime[case0501$Diet!="NP"])
ggplot(case0501, aes(x=Diet)) +
geom_jitter(aes(y=Lifetime), size=3) +
geom_errorbar(data=sm, aes(ymin=mean, ymax=mean),
col='blue', size=2) +
geom_errorbar(data=sm3, aes(ymin=mean, ymax=mean),
col='red', size=2) +
theme_bw()
## ----echo = TRUE------------------------------------------------------------------------------------------------------
case0501$NP = factor(case0501$Diet == "NP")
modR = lm(Lifetime~NP, case0501) # (R)educed model
modF = lm(Lifetime~Diet, case0501) # (F)ull model
anova(modR,modF)
## ----echo=FALSE-------------------------------------------------------------------------------------------------------
ggplot(Sleuth3::ex0816, aes(factor(Time), pH))+
geom_boxplot(color="gray")+
geom_point()+
labs(x="Time", y="pH",
title="pH vs Time in Steer Carcasses") +
theme_bw()
## ----echo=FALSE-------------------------------------------------------------------------------------------------------
ggplot(Sleuth3::ex0816, aes(Time, pH))+
geom_point() +
stat_smooth(method="lm") +
labs(x="Time", y="pH",
title="pH vs Time in Steer Carcasses") +
theme_bw()
## ----echo = TRUE------------------------------------------------------------------------------------------------------
# Use as.factor to turn a continuous variable into a categorical variable
m_anova = lm(pH ~ as.factor(Time), Sleuth3::ex0816)
m_reg = lm(pH ~ Time , Sleuth3::ex0816)
anova(m_reg, m_anova)
|
## Dependencies and local saemix development sources -------------------------
library("mlxR")
library("psych")
library("coda")
library("Matrix")
library(abind)
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing, whereas library() fails fast -- confirm the mix is intentional.
require(ggplot2)
require(gridExtra)
require(reshape2)
library(dplyr)
# save.image("rtte_mcmc_conv.RData")
# setwd("/Users/karimimohammedbelhal/Desktop/package_contrib/saemixB/R")
# NOTE(review): absolute user-specific paths make this script non-portable.
setwd("/Users/karimimohammedbelhal/Desktop/ongoing_research/CSDA/csda_newcombination/R")
# Load a local development copy of the saemix code base (order matters:
# generics first, then helpers, then the S4 class files).
source('aaa_generics.R')
source('compute_LL.R')
source('func_aux.R')
source('func_distcond.R')
source('func_FIM.R')
source('func_plots.R')
source('func_simulations.R')
source('main.R')
source('main_estep.R')
source('estep_mcmc.R')
source('main_initialiseMainAlgo.R')
source('main_mstep.R')
source('SaemixData.R')
source('SaemixModel.R')
source('SaemixRes.R')
# source('SaemixRes_c.R')
source('SaemixObject.R')
source('zzz.R')
setwd("/Users/karimimohammedbelhal/Desktop/ongoing_research/CSDA/csda_newcombination")
source('graphplot.R')
# Restore previously saved results (e.g. MCMC chains) from an earlier session.
load("rtte_mcmc_conv.RData")
###RTTE
# Repeated time-to-event data; keep only rows with response type 2.
timetoevent.saemix <- read.table("/Users/karimimohammedbelhal/Desktop/ongoing_research/CSDA/csda_newcombination/data/rtte_data.csv", header=T, sep=",")
# timetoevent.saemix <- read.table("/Users/karimimohammedbelhal/Desktop/package_contrib/saemixB/data/rttellis.csv", header=T, sep=",")
timetoevent.saemix <- timetoevent.saemix[timetoevent.saemix$ytype==2,]
# Wrap the data for saemix; `time` acts as the regression variable.
saemix.data_rtte<-saemixData(name.data=timetoevent.saemix,header=TRUE,sep=" ",na=NA, name.group=c("id"),name.response=c("y"),name.predictors=c("time","y"), name.X=c("time"))
# Number of subjects.
n <- length(unique(timetoevent.saemix$id))
timetoevent.model <- function(psi, id, xidep) {
  # Per-observation log-likelihood contributions for a repeated
  # time-to-event Weibull model with administrative censoring at t = 20.
  #
  # psi   : matrix of individual parameters (col 1 = lambda scale,
  #         col 2 = beta shape), one row per subject
  # id    : subject index mapping each row of xidep to a row of psi
  # xidep : design matrix; column 1 holds event/censoring times
  #         (column 2, the response, is not used here)
  #
  # Returns a numeric vector of log-density contributions, one per row
  # of xidep; rows with t == 0 (record start) contribute 0.
  obs.time <- xidep[, 1]
  n.obs <- length(obs.time)
  cens.time <- 20  # administrative censoring horizon

  scale <- psi[id, 1]  # lambda
  shape <- psi[id, 2]  # beta

  # Classify rows: t == 0 starts a subject's record, t == cens.time is a
  # censored observation, everything else is an observed event.
  is.start <- which(obs.time == 0)
  is.cens <- which(obs.time == cens.time)
  is.event <- setdiff(seq_len(n.obs), c(is.start, is.cens))

  haz <- (shape / scale) * (obs.time / scale)^(shape - 1)  # Weibull hazard
  cumhaz <- (obs.time / scale)^shape                       # cumulative hazard

  out <- numeric(n.obs)
  # Censored rows: survival between the previous observation and censoring.
  # NOTE: relies on rows being ordered within subject so that index-1 is
  # the preceding observation of the same subject.
  out[is.cens] <- -cumhaz[is.cens] + cumhaz[is.cens - 1]
  # Event rows additionally pick up the log-hazard at the event time.
  out[is.event] <- -cumhaz[is.event] + cumhaz[is.event - 1] + log(haz[is.event])
  out
}
# SAEM model: Weibull RTTE likelihood, log-normal (lambda, beta) with a
# full diagonal random-effect covariance.
saemix.model_rtte <- saemixModel(
  model = timetoevent.model, description = "time model", type = "likelihood",
  psi0 = matrix(c(2, 1), ncol = 2, byrow = TRUE,
                dimnames = list(NULL, c("lambda", "beta"))),
  transform.par = c(1, 1),
  covariance.model = matrix(c(1, 0, 0, 1), ncol = 2, byrow = TRUE))

##RUNS ----------------------------------------------------------------------
K1 <- 200                      # SAEM exploration iterations
K2 <- 100                      # SAEM smoothing iterations
iterations <- 1:(K1 + K2 + 1)  # x-axis for convergence plots (incl. start)
end <- K1 + K2

#Weibull
# Reference run: standard SAEM transition kernels only (4th kernel disabled).
options_rtte <- list(seed = 39546, map = FALSE, fim = FALSE, ll.is = FALSE,
                     nbiter.mcmc = c(2, 2, 2, 0), nbiter.saemix = c(K1, K2),
                     nbiter.sa = 0, displayProgress = TRUE, nbiter.burn = 0,
                     map.range = c(0))
rtte <- data.frame(saemix(saemix.model_rtte, saemix.data_rtte, options_rtte))
rtte <- cbind(iterations, rtte)

# New run: extra kernel (6 iterations) active during SAEM iterations 1-5.
options_rttenew <- list(seed = 39546, map = FALSE, fim = FALSE, ll.is = FALSE,
                        nbiter.mcmc = c(2, 2, 2, 6), nb.chains = 1,
                        nbiter.saemix = c(K1, K2), nbiter.sa = 0,
                        displayProgress = TRUE, nbiter.burn = 0,
                        map.range = c(1:5))
rttenew <- data.frame(saemix(saemix.model_rtte, saemix.data_rtte, options_rttenew))
# BUG FIX: the original repeated `rtte <- cbind(iterations, rtte)` here,
# giving `rtte` a second iterations column and leaving `rttenew` without one.
rttenew <- cbind(iterations, rttenew)
# Re-initialise the model at previously estimated population values
# (lambda ~ 10.17, beta ~ 4.58) with diagonal random-effect variances 0.3.
saemix.model_rtte<-saemixModel(model=timetoevent.model,description="time model",type="likelihood",
                psi0=matrix(c(10.17122,4.577724),ncol=2,byrow=TRUE,dimnames=list(NULL,
                c("lambda","beta"))),
                transform.par=c(1,1),covariance.model=matrix(c(0.3,0,0,0.3),ncol=2,
                byrow=TRUE))
# Chain length for the MCMC convergence study.
L_mcmc=5000
# Reference sampler: standard kernels c(2,2,2,0); per-subject chains in $eta_ref.
options_rtte<-list(seed=39546,map=F,fim=F,ll.is=F,L_mcmc=L_mcmc,nbiter.mcmc = c(2,2,2,0),nb.chains=1, nbiter.saemix = c(K1,K2),nbiter.sa=0,displayProgress=TRUE,nbiter.burn =0, map.range=c(0))
ref<-mcmc(saemix.model_rtte,saemix.data_rtte,options_rtte)$eta_ref
# New sampler: only the 4th/5th kernel entries active, c(0,0,0,6,0) --
# presumably the MAP-informed kernel; confirm against estep_mcmc.R.
options_rttenew<-list(seed=39546,map=F,fim=F,ll.is=F,L_mcmc=L_mcmc,nbiter.mcmc = c(0,0,0,6,0),nb.chains=1, nbiter.saemix = c(K1,K2),nbiter.sa=0,displayProgress=TRUE,nbiter.burn =0, map.range=c(0))
new<-mcmc(saemix.model_rtte,saemix.data_rtte,options_rttenew)$eta
# Discard the first `start_interval` iterations of each chain as burn-in.
start_interval <- 200
zero <- as.data.frame(matrix(0,nrow = L_mcmc-start_interval,ncol = 2))
# Reference sampler: running mean (cummean over iterations) of the
# across-subject average chain, per random-effect dimension.
etabarref <- 1/n*Reduce("+",ref)
expecref <- data.frame(apply(etabarref[-(1:start_interval),], 2, cummean))
expecref$iteration <- 1:(L_mcmc-start_interval)
# Average per-subject running standard deviation via sqrt(E[x^2] - E[x]^2);
# pmax(zero, .) clips tiny negative values caused by floating-point error.
# NOTE(review): the loop variable `var` shadows base R's var().
sdref <- 0
for (i in 1:n){
  var <- data.frame(apply(ref[[i]][-(1:start_interval),]^2, 2, cummean))
  meansq <- data.frame(apply(ref[[i]][-(1:start_interval),], 2, cummean))^2
  sdref <- sdref + sqrt(pmax(zero,var - meansq))
}
sdref <- 1/n*sdref
sdref$iteration <- 1:(L_mcmc-start_interval)
# Same two summaries for the new sampler.
etabarnew <- 1/n*Reduce("+",new)
expecnew <- data.frame(apply(etabarnew[-(1:start_interval),], 2, cummean))
expecnew$iteration <- 1:(L_mcmc-start_interval)
sdnew <- 0
for (i in 1:n){
  var <- data.frame(apply(new[[i]][-(1:start_interval),]^2, 2, cummean))
  meansq <- data.frame(apply(new[[i]][-(1:start_interval),], 2, cummean))^2
  sdnew <- sdnew + sqrt(pmax(zero,var - meansq))
}
sdnew <- 1/n*sdnew
sdnew$iteration <- 1:(L_mcmc-start_interval)
# Compare convergence of the two samplers (iteration column moved first).
plotmcmc(expecref[,c(3,1:2)],expecnew[,c(3,1:2)],title="mean")
plotmcmc(sdref[,c(3,1:2)],sdnew[,c(3,1:2)],title="sd")
# Raw (no burn-in) across-subject average chains.
etaref <- 1/n*Reduce("+",ref)
etaref$iteration <- 1:(L_mcmc)
# plotmcmc(etaref[,c(3,1:2)],etaref[,c(3,1:2)],title="mean")
etanew <- 1/n*Reduce("+",new)
etanew$iteration <- 1:(L_mcmc)
plotmcmc(etaref[,c(3,1:2)],etanew[,c(3,1:2)],title="mean")
# for (i in 1:5){
# ref[[i]]$iteration <- 1:(L_mcmc)
# new[[i]]$iteration <- 1:(L_mcmc)
# plotmcmc(ref[[i]][,c(3,1:2)],new[[i]][,c(3,1:2)],title="mean")
# }
# plotmcmc(ref[[9]][,c(3,1:2)],new[[9]][,c(3,1:2)],title="mean")
# plotmcmc(ref[[5]][,c(3,1:2)],new[[5]][,c(3,1:2)],title="mean")
# #one invdiv
# start_interval <- 200
# zero <- as.data.frame(matrix(0,nrow = L_mcmc-start_interval,ncol = 2))
# for (i in 1:3){
# indetabarref <- ref[[i]]
# indexpecref <- data.frame(apply(indetabarref[-(1:start_interval),], 2, cummean))
# indexpecref$iteration <- 1:(L_mcmc-start_interval)
# indsdref <- 0
# indvar <- data.frame(apply(ref[[i]][-(1:start_interval),]^2, 2, cummean))
# indmeansq <- data.frame(apply(ref[[i]][-(1:start_interval),], 2, cummean))^2
# indsdref <- indsdref + sqrt(pmax(zero,indvar - indmeansq))
# indsdref$iteration <- 1:(L_mcmc-start_interval)
# indetabarnew <- new[[i]]
# indexpecnew <- data.frame(apply(indetabarnew[-(1:start_interval),], 2, cummean))
# indexpecnew$iteration <- 1:(L_mcmc-start_interval)
# indsdnew <- 0
# indvar <- data.frame(apply(new[[i]][-(1:start_interval),]^2, 2, cummean))
# indmeansq <- data.frame(apply(new[[i]][-(1:start_interval),], 2, cummean))^2
# indsdnew <- indsdnew + sqrt(pmax(zero,indvar - indmeansq))
# indsdnew$iteration <- 1:(L_mcmc-start_interval)
# plotmcmc(indexpecref[,c(3,1:2)],indexpecnew[,c(3,1:2)],title=paste("mean",i))
# plotmcmc(indsdref[-c(1:10),c(3,1:2)],indsdnew[-c(1:10),c(3,1:2)],title=paste("sd",i))
# }
# Per-individual convergence diagnostics for a single subject i.
i <- 2
start_interval <- 200
zero <- as.data.frame(matrix(0,nrow = L_mcmc-start_interval,ncol = 2))
#mean and sd
# Running posterior mean of subject i's chain (reference sampler), post burn-in.
indetabarref <- ref[[i]]
indexpecref <- data.frame(apply(indetabarref[-(1:start_interval),], 2, cummean))
indexpecref$iteration <- 1:(L_mcmc-start_interval)
# Running sd via sqrt(E[x^2] - E[x]^2), clipped at zero for numerical safety.
indsdref <- 0
indvar <- data.frame(apply(ref[[i]][-(1:start_interval),]^2, 2, cummean))
indmeansq <- data.frame(apply(ref[[i]][-(1:start_interval),], 2, cummean))^2
indsdref <- indsdref + sqrt(pmax(zero,indvar - indmeansq))
indsdref$iteration <- 1:(L_mcmc-start_interval)
# Same summaries for the new sampler.
indetabarnew <- new[[i]]
indexpecnew <- data.frame(apply(indetabarnew[-(1:start_interval),], 2, cummean))
indexpecnew$iteration <- 1:(L_mcmc-start_interval)
indsdnew <- 0
indvar <- data.frame(apply(new[[i]][-(1:start_interval),]^2, 2, cummean))
indmeansq <- data.frame(apply(new[[i]][-(1:start_interval),], 2, cummean))^2
indsdnew <- indsdnew + sqrt(pmax(zero,indvar - indmeansq))
indsdnew$iteration <- 1:(L_mcmc-start_interval)
# Side-by-side comparison for subject i; the sd plot drops the first 10
# iterations, where the running estimate is still very noisy.
plotmcmc(indexpecref[,c(3,1:2)],indexpecnew[,c(3,1:2)],title=paste("mean",i))
plotmcmc(indsdref[-c(1:10),c(3,1:2)],indsdnew[-c(1:10),c(3,1:2)],title=paste("sd",i))
#quantiles
# Running 5% / 50% / 95% quantiles of individual i's chain, per dimension.
# qref/qnew are seeded with copies of the chain only to obtain data frames of
# the right shape; columns 1:3 are overwritten below. Only list slots 1:2 are
# filled by the loop (the third copy is left untouched).
# NOTE(review): quantile() is recomputed on the growing prefix 1:k, making each
# loop O(L_mcmc^2). Slow for long chains, but kept as-is for fidelity.
qref <- list(ref[[i]][1:L_mcmc,],ref[[i]][1:L_mcmc,],ref[[i]][1:L_mcmc,])
for (dim in 1:2){
  print(dim)
  for (k in 1:L_mcmc){
    qref[[dim]][k,1] <- quantile(ref[[i]][1:k,dim], 0.05)
    qref[[dim]][k,2] <- quantile(ref[[i]][1:k,dim], 0.5)
    qref[[dim]][k,3] <- quantile(ref[[i]][1:k,dim], 0.95)
  }
  qref[[dim]]$iteration <- 1:L_mcmc
}
# Same running quantiles for the new kernel's chain.
qnew <- list(new[[i]][1:L_mcmc,],new[[i]][1:L_mcmc,],new[[i]][1:L_mcmc,])
for (dim in 1:2){
  print(dim)
  for (k in 1:L_mcmc){
    qnew[[dim]][k,1] <- quantile(new[[i]][1:k,dim], 0.05)
    qnew[[dim]][k,2] <- quantile(new[[i]][1:k,dim], 0.5)
    qnew[[dim]][k,3] <- quantile(new[[i]][1:k,dim], 0.95)
  }
  qnew[[dim]]$iteration <- 1:L_mcmc
  # plotmcmc(qref[[dim]][,c(3,1:2)],qnew[[dim]][,c(3,1:2)],title=paste("quantiles",i,"dim", dim))
}
# for (dim in 1:2){
# plotmcmc(qref[[dim]][,c(3,1:2)],qnew[[dim]][,c(3,1:2)],title=paste("quantiles",i,"dim", dim))
# }
#' Plot running quantiles of two MCMC chains side by side.
#'
#' Draws one panel per parameter column: the reference sampler (`df`) as solid
#' blue lines and the new sampler (`df2`) as dashed red lines, with one line
#' per quantile level (grouped by the `quantile` factor column). The first
#' parameter panel is labelled lambda, subsequent ones beta. Panels are
#' arranged in two columns via grid.arrange.
#'
#' @param df    Data frame: iteration, one column per parameter, and a
#'              trailing `quantile` id column.
#' @param df2   Second data frame with the same layout, overlaid in red.
#' @param title Optional overall title passed to grid.arrange.
#' @param ylim  Optional vector of half-widths; panel j is limited to
#'              ylim[j-1] * c(-1, 1).
#' @return The grid.arrange result (invisibly, as returned by do.call).
plotquantile <- function(df,df2, title=NULL, ylim=NULL)
{
  # Group lines by quantile level so each level is drawn as a separate line.
  df$quantile <- as.factor(df$quantile)
  df2$quantile <- as.factor(df2$quantile)
  ylim <-rep(ylim,each=2)
  graf <- vector("list", ncol(df)-2)
  for (j in (2:(ncol(df)-1)))
  {
    # Only the y-axis label differs between parameter panels.
    if (j<3){
      ylab_j <- expression(paste(lambda))
    }else{
      ylab_j <- expression(paste(beta))
    }
    grafj <- ggplot(df)+geom_line(aes_string(df[,1],df[,j],by=df[,ncol(df)]),colour="blue",size=1) +geom_line(aes_string(df2[,1],df2[,j],by=df2[,ncol(df2)]),colour="red",linetype = 2,size=1)+
      xlab("")+scale_x_log10(breaks= c(100,1000,5000))+ theme_bw() +ylab(ylab_j)+ theme(axis.line = element_line(colour = "black"),axis.text.x = element_text(face="bold", color="black",
                           size=15, angle=0),
          axis.text.y = element_text(face="bold", color="black",
                           size=15, angle=0))+theme(axis.title = element_text(family = "Trebuchet MS", color="black", face="bold", size=20))
    if (!is.null(ylim))
      grafj <- grafj + ylim(ylim[j-1]*c(-1,1))
    # Panels are stored at positions 1..(ncol(df)-2).
    graf[[j-1]] <- grafj
  }
  do.call("grid.arrange", c(graf, ncol=2, top=title))
}
iteration <- 1:L_mcmc
# Discard the first `burn` iterations before plotting the quantile traces.
burn <- 100
# One data frame per quantile level; columns = (iteration, dim-1 value,
# dim-2 value), taken from the running-quantile tables built above.
q1ref <- data.frame(cbind(iteration,qref[[1]][,1],qref[[2]][,1]))
q2ref <- data.frame(cbind(iteration,qref[[1]][,2],qref[[2]][,2]))
q3ref <- data.frame(cbind(iteration,qref[[1]][,3],qref[[2]][,3]))
# Tag each block with its quantile id (1 = 5%, 2 = 50%, 3 = 95%) and stack.
q1ref$quantile <- 1
q2ref$quantile <- 2
q3ref$quantile <- 3
quantref <- rbind(q1ref[-c(1:burn),],q2ref[-c(1:burn),],q3ref[-c(1:burn),])
# Same assembly for the new kernel.
q1new <- data.frame(cbind(iteration,qnew[[1]][,1],qnew[[2]][,1]))
q2new <- data.frame(cbind(iteration,qnew[[1]][,2],qnew[[2]][,2]))
q3new <- data.frame(cbind(iteration,qnew[[1]][,3],qnew[[2]][,3]))
q1new$quantile <- 1
q2new$quantile <- 2
q3new$quantile <- 3
quantnew <- rbind(q1new[-c(1:burn),],q2new[-c(1:burn),],q3new[-c(1:burn),])
# Column names hold plotmath expressions (coerced to character by colnames);
# plotquantile itself hard-codes the lambda/beta axis labels, so these names
# are informational only.
colnames(quantref) <- colnames(quantnew)<-c("iteration",expression(paste(lambda)),expression(paste(beta)),"quantile")
plotquantile(quantref,quantnew)
# geweke.plot(mcmc.list(as.mcmc(ref[[10]])), frac1=0.1, frac2=0.5)
# geweke.plot(mcmc.list(as.mcmc(new[[10]])), frac1=0.1, frac2=0.5)
# #cdf
# cdfref <- list(ref[[i]][1:L_mcmc,],ref[[i]][1:L_mcmc,],ref[[i]][1:L_mcmc,])
# for (dim in 1:2){
# print(dim)
# qf1 <- quantile(ref[[i]][,dim], 0.05)
# qf2 <- quantile(ref[[i]][,dim], 0.5)
# qf3 <- quantile(ref[[i]][,dim], 0.95)
# for (k in 1:L_mcmc){
# cdfref[[dim]][k,1] <- mean(ref[[i]][which(ref[[i]][1:k,dim] < qf1),dim])
# cdfref[[dim]][k,2] <- mean(ref[[i]][which(ref[[i]][1:k,dim] < qf2),dim])
# cdfref[[dim]][k,3] <- mean(ref[[i]][which(ref[[i]][1:k,dim] < qf3),dim])
# }
# cdfref[[dim]]$iteration <- 1:L_mcmc
# }
# cdfnew <- list(new[[i]][1:L_mcmc,],new[[i]][1:L_mcmc,],new[[i]][1:L_mcmc,])
# for (dim in 1:2){
# print(dim)
# qf1 <- quantile(new[[i]][,dim], 0.05)
# qf2 <- quantile(new[[i]][,dim], 0.5)
# qf3 <- quantile(new[[i]][,dim], 0.95)
# for (k in 1:L_mcmc){
# cdfnew[[dim]][k,1] <- mean(new[[i]][which(new[[i]][1:k,dim] < qf1),dim])
# cdfnew[[dim]][k,2] <- mean(new[[i]][which(new[[i]][1:k,dim] < qf2),dim])
# cdfnew[[dim]][k,3] <- mean(new[[i]][which(new[[i]][1:k,dim] < qf3),dim])
# }
# cdfnew[[dim]]$iteration <- 1:L_mcmc
# }
# iteration <- 1:L_mcmc
# cdf1ref <- data.frame(cbind(iteration,cdfref[[1]][,1],cdfref[[2]][,1]))
# cdf2ref <- data.frame(cbind(iteration,cdfref[[1]][,2],cdfref[[2]][,2]))
# cdf3ref <- data.frame(cbind(iteration,cdfref[[1]][,3],cdfref[[2]][,3]))
# cdf1ref$quantile <- 1
# cdf2ref$quantile <- 2
# cdf3ref$quantile <- 3
# cdfref <- rbind(cdf1ref[-c(1:10),],cdf2ref[-c(1:10),],cdf3ref[-c(1:10),])
# cdf1new <- data.frame(cbind(iteration,cdfnew[[1]][,1],cdfnew[[2]][,1]))
# cdf2new <- data.frame(cbind(iteration,cdfnew[[1]][,2],cdfnew[[2]][,2]))
# cdf3new <- data.frame(cbind(iteration,cdfnew[[1]][,3],cdfnew[[2]][,3]))
# cdf1new$quantile <- 1
# cdf2new$quantile <- 2
# cdf3new$quantile <- 3
# cdfnew <- rbind(cdf1new[-c(1:10),],cdf2new[-c(1:10),],cdf3new[-c(1:10),])
# plotquantile(cdfref,cdfnew, title= "cdf")
| /FastSAEM - Main/fSAEM_Code/fsaem_newcombination/rtte_compare_mcmc.R | no_license | BelhalK/AccelerationTrainingAlgorithms | R | false | false | 13,390 | r | library("mlxR")
library("psych")
library("coda")
library("Matrix")
library(abind)
require(ggplot2)
require(gridExtra)
require(reshape2)
library(dplyr)
# save.image("rtte_mcmc_conv.RData")
# setwd("/Users/karimimohammedbelhal/Desktop/package_contrib/saemixB/R")
setwd("/Users/karimimohammedbelhal/Desktop/ongoing_research/CSDA/csda_newcombination/R")
source('aaa_generics.R')
source('compute_LL.R')
source('func_aux.R')
source('func_distcond.R')
source('func_FIM.R')
source('func_plots.R')
source('func_simulations.R')
source('main.R')
source('main_estep.R')
source('estep_mcmc.R')
source('main_initialiseMainAlgo.R')
source('main_mstep.R')
source('SaemixData.R')
source('SaemixModel.R')
source('SaemixRes.R')
# source('SaemixRes_c.R')
source('SaemixObject.R')
source('zzz.R')
setwd("/Users/karimimohammedbelhal/Desktop/ongoing_research/CSDA/csda_newcombination")
source('graphplot.R')
load("rtte_mcmc_conv.RData")
###RTTE
timetoevent.saemix <- read.table("/Users/karimimohammedbelhal/Desktop/ongoing_research/CSDA/csda_newcombination/data/rtte_data.csv", header=T, sep=",")
# timetoevent.saemix <- read.table("/Users/karimimohammedbelhal/Desktop/package_contrib/saemixB/data/rttellis.csv", header=T, sep=",")
timetoevent.saemix <- timetoevent.saemix[timetoevent.saemix$ytype==2,]
saemix.data_rtte<-saemixData(name.data=timetoevent.saemix,header=TRUE,sep=" ",na=NA, name.group=c("id"),name.response=c("y"),name.predictors=c("time","y"), name.X=c("time"))
n <- length(unique(timetoevent.saemix$id))
timetoevent.model<-function(psi,id,xidep) {
T<-xidep[,1]
y<-xidep[,2]
N <- nrow(psi)
Nj <- length(T)
censoringtime = 20
lambda <- psi[id,1]
beta <- psi[id,2]
init <- which(T==0)
cens <- which(T==censoringtime)
ind <- setdiff(1:Nj, append(init,cens))
hazard <- (beta/lambda)*(T/lambda)^(beta-1)
H <- (T/lambda)^beta
logpdf <- rep(0,Nj)
logpdf[cens] <- -H[cens] + H[cens-1]
logpdf[ind] <- -H[ind] + H[ind-1] + log(hazard[ind])
return(logpdf)
}
saemix.model_rtte<-saemixModel(model=timetoevent.model,description="time model",type="likelihood",
psi0=matrix(c(2,1),ncol=2,byrow=TRUE,dimnames=list(NULL,
c("lambda","beta"))),
transform.par=c(1,1),covariance.model=matrix(c(1,0,0,1),ncol=2,
byrow=TRUE))
##RUNS
K1 = 200
K2 = 100
iterations = 1:(K1+K2+1)
end = K1+K2
#Weibull
options_rtte<-list(seed=39546,map=F,fim=F,ll.is=F,nbiter.mcmc = c(2,2,2,0), nbiter.saemix = c(K1,K2),nbiter.sa=0,displayProgress=TRUE,nbiter.burn =0, map.range=c(0))
rtte<-data.frame(saemix(saemix.model_rtte,saemix.data_rtte,options_rtte))
rtte <- cbind(iterations, rtte)
options_rttenew<-list(seed=39546,map=F,fim=F,ll.is=F,nbiter.mcmc = c(2,2,2,6), nb.chains=1, nbiter.saemix = c(K1,K2),nbiter.sa=0,displayProgress=TRUE,nbiter.burn =0,map.range=c(1:5))
rttenew<-data.frame(saemix(saemix.model_rtte,saemix.data_rtte,options_rttenew))
rtte <- cbind(iterations, rtte)
saemix.model_rtte<-saemixModel(model=timetoevent.model,description="time model",type="likelihood",
psi0=matrix(c(10.17122,4.577724),ncol=2,byrow=TRUE,dimnames=list(NULL,
c("lambda","beta"))),
transform.par=c(1,1),covariance.model=matrix(c(0.3,0,0,0.3),ncol=2,
byrow=TRUE))
L_mcmc=5000
options_rtte<-list(seed=39546,map=F,fim=F,ll.is=F,L_mcmc=L_mcmc,nbiter.mcmc = c(2,2,2,0),nb.chains=1, nbiter.saemix = c(K1,K2),nbiter.sa=0,displayProgress=TRUE,nbiter.burn =0, map.range=c(0))
ref<-mcmc(saemix.model_rtte,saemix.data_rtte,options_rtte)$eta_ref
options_rttenew<-list(seed=39546,map=F,fim=F,ll.is=F,L_mcmc=L_mcmc,nbiter.mcmc = c(0,0,0,6,0),nb.chains=1, nbiter.saemix = c(K1,K2),nbiter.sa=0,displayProgress=TRUE,nbiter.burn =0, map.range=c(0))
new<-mcmc(saemix.model_rtte,saemix.data_rtte,options_rttenew)$eta
start_interval <- 200
zero <- as.data.frame(matrix(0,nrow = L_mcmc-start_interval,ncol = 2))
etabarref <- 1/n*Reduce("+",ref)
expecref <- data.frame(apply(etabarref[-(1:start_interval),], 2, cummean))
expecref$iteration <- 1:(L_mcmc-start_interval)
sdref <- 0
for (i in 1:n){
var <- data.frame(apply(ref[[i]][-(1:start_interval),]^2, 2, cummean))
meansq <- data.frame(apply(ref[[i]][-(1:start_interval),], 2, cummean))^2
sdref <- sdref + sqrt(pmax(zero,var - meansq))
}
sdref <- 1/n*sdref
sdref$iteration <- 1:(L_mcmc-start_interval)
etabarnew <- 1/n*Reduce("+",new)
expecnew <- data.frame(apply(etabarnew[-(1:start_interval),], 2, cummean))
expecnew$iteration <- 1:(L_mcmc-start_interval)
sdnew <- 0
for (i in 1:n){
var <- data.frame(apply(new[[i]][-(1:start_interval),]^2, 2, cummean))
meansq <- data.frame(apply(new[[i]][-(1:start_interval),], 2, cummean))^2
sdnew <- sdnew + sqrt(pmax(zero,var - meansq))
}
sdnew <- 1/n*sdnew
sdnew$iteration <- 1:(L_mcmc-start_interval)
plotmcmc(expecref[,c(3,1:2)],expecnew[,c(3,1:2)],title="mean")
plotmcmc(sdref[,c(3,1:2)],sdnew[,c(3,1:2)],title="sd")
etaref <- 1/n*Reduce("+",ref)
etaref$iteration <- 1:(L_mcmc)
# plotmcmc(etaref[,c(3,1:2)],etaref[,c(3,1:2)],title="mean")
etanew <- 1/n*Reduce("+",new)
etanew$iteration <- 1:(L_mcmc)
plotmcmc(etaref[,c(3,1:2)],etanew[,c(3,1:2)],title="mean")
# for (i in 1:5){
# ref[[i]]$iteration <- 1:(L_mcmc)
# new[[i]]$iteration <- 1:(L_mcmc)
# plotmcmc(ref[[i]][,c(3,1:2)],new[[i]][,c(3,1:2)],title="mean")
# }
# plotmcmc(ref[[9]][,c(3,1:2)],new[[9]][,c(3,1:2)],title="mean")
# plotmcmc(ref[[5]][,c(3,1:2)],new[[5]][,c(3,1:2)],title="mean")
# #one invdiv
# start_interval <- 200
# zero <- as.data.frame(matrix(0,nrow = L_mcmc-start_interval,ncol = 2))
# for (i in 1:3){
# indetabarref <- ref[[i]]
# indexpecref <- data.frame(apply(indetabarref[-(1:start_interval),], 2, cummean))
# indexpecref$iteration <- 1:(L_mcmc-start_interval)
# indsdref <- 0
# indvar <- data.frame(apply(ref[[i]][-(1:start_interval),]^2, 2, cummean))
# indmeansq <- data.frame(apply(ref[[i]][-(1:start_interval),], 2, cummean))^2
# indsdref <- indsdref + sqrt(pmax(zero,indvar - indmeansq))
# indsdref$iteration <- 1:(L_mcmc-start_interval)
# indetabarnew <- new[[i]]
# indexpecnew <- data.frame(apply(indetabarnew[-(1:start_interval),], 2, cummean))
# indexpecnew$iteration <- 1:(L_mcmc-start_interval)
# indsdnew <- 0
# indvar <- data.frame(apply(new[[i]][-(1:start_interval),]^2, 2, cummean))
# indmeansq <- data.frame(apply(new[[i]][-(1:start_interval),], 2, cummean))^2
# indsdnew <- indsdnew + sqrt(pmax(zero,indvar - indmeansq))
# indsdnew$iteration <- 1:(L_mcmc-start_interval)
# plotmcmc(indexpecref[,c(3,1:2)],indexpecnew[,c(3,1:2)],title=paste("mean",i))
# plotmcmc(indsdref[-c(1:10),c(3,1:2)],indsdnew[-c(1:10),c(3,1:2)],title=paste("sd",i))
# }
i <- 2
start_interval <- 200
zero <- as.data.frame(matrix(0,nrow = L_mcmc-start_interval,ncol = 2))
#mean and sd
indetabarref <- ref[[i]]
indexpecref <- data.frame(apply(indetabarref[-(1:start_interval),], 2, cummean))
indexpecref$iteration <- 1:(L_mcmc-start_interval)
indsdref <- 0
indvar <- data.frame(apply(ref[[i]][-(1:start_interval),]^2, 2, cummean))
indmeansq <- data.frame(apply(ref[[i]][-(1:start_interval),], 2, cummean))^2
indsdref <- indsdref + sqrt(pmax(zero,indvar - indmeansq))
indsdref$iteration <- 1:(L_mcmc-start_interval)
indetabarnew <- new[[i]]
indexpecnew <- data.frame(apply(indetabarnew[-(1:start_interval),], 2, cummean))
indexpecnew$iteration <- 1:(L_mcmc-start_interval)
indsdnew <- 0
indvar <- data.frame(apply(new[[i]][-(1:start_interval),]^2, 2, cummean))
indmeansq <- data.frame(apply(new[[i]][-(1:start_interval),], 2, cummean))^2
indsdnew <- indsdnew + sqrt(pmax(zero,indvar - indmeansq))
indsdnew$iteration <- 1:(L_mcmc-start_interval)
plotmcmc(indexpecref[,c(3,1:2)],indexpecnew[,c(3,1:2)],title=paste("mean",i))
plotmcmc(indsdref[-c(1:10),c(3,1:2)],indsdnew[-c(1:10),c(3,1:2)],title=paste("sd",i))
#quantiles
qref <- list(ref[[i]][1:L_mcmc,],ref[[i]][1:L_mcmc,],ref[[i]][1:L_mcmc,])
for (dim in 1:2){
print(dim)
for (k in 1:L_mcmc){
qref[[dim]][k,1] <- quantile(ref[[i]][1:k,dim], 0.05)
qref[[dim]][k,2] <- quantile(ref[[i]][1:k,dim], 0.5)
qref[[dim]][k,3] <- quantile(ref[[i]][1:k,dim], 0.95)
}
qref[[dim]]$iteration <- 1:L_mcmc
}
qnew <- list(new[[i]][1:L_mcmc,],new[[i]][1:L_mcmc,],new[[i]][1:L_mcmc,])
for (dim in 1:2){
print(dim)
for (k in 1:L_mcmc){
qnew[[dim]][k,1] <- quantile(new[[i]][1:k,dim], 0.05)
qnew[[dim]][k,2] <- quantile(new[[i]][1:k,dim], 0.5)
qnew[[dim]][k,3] <- quantile(new[[i]][1:k,dim], 0.95)
}
qnew[[dim]]$iteration <- 1:L_mcmc
# plotmcmc(qref[[dim]][,c(3,1:2)],qnew[[dim]][,c(3,1:2)],title=paste("quantiles",i,"dim", dim))
}
# for (dim in 1:2){
# plotmcmc(qref[[dim]][,c(3,1:2)],qnew[[dim]][,c(3,1:2)],title=paste("quantiles",i,"dim", dim))
# }
plotquantile <- function(df,df2, title=NULL, ylim=NULL)
{
G <- (ncol(df)-2)/3
df$quantile <- as.factor(df$quantile)
df2$quantile <- as.factor(df2$quantile)
ylim <-rep(ylim,each=2)
graf <- vector("list", ncol(df)-2)
o <- c(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
for (j in (2:(ncol(df)-1)))
{
if (j<3){
grafj <- ggplot(df)+geom_line(aes_string(df[,1],df[,j],by=df[,ncol(df)]),colour="blue",size=1) +geom_line(aes_string(df2[,1],df2[,j],by=df2[,ncol(df2)]),colour="red",linetype = 2,size=1)+
xlab("")+scale_x_log10(breaks= c(100,1000,5000))+ theme_bw() +ylab(expression(paste(lambda)))+ theme(axis.line = element_line(colour = "black"),axis.text.x = element_text(face="bold", color="black",
size=15, angle=0),
axis.text.y = element_text(face="bold", color="black",
size=15, angle=0))+theme(axis.title = element_text(family = "Trebuchet MS", color="black", face="bold", size=20))
}else{
grafj <- ggplot(df)+geom_line(aes_string(df[,1],df[,j],by=df[,ncol(df)]),colour="blue",size=1) +geom_line(aes_string(df2[,1],df2[,j],by=df2[,ncol(df2)]),colour="red",linetype = 2,size=1)+
xlab("")+scale_x_log10(breaks= c(100,1000,5000))+ theme_bw() +ylab(expression(paste(beta)))+ theme(axis.line = element_line(colour = "black"),axis.text.x = element_text(face="bold", color="black",
size=15, angle=0),
axis.text.y = element_text(face="bold", color="black",
size=15, angle=0))+theme(axis.title = element_text(family = "Trebuchet MS", color="black", face="bold", size=20))
}
if (!is.null(ylim))
grafj <- grafj + ylim(ylim[j-1]*c(-1,1))
graf[[o[j]]] <- grafj
}
do.call("grid.arrange", c(graf, ncol=2, top=title))
}
iteration <- 1:L_mcmc
burn <- 100
q1ref <- data.frame(cbind(iteration,qref[[1]][,1],qref[[2]][,1]))
q2ref <- data.frame(cbind(iteration,qref[[1]][,2],qref[[2]][,2]))
q3ref <- data.frame(cbind(iteration,qref[[1]][,3],qref[[2]][,3]))
q1ref$quantile <- 1
q2ref$quantile <- 2
q3ref$quantile <- 3
quantref <- rbind(q1ref[-c(1:burn),],q2ref[-c(1:burn),],q3ref[-c(1:burn),])
q1new <- data.frame(cbind(iteration,qnew[[1]][,1],qnew[[2]][,1]))
q2new <- data.frame(cbind(iteration,qnew[[1]][,2],qnew[[2]][,2]))
q3new <- data.frame(cbind(iteration,qnew[[1]][,3],qnew[[2]][,3]))
q1new$quantile <- 1
q2new$quantile <- 2
q3new$quantile <- 3
quantnew <- rbind(q1new[-c(1:burn),],q2new[-c(1:burn),],q3new[-c(1:burn),])
colnames(quantref) <- colnames(quantnew)<-c("iteration",expression(paste(lambda)),expression(paste(beta)),"quantile")
plotquantile(quantref,quantnew)
# geweke.plot(mcmc.list(as.mcmc(ref[[10]])), frac1=0.1, frac2=0.5)
# geweke.plot(mcmc.list(as.mcmc(new[[10]])), frac1=0.1, frac2=0.5)
# #cdf
# cdfref <- list(ref[[i]][1:L_mcmc,],ref[[i]][1:L_mcmc,],ref[[i]][1:L_mcmc,])
# for (dim in 1:2){
# print(dim)
# qf1 <- quantile(ref[[i]][,dim], 0.05)
# qf2 <- quantile(ref[[i]][,dim], 0.5)
# qf3 <- quantile(ref[[i]][,dim], 0.95)
# for (k in 1:L_mcmc){
# cdfref[[dim]][k,1] <- mean(ref[[i]][which(ref[[i]][1:k,dim] < qf1),dim])
# cdfref[[dim]][k,2] <- mean(ref[[i]][which(ref[[i]][1:k,dim] < qf2),dim])
# cdfref[[dim]][k,3] <- mean(ref[[i]][which(ref[[i]][1:k,dim] < qf3),dim])
# }
# cdfref[[dim]]$iteration <- 1:L_mcmc
# }
# cdfnew <- list(new[[i]][1:L_mcmc,],new[[i]][1:L_mcmc,],new[[i]][1:L_mcmc,])
# for (dim in 1:2){
# print(dim)
# qf1 <- quantile(new[[i]][,dim], 0.05)
# qf2 <- quantile(new[[i]][,dim], 0.5)
# qf3 <- quantile(new[[i]][,dim], 0.95)
# for (k in 1:L_mcmc){
# cdfnew[[dim]][k,1] <- mean(new[[i]][which(new[[i]][1:k,dim] < qf1),dim])
# cdfnew[[dim]][k,2] <- mean(new[[i]][which(new[[i]][1:k,dim] < qf2),dim])
# cdfnew[[dim]][k,3] <- mean(new[[i]][which(new[[i]][1:k,dim] < qf3),dim])
# }
# cdfnew[[dim]]$iteration <- 1:L_mcmc
# }
# iteration <- 1:L_mcmc
# cdf1ref <- data.frame(cbind(iteration,cdfref[[1]][,1],cdfref[[2]][,1]))
# cdf2ref <- data.frame(cbind(iteration,cdfref[[1]][,2],cdfref[[2]][,2]))
# cdf3ref <- data.frame(cbind(iteration,cdfref[[1]][,3],cdfref[[2]][,3]))
# cdf1ref$quantile <- 1
# cdf2ref$quantile <- 2
# cdf3ref$quantile <- 3
# cdfref <- rbind(cdf1ref[-c(1:10),],cdf2ref[-c(1:10),],cdf3ref[-c(1:10),])
# cdf1new <- data.frame(cbind(iteration,cdfnew[[1]][,1],cdfnew[[2]][,1]))
# cdf2new <- data.frame(cbind(iteration,cdfnew[[1]][,2],cdfnew[[2]][,2]))
# cdf3new <- data.frame(cbind(iteration,cdfnew[[1]][,3],cdfnew[[2]][,3]))
# cdf1new$quantile <- 1
# cdf2new$quantile <- 2
# cdf3new$quantile <- 3
# cdfnew <- rbind(cdf1new[-c(1:10),],cdf2new[-c(1:10),],cdf3new[-c(1:10),])
# plotquantile(cdfref,cdfnew, title= "cdf")
|
#' @title Plot regression models
#' @name plot_model
#'
#' @description
#' \code{plot_model()} creates plots from regression models, either
#' estimates (as so-called forest or dot whisker plots) or marginal effects.
#'
#' @param model A regression model object. Depending on the \code{type}, many
#' kinds of models are supported, e.g. from packages like \pkg{stats},
#' \pkg{lme4}, \pkg{nlme}, \pkg{rstanarm}, \pkg{survey}, \pkg{glmmTMB},
#' \pkg{MASS}, \pkg{brms} etc.
#' @param type Type of plot. There are three groups of plot-types: \cr \cr
#' \emph{Coefficients} (\href{../doc/plot_model_estimates.html}{related vignette})
#' \describe{
#' \item{\code{type = "est"}}{Forest-plot of estimates. If the fitted model
#' only contains one predictor, slope-line is plotted.}
#' \item{\code{type = "re"}}{For mixed effects models, plots the random
#' effects.}
#' \item{\code{type = "std"}}{Forest-plot of standardized beta values.}
#' \item{\code{type = "std2"}}{Forest-plot of standardized beta values,
#' however, standardization is done by dividing by two sd (see 'Details').}
#' }
#' \emph{Marginal Effects} (\href{../doc/plot_marginal_effects.html}{related vignette})
#' \describe{
#' \item{\code{type = "pred"}}{Predicted values (marginal effects) for
#' specific model terms. See \code{\link[ggeffects]{ggpredict}} for details.}
#' \item{\code{type = "eff"}}{Similar to \code{type = "pred"}, however,
#' discrete predictors are held constant at their proportions (not reference
#' level). See \code{\link[ggeffects]{ggeffect}} for details.}
#' \item{\code{type = "int"}}{Marginal effects of interaction terms in
#' \code{model}.}
#' }
#' \emph{Model diagnostics}
#' \describe{
#' \item{\code{type = "slope"}}{Slope of coefficients for each single
#' predictor, against the response (linear relationship between each model
#' term and response).}
#' \item{\code{type = "resid"}}{Slope of coefficients for each single
#' predictor, against the residuals (linear relationship between each model
#' term and residuals).}
#' \item{\code{type = "diag"}}{Check model assumptions.}
#' }
#' \strong{Note:} For mixed models, the diagnostic plots like linear relationship
#' or check for Homoscedasticity, do \strong{not} take the uncertainty of
#' random effects into account, but is only based on the fixed effects part
#' of the model.
#' @param transform A character vector, naming a function that will be applied
#' on estimates and confidence intervals. By default, \code{transform} will
#' automatically use \code{"exp"} as transformation for applicable classes of
#' \code{model} (e.g. logistic or poisson regression). Estimates of linear
#' models remain untransformed. Use \code{NULL} if you want the raw,
#' non-transformed estimates.
#' @param terms Character vector with the names of those terms from \code{model}
#' that should be plotted. This argument depends on the plot-type: \describe{
#' \item{\emph{Coefficients}}{ Select terms that should be plotted. All other
#' term are removed from the output. } \item{\emph{Marginal Effects}}{ Here
#' \code{terms} indicates for which terms marginal effects should be
#' displayed. At least one term is required to calculate effects, maximum
#' length is three terms, where the second and third term indicate the groups,
#' i.e. predictions of first term are grouped by the levels of the second (and
#' third) term. \code{terms} may also indicate higher order terms (e.g.
#' interaction terms). Indicating levels in square brackets allows for
#' selecting only specific groups. Term name and levels in brackets must be
#' separated by a whitespace character, e.g. \code{terms = c("age", "education
#' [1,3]")}. For more details, see \code{\link[ggeffects]{ggpredict}}. } }
#' @param sort.est Determines in which way estimates are sorted in the plot:
#' \itemize{ \item If \code{NULL} (default), no sorting is done and estimates
#' are sorted in the same order as they appear in the model formula. \item If
#' \code{TRUE}, estimates are sorted in descending order, with highedt
#' estimate at the top. \item If \code{sort.est = "sort.all"}, estimates are
#' re-sorted for each coefficient (only applies if \code{type = "re"} and
#' \code{grid = FALSE}), i.e. the estimates of the random effects for each
#' predictor are sorted and plotted to an own plot. \item If \code{type =
#' "re"}, specify a predictor's / coefficient's name to sort estimates
#' according to this random effect. }
#' @param rm.terms Character vector with names that indicate which terms should
#' be removed from the plot. Counterpart to \code{terms}. \code{rm.terms =
#' "t_name"} would remove the term \emph{t_name}. Default is \code{NULL}, i.e.
#' all terms are used. Note that this argument does not apply to
#' \emph{Marginal Effects} plots.
#' @param group.terms Numeric vector with group indices, to group coefficients.
#' Each group of coefficients gets its own color (see 'Examples').
#' @param order.terms Numeric vector, indicating in which order the coefficients
#' should be plotted. See examples in
#' \href{../doc/plot_model_estimates.html}{this package-vignette}.
#' @param pred.type Character, only applies for \emph{Marginal Effects} plots
#' with mixed effects models. Indicates whether predicted values should be
#' conditioned on random effects (\code{pred.type = "re"}) or fixed effects
#' only (\code{pred.type = "fe"}, the default).
#' @param mdrt.values Indicates which values of the moderator variable should be
#' used when plotting interaction terms (i.e. \code{type = "int"}). \describe{
#' \item{\code{"minmax"}}{(default) minimum and maximum values (lower and
#' upper bounds) of the moderator are used to plot the interaction between
#' independent variable and moderator(s).} \item{\code{"meansd"}}{uses the
#' mean value of the moderator as well as one standard deviation below and
#' above mean value to plot the effect of the moderator on the independent
#' variable (following the convention suggested by Cohen and Cohen and
#' popularized by Aiken and West (1991), i.e. using the mean, the value one
#' standard deviation above, and the value one standard deviation below the
#' mean as values of the moderator, see
#' \href{http://www.theanalysisfactor.com/3-tips-interpreting-moderation/}{Grace-Martin
#' K: 3 Tips to Make Interpreting Moderation Effects Easier}).}
#' \item{\code{"zeromax"}}{is similar to the \code{"minmax"} option, however,
#' \code{0} is always used as minimum value for the moderator. This may be
#' useful for predictors that don't have an empirical zero-value, but absence
#' of moderation should be simulated by using 0 as minimum.}
#' \item{\code{"quart"}}{calculates and uses the quartiles (lower, median and
#' upper) of the moderator value.} \item{\code{"all"}}{uses all values of the
#' moderator variable.} }
#' @param ri.nr Numeric vector. If \code{type = "re"} and fitted model has more
#' than one random intercept, \code{ri.nr} indicates which random effects of
#' which random intercept (or: which list elements of
#' \code{\link[lme4]{ranef}}) will be plotted. Default is \code{NULL}, so all
#' random effects will be plotted.
#' @param title Character vector, used as plot title. By default,
#' \code{\link[sjlabelled]{get_dv_labels}} is called to retrieve the label of
#' the dependent variable, which will be used as title. Use \code{title = ""}
#' to remove title.
#' @param axis.title Character vector of length one or two (depending on the
#' plot function and type), used as title(s) for the x and y axis. If not
#' specified, a default labelling is chosen. \strong{Note:} Some plot types
#' may not support this argument sufficiently. In such cases, use the returned
#' ggplot-object and add axis titles manually with
#' \code{\link[ggplot2]{labs}}. Use \code{axis.title = ""} to remove axis
#' titles.
#' @param axis.labels Character vector with labels for the model terms, used as
#' axis labels. By default, \code{\link[sjlabelled]{get_term_labels}} is
#' called to retrieve the labels of the coefficients, which will be used as
#' axis labels. Use \code{axis.labels = ""} or \code{auto.label = FALSE} to
#' use the variable names as labels instead.
#' @param axis.lim Numeric vector of length 2, defining the range of the plot
#' axis. Depending on plot-type, may effect either x- or y-axis. For
#' \emph{Marginal Effects} plots, \code{axis.lim} may also be a list of two
#' vectors of length 2, defining axis limits for both the x and y axis.
#' @param grid.breaks Numeric value or vector; if \code{grid.breaks} is a
#' single value, sets the distance between breaks for the axis at every
#' \code{grid.breaks}'th position, where a major grid line is plotted. If
#' \code{grid.breaks} is a vector, values will be used to define the
#' axis positions of the major grid lines.
#' @param ci.lvl Numeric, the level of the confidence intervals (error bars).
#' Use \code{ci.lvl = NA} to remove error bars. For \code{stanreg}-models,
#' \code{ci.lvl} defines the (outer) probability for the
#' \code{\link[sjstats]{hdi}} (High Density Interval) that is plotted. By
#' default, \code{stanreg}-models are printed with two intervals: the "inner"
#' interval, which defaults to the 50\%-HDI; and the "outer" interval, which
#' defaults to the 89\%-HDI. \code{ci.lvl} affects only the outer interval in
#' such cases. See \code{prob.inner} and \code{prob.outer} under the
#' \code{...}-argument for more details.
#' @param se Either a logical, and if \code{TRUE}, error bars indicate standard
#' errors, not confidence intervals. Or a character vector with a specification
#' of the covariance matrix to compute robust standard errors (see argument
#' \code{vcov} of \code{link[sjstats]{robust}} for valid values; robust standard
#' errors are only supported for models that work with \code{\link[lmtest]{coeftest}}).
#' \code{se} overrides \code{ci.lvl}: if not \code{NULL}, arguments \code{ci.lvl}
#' and \code{transform} will be ignored. Currently, \code{se} only applies
#' to \emph{Coefficients} plots.
#' @param show.intercept Logical, if \code{TRUE}, the intercept of the fitted
#' model is also plotted. Default is \code{FALSE}. If \code{transform =
#' "exp"}, please note that due to exponential transformation of estimates,
#' the intercept in some cases is non-finite and the plot can not be created.
#' @param show.values Logical, whether values should be plotted or not.
#' @param show.p Logical, adds asterisks that indicate the significance level of
#' estimates to the value labels.
#' @param show.data Logical, for \emph{Marginal Effects} plots, also plots the
#' raw data points.
#' @param show.legend For \emph{Marginal Effects} plots, shows or hides the
#' legend.
#' @param value.offset Numeric, offset for text labels to adjust their position
#' relative to the dots or lines.
#' @param dot.size Numeric, size of the dots that indicate the point estimates.
#' @param line.size Numeric, size of the lines that indicate the error bars.
#' @param colors May be a character vector of color values in hex-format, valid
#' color value names (see \code{demo("colors")}) or a name of a pre-defined
#' color palette. Following options are valid for the \code{colors} argument:
#' \itemize{
#' \item If not specified, a default color brewer palette will be used, which is suitable for the plot style.
#' \item If \code{"gs"}, a greyscale will be used.
#' \item If \code{"bw"}, and plot-type is a line-plot, the plot is black/white and uses different line types to distinguish groups (see \href{../doc/blackwhitefigures.html}{this package-vignette}).
#' \item If \code{colors} is any valid color brewer palette name, the related palette will be used. Use \code{\link[RColorBrewer]{display.brewer.all}} to view all available palette names.
#' \item If \pkg{wesanderson} is installed, you may also specify a name of a palette from that package.
#' \item If \pkg{viridis} is installed, use \code{colors = "v"} to get the viridis color palette.
#' \item There are some pre-defined color palettes in this package, see \code{\link{sjplot-themes}} for details.
#' \item Else specify own color values or names as vector (e.g. \code{colors = "#00ff00"} or \code{colors = c("firebrick", "blue")}).
#' }
#' @param grid Logical, if \code{TRUE}, multiple plots are plotted as grid
#' layout.
#' @param wrap.title Numeric, determines how many chars of the plot title are
#' displayed in one line and when a line break is inserted.
#' @param wrap.labels Numeric, determines how many chars of the value, variable
#' or axis labels are displayed in one line and when a line break is inserted.
#' @param case Desired target case. Labels will automatically converted into the
#' specified character case. See \code{\link[snakecase]{to_any_case}} for more
#' details on this argument.
#' @param auto.label Logical, if \code{TRUE} (the default), plot-labels are
#' based on value and variable labels, if the data is labelled. See
#' \code{\link[sjlabelled]{get_label}} and
#' \code{\link[sjlabelled]{get_term_labels}} for details. If \code{FALSE},
#' original variable names and value labels (factor levels) are used.
#' @param digits Numeric, amount of digits after decimal point when rounding
#' estimates or values.
#' @param value.size Numeric, indicates the size of value labels. Can be used
#' for all plot types where the argument \code{show.values} is applicable,
#' e.g. \code{value.size = 4}.
#' @param vline.color Color of the vertical "zero effect" line. Default color is
#' inherited from the current theme.
#' @param bpe For \strong{Stan}-models (fitted with the \pkg{rstanarm}- or
#' \pkg{brms}-package), the Bayesian point estimate is, by default, the median
#' of the posterior distribution. Use \code{bpe} to define other functions to
#'   calculate the Bayesian point estimate. \code{bpe} needs to be a character
#' naming the specific function, which is passed to the \code{fun}-argument in
#' \code{\link[sjstats]{typical_value}}. So, \code{bpe = "mean"} would
#' calculate the mean value of the posterior distribution.
#' @param bpe.style For \strong{Stan}-models (fitted with the \pkg{rstanarm}- or
#' \pkg{brms}-package), the Bayesian point estimate is indicated as a small,
#' vertical line by default. Use \code{bpe.style = "dot"} to plot a dot
#' instead of a line for the point estimate.
#' @param ... Other arguments, passed down to various functions. Here is a list
#' of supported arguments and their description in detail.
#' \describe{
#' \item{\code{prob.inner} and \code{prob.outer}}{For \strong{Stan}-models
#' (fitted with the \pkg{rstanarm}- or \pkg{brms}-package) and coefficients
#' plot-types, you can specify numeric values between 0 and 1 for
#' \code{prob.inner} and \code{prob.outer}, which will then be used as inner
#' and outer probabilities for the uncertainty intervals (HDI). By default,
#' the inner probability is 0.5 and the outer probability is 0.89 (unless
#' \code{ci.lvl} is specified - in this case, \code{ci.lvl} is used as outer
#' probability).
#' }
#' \item{\code{size.inner}}{For \strong{Stan}-models and \emph{Coefficients}
#' plot-types, you can specify the width of the bar for the inner
#' probabilities. Default is \code{0.1}.
#' }
#' \item{\code{width}, \code{alpha} and \code{scale}}{Passed down to
#' \code{geom_errorbar()} or \code{geom_density_ridges()}, for forest or
#' diagnostic plots; or passed down to \code{\link[ggeffects]{plot.ggeffects}}
#' for \emph{Marginal Effects} plots.
#' }
#' \item{\code{show.loess}}{Logical, for diagnostic plot-types \code{"slope"}
#' and \code{"resid"}, adds (or hides) a loess-smoothed line to the plot.
#' }
#' \item{\emph{Marginal Effects} plot-types}{When plotting marginal effects,
#' arguments are also passed down to \code{\link[ggeffects]{ggpredict}},
#' \code{\link[ggeffects]{ggeffect}} or \code{\link[ggeffects]{plot.ggeffects}}.
#' }
#' \item{Case conversion of labels}{For case conversion of labels (see argument
#' \code{case}), arguments \code{sep_in} and \code{sep_out} will be passed
#' down to \code{\link[snakecase]{to_any_case}}. This only
#' applies to automatically retrieved term labels, \emph{not} if
#' term labels are provided by the \code{axis.labels}-argument.
#' }
#' }
#'
#' @return
#' Depending on the plot-type, \code{plot_model()} returns a
#' \code{ggplot}-object or a list of such objects. \code{get_model_data}
#' returns the associated data with the plot-object as tidy data frame, or
#' (depending on the plot-type) a list of such data frames.
#'
#' @details
#' \code{get_model_data} simply calls \code{plot_model()} and returns
#' the data from the ggplot-object. Hence, it is rather inefficient and should
#'   be used as alternative to \pkg{broom}'s \code{tidy()}-function only in
#' specific situations. \cr \cr Some notes on the different plot-types:
#' \describe{
#' \item{\code{type = "std2"}}{Plots standardized beta values,
#' however, standardization follows Gelman's (2008) suggestion, rescaling the
#' estimates by dividing them by two standard deviations instead of just one.
#' Resulting coefficients are then directly comparable for untransformed
#' binary predictors. This standardization uses the
#' \code{\link[arm]{standardize}}-function from the \pkg{arm}-package.
#' }
#' \item{\code{type = "pred"}}{Plots marginal effects. Simply wraps
#' \code{\link[ggeffects]{ggpredict}}.
#' }
#' \item{\code{type = "eff"}}{Plots marginal effects. Simply wraps
#' \code{\link[ggeffects]{ggeffect}}.
#' }
#' \item{\code{type = "int"}}{A shortcut for marginal effects plots, where
#' interaction terms are automatically detected and used as
#' \code{terms}-argument. Furthermore, if the moderator variable (the second
#' - and third - term in an interaction) is continuous, \code{type = "int"}
#' automatically chooses useful values based on the \code{mdrt.values}-argument,
#' which are passed to \code{terms}. Then, \code{\link[ggeffects]{ggpredict}}
#' is called. \code{type = "int"} plots the interaction term that appears
#' first in the formula along the x-axis, while the second (and possibly
#' third) variable in an interaction is used as grouping factor(s)
#' (moderating variable). Use \code{type = "pred"} or \code{type = "eff"}
#' and specify a certain order in the \code{terms}-argument to indicate
#' which variable(s) should be used as moderator.}
#' }
#'
#' @note
#' \code{plot_model()} replaces the functions \code{sjp.lm},
#' \code{sjp.glm}, \code{sjp.lmer}, \code{sjp.glmer} and \code{sjp.int}. These
#' are becoming softly deprecated and will be removed in a future update.
#'
#' @references
#' Gelman A (2008) "Scaling regression inputs by dividing by two
#' standard deviations." \emph{Statistics in Medicine 27: 2865–2873.}
#' \url{http://www.stat.columbia.edu/~gelman/research/published/standardizing7.pdf}
#' \cr \cr
#' Aiken and West (1991). Multiple Regression: Testing and Interpreting Interactions.
#'
#' @examples
#' # prepare data
#' library(sjmisc)
#' data(efc)
#' efc <- to_factor(efc, c161sex, e42dep, c172code)
#' m <- lm(neg_c_7 ~ pos_v_4 + c12hour + e42dep + c172code, data = efc)
#'
#' # simple forest plot
#' plot_model(m)
#'
#' # grouped coefficients
#' plot_model(m, group.terms = c(1, 2, 3, 3, 3, 4, 4))
#'
#' # multiple plots, as returned from "diagnostic"-plot type,
#' # can be arranged with 'plot_grid()'
#' \dontrun{
#' p <- plot_model(m, type = "diag")
#' plot_grid(p)}
#'
#' # plot random effects
#' library(lme4)
#' m <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
#' plot_model(m, type = "re")
#'
#' # plot marginal effects
#' plot_model(m, type = "eff", terms = "Days")
#'
#' # plot interactions
#' \dontrun{
#' m <- glm(
#' tot_sc_e ~ c161sex + c172code * neg_c_7,
#' data = efc,
#' family = poisson()
#' )
#' # type = "int" automatically selects groups for continuous moderator
#' # variables - see argument 'mdrt.values'. The following function call is
#' # identical to:
#' # plot_model(m, type = "pred", terms = c("c172code", "neg_c_7 [7,28]"))
#' plot_model(m, type = "int")
#'
#' # switch moderator
#' plot_model(m, type = "pred", terms = c("neg_c_7", "c172code"))
#' # same as
#' # ggeffects::ggpredict(m, terms = c("neg_c_7", "c172code"))}
#'
#' # plot Stan-model
#' \dontrun{
#' if (require("rstanarm")) {
#' data(mtcars)
#' m <- stan_glm(mpg ~ wt + am + cyl + gear, data = mtcars, chains = 1)
#' plot_model(m, bpe.style = "dot")
#' }}
#'
#' @importFrom sjstats pred_vars std_beta p_value
#' @importFrom sjmisc word_wrap str_contains
#' @importFrom sjlabelled get_dv_labels get_term_labels
#' @importFrom dplyr if_else n_distinct
#' @importFrom graphics plot
#' @importFrom ggeffects ggpredict ggeffect
#' @importFrom stats terms
#' @importFrom tibble add_column
#'
#' @export
plot_model <- function(model,
                       type = c("est", "re", "eff", "pred", "int", "std", "std2", "slope", "resid", "diag"),
                       transform,
                       terms = NULL,
                       sort.est = NULL,
                       rm.terms = NULL,
                       group.terms = NULL,
                       order.terms = NULL,
                       pred.type = c("fe", "re"),
                       mdrt.values = c("minmax", "meansd", "zeromax", "quart", "all"),
                       ri.nr = NULL,
                       title = NULL,
                       axis.title = NULL,
                       axis.labels = NULL,
                       wrap.title = 50,
                       wrap.labels = 25,
                       axis.lim = NULL,
                       grid.breaks = NULL,
                       ci.lvl = NULL,
                       se = NULL,
                       colors = "Set1",
                       show.intercept = FALSE,
                       show.values = FALSE,
                       show.p = TRUE,
                       show.data = FALSE,
                       show.legend = TRUE,
                       value.offset = NULL,
                       value.size,
                       digits = 2,
                       dot.size = NULL,
                       line.size = NULL,
                       vline.color = NULL,
                       grid,
                       case = "parsed",
                       auto.label = TRUE,
                       bpe = "median",
                       bpe.style = "line",
                       ...
                       ) {

  # validate enumerated arguments early, so an invalid value
  # errors before any model inspection is done
  type <- match.arg(type)
  pred.type <- match.arg(pred.type)
  mdrt.values <- match.arg(mdrt.values)

  # check se-argument; standard/robust errors only apply to some plot types
  se <- check_se_argument(se = se, type = type)

  # get info on model family; used to pick the default transformation
  # and to decide whether "slope"-type plots are applicable
  fam.info <- get_glm_family(model)

  # check whether estimates should be transformed or not:
  # linear models stay on the raw scale, all other families are
  # exponentiated by default (e.g. log-odds -> odds ratios)
  if (missing(transform)) {
    if (fam.info$is_linear)
      transform <- NULL
    else
      transform <- "exp"
  }

  # get titles and labels for axis ----
  # this is not appropriate when plotting random effects,
  # so retrieve labels only for other plot types
  if (type %in% c("est", "std", "std2") && isTRUE(auto.label)) {
    # get labels of dependent variables, and wrap them if too long
    if (is.null(title)) title <- sjlabelled::get_dv_labels(model, case = case, ...)
    title <- sjmisc::word_wrap(title, wrap = wrap.title)

    # labels for axis with term names
    if (is.null(axis.labels)) axis.labels <- sjlabelled::get_term_labels(model, case = case, ...)
    axis.labels <- sjmisc::word_wrap(axis.labels, wrap = wrap.labels)

    # title for axis with estimate values
    if (is.null(axis.title)) axis.title <- sjmisc::word_wrap(get_estimate_axis_title(model, axis.title, type, transform), wrap = wrap.title)
    axis.title <- sjmisc::word_wrap(axis.title, wrap = wrap.labels)
  }

  # check nr of terms. if only one predictor, a forest plot is not
  # informative, so plot the slope instead
  if (type == "est" && length(sjstats::pred_vars(model)) == 1) type <- "slope"

  # set some default options for stan-models, which are not
  # available or appropriate for these
  if (is.stan(model)) {
    # no p-values for Bayesian models
    show.p <- FALSE
    # no standardized coefficients or slope plots
    if (type %in% c("std", "std2", "slope")) type <- "est"
  }

  # set defaults for arguments, depending on model ----
  if (is.null(ci.lvl)) ci.lvl <- dplyr::if_else(is.stan(model), .89, .95)
  if (is.null(dot.size)) dot.size <- dplyr::if_else(is.stan(model), 1, 2.5)
  # default line size is the same for stan and non-stan models, so no
  # conditional is needed (original `if_else()` had two identical branches)
  if (is.null(line.size)) line.size <- .5
  if (is.null(value.offset)) value.offset <- dplyr::if_else(is.stan(model), .25, .15)

  # check if plot-type is applicable; slope plots require a linear model
  if (type == "slope" && !fam.info$is_linear) {
    type <- "est"
    message("Plot-type \"slope\" only available for linear models. Using `type = \"est\"` now.")
  }

  if (type %in% c("est", "std", "std2") || (is.stan(model) && type == "re")) {

    # plot estimates (forest plot of coefficients) ----

    p <- plot_type_est(
      type = type,
      ci.lvl = ci.lvl,
      se = se,
      tf = transform,
      model = model,
      terms = terms,
      group.terms = group.terms,
      rm.terms = rm.terms,
      sort.est = sort.est,
      title = title,
      axis.title = axis.title,
      axis.labels = axis.labels,
      axis.lim = axis.lim,
      grid.breaks = grid.breaks,
      show.intercept = show.intercept,
      show.values = show.values,
      show.p = show.p,
      value.offset = value.offset,
      digits = digits,
      geom.colors = colors,
      geom.size = dot.size,
      line.size = line.size,
      order.terms = order.terms,
      vline.color = vline.color,
      value.size = value.size,
      bpe = bpe,
      bpe.style = bpe.style,
      facets = grid,
      ...
    )

  } else if (type == "re") {

    # plot random effects (for frequentist mixed models) ----

    p <- plot_type_ranef(
      model = model,
      ri.nr = ri.nr,
      ci.lvl = ci.lvl,
      se = se,
      tf = transform,
      sort.est = sort.est,
      title = title,
      axis.labels = axis.labels,
      axis.lim = axis.lim,
      grid.breaks = grid.breaks,
      show.values = show.values,
      value.offset = value.offset,
      digits = digits,
      facets = grid,
      geom.colors = colors,
      geom.size = dot.size,
      line.size = line.size,
      vline.color = vline.color,
      value.size = value.size,
      ...
    )

  } else if (type %in% c("pred", "eff")) {

    # plot marginal effects (predicted values) ----

    p <- plot_type_eff(
      type = type,
      model = model,
      terms = terms,
      ci.lvl = ci.lvl,
      pred.type = pred.type,
      facets = grid,
      show.data = show.data,
      geom.colors = colors,
      axis.title = axis.title,
      title = title,
      axis.lim = axis.lim,
      case = case,
      show.legend = show.legend,
      ...
    )

  } else if (type == "int") {

    # plot interaction terms (auto-detected from the model formula) ----

    p <- plot_type_int(
      model = model,
      mdrt.values = mdrt.values,
      ci.lvl = ci.lvl,
      pred.type = pred.type,
      facets = grid,
      show.data = show.data,
      geom.colors = colors,
      axis.title = axis.title,
      title = title,
      axis.lim = axis.lim,
      case = case,
      show.legend = show.legend,
      ...
    )

  } else if (type %in% c("slope", "resid")) {

    # plot slopes of estimates against response or residuals ----

    p <- plot_type_slope(
      model = model,
      terms = terms,
      rm.terms = rm.terms,
      ci.lvl = ci.lvl,
      colors = colors,
      title = title,
      show.data = show.data,
      facets = grid,
      axis.title = axis.title,
      case = case,
      useResiduals = type == "resid",
      ...
    )

  } else if (type == "diag") {

    # plot diagnostic plots, dispatching on model class/family ----

    if (is.stan(model)) {

      p <- plot_diag_stan(
        model = model,
        geom.colors = colors,
        facets = grid,
        ...
      )

    } else if (fam.info$is_linear) {

      p <- plot_diag_linear(
        model = model,
        geom.colors = colors,
        dot.size = dot.size,
        ...
      )

    } else {

      p <- plot_diag_glm(
        model = model,
        geom.colors = colors,
        dot.size = dot.size,
        ...
      )

    }
  }

  p
}
#' @importFrom purrr map
#' @rdname plot_model
#' @export
get_model_data <- function(model,
                           type = c("est", "re", "eff", "pred", "int", "std", "std2", "slope", "resid", "diag"),
                           transform,
                           terms = NULL,
                           sort.est = NULL,
                           rm.terms = NULL,
                           group.terms = NULL,
                           order.terms = NULL,
                           pred.type = c("fe", "re"),
                           ri.nr = NULL,
                           ci.lvl = NULL,
                           colors = "Set1",
                           grid,
                           case = "parsed",
                           digits = 2,
                           ...) {

  # Build the requested plot via plot_model() and harvest the tidy data
  # that ggplot attached to it. Automatic labelling is suppressed so the
  # returned data frame keeps the original term names.
  plot.obj <- plot_model(
    model = model,
    type = type,
    transform = transform,
    terms = terms,
    sort.est = sort.est,
    rm.terms = rm.terms,
    group.terms = group.terms,
    order.terms = order.terms,
    pred.type = pred.type,
    ri.nr = ri.nr,
    ci.lvl = ci.lvl,
    colors = colors,
    grid = grid,
    case = case,
    digits = digits,
    auto.label = FALSE,
    ...
  )

  # a single plot yields one data frame; some plot types return a list
  # of ggplot-objects, in which case the data of each element is extracted
  if (!inherits(plot.obj, "list")) {
    plot.obj$data
  } else {
    purrr::map(plot.obj, ~ .x$data)
  }
}
| /R/plot_model.R | no_license | borghenry/sjPlot | R | false | false | 29,874 | r | #' @title Plot regression models
#' @name plot_model
#'
#' @description
#' \code{plot_model()} creates plots from regression models, either
#' estimates (as so-called forest or dot whisker plots) or marginal effects.
#'
#' @param model A regression model object. Depending on the \code{type}, many
#' kinds of models are supported, e.g. from packages like \pkg{stats},
#' \pkg{lme4}, \pkg{nlme}, \pkg{rstanarm}, \pkg{survey}, \pkg{glmmTMB},
#' \pkg{MASS}, \pkg{brms} etc.
#' @param type Type of plot. There are three groups of plot-types: \cr \cr
#' \emph{Coefficients} (\href{../doc/plot_model_estimates.html}{related vignette})
#' \describe{
#' \item{\code{type = "est"}}{Forest-plot of estimates. If the fitted model
#' only contains one predictor, slope-line is plotted.}
#' \item{\code{type = "re"}}{For mixed effects models, plots the random
#' effects.}
#' \item{\code{type = "std"}}{Forest-plot of standardized beta values.}
#' \item{\code{type = "std2"}}{Forest-plot of standardized beta values,
#' however, standardization is done by dividing by two sd (see 'Details').}
#' }
#' \emph{Marginal Effects} (\href{../doc/plot_marginal_effects.html}{related vignette})
#' \describe{
#' \item{\code{type = "pred"}}{Predicted values (marginal effects) for
#' specific model terms. See \code{\link[ggeffects]{ggpredict}} for details.}
#' \item{\code{type = "eff"}}{Similar to \code{type = "pred"}, however,
#' discrete predictors are held constant at their proportions (not reference
#' level). See \code{\link[ggeffects]{ggeffect}} for details.}
#' \item{\code{type = "int"}}{Marginal effects of interaction terms in
#' \code{model}.}
#' }
#' \emph{Model diagnostics}
#' \describe{
#' \item{\code{type = "slope"}}{Slope of coefficients for each single
#' predictor, against the response (linear relationship between each model
#' term and response).}
#' \item{\code{type = "resid"}}{Slope of coefficients for each single
#' predictor, against the residuals (linear relationship between each model
#' term and residuals).}
#' \item{\code{type = "diag"}}{Check model assumptions.}
#' }
#' \strong{Note:} For mixed models, the diagnostic plots like linear relationship
#' or check for Homoscedasticity, do \strong{not} take the uncertainty of
#' random effects into account, but is only based on the fixed effects part
#' of the model.
#' @param transform A character vector, naming a function that will be applied
#' on estimates and confidence intervals. By default, \code{transform} will
#' automatically use \code{"exp"} as transformation for applicable classes of
#' \code{model} (e.g. logistic or poisson regression). Estimates of linear
#' models remain untransformed. Use \code{NULL} if you want the raw,
#' non-transformed estimates.
#' @param terms Character vector with the names of those terms from \code{model}
#' that should be plotted. This argument depends on the plot-type: \describe{
#' \item{\emph{Coefficients}}{ Select terms that should be plotted. All other
#' term are removed from the output. } \item{\emph{Marginal Effects}}{ Here
#' \code{terms} indicates for which terms marginal effects should be
#' displayed. At least one term is required to calculate effects, maximum
#' length is three terms, where the second and third term indicate the groups,
#' i.e. predictions of first term are grouped by the levels of the second (and
#' third) term. \code{terms} may also indicate higher order terms (e.g.
#' interaction terms). Indicating levels in square brackets allows for
#' selecting only specific groups. Term name and levels in brackets must be
#' separated by a whitespace character, e.g. \code{terms = c("age", "education
#' [1,3]")}. For more details, see \code{\link[ggeffects]{ggpredict}}. } }
#' @param sort.est Determines in which way estimates are sorted in the plot:
#' \itemize{ \item If \code{NULL} (default), no sorting is done and estimates
#' are sorted in the same order as they appear in the model formula. \item If
#'   \code{TRUE}, estimates are sorted in descending order, with highest
#' estimate at the top. \item If \code{sort.est = "sort.all"}, estimates are
#' re-sorted for each coefficient (only applies if \code{type = "re"} and
#' \code{grid = FALSE}), i.e. the estimates of the random effects for each
#' predictor are sorted and plotted to an own plot. \item If \code{type =
#' "re"}, specify a predictor's / coefficient's name to sort estimates
#' according to this random effect. }
#' @param rm.terms Character vector with names that indicate which terms should
#' be removed from the plot. Counterpart to \code{terms}. \code{rm.terms =
#' "t_name"} would remove the term \emph{t_name}. Default is \code{NULL}, i.e.
#' all terms are used. Note that this argument does not apply to
#' \emph{Marginal Effects} plots.
#' @param group.terms Numeric vector with group indices, to group coefficients.
#' Each group of coefficients gets its own color (see 'Examples').
#' @param order.terms Numeric vector, indicating in which order the coefficients
#' should be plotted. See examples in
#' \href{../doc/plot_model_estimates.html}{this package-vignette}.
#' @param pred.type Character, only applies for \emph{Marginal Effects} plots
#' with mixed effects models. Indicates whether predicted values should be
#' conditioned on random effects (\code{pred.type = "re"}) or fixed effects
#' only (\code{pred.type = "fe"}, the default).
#' @param mdrt.values Indicates which values of the moderator variable should be
#' used when plotting interaction terms (i.e. \code{type = "int"}). \describe{
#' \item{\code{"minmax"}}{(default) minimum and maximum values (lower and
#' upper bounds) of the moderator are used to plot the interaction between
#' independent variable and moderator(s).} \item{\code{"meansd"}}{uses the
#' mean value of the moderator as well as one standard deviation below and
#' above mean value to plot the effect of the moderator on the independent
#' variable (following the convention suggested by Cohen and Cohen and
#' popularized by Aiken and West (1991), i.e. using the mean, the value one
#' standard deviation above, and the value one standard deviation below the
#' mean as values of the moderator, see
#' \href{http://www.theanalysisfactor.com/3-tips-interpreting-moderation/}{Grace-Martin
#' K: 3 Tips to Make Interpreting Moderation Effects Easier}).}
#' \item{\code{"zeromax"}}{is similar to the \code{"minmax"} option, however,
#' \code{0} is always used as minimum value for the moderator. This may be
#' useful for predictors that don't have an empirical zero-value, but absence
#' of moderation should be simulated by using 0 as minimum.}
#' \item{\code{"quart"}}{calculates and uses the quartiles (lower, median and
#' upper) of the moderator value.} \item{\code{"all"}}{uses all values of the
#' moderator variable.} }
#' @param ri.nr Numeric vector. If \code{type = "re"} and fitted model has more
#' than one random intercept, \code{ri.nr} indicates which random effects of
#' which random intercept (or: which list elements of
#' \code{\link[lme4]{ranef}}) will be plotted. Default is \code{NULL}, so all
#' random effects will be plotted.
#' @param title Character vector, used as plot title. By default,
#' \code{\link[sjlabelled]{get_dv_labels}} is called to retrieve the label of
#' the dependent variable, which will be used as title. Use \code{title = ""}
#' to remove title.
#' @param axis.title Character vector of length one or two (depending on the
#' plot function and type), used as title(s) for the x and y axis. If not
#' specified, a default labelling is chosen. \strong{Note:} Some plot types
#' may not support this argument sufficiently. In such cases, use the returned
#' ggplot-object and add axis titles manually with
#' \code{\link[ggplot2]{labs}}. Use \code{axis.title = ""} to remove axis
#' titles.
#' @param axis.labels Character vector with labels for the model terms, used as
#' axis labels. By default, \code{\link[sjlabelled]{get_term_labels}} is
#' called to retrieve the labels of the coefficients, which will be used as
#' axis labels. Use \code{axis.labels = ""} or \code{auto.label = FALSE} to
#' use the variable names as labels instead.
#' @param axis.lim Numeric vector of length 2, defining the range of the plot
#' axis. Depending on plot-type, may effect either x- or y-axis. For
#' \emph{Marginal Effects} plots, \code{axis.lim} may also be a list of two
#' vectors of length 2, defining axis limits for both the x and y axis.
#' @param grid.breaks Numeric value or vector; if \code{grid.breaks} is a
#' single value, sets the distance between breaks for the axis at every
#' \code{grid.breaks}'th position, where a major grid line is plotted. If
#' \code{grid.breaks} is a vector, values will be used to define the
#' axis positions of the major grid lines.
#' @param ci.lvl Numeric, the level of the confidence intervals (error bars).
#' Use \code{ci.lvl = NA} to remove error bars. For \code{stanreg}-models,
#' \code{ci.lvl} defines the (outer) probability for the
#' \code{\link[sjstats]{hdi}} (High Density Interval) that is plotted. By
#' default, \code{stanreg}-models are printed with two intervals: the "inner"
#' interval, which defaults to the 50\%-HDI; and the "outer" interval, which
#' defaults to the 89\%-HDI. \code{ci.lvl} affects only the outer interval in
#' such cases. See \code{prob.inner} and \code{prob.outer} under the
#' \code{...}-argument for more details.
#' @param se Either a logical, and if \code{TRUE}, error bars indicate standard
#' errors, not confidence intervals. Or a character vector with a specification
#' of the covariance matrix to compute robust standard errors (see argument
#' \code{vcov} of \code{link[sjstats]{robust}} for valid values; robust standard
#' errors are only supported for models that work with \code{\link[lmtest]{coeftest}}).
#' \code{se} overrides \code{ci.lvl}: if not \code{NULL}, arguments \code{ci.lvl}
#' and \code{transform} will be ignored. Currently, \code{se} only applies
#' to \emph{Coefficients} plots.
#' @param show.intercept Logical, if \code{TRUE}, the intercept of the fitted
#' model is also plotted. Default is \code{FALSE}. If \code{transform =
#' "exp"}, please note that due to exponential transformation of estimates,
#' the intercept in some cases is non-finite and the plot can not be created.
#' @param show.values Logical, whether values should be plotted or not.
#' @param show.p Logical, adds asterisks that indicate the significance level of
#' estimates to the value labels.
#' @param show.data Logical, for \emph{Marginal Effects} plots, also plots the
#' raw data points.
#' @param show.legend For \emph{Marginal Effects} plots, shows or hides the
#' legend.
#' @param value.offset Numeric, offset for text labels to adjust their position
#' relative to the dots or lines.
#' @param dot.size Numeric, size of the dots that indicate the point estimates.
#' @param line.size Numeric, size of the lines that indicate the error bars.
#' @param colors May be a character vector of color values in hex-format, valid
#' color value names (see \code{demo("colors")}) or a name of a pre-defined
#' color palette. Following options are valid for the \code{colors} argument:
#' \itemize{
#' \item If not specified, a default color brewer palette will be used, which is suitable for the plot style.
#' \item If \code{"gs"}, a greyscale will be used.
#' \item If \code{"bw"}, and plot-type is a line-plot, the plot is black/white and uses different line types to distinguish groups (see \href{../doc/blackwhitefigures.html}{this package-vignette}).
#' \item If \code{colors} is any valid color brewer palette name, the related palette will be used. Use \code{\link[RColorBrewer]{display.brewer.all}} to view all available palette names.
#' \item If \pkg{wesanderson} is installed, you may also specify a name of a palette from that package.
#' \item If \pkg{viridis} is installed, use \code{colors = "v"} to get the viridis color palette.
#' \item There are some pre-defined color palettes in this package, see \code{\link{sjplot-themes}} for details.
#' \item Else specify own color values or names as vector (e.g. \code{colors = "#00ff00"} or \code{colors = c("firebrick", "blue")}).
#' }
#' @param grid Logical, if \code{TRUE}, multiple plots are plotted as grid
#' layout.
#' @param wrap.title Numeric, determines how many chars of the plot title are
#' displayed in one line and when a line break is inserted.
#' @param wrap.labels Numeric, determines how many chars of the value, variable
#' or axis labels are displayed in one line and when a line break is inserted.
#' @param case Desired target case. Labels will automatically converted into the
#' specified character case. See \code{\link[snakecase]{to_any_case}} for more
#' details on this argument.
#' @param auto.label Logical, if \code{TRUE} (the default), plot-labels are
#' based on value and variable labels, if the data is labelled. See
#' \code{\link[sjlabelled]{get_label}} and
#' \code{\link[sjlabelled]{get_term_labels}} for details. If \code{FALSE},
#' original variable names and value labels (factor levels) are used.
#' @param digits Numeric, amount of digits after decimal point when rounding
#' estimates or values.
#' @param value.size Numeric, indicates the size of value labels. Can be used
#' for all plot types where the argument \code{show.values} is applicable,
#' e.g. \code{value.size = 4}.
#' @param vline.color Color of the vertical "zero effect" line. Default color is
#' inherited from the current theme.
#' @param bpe For \strong{Stan}-models (fitted with the \pkg{rstanarm}- or
#' \pkg{brms}-package), the Bayesian point estimate is, by default, the median
#' of the posterior distribution. Use \code{bpe} to define other functions to
#'   calculate the Bayesian point estimate. \code{bpe} needs to be a character
#' naming the specific function, which is passed to the \code{fun}-argument in
#' \code{\link[sjstats]{typical_value}}. So, \code{bpe = "mean"} would
#' calculate the mean value of the posterior distribution.
#' @param bpe.style For \strong{Stan}-models (fitted with the \pkg{rstanarm}- or
#' \pkg{brms}-package), the Bayesian point estimate is indicated as a small,
#' vertical line by default. Use \code{bpe.style = "dot"} to plot a dot
#' instead of a line for the point estimate.
#' @param ... Other arguments, passed down to various functions. Here is a list
#' of supported arguments and their description in detail.
#' \describe{
#' \item{\code{prob.inner} and \code{prob.outer}}{For \strong{Stan}-models
#' (fitted with the \pkg{rstanarm}- or \pkg{brms}-package) and coefficients
#' plot-types, you can specify numeric values between 0 and 1 for
#' \code{prob.inner} and \code{prob.outer}, which will then be used as inner
#' and outer probabilities for the uncertainty intervals (HDI). By default,
#' the inner probability is 0.5 and the outer probability is 0.89 (unless
#' \code{ci.lvl} is specified - in this case, \code{ci.lvl} is used as outer
#' probability).
#' }
#' \item{\code{size.inner}}{For \strong{Stan}-models and \emph{Coefficients}
#' plot-types, you can specify the width of the bar for the inner
#' probabilities. Default is \code{0.1}.
#' }
#' \item{\code{width}, \code{alpha} and \code{scale}}{Passed down to
#' \code{geom_errorbar()} or \code{geom_density_ridges()}, for forest or
#' diagnostic plots; or passed down to \code{\link[ggeffects]{plot.ggeffects}}
#' for \emph{Marginal Effects} plots.
#' }
#' \item{\code{show.loess}}{Logical, for diagnostic plot-types \code{"slope"}
#' and \code{"resid"}, adds (or hides) a loess-smoothed line to the plot.
#' }
#' \item{\emph{Marginal Effects} plot-types}{When plotting marginal effects,
#' arguments are also passed down to \code{\link[ggeffects]{ggpredict}},
#' \code{\link[ggeffects]{ggeffect}} or \code{\link[ggeffects]{plot.ggeffects}}.
#' }
#' \item{Case conversion of labels}{For case conversion of labels (see argument
#' \code{case}), arguments \code{sep_in} and \code{sep_out} will be passed
#' down to \code{\link[snakecase]{to_any_case}}. This only
#' applies to automatically retrieved term labels, \emph{not} if
#' term labels are provided by the \code{axis.labels}-argument.
#' }
#' }
#'
#' @return
#' Depending on the plot-type, \code{plot_model()} returns a
#' \code{ggplot}-object or a list of such objects. \code{get_model_data}
#' returns the associated data with the plot-object as tidy data frame, or
#' (depending on the plot-type) a list of such data frames.
#'
#' @details
#' \code{get_model_data} simply calls \code{plot_model()} and returns
#' the data from the ggplot-object. Hence, it is rather inefficient and should
#'   be used as alternative to \pkg{broom}'s \code{tidy()}-function only in
#' specific situations. \cr \cr Some notes on the different plot-types:
#' \describe{
#' \item{\code{type = "std2"}}{Plots standardized beta values,
#' however, standardization follows Gelman's (2008) suggestion, rescaling the
#' estimates by dividing them by two standard deviations instead of just one.
#' Resulting coefficients are then directly comparable for untransformed
#' binary predictors. This standardization uses the
#' \code{\link[arm]{standardize}}-function from the \pkg{arm}-package.
#' }
#' \item{\code{type = "pred"}}{Plots marginal effects. Simply wraps
#' \code{\link[ggeffects]{ggpredict}}.
#' }
#' \item{\code{type = "eff"}}{Plots marginal effects. Simply wraps
#' \code{\link[ggeffects]{ggeffect}}.
#' }
#' \item{\code{type = "int"}}{A shortcut for marginal effects plots, where
#' interaction terms are automatically detected and used as
#' \code{terms}-argument. Furthermore, if the moderator variable (the second
#' - and third - term in an interaction) is continuous, \code{type = "int"}
#' automatically chooses useful values based on the \code{mdrt.values}-argument,
#' which are passed to \code{terms}. Then, \code{\link[ggeffects]{ggpredict}}
#' is called. \code{type = "int"} plots the interaction term that appears
#' first in the formula along the x-axis, while the second (and possibly
#' third) variable in an interaction is used as grouping factor(s)
#' (moderating variable). Use \code{type = "pred"} or \code{type = "eff"}
#' and specify a certain order in the \code{terms}-argument to indicate
#' which variable(s) should be used as moderator.}
#' }
#'
#' @note
#' \code{plot_model()} replaces the functions \code{sjp.lm},
#' \code{sjp.glm}, \code{sjp.lmer}, \code{sjp.glmer} and \code{sjp.int}. These
#' are becoming softly deprecated and will be removed in a future update.
#'
#' @references
#' Gelman A (2008) "Scaling regression inputs by dividing by two
#' standard deviations." \emph{Statistics in Medicine 27: 2865–2873.}
#' \url{http://www.stat.columbia.edu/~gelman/research/published/standardizing7.pdf}
#' \cr \cr
#' Aiken and West (1991). Multiple Regression: Testing and Interpreting Interactions.
#'
#' @examples
#' # prepare data
#' library(sjmisc)
#' data(efc)
#' efc <- to_factor(efc, c161sex, e42dep, c172code)
#' m <- lm(neg_c_7 ~ pos_v_4 + c12hour + e42dep + c172code, data = efc)
#'
#' # simple forest plot
#' plot_model(m)
#'
#' # grouped coefficients
#' plot_model(m, group.terms = c(1, 2, 3, 3, 3, 4, 4))
#'
#' # multiple plots, as returned from "diagnostic"-plot type,
#' # can be arranged with 'plot_grid()'
#' \dontrun{
#' p <- plot_model(m, type = "diag")
#' plot_grid(p)}
#'
#' # plot random effects
#' library(lme4)
#' m <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
#' plot_model(m, type = "re")
#'
#' # plot marginal effects
#' plot_model(m, type = "eff", terms = "Days")
#'
#' # plot interactions
#' \dontrun{
#' m <- glm(
#' tot_sc_e ~ c161sex + c172code * neg_c_7,
#' data = efc,
#' family = poisson()
#' )
#' # type = "int" automatically selects groups for continuous moderator
#' # variables - see argument 'mdrt.values'. The following function call is
#' # identical to:
#' # plot_model(m, type = "pred", terms = c("c172code", "neg_c_7 [7,28]"))
#' plot_model(m, type = "int")
#'
#' # switch moderator
#' plot_model(m, type = "pred", terms = c("neg_c_7", "c172code"))
#' # same as
#' # ggeffects::ggpredict(m, terms = c("neg_c_7", "c172code"))}
#'
#' # plot Stan-model
#' \dontrun{
#' if (require("rstanarm")) {
#' data(mtcars)
#' m <- stan_glm(mpg ~ wt + am + cyl + gear, data = mtcars, chains = 1)
#' plot_model(m, bpe.style = "dot")
#' }}
#'
#' @importFrom sjstats pred_vars std_beta p_value
#' @importFrom sjmisc word_wrap str_contains
#' @importFrom sjlabelled get_dv_labels get_term_labels
#' @importFrom dplyr if_else n_distinct
#' @importFrom graphics plot
#' @importFrom ggeffects ggpredict ggeffect
#' @importFrom stats terms
#' @importFrom tibble add_column
#'
#' @export
plot_model <- function(model,
                       type = c("est", "re", "eff", "pred", "int", "std", "std2", "slope", "resid", "diag"),
                       transform,
                       terms = NULL,
                       sort.est = NULL,
                       rm.terms = NULL,
                       group.terms = NULL,
                       order.terms = NULL,
                       pred.type = c("fe", "re"),
                       mdrt.values = c("minmax", "meansd", "zeromax", "quart", "all"),
                       ri.nr = NULL,
                       title = NULL,
                       axis.title = NULL,
                       axis.labels = NULL,
                       wrap.title = 50,
                       wrap.labels = 25,
                       axis.lim = NULL,
                       grid.breaks = NULL,
                       ci.lvl = NULL,
                       se = NULL,
                       colors = "Set1",
                       show.intercept = FALSE,
                       show.values = FALSE,
                       show.p = TRUE,
                       show.data = FALSE,
                       show.legend = TRUE,
                       value.offset = NULL,
                       value.size,
                       digits = 2,
                       dot.size = NULL,
                       line.size = NULL,
                       vline.color = NULL,
                       grid,
                       case = "parsed",
                       auto.label = TRUE,
                       bpe = "median",
                       bpe.style = "line",
                       ...
                       ) {

  # validate the enumerated arguments against their allowed values
  type <- match.arg(type)
  pred.type <- match.arg(pred.type)
  mdrt.values <- match.arg(mdrt.values)

  # check se-argument
  se <- check_se_argument(se = se, type = type)

  # get info on model family
  fam.info <- get_glm_family(model)

  # check whether estimates should be transformed or not;
  # non-linear models default to exponentiated coefficients
  if (missing(transform)) {
    if (fam.info$is_linear)
      transform <- NULL
    else
      transform <- "exp"
  }

  # get titles and labels for axis ----
  # this is not appropriate when plotting random effects,
  # so retrieve labels only for other plot types
  if (type %in% c("est", "std", "std2") && isTRUE(auto.label)) {
    # get labels of dependent variables, and wrap them if too long
    if (is.null(title)) title <- sjlabelled::get_dv_labels(model, case = case, ...)
    title <- sjmisc::word_wrap(title, wrap = wrap.title)

    # labels for axis with term names
    if (is.null(axis.labels)) axis.labels <- sjlabelled::get_term_labels(model, case = case, ...)
    axis.labels <- sjmisc::word_wrap(axis.labels, wrap = wrap.labels)

    # title for axis with estimate values
    if (is.null(axis.title)) axis.title <- sjmisc::word_wrap(get_estimate_axis_title(model, axis.title, type, transform), wrap = wrap.title)
    axis.title <- sjmisc::word_wrap(axis.title, wrap = wrap.labels)
  }

  # check nr of terms. if only one, plot slope
  if (type == "est" && length(sjstats::pred_vars(model)) == 1) type <- "slope"

  # set some default options for stan-models, which are not
  # available or appropriate for these
  if (is.stan(model)) {
    # no p-values
    show.p <- FALSE
    # no standardized coefficients
    if (type %in% c("std", "std2", "slope")) type <- "est"
  }

  # set defaults for arguments, depending on model ----
  if (is.null(ci.lvl)) ci.lvl <- dplyr::if_else(is.stan(model), .89, .95)
  if (is.null(dot.size)) dot.size <- dplyr::if_else(is.stan(model), 1, 2.5)
  # same default for Stan and non-Stan models; the former
  # dplyr::if_else(is.stan(model), .5, .5) had identical branches
  if (is.null(line.size)) line.size <- .5
  if (is.null(value.offset)) value.offset <- dplyr::if_else(is.stan(model), .25, .15)

  # check if plot-type is applicable
  if (type == "slope" && !fam.info$is_linear) {
    type <- "est"
    message("Plot-type \"slope\" only available for linear models. Using `type = \"est\"` now.")
  }

  if (type %in% c("est", "std", "std2") || (is.stan(model) && type == "re")) {
    # plot estimates ----
    p <- plot_type_est(
      type = type,
      ci.lvl = ci.lvl,
      se = se,
      tf = transform,
      model = model,
      terms = terms,
      group.terms = group.terms,
      rm.terms = rm.terms,
      sort.est = sort.est,
      title = title,
      axis.title = axis.title,
      axis.labels = axis.labels,
      axis.lim = axis.lim,
      grid.breaks = grid.breaks,
      show.intercept = show.intercept,
      show.values = show.values,
      show.p = show.p,
      value.offset = value.offset,
      digits = digits,
      geom.colors = colors,
      geom.size = dot.size,
      line.size = line.size,
      order.terms = order.terms,
      vline.color = vline.color,
      value.size = value.size,
      bpe = bpe,
      bpe.style = bpe.style,
      facets = grid,
      ...
    )
  } else if (type == "re") {
    # plot random effects ----
    p <- plot_type_ranef(
      model = model,
      ri.nr = ri.nr,
      ci.lvl = ci.lvl,
      se = se,
      tf = transform,
      sort.est = sort.est,
      title = title,
      axis.labels = axis.labels,
      axis.lim = axis.lim,
      grid.breaks = grid.breaks,
      show.values = show.values,
      value.offset = value.offset,
      digits = digits,
      facets = grid,
      geom.colors = colors,
      geom.size = dot.size,
      line.size = line.size,
      vline.color = vline.color,
      value.size = value.size,
      ...
    )
  } else if (type %in% c("pred", "eff")) {
    # plot marginal effects ----
    p <- plot_type_eff(
      type = type,
      model = model,
      terms = terms,
      ci.lvl = ci.lvl,
      pred.type = pred.type,
      facets = grid,
      show.data = show.data,
      geom.colors = colors,
      axis.title = axis.title,
      title = title,
      axis.lim = axis.lim,
      case = case,
      show.legend = show.legend,
      ...
    )
  } else if (type == "int") {
    # plot interaction terms ----
    p <- plot_type_int(
      model = model,
      mdrt.values = mdrt.values,
      ci.lvl = ci.lvl,
      pred.type = pred.type,
      facets = grid,
      show.data = show.data,
      geom.colors = colors,
      axis.title = axis.title,
      title = title,
      axis.lim = axis.lim,
      case = case,
      show.legend = show.legend,
      ...
    )
  } else if (type %in% c("slope", "resid")) {
    # plot slopes of estimates ----
    p <- plot_type_slope(
      model = model,
      terms = terms,
      rm.terms = rm.terms,
      ci.lvl = ci.lvl,
      colors = colors,
      title = title,
      show.data = show.data,
      facets = grid,
      axis.title = axis.title,
      case = case,
      useResiduals = type == "resid",
      ...
    )
  } else if (type == "diag") {
    # plot diagnostic plots ----
    if (is.stan(model)) {
      p <- plot_diag_stan(
        model = model,
        geom.colors = colors,
        facets = grid,
        ...
      )
    } else if (fam.info$is_linear) {
      p <- plot_diag_linear(
        model = model,
        geom.colors = colors,
        dot.size = dot.size,
        ...
      )
    } else {
      p <- plot_diag_glm(
        model = model,
        geom.colors = colors,
        dot.size = dot.size,
        ...
      )
    }
  }

  p
}
#' @importFrom purrr map
#' @rdname plot_model
#' @export
get_model_data <- function(model,
                           type = c("est", "re", "eff", "pred", "int", "std", "std2", "slope", "resid", "diag"),
                           transform,
                           terms = NULL,
                           sort.est = NULL,
                           rm.terms = NULL,
                           group.terms = NULL,
                           order.terms = NULL,
                           pred.type = c("fe", "re"),
                           ri.nr = NULL,
                           ci.lvl = NULL,
                           colors = "Set1",
                           grid,
                           case = "parsed",
                           digits = 2,
                           ...) {
  # Build the plot(s) exactly as plot_model() would, but without the
  # automatic labelling, and then return only the underlying data that
  # each ggplot object carries.
  plot.obj <- plot_model(
    model = model,
    type = type,
    transform = transform,
    terms = terms,
    sort.est = sort.est,
    rm.terms = rm.terms,
    group.terms = group.terms,
    order.terms = order.terms,
    pred.type = pred.type,
    ri.nr = ri.nr,
    ci.lvl = ci.lvl,
    colors = colors,
    grid = grid,
    case = case,
    digits = digits,
    auto.label = FALSE,
    ...
  )

  # Some plot types produce a list of plots; pull the data out of each one.
  if (inherits(plot.obj, "list")) {
    purrr::map(plot.obj, function(pl) pl$data)
  } else {
    plot.obj$data
  }
}
|
#' compile collated dataset function
#'
#' this function compiles the collated dataset from records
#' @param dataset_name_prefix a character string to prefix the datasets with, no default
#' @param dir_names a vector of directory names to import records from. default is NULL, in which case records from all directories present in path are imported
#' @param skip_dirs a vector of directory names to skip importing from. default is NULL, in which case no directory is skipped
#' @param save logical, if TRUE the datasets are saved to disk, defaults to TRUE
#' @export
## * content
## ** declare
compile.collated.dataset <- function(
                                     dataset_name_prefix,
                                     dir_names = NULL,
                                     skip_dirs = NULL,
                                     save = TRUE
                                     )
{
    ## ** create vector of directories to find records in
    path <- .session_variables$data_copy_path
    if (is.null(dir_names)) {
        dirs <- list.dirs(path,
                          recursive = TRUE,
                          full.names = TRUE)
    } else {
        ## NOTE(review): dir names are appended to path without a
        ## separator -- assumes data_copy_path ends with "/"; confirm
        dirs <- unlist(lapply(dir_names, function(n) paste0(path, n)))
    }
    ## drop directories matching any skip pattern; grepl() keeps every
    ## directory when nothing matches, whereas the former
    ## dirs[-grep(...)] returned an EMPTY vector in that case
    ## (x[-integer(0)] selects nothing)
    if (!is.null(skip_dirs)) dirs <- dirs[!grepl(paste0(skip_dirs, collapse = "|"), dirs)]
    ## ** compile dataset: one centre dataset per directory, row-bound
    ## with column filling for centres with differing columns
    dataset <- rbind.fill(lapply(dirs, compile.centre.dataset, save = save))
    ## ** save dataset to disk
    if (save) {
        dataset_path <- paste0(.session_variables$data_path, "/datasets/", dataset_name_prefix, "-dataset-collated/")
        save.dataset(dataset, dataset_path)
    }
    ## ** return dataset
    return(dataset)
## * end
}
| /R/compile.collated.dataset.r | no_license | tracits/beehive | R | false | false | 1,699 | r | #' compile collated dataset function
#'
#' this function compiles the collated dataset from records
#' @param dataset_name_prefix a character string to prefix the datasets with, no default
#' @param dir_names a vector of directory names to import records from. default is NULL, in which case records from all directories present in path are imported
#' @param skip_dirs a vector of directory names to skip importing from. default is NULL, in which case no directory is skipped
#' @param save logical, if TRUE the datasets are saved to disk, defaults to TRUE
#' @export
## * content
## ** declare
compile.collated.dataset <- function(
                                     dataset_name_prefix,
                                     dir_names = NULL,
                                     skip_dirs = NULL,
                                     save = TRUE
                                     )
{
    ## ** create vector of directories to find records in
    path <- .session_variables$data_copy_path
    if (is.null(dir_names)) {
        dirs <- list.dirs(path,
                          recursive = TRUE,
                          full.names = TRUE)
    } else {
        ## NOTE(review): dir names are appended to path without a
        ## separator -- assumes data_copy_path ends with "/"; confirm
        dirs <- unlist(lapply(dir_names, function(n) paste0(path, n)))
    }
    ## drop directories matching any skip pattern; grepl() keeps every
    ## directory when nothing matches, whereas the former
    ## dirs[-grep(...)] returned an EMPTY vector in that case
    ## (x[-integer(0)] selects nothing)
    if (!is.null(skip_dirs)) dirs <- dirs[!grepl(paste0(skip_dirs, collapse = "|"), dirs)]
    ## ** compile dataset: one centre dataset per directory, row-bound
    ## with column filling for centres with differing columns
    dataset <- rbind.fill(lapply(dirs, compile.centre.dataset, save = save))
    ## ** save dataset to disk
    if (save) {
        dataset_path <- paste0(.session_variables$data_path, "/datasets/", dataset_name_prefix, "-dataset-collated/")
        save.dataset(dataset, dataset_path)
    }
    ## ** return dataset
    return(dataset)
## * end
}
|
#PROCESS Turner C7 FILES FROM R/V WALTON SMITH & ADD LATITUDE AND LONGITUDE

#Step 1: Import and process R/V Walton Smith Turner C7 "*DAT" files.
#Each raw file is read, given clean column names, trimmed of redundant
#fields, and written back out as a tab-delimited text file.
setwd("C:/Users/kelly.robinson/Dropbox/Cowen_Sponaugle/OSTRICH/PhysData/vids/Turner C7")

library(data.table)

options(digits = 5)

##Loop through .DAT files in a directory and apply multiple functions
files <- list.files(full.names = TRUE, recursive = FALSE)
# exclude the "String" files; grepl() keeps everything when no such file
# is present (the former files[-grep(...)] dropped ALL files in that
# case, since x[-integer(0)] selects nothing)
d <- files[!grepl("String", files, fixed = TRUE)]

for (i in seq_along(d)) {

  #read file in
  chl.data <- read.table(d[i], skip = 2, blank.lines.skip = TRUE, sep = "\t")

  #assign column headers; the chlorophyll columns use underscores
  #("Chl_a_...") so they match the column selection below and the
  #Step 2 merge -- the former hyphenated "Chl-a_..." names made the
  #selection fail with "undefined columns selected"
  colnames(chl.data) <- c("comp_date", "comp_time", "instr_date", "instru_time", "Chl_a_raw_fluor", "Chl_a_Calib_fluor", "CDOM_raw_fluor",
                          "CDOM_calib_fluor", "turbidity_raw_fluor", "turbidity_calib_fluor", "crude_Oil_raw_fluor", "crude_oil_Calib_fluor", "phycoer_raw_fluor", "phycoer_calib_fluor",
                          "phycocy_raw_fluor", "phycocy_calib_fluor", "depth", "temp_C", "space")

  #Remove "instru_date", "instru_time", "space" fields
  chl.data$instr_date <- NULL
  chl.data$instru_time <- NULL
  chl.data$space <- NULL

  #convert "comp_date" in from a string to a date format
  chl.data$bDate <- as.Date(chl.data$comp_date, "%m/%d/%Y")

  ##If you need to replace the "-" with "/" in the dates
  #data$Date <- as.character(data$Date)

  #create new field in the df "Date_Time" by joining "Date", "Time" , sep=' ')
  chl.data$Date_Time <- paste(chl.data$bDate, chl.data$comp_time, sep=' ')

  #remove the "date" and "time" fields so information is not duplicated in output
  chl.data$comp_date <- NULL
  chl.data$comp_time <- NULL
  chl.data$bDate <- NULL

  #reorder columns with the timestamp first
  turnerC7 <- chl.data[c("Date_Time","depth","temp_C","Chl_a_raw_fluor","Chl_a_Calib_fluor","CDOM_raw_fluor","CDOM_calib_fluor",
                         "turbidity_raw_fluor","turbidity_calib_fluor","crude_Oil_raw_fluor","crude_oil_Calib_fluor", "phycoer_raw_fluor","phycoer_calib_fluor",
                         "phycocy_raw_fluor","phycocy_calib_fluor")]

  #write new text file (dir.create warns if the directory already exists)
  suppressWarnings(dir.create("TurnerC7 processed"))
  write.table(turnerC7, paste0("TurnerC7 processed/", substr(paste0(basename(d[i])),1,11),"_TurnerC7", ".txt"), row.names=FALSE, sep="\t")
}
#STEP 2: merge TurnerC7 processed files into a single file
# Reads every per-file output written by Step 1 from the
# "TurnerC7 processed" directory, stacks them into one data frame, and
# writes a single file of second-averaged fluorescence/temperature data.
setwd("C:/Users/kelly.robinson/Dropbox/Cowen_Sponaugle/OSTRICH/PhysData/vids/Turner C7/TurnerC7 processed")
library(plyr)
options("digits" = 5)
##Get a List of Files in working directory
t_files <- list.files()
##Merge the LatLon_hms files into a Single Dataframe
# NOTE(review): the rbind below already processes ALL of t_files at once;
# the exists() guard only stops it from being rebuilt on later loop
# iterations. If this script is re-run in the same R session, the stale
# "turnerC7.dataset" from the previous run is reused -- remove it first.
for (file in t_files){
# if the merged dataset doesn't exist, create it
#use 'skip' function to skip the first row and get the headers from the second row if need be
if (!exists("turnerC7.dataset")){
turnerC7.dataset <- do.call("rbind",lapply(t_files, FUN=function(files){fread(files, header=TRUE, sep="\t")}))
}
print(file)
}
df.turnerC7 <- as.data.frame(turnerC7.dataset)
#format(x = df.turnerC7$crude_Oil_raw_fluo, digits = 5, format = "f")
#format(x = df.turnerC7$crude_Oil_calib_fluo, digits = 5, format = "f")
# re-assign clean column names; assumes every Step 1 file shares this
# 15-column layout -- TODO confirm against the Step 1 output
colnames(df.turnerC7) <- c("Date_Time","depth","temp_C","Chl_a_raw_fluor","Chl_a_Calib_fluor","CDOM_raw_fluor","CDOM_calib_fluor",
"turbidity_raw_fluor","turbidity_calib_fluor","crude_Oil_raw_fluor","crude_oil_Calib_fluor", "phycoer_raw_fluor","phycoer_calib_fluor",
"phycocy_raw_fluor","phycocy_calib_fluor")
# parse the timestamp; whole seconds define the averaging bins below
df.turnerC7$AggDate_Time <- as.POSIXct(df.turnerC7$Date_Time, format = "%Y-%m-%d %H:%M:%S" )
#Second-averaged fluorescence data (YMDHMS)
y <- ddply(df.turnerC7, c("AggDate_Time"), summarise, mean(depth), mean(temp_C), mean(Chl_a_raw_fluor), mean(Chl_a_Calib_fluor), mean(CDOM_raw_fluor),
mean(CDOM_calib_fluor), mean(turbidity_raw_fluor), mean(turbidity_calib_fluor), mean(crude_Oil_raw_fluor), mean(crude_oil_Calib_fluor),
mean(phycoer_raw_fluor), mean(phycoer_calib_fluor), mean(phycocy_raw_fluor), mean(phycocy_calib_fluor))
y.na <- na.omit(y) #Remove rows with NAs
#Assign column names to summarized data
colnames(y.na) <- c("Date_Time","AvgDepth","AvgTemp_C","AvgChl-a_raw_fluor","AvgChl-a_Calib_fluor","AvgCDOM_raw_fluor","AvgCDOM_calib_fluor",
"Avgturbidity_raw_fluor","Avgturbidity_calib_fluor","Avgcrude_Oil_raw_fluor","Avgcrude_oil_Calib_fluor", "Avgphycoer_raw_fluor",
"Avgphycoer_calib_fluor","Avgphycocy_raw_fluor","Avgphycocy_calib_fluor")
#write new text file
suppressWarnings(dir.create("YMDHMS")) #remove YMDHMS to create directory for high-resolution data
write.table(y.na, "YMDHMS/turnerC7_20140527_20140615_YMDHMS.txt", row.names=FALSE, sep="\t")
| /process Turner C7 files.R | no_license | Planktos/OSTRICH_rvws | R | false | false | 4,771 | r | #PROCESS Turner C7 FILES FROM R/V WALTON SMITH & ADD LATITUDE AND LONGITUDE
#Step 1: Import and process R/V Walton Smith Turner C7 "*DAT" files.
#Each raw file is read, given clean column names, trimmed of redundant
#fields, and written back out as a tab-delimited text file.
setwd("C:/Users/kelly.robinson/Dropbox/Cowen_Sponaugle/OSTRICH/PhysData/vids/Turner C7")

library(data.table)

options(digits = 5)

##Loop through .DAT files in a directory and apply multiple functions
files <- list.files(full.names = TRUE, recursive = FALSE)
# exclude the "String" files; grepl() keeps everything when no such file
# is present (the former files[-grep(...)] dropped ALL files in that
# case, since x[-integer(0)] selects nothing)
d <- files[!grepl("String", files, fixed = TRUE)]

for (i in seq_along(d)) {

  #read file in
  chl.data <- read.table(d[i], skip = 2, blank.lines.skip = TRUE, sep = "\t")

  #assign column headers; the chlorophyll columns use underscores
  #("Chl_a_...") so they match the column selection below and the
  #Step 2 merge -- the former hyphenated "Chl-a_..." names made the
  #selection fail with "undefined columns selected"
  colnames(chl.data) <- c("comp_date", "comp_time", "instr_date", "instru_time", "Chl_a_raw_fluor", "Chl_a_Calib_fluor", "CDOM_raw_fluor",
                          "CDOM_calib_fluor", "turbidity_raw_fluor", "turbidity_calib_fluor", "crude_Oil_raw_fluor", "crude_oil_Calib_fluor", "phycoer_raw_fluor", "phycoer_calib_fluor",
                          "phycocy_raw_fluor", "phycocy_calib_fluor", "depth", "temp_C", "space")

  #Remove "instru_date", "instru_time", "space" fields
  chl.data$instr_date <- NULL
  chl.data$instru_time <- NULL
  chl.data$space <- NULL

  #convert "comp_date" in from a string to a date format
  chl.data$bDate <- as.Date(chl.data$comp_date, "%m/%d/%Y")

  ##If you need to replace the "-" with "/" in the dates
  #data$Date <- as.character(data$Date)

  #create new field in the df "Date_Time" by joining "Date", "Time" , sep=' ')
  chl.data$Date_Time <- paste(chl.data$bDate, chl.data$comp_time, sep=' ')

  #remove the "date" and "time" fields so information is not duplicated in output
  chl.data$comp_date <- NULL
  chl.data$comp_time <- NULL
  chl.data$bDate <- NULL

  #reorder columns with the timestamp first
  turnerC7 <- chl.data[c("Date_Time","depth","temp_C","Chl_a_raw_fluor","Chl_a_Calib_fluor","CDOM_raw_fluor","CDOM_calib_fluor",
                         "turbidity_raw_fluor","turbidity_calib_fluor","crude_Oil_raw_fluor","crude_oil_Calib_fluor", "phycoer_raw_fluor","phycoer_calib_fluor",
                         "phycocy_raw_fluor","phycocy_calib_fluor")]

  #write new text file (dir.create warns if the directory already exists)
  suppressWarnings(dir.create("TurnerC7 processed"))
  write.table(turnerC7, paste0("TurnerC7 processed/", substr(paste0(basename(d[i])),1,11),"_TurnerC7", ".txt"), row.names=FALSE, sep="\t")
}
#STEP 2: merge TurnerC7 processed files into a single file
# Reads every per-file output written by Step 1 from the
# "TurnerC7 processed" directory, stacks them into one data frame, and
# writes a single file of second-averaged fluorescence/temperature data.
setwd("C:/Users/kelly.robinson/Dropbox/Cowen_Sponaugle/OSTRICH/PhysData/vids/Turner C7/TurnerC7 processed")
library(plyr)
options("digits" = 5)
##Get a List of Files in working directory
t_files <- list.files()
##Merge the LatLon_hms files into a Single Dataframe
# NOTE(review): the rbind below already processes ALL of t_files at once;
# the exists() guard only stops it from being rebuilt on later loop
# iterations. If this script is re-run in the same R session, the stale
# "turnerC7.dataset" from the previous run is reused -- remove it first.
for (file in t_files){
# if the merged dataset doesn't exist, create it
#use 'skip' function to skip the first row and get the headers from the second row if need be
if (!exists("turnerC7.dataset")){
turnerC7.dataset <- do.call("rbind",lapply(t_files, FUN=function(files){fread(files, header=TRUE, sep="\t")}))
}
print(file)
}
df.turnerC7 <- as.data.frame(turnerC7.dataset)
#format(x = df.turnerC7$crude_Oil_raw_fluo, digits = 5, format = "f")
#format(x = df.turnerC7$crude_Oil_calib_fluo, digits = 5, format = "f")
# re-assign clean column names; assumes every Step 1 file shares this
# 15-column layout -- TODO confirm against the Step 1 output
colnames(df.turnerC7) <- c("Date_Time","depth","temp_C","Chl_a_raw_fluor","Chl_a_Calib_fluor","CDOM_raw_fluor","CDOM_calib_fluor",
"turbidity_raw_fluor","turbidity_calib_fluor","crude_Oil_raw_fluor","crude_oil_Calib_fluor", "phycoer_raw_fluor","phycoer_calib_fluor",
"phycocy_raw_fluor","phycocy_calib_fluor")
# parse the timestamp; whole seconds define the averaging bins below
df.turnerC7$AggDate_Time <- as.POSIXct(df.turnerC7$Date_Time, format = "%Y-%m-%d %H:%M:%S" )
#Second-averaged fluorescence data (YMDHMS)
y <- ddply(df.turnerC7, c("AggDate_Time"), summarise, mean(depth), mean(temp_C), mean(Chl_a_raw_fluor), mean(Chl_a_Calib_fluor), mean(CDOM_raw_fluor),
mean(CDOM_calib_fluor), mean(turbidity_raw_fluor), mean(turbidity_calib_fluor), mean(crude_Oil_raw_fluor), mean(crude_oil_Calib_fluor),
mean(phycoer_raw_fluor), mean(phycoer_calib_fluor), mean(phycocy_raw_fluor), mean(phycocy_calib_fluor))
y.na <- na.omit(y) #Remove rows with NAs
#Assign column names to summarized data
colnames(y.na) <- c("Date_Time","AvgDepth","AvgTemp_C","AvgChl-a_raw_fluor","AvgChl-a_Calib_fluor","AvgCDOM_raw_fluor","AvgCDOM_calib_fluor",
"Avgturbidity_raw_fluor","Avgturbidity_calib_fluor","Avgcrude_Oil_raw_fluor","Avgcrude_oil_Calib_fluor", "Avgphycoer_raw_fluor",
"Avgphycoer_calib_fluor","Avgphycocy_raw_fluor","Avgphycocy_calib_fluor")
#write new text file
suppressWarnings(dir.create("YMDHMS")) #remove YMDHMS to create directory for high-resolution data
write.table(y.na, "YMDHMS/turnerC7_20140527_20140615_YMDHMS.txt", row.names=FALSE, sep="\t")
|
library(shiny)
library(DT)
fluidPage(titlePanel("DaKaMon Importer"),
tabsetPanel(
tabPanel("FoI generation",
sidebarLayout(
sidebarPanel(
fileInput("csvFileFoI", "Select a FoI file for upload."),
checkboxInput("headerFoI", "Header", TRUE),
fluidRow(column(
6,
textInput(
"sepFoI",
"Column separator:",
value = ";",
width = "80%"
)
),
column(
6,
textInput(
"decFoI",
"Decimal separator:",
value = ".",
width = "80%"
)
)),
textInput("exclRowFoI", "Exclude rows:"),
textInput("exclColFoI", "Exclude columns:"),
actionButton("goFoI", "store in DB"),
width = 2
),
mainPanel(dataTableOutput('tableFoI'))
)),
tabPanel("Data upload",
sidebarLayout(
sidebarPanel(
fileInput("csvFileData", "Select a data file for upload."),
checkboxInput("headerData", "Header", TRUE),
fluidRow(column(
6,
textInput(
"sepData",
"Column separator:",
value = ";",
width = "80%"
)
),
column(
6,
textInput(
"decData",
"Decimal separator:",
value = ".",
width = "80%"
)
)),
textInput("exclRowData", "Exclude rows:"),
textInput("exclColData", "Exclude columns:"),
actionButton("goData", "store in DB"),
width = 2
),
mainPanel(dataTableOutput('tableData'))
))
)) | /DaKaMon/import/ui.R | no_license | BenGraeler/demos | R | false | false | 2,708 | r | library(shiny)
library(DT)
fluidPage(titlePanel("DaKaMon Importer"),
tabsetPanel(
tabPanel("FoI generation",
sidebarLayout(
sidebarPanel(
fileInput("csvFileFoI", "Select a FoI file for upload."),
checkboxInput("headerFoI", "Header", TRUE),
fluidRow(column(
6,
textInput(
"sepFoI",
"Column separator:",
value = ";",
width = "80%"
)
),
column(
6,
textInput(
"decFoI",
"Decimal separator:",
value = ".",
width = "80%"
)
)),
textInput("exclRowFoI", "Exclude rows:"),
textInput("exclColFoI", "Exclude columns:"),
actionButton("goFoI", "store in DB"),
width = 2
),
mainPanel(dataTableOutput('tableFoI'))
)),
tabPanel("Data upload",
sidebarLayout(
sidebarPanel(
fileInput("csvFileData", "Select a data file for upload."),
checkboxInput("headerData", "Header", TRUE),
fluidRow(column(
6,
textInput(
"sepData",
"Column separator:",
value = ";",
width = "80%"
)
),
column(
6,
textInput(
"decData",
"Decimal separator:",
value = ".",
width = "80%"
)
)),
textInput("exclRowData", "Exclude rows:"),
textInput("exclColData", "Exclude columns:"),
actionButton("goData", "store in DB"),
width = 2
),
mainPanel(dataTableOutput('tableData'))
))
)) |
library(ggplot2)
setwd("~/Documents/git/cocolab/determiners/experiments/2-choose-utterance/Submiterator-master")
d = read.table("choose-utterance-trials.tsv",sep="\t",header=T)
head(d)
s = read.table("choose-utterance-subject_information.tsv",sep="\t",header=T)
head(s)
d$language = s$language[match(d$workerid,s$workerid)]
summary(d)
d$scene <- as.character(d$scene)
ggplot(d, aes(x=scene,fill=response))+
geom_histogram(position=position_dodge())+
#facet_wrap(~verb) +
ylab("count\n")+
xlab("\nscene")+
theme_bw()
ggsave("../results/utterance_hist.pdf",height=3.5)
## load in comprehension data
c = read.table("~/Documents/git/cocolab/determiners/experiments/1-infer-scene/Submiterator-master/infer-scene-trials.tsv",sep="\t",header=T)
head(c)
cs = read.table("~/Documents/git/cocolab/determiners/experiments/1-infer-scene/Submiterator-master/infer-scene-subject_information.tsv",sep="\t",header=T)
head(cs)
c$language = cs$language[match(c$workerid,cs$workerid)]
summary(c)
c$choice <- as.character(c$choice)
head(c)
## merge comprehension and production
head(d)
d$verb = "is"
d[nchar(as.character(d$response))==7,]$verb = "not"
d[nchar(as.character(d$response))==5,]$verb = "not"
d$determiner = "A"
d[nchar(as.character(d$response))>5,]$determiner = "The"
d$expt = "production"
head(c)
c$expt = "comprehension"
c$response = paste(c$determiner,"_",c$verb,sep="")
c$scene = c$choice
d$response = paste(d$determiner,"_",d$verb,sep="")
d$choice = d$scene
d = rbind(d,c)
head(d)
# remove 2121 2122 2222 scens from comprehension
d <- subset(d, scene != "2121")
d <- subset(d, scene != "2122")
d <- subset(d, scene != "2222")
ggplot(d, aes(x=scene,fill=response))+
geom_histogram(position=position_dodge())+
facet_grid(expt~.,scales="free_y") +
ylab("count\n")+
xlab("\nscene")+
theme_bw()
ggsave("../results/prod_comp_hist.pdf")
| /experiments/2-choose-utterance/results/analysis.R | no_license | gscontras/determiners | R | false | false | 1,863 | r | library(ggplot2)
setwd("~/Documents/git/cocolab/determiners/experiments/2-choose-utterance/Submiterator-master")
d = read.table("choose-utterance-trials.tsv",sep="\t",header=T)
head(d)
s = read.table("choose-utterance-subject_information.tsv",sep="\t",header=T)
head(s)
d$language = s$language[match(d$workerid,s$workerid)]
summary(d)
d$scene <- as.character(d$scene)
ggplot(d, aes(x=scene,fill=response))+
geom_histogram(position=position_dodge())+
#facet_wrap(~verb) +
ylab("count\n")+
xlab("\nscene")+
theme_bw()
ggsave("../results/utterance_hist.pdf",height=3.5)
## load in comprehension data
c = read.table("~/Documents/git/cocolab/determiners/experiments/1-infer-scene/Submiterator-master/infer-scene-trials.tsv",sep="\t",header=T)
head(c)
cs = read.table("~/Documents/git/cocolab/determiners/experiments/1-infer-scene/Submiterator-master/infer-scene-subject_information.tsv",sep="\t",header=T)
head(cs)
c$language = cs$language[match(c$workerid,cs$workerid)]
summary(c)
c$choice <- as.character(c$choice)
head(c)
## merge comprehension and production
head(d)
d$verb = "is"
d[nchar(as.character(d$response))==7,]$verb = "not"
d[nchar(as.character(d$response))==5,]$verb = "not"
d$determiner = "A"
d[nchar(as.character(d$response))>5,]$determiner = "The"
d$expt = "production"
head(c)
c$expt = "comprehension"
c$response = paste(c$determiner,"_",c$verb,sep="")
c$scene = c$choice
d$response = paste(d$determiner,"_",d$verb,sep="")
d$choice = d$scene
d = rbind(d,c)
head(d)
# remove 2121 2122 2222 scens from comprehension
d <- subset(d, scene != "2121")
d <- subset(d, scene != "2122")
d <- subset(d, scene != "2222")
ggplot(d, aes(x=scene,fill=response))+
geom_histogram(position=position_dodge())+
facet_grid(expt~.,scales="free_y") +
ylab("count\n")+
xlab("\nscene")+
theme_bw()
ggsave("../results/prod_comp_hist.pdf")
|
##Masculine Coded Items and Frequency Table and bargraph #########
Masc_Coded_OS<-c(SERLIcat$'2_HammerOS', SERLIcat$'2_ShovelOS', SERLIcat$'2_SawOS',SERLIcat$'2_RifleOS', SERLIcat$'2_StethOS', SERLIcat$'2_BoxingGloveOS', SERLIcat$'2_FireHatOS', SERLIcat$'2_BadgeOS', SERLIcat$'2_CarOS', SERLIcat$'2_BatOS')
Masc_Coded_Freqtable<-table(Masc_Coded_OS)
height<-Masc_Coded_Freqtable
barnames<-c("Both, G", "Both, B", "Girls", "Boys", "Both")
barplot(height, names.arg=barnames, ylim=c(0,1500), main= "Responses to Masculine Coded Items")
##Feminine Coded Items and Frequency Table and bargraph ########
Fem_Coded_OS<-c(SERLIcat$'1_DeskOS', SERLIcat$'1_PitcherOS', SERLIcat$'2_StoveOS', SERLIcat$'1_BroomOS', SERLIcat$'1_AppleOS', SERLIcat$'1_DishwasherOS', SERLIcat$'2_BabyBottleOS', SERLIcat$'1_HairbrushOS', SERLIcat$'1_IronOS', SERLIcat$'1_NeedleOS')
Fem_Coded_Freqtable<-table(Fem_Coded_OS)
height<-Fem_Coded_Freqtable
barnames<-c("Both, G", "Both, B", "Girls", "Boys", "Both")
barplot(height, names.arg=barnames, ylim=c(0,1500), main= "Responses to Feminine Coded Items")
##All OS Items and Frequency Table and Bargraph #####
Total_OS<-c(SERLIcat$'1_DeskOS', SERLIcat$'1_PitcherOS', SERLIcat$'2_StoveOS', SERLIcat$'1_BroomOS', SERLIcat$'1_AppleOS', SERLIcat$'1_DishwasherOS', SERLIcat$'2_BabyBottleOS', SERLIcat$'1_HairbrushOS', SERLIcat$'1_IronOS', SERLIcat$'1_NeedleOS', SERLIcat$'2_HammerOS', SERLIcat$'2_ShovelOS', SERLIcat$'2_SawOS',SERLIcat$'2_RifleOS', SERLIcat$'2_StethOS', SERLIcat$'2_BoxingGloveOS', SERLIcat$'2_FireHatOS', SERLIcat$'2_BadgeOS', SERLIcat$'2_CarOS', SERLIcat$'2_BatOS')
Total_Freqtable<-table(Total_OS)
height<-Total_Freqtable
barnames<-c("Both, G", "Both, B", "Girls", "Boys", "Both")
barplot(height, names.arg=barnames, ylim=c(0,1500), main= "Responses to All Items")
| /OSItemsbycode_frequency.R | no_license | jtorflint/SERLI_Katz | R | false | false | 1,829 | r |
##Masculine Coded Items and Frequency Table and bargraph #########
Masc_Coded_OS<-c(SERLIcat$'2_HammerOS', SERLIcat$'2_ShovelOS', SERLIcat$'2_SawOS',SERLIcat$'2_RifleOS', SERLIcat$'2_StethOS', SERLIcat$'2_BoxingGloveOS', SERLIcat$'2_FireHatOS', SERLIcat$'2_BadgeOS', SERLIcat$'2_CarOS', SERLIcat$'2_BatOS')
Masc_Coded_Freqtable<-table(Masc_Coded_OS)
height<-Masc_Coded_Freqtable
barnames<-c("Both, G", "Both, B", "Girls", "Boys", "Both")
barplot(height, names.arg=barnames, ylim=c(0,1500), main= "Responses to Masculine Coded Items")
##Feminine Coded Items and Frequency Table and bargraph ########
Fem_Coded_OS<-c(SERLIcat$'1_DeskOS', SERLIcat$'1_PitcherOS', SERLIcat$'2_StoveOS', SERLIcat$'1_BroomOS', SERLIcat$'1_AppleOS', SERLIcat$'1_DishwasherOS', SERLIcat$'2_BabyBottleOS', SERLIcat$'1_HairbrushOS', SERLIcat$'1_IronOS', SERLIcat$'1_NeedleOS')
Fem_Coded_Freqtable<-table(Fem_Coded_OS)
height<-Fem_Coded_Freqtable
barnames<-c("Both, G", "Both, B", "Girls", "Boys", "Both")
barplot(height, names.arg=barnames, ylim=c(0,1500), main= "Responses to Feminine Coded Items")
##All OS Items and Frequency Table and Bargraph #####
Total_OS<-c(SERLIcat$'1_DeskOS', SERLIcat$'1_PitcherOS', SERLIcat$'2_StoveOS', SERLIcat$'1_BroomOS', SERLIcat$'1_AppleOS', SERLIcat$'1_DishwasherOS', SERLIcat$'2_BabyBottleOS', SERLIcat$'1_HairbrushOS', SERLIcat$'1_IronOS', SERLIcat$'1_NeedleOS', SERLIcat$'2_HammerOS', SERLIcat$'2_ShovelOS', SERLIcat$'2_SawOS',SERLIcat$'2_RifleOS', SERLIcat$'2_StethOS', SERLIcat$'2_BoxingGloveOS', SERLIcat$'2_FireHatOS', SERLIcat$'2_BadgeOS', SERLIcat$'2_CarOS', SERLIcat$'2_BatOS')
Total_Freqtable<-table(Total_OS)
height<-Total_Freqtable
barnames<-c("Both, G", "Both, B", "Girls", "Boys", "Both")
barplot(height, names.arg=barnames, ylim=c(0,1500), main= "Responses to All Items")
|
/Rcode_Kernel SVM.R | no_license | sawantmayur47/1-Alert | R | false | false | 1,688 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/init.R
\name{initAFFMeanCD}
\alias{initAFFMeanCD}
\title{Initialisation of AFF change detector}
\usage{
initAFFMeanCD(alpha = 0.01, eta = 0.01, BL = 50)
}
\arguments{
\item{alpha}{The value of the significance level.
Default value is \code{0.01}, although it is
recommended that the user set this parameter.}
\item{eta}{The value of the step-size in the gradient descent.
Default is \code{eta=0.01}.}
\item{BL}{The length of the burn-in region. Default value is \code{BL=50}.
Must be at least greater than or equal to \code{2}. No maximum.
However, there is an exception: \code{BL=0} also works, but in
this case the user needs to specify the \code{streamEstMean} and
\code{streamEstSigma}; see the third example (\code{affmeancd3}) below.}
}
\description{
This function makes it simple to initalise an FFF object.
}
\examples{
library(Rcpp)
affmeancd1 <- initAFFMeanCD() # initialises with alpha=0.01,
# eta=0.01 and BL=50
affmeancd2 <- initAFFMeanCD(alpha=0.005, eta=0.1, BL=100)
affmeancd3 <- initAFFMeanCD(alpha=0.005, eta=0.1, BL=0) # Example 3
affmeancd3$streamEstMean <- 0
affmeancd3$streamEstSigma <- 1
}
| /ffstream/man/initAFFMeanCD.Rd | no_license | akhikolla/InformationHouse | R | false | true | 1,220 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/init.R
\name{initAFFMeanCD}
\alias{initAFFMeanCD}
\title{Initialisation of AFF change detector}
\usage{
initAFFMeanCD(alpha = 0.01, eta = 0.01, BL = 50)
}
\arguments{
\item{alpha}{The value of the significance level.
Default value is \code{0.01}, although it is
recommended that the user set this parameter.}
\item{eta}{The value of the step-size in the gradient descent.
Default is \code{eta=0.01}.}
\item{BL}{The length of the burn-in region. Default value is \code{BL=50}.
Must be at least greater than or equal to \code{2}. No maximum.
However, there is an exception: \code{BL=0} also works, but in
this case the user needs to specify the \code{streamEstMean} and
\code{streamEstSigma}; see the third example (\code{affmeancd3}) below.}
}
\description{
This function makes it simple to initalise an FFF object.
}
\examples{
library(Rcpp)
affmeancd1 <- initAFFMeanCD() # initialises with alpha=0.01,
# eta=0.01 and BL=50
affmeancd2 <- initAFFMeanCD(alpha=0.005, eta=0.1, BL=100)
affmeancd3 <- initAFFMeanCD(alpha=0.005, eta=0.1, BL=0) # Example 3
affmeancd3$streamEstMean <- 0
affmeancd3$streamEstSigma <- 1
}
|
### function to get reasons from a retraction xml file
get_reasons <- function(fn){
  # Extract the retraction-reason labels from one saved Retraction Watch
  # page (an XML/HTML file on disk).
  #
  # fn      : path to the xml/html file
  # returns : character vector of reasons, leading "+" stripped
  library(xml2)  # was require(); library() fails loudly if xml2 is missing
  # read xml
  data <- read_xml(fn, as_html = TRUE)
  # find all div nodes
  nodes <- xml_find_all(data, ".//div")
  # filter out the nodes where the attribute class="rReason"
  nodes <- nodes[which(xml_attr(nodes, "class") == "rReason")]
  # get values (as character strings)
  reasons <- xml_text(nodes)
  # each reason carries a single leading "+"; sub() replaces only the
  # first match, which is all that is needed here
  reasons <- sub(pattern = "\\+", replacement = "", x = reasons)
  return(reasons)
}
### function to get the number of retracted papers
get_number_of_retractions <- function(fn){
  # Count the retracted papers listed in one saved Retraction Watch page.
  #
  # fn      : path to the xml/html file
  # returns : integer number of table rows with class="mainrow"
  library(xml2)  # was require(); library() fails loudly if xml2 is missing
  # read xml
  data <- read_xml(fn, as_html = TRUE)
  # find all table-row nodes
  nodes <- xml_find_all(data, ".//tr")
  # keep only the rows where the attribute class="mainrow"
  nodes <- nodes[which(xml_attr(nodes, "class") == "mainrow")]
  # get total number of entries
  n_entries <- length(nodes)
  return(n_entries)
}
### files and labels
# every *.xml file under data/ is one Retraction Watch export; the label is
# the file name with the "_<n>_entries" suffix and the extension stripped
retraction_files = list.files(path = "data/", pattern = "*.xml", full.names = T)
retraction_labels = gsub(x = basename(retraction_files), pattern = "_[0-9]+_entries.xml$|.xml$", replacement = "")
### read retraction files
# per-file retraction counts and reason vectors, keyed by label
n_retractions = sapply(retraction_files, get_number_of_retractions)
names(n_retractions) = retraction_labels
reasons = lapply(retraction_files, get_reasons)
names(reasons) = retraction_labels
### add overall reasons
# "overall" pools the field-specific files; the journal- and affiliation-based
# exports are excluded — presumably because they overlap the field-specific
# sets and would double count entries (TODO confirm)
overall_field_labels = retraction_labels[! retraction_labels %in% c("nature_science_cell_all_time", "jhu_affiliation_all_time") ]
overall_n_retraction = sum(n_retractions[overall_field_labels])
overall_reasons = unlist(reasons[overall_field_labels])
retraction_labels[length(retraction_labels) + 1] = "overall"
n_retractions["overall"] = overall_n_retraction
reasons[["overall"]] = overall_reasons
### get top reasons in each field
# number of most frequent reasons to keep per field/label
n_top_reasons = 10
top_reasons = lapply(reasons, function(rs){
  counts = table(rs)
  counts = counts[order(counts, decreasing = TRUE)]
  # head() copes with fields that have fewer than n_top_reasons distinct
  # reasons; the original counts[seq_len(n_top_reasons)] padded the result
  # with NA entries in that case, which would pollute the plots downstream
  counts = head(counts, n_top_reasons)
  return(counts)
})
### plot
# one PDF page per label: a horizontal bar chart of that label's top reasons,
# each bar annotated with "<count>/<total retractions for the label>"
library(ggplot2)
plt_fn = "results/top_reasons_for_retractions.pdf"
pdf(plt_fn, width = 7, height = 4)
for(lbl in retraction_labels){
  tr = top_reasons[[lbl]]
  # order the factor levels by count so the bars come out sorted after
  # coord_flip(); stringsAsFactors = F keeps count_label as character
  plt_df = data.frame(reason = factor(names(tr), levels = names(tr)[order(tr)]),
                      count = as.numeric(tr),
                      count_label = sprintf("%s/%s", as.numeric(tr), n_retractions[lbl]),
                      stringsAsFactors = F)
  g <- ggplot(data=plt_df, aes(x=reason, y=count)) +
    geom_bar(stat="identity", fill="steelblue") +
    geom_text(aes(label=count_label), hjust=1.1, color="white", size=3.5)+
    theme_bw() +
    xlab("") +
    ylab("Number of retracted articles") +
    ggtitle(sprintf("Top reasons: %s", lbl)) +
    coord_flip()
  print(g)   # explicit print() is required inside a loop for ggplot objects
}
dev.off()
| /retraction_reasons.R | permissive | alorchhota/retraction | R | false | false | 2,770 | r | ### function to get reasons from a retraction xml file
get_reasons <- function(fn){
require(xml2)
# read xml
data = read_xml(fn, as_html = T)
# find all div nodes
nodes<-xml_find_all(data, ".//div")
# filter out the nodes where the attribute class="rReason"
nodes<-nodes[which(xml_attr(nodes, "class") == "rReason")]
# get values (as character strings)
reasons = xml_text(nodes)
reasons = sub(pattern = "\\+", replacement = "", x = reasons)
return(reasons)
}
### function to get the number of retracted papers
get_number_of_retractions <- function(fn){
require(xml2)
# read xml
data = read_xml(fn, as_html = T)
# find all div nodes
nodes<-xml_find_all(data, ".//tr")
# filter out the nodes where the attribute class="rReason"
nodes<-nodes[which(xml_attr(nodes, "class") == "mainrow")]
# get total number of entries
n_entries <- length(nodes)
return(n_entries)
}
### files and labels
retraction_files = list.files(path = "data/", pattern = "*.xml", full.names = T)
retraction_labels = gsub(x = basename(retraction_files), pattern = "_[0-9]+_entries.xml$|.xml$", replacement = "")
### read retraction files
n_retractions = sapply(retraction_files, get_number_of_retractions)
names(n_retractions) = retraction_labels
reasons = lapply(retraction_files, get_reasons)
names(reasons) = retraction_labels
### add overall reasons
overall_field_labels = retraction_labels[! retraction_labels %in% c("nature_science_cell_all_time", "jhu_affiliation_all_time") ]
overall_n_retraction = sum(n_retractions[overall_field_labels])
overall_reasons = unlist(reasons[overall_field_labels])
retraction_labels[length(retraction_labels) + 1] = "overall"
n_retractions["overall"] = overall_n_retraction
reasons[["overall"]] = overall_reasons
### get top reasons in each field
n_top_reasons = 10
top_reasons = lapply(reasons, function(rs){
counts = table(rs)
counts = counts[order(counts, decreasing = T)]
counts =counts[seq_len(n_top_reasons)]
return(counts)
})
### plot
library(ggplot2)
plt_fn = "results/top_reasons_for_retractions.pdf"
pdf(plt_fn, width = 7, height = 4)
for(lbl in retraction_labels){
tr = top_reasons[[lbl]]
plt_df = data.frame(reason = factor(names(tr), levels = names(tr)[order(tr)]),
count = as.numeric(tr),
count_label = sprintf("%s/%s", as.numeric(tr), n_retractions[lbl]),
stringsAsFactors = F)
g <- ggplot(data=plt_df, aes(x=reason, y=count)) +
geom_bar(stat="identity", fill="steelblue") +
geom_text(aes(label=count_label), hjust=1.1, color="white", size=3.5)+
theme_bw() +
xlab("") +
ylab("Number of retracted articles") +
ggtitle(sprintf("Top reasons: %s", lbl)) +
coord_flip()
print(g)
}
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotBurnSummary.R
\name{plotBurnSummary}
\alias{plotBurnSummary}
\title{Plots summary of burns}
\usage{
plotBurnSummary(
dataPath,
typeSim,
lastYear,
theObject = NULL,
overwrite = FALSE
)
}
\arguments{
\item{dataPath}{character. Path to data}
\item{typeSim}{character. Which simulation is it? i.e. 'LandR_SCFM' | 'LandR.CS_fS'}
\item{lastYear}{numeric. The last year of the simulation to include in the summary}
\item{theObject}{optional object to be used for the summary; default \code{NULL}}
\item{overwrite}{logical. Default FALSE}
\item{years}{numeric. Years available/intended to be used for the giphy}
\item{saveRAS}{logical. Save the raster for posterior use?}
}
\value{
plot
}
\description{
Plots summary of burns
}
\author{
Tati Micheletti
}
| /man/plotBurnSummary.Rd | no_license | eliotmcintire/usefun | R | false | true | 677 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotBurnSummary.R
\name{plotBurnSummary}
\alias{plotBurnSummary}
\title{Plots summary of burns}
\usage{
plotBurnSummary(
dataPath,
typeSim,
lastYear,
theObject = NULL,
overwrite = FALSE
)
}
\arguments{
\item{dataPath}{character. Path to data}
\item{typeSim}{character. Which simulation is it? i.e. 'LandR_SCFM' | 'LandR.CS_fS'}
\item{overwrite}{logical. Default FALSE}
\item{years}{numeric. Years available/intended to be used for the giphy}
\item{saveRAS}{logical. Save the raster for posterior use?}
}
\value{
plot
}
\description{
Plots summary of burns
}
\author{
Tati Micheletti
}
|
# Total steps per day: sum `steps` within each `date`, dropping NAs.
# NOTE(review): assumes a data frame `activity` with columns `steps` and
# `date` is already loaded in the session — confirm against the calling script.
activity_total_steps <- with(activity, aggregate(steps, by = list(date), FUN = sum, na.rm = TRUE))
names(activity_total_steps) <- c("date", "steps")
# Histogram of the daily totals, 2500-step bins from 0 to 25000
hist(activity_total_steps$steps, main = "Total number of steps taken per day", xlab = "Total steps taken per day", col = "darkblue", ylim = c(0,20), breaks = seq(0,25000, by=2500))
# Central tendency of the daily totals (printed to the console)
mean(activity_total_steps$steps)
median(activity_total_steps$steps) | /steps taken per day.R | no_license | juliovidal14/Reproducible-research-course-project-1 | R | false | false | 409 | r | activity_total_steps <- with(activity, aggregate(steps, by = list(date), FUN = sum, na.rm = TRUE))
names(activity_total_steps) <- c("date", "steps")
hist(activity_total_steps$steps, main = "Total number of steps taken per day", xlab = "Total steps taken per day", col = "darkblue", ylim = c(0,20), breaks = seq(0,25000, by=2500))
mean(activity_total_steps$steps)
median(activity_total_steps$steps) |
\name{exercise_input}
\alias{exercise_input}
\docType{data}
\title{
An example dataset to demonstrate the usage of 'HDMT'
}
\description{
This example dataset was included to assess the mediation role of DNA methylation in the effect of exercise
on prostate cancer progression in a
Seattle-based cohort of patients diagnosed with clinically localized
PCa. The entire data set contains two sets of p-values from genome-wide testing of 450K CpG sites. Due to space limits,
a subset (10 percent) of the full dataset is included in the package for illustration.
The dataset is a matrix containing two columns of p-values for
candidate mediators. Column 1 contains the p-values for testing if an exposure is associated with
the mediator (alpha!=0). Column 2 contains the p-value for testing if a mediator is associated with
the outcome after adjusted for the exposure (beta!=0).
}
\usage{data("exercise_input")}
\format{
The format of exercise_input is:
num [1:47900, 1:2] 0.4966344 0.1048730 0.1005355 0.4946623 ...
}
\references{
James Y. Dai, Janet L. Stanford, Michael LeBlanc. A multiple-testing
procedure for high-dimensional mediation hypotheses,
Journal of the American Statistical Association, 2020, DOI: 10.1080/01621459.2020.1765785.
}
\examples{
data(exercise_input)
dim(exercise_input)
}
\keyword{datasets}
| /man/exercise_input.Rd | no_license | cran/HDMT | R | false | false | 1,333 | rd | \name{exercise_input}
\alias{exercise_input}
\docType{data}
\title{
An example dataset to demonstrate the usage of 'HDMT'
}
\description{
This example dataset was included to assess the mediation role of DNA methylation in the effect of exercise
on prostate cancer progression in a
Seattle-based cohort of patients diagnosed with clinically localized
PCa. The entire data set contains two sets of p-values from genome-wide testing of 450K CpG sites. Due to space limit,
a subset (10 percent) of the full dataset is included in the package for illustration.
The dataset is a matrix containing two columns of p-values for
candidate mediators. Column 1 contains the p-values for testing if an exposure is associated with
the mediator (alpha!=0). Column 2 contains the p-value for testing if a mediator is associated with
the outcome after adjusted for the exposure (beta!=0).
}
\usage{data("exercise_input")}
\format{
The format of exercise_input is:
num [1:47900, 1:2] 0.4966344 0.1048730 0.1005355 0.4946623 ...
}
\references{
James Y. Dai, Janet L. Stanford, Michael LeBlanc. A multiple-testing
procedure for high-dimensional mediation hypotheses,
Journal of the American Statistical Association, 2020, DOI: 10.1080/01621459.2020.1765785.
}
\examples{
data(exercise_input)
dim(exercise_input)
}
\keyword{dataset}
|
# Build a minimal rapbase config in which reshId 100000 has national access
# for the "noric" registry (name key "Nat") and write it to the current dir.
conf <- rapbase::getConfig()
conf$reg <- list(noric = list(nationalAccess = list(reshId = 100000)))
conf$reg$noric$nationalAccess$nameKey <- "Nat"
yaml::write_yaml(conf, file = "./rapbaseConfig.yml")
# Point R_RAP_CONFIG_PATH at the current directory so the temporary config is
# picked up; the original value is saved here and restored below.
orig_path <- Sys.getenv("R_RAP_CONFIG_PATH")
Sys.setenv(R_RAP_CONFIG_PATH = getwd())
test_that("a registry name can be provided", {
  # a reshId with configured national access gets the name key appended...
  expect_equal(makeRegistryName("noric", 100000), "noricNat")
  # ...any other reshId gets the id itself appended
  expect_equal(makeRegistryName("noric", 100001), "noric100001")
})
# clean-up and recreate environment
file.remove("./rapbaseConfig.yml")
Sys.setenv(R_RAP_CONFIG_PATH = orig_path)
| /tests/testthat/test-makeRegistryName.R | permissive | Rapporteket/NORIC | R | false | false | 578 | r | conf <- rapbase::getConfig()
conf$reg <- list(noric = list(nationalAccess = list(reshId = 100000)))
conf$reg$noric$nationalAccess$nameKey <- "Nat"
yaml::write_yaml(conf, file = "./rapbaseConfig.yml")
orig_path <- Sys.getenv("R_RAP_CONFIG_PATH")
Sys.setenv(R_RAP_CONFIG_PATH = getwd())
test_that("a registry name can be provided", {
expect_equal(makeRegistryName("noric", 100000), "noricNat")
expect_equal(makeRegistryName("noric", 100001), "noric100001")
})
# clean-up and recreate environment
file.remove("./rapbaseConfig.yml")
Sys.setenv(R_RAP_CONFIG_PATH = orig_path)
|
library(hash)
### Name: clear
### Title: Removes all key-value pairs from a hash
### Aliases: clear clear-methods clear,hash-method
### Keywords: methods data manip
### ** Examples
# build a hash mapping "a" -> 1 ... "z" -> 26
h <- hash( letters, 1:26 )
h # An object of type 'hash' containing 26 key-value pairs.
# clear() empties the hash in place (hash objects have reference semantics)
clear(h)
h # An object of type 'hash' containing 0 key-value pairs.
| /data/genthat_extracted_code/hash/examples/clear.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 355 | r | library(hash)
### Name: clear
### Title: Removes all key-value pairs from a hash
### Aliases: clear clear-methods clear,hash-method
### Keywords: methods data manip
### ** Examples
h <- hash( letters, 1:26 )
h # An object of type 'hash' containing 26 key-value pairs.
clear(h)
h # An object of type 'hash' containing 0 key-value pairs.
|
/Flores Sheyla - Tipo de producción/Flores Sheyla - Tipo de producción.R | no_license | Sheyla28/Hello_World | R | false | false | 8,514 | r | ||
### RFM (Recency / Frequency / Monetary) scoring of retail customers.
### Inputs (chosen interactively via file.choose()): a customer response file
### and a transactions file with columns customer_id, trans_date, tran_amount.
library(lubridate)
library(xlsx)
library(dplyr)
library(doBy)
library(data.table)
# load the two raw files
Retail_Data_R <- read.csv(file.choose())
Retail_Data_T <- read.csv(file.choose())
# quick sanity checks on structure and size
str(Retail_Data_R)
str(Retail_Data_T)
dim(Retail_Data_R)
dim(Retail_Data_T)
Retail_Data_T$trans_date<-dmy(Retail_Data_T$trans_date) #date month year
length(Retail_Data_R$customer_id)
length(unique(Retail_Data_T$customer_id))
max(Retail_Data_T$trans_date)
min(Retail_Data_T$trans_date)
# working in doBy library: per-customer total spend and transaction count
Cust_Sum<- summaryBy(tran_amount~customer_id, data = Retail_Data_T,
                     FUN=c(sum,length))
Cust_Sum<- rename(Cust_Sum, Amt_sum = tran_amount.sum, Frequency=tran_amount.length)
# get the max date for each customer
# (ave = group-wise statistic broadcast back to every row of the group)
Retail_Data_T$Max_date <- with(Retail_Data_T, ave(trans_date, customer_id, FUN=max))
DB <- merge(Retail_Data_R,Cust_Sum,by="customer_id")
DB1 <- merge(DB, Retail_Data_T, by="customer_id")
# remove duplicates based on customer_id columns
DB2 <- DB1[!duplicated(DB1$customer_id),]
DB2<- DB2[-c(2,5:6)]
# recency = days from last purchase to the 2015-04-01 reference date
df_RFM <- DB2 %>%
  group_by(customer_id) %>%
  summarise(recency=as.numeric(as.Date("2015-04-01")-max(Max_date)),
            frequenci=Frequency, monitery=Amt_sum)
summary(df_RFM)
head(df_RFM)
# CREATING R, F, M LEVELS: cut() into 5 equal-width bins
df_RFM$rankR = cut(df_RFM$recency, 5, labels = F) # rankR 1 is very recent while rankR 5 is least recent
df_RFM$rankF = cut(df_RFM$frequenci, 5, labels = F) # rankF 1 is least frequent while rankF 5 is most frequent
df_RFM$rankM = cut(df_RFM$monitery, 5, labels = F) # rankM 1 is lowest sales while rankM 5 is highest
# combined RFM score as a 3-digit code (R = hundreds, F = tens, M = units);
# with() replaces the original attach(df_RFM), which left the data frame on
# the search path and could silently shadow other objects
df_RFM$Total <- with(df_RFM, (rankR*100)+(rankF*10)+(rankM))
| /RFM.R | no_license | devdatta95/RFM-For-customer-analysis-in-R | R | false | false | 1,779 | r | library(lubridate)
library(xlsx)
library(dplyr)
library(doBy)
library(data.table)
Retail_Data_R <- read.csv(file.choose())
Retail_Data_T <- read.csv(file.choose())
str(Retail_Data_R)
str(Retail_Data_T)
dim(Retail_Data_R)
dim(Retail_Data_T)
Retail_Data_T$trans_date<-dmy(Retail_Data_T$trans_date) #date month year
length(Retail_Data_R$customer_id)
length(unique(Retail_Data_T$customer_id))
max(Retail_Data_T$trans_date)
min(Retail_Data_T$trans_date)
#working in doby library and groupwise summary statistic
Cust_Sum<- summaryBy(tran_amount~customer_id, data = Retail_Data_T,
FUN=c(sum,length))
Cust_Sum<- rename(Cust_Sum, Amt_sum = tran_amount.sum, Frequency=tran_amount.length)
# get the max data for each customer
# ave = group averages over level combinations of factors
Retail_Data_T$Max_date <- with(Retail_Data_T, ave(trans_date, customer_id, FUN=max))
DB <- merge(Retail_Data_R,Cust_Sum,by="customer_id")
DB1 <- merge(DB, Retail_Data_T, by="customer_id")
# remove duplicates based on customer_id columns
DB2 <- DB1[!duplicated(DB1$customer_id),]
DB2<- DB2[-c(2,5:6)]
# group_by work in dplyr library
df_RFM <- DB2 %>%
group_by(customer_id) %>%
summarise(recency=as.numeric(as.Date("2015-04-01")-max(Max_date)),
frequenci=Frequency, monitery=Amt_sum)
summary(df_RFM)
head(df_RFM)
# CREATING R, F, M LEVELS
df_RFM$rankR = cut(df_RFM$recency, 5, labels = F) # RANKR 1 IS VERY RECENT WHILE RANKR 5 IS LEAST RECENT
df_RFM$rankF = cut(df_RFM$frequenci, 5, labels = F) #rankF 1 is least frequent while rankF 5 IS MOST FREQUENT
df_RFM$rankM = cut(df_RFM$monitery, 5, labels = F)# rankM 1 is lowest sales while rankM
# total
attach(df_RFM)
df_RFM$Total <- (rankR*100)+(rankF*10)+(rankM)
|
\alias{gtkRangeSetFlippable}
\name{gtkRangeSetFlippable}
\title{gtkRangeSetFlippable}
\description{If a range is flippable, it will switch its direction if it is
horizontal and its direction is \code{GTK_TEXT_DIR_RTL}.}
\usage{gtkRangeSetFlippable(object, flippable)}
\arguments{
\item{\verb{object}}{a \code{\link{GtkRange}}}
\item{\verb{flippable}}{\code{TRUE} to make the range flippable}
}
\details{See \code{\link{gtkWidgetGetDirection}}.
Since 2.18}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /RGtk2/man/gtkRangeSetFlippable.Rd | no_license | lawremi/RGtk2 | R | false | false | 529 | rd | \alias{gtkRangeSetFlippable}
\name{gtkRangeSetFlippable}
\title{gtkRangeSetFlippable}
\description{If a range is flippable, it will switch its direction if it is
horizontal and its direction is \code{GTK_TEXT_DIR_RTL}.}
\usage{gtkRangeSetFlippable(object, flippable)}
\arguments{
\item{\verb{object}}{a \code{\link{GtkRange}}}
\item{\verb{flippable}}{\code{TRUE} to make the range flippable}
}
\details{See \code{\link{gtkWidgetGetDirection}}.
Since 2.18}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions_manip_db.R
\name{divid_plot}
\alias{divid_plot}
\title{Divide 1 ha square plots into 25 square subplots of 400m² following a regular 5*5 grid}
\usage{
divid_plot(coordinates_sf, plot_name = "plot_name")
}
\arguments{
\item{coordinates_sf}{a spatial object representing the plot geometries, either a \code{SpatialPolygonsDataFrame} or \code{sf} object. Each line should correspond to a single plot.}
\item{plot_name}{the name as character of the column where the plot name are stored. Default value is 'plot_name'.}
}
\value{
A \code{sf} object with the 25 subplots geometries with 2 fields : sous_plot_name and plot_name for each plot.
}
\description{
Divide 1 ha square plots into 25 square subplots of 400m² following a regular 5*5 grid
}
\details{
The function takes either a \code{SpatialPolygonsDataFrame} or \code{sf} object containing the plot geometries and the plot names. For each plot, it first
identifies the 4 corners, then creates the 25 square subplots following a regular 5*5 grid. The subplots are named using the xy coordinates inside the plot, starting
from 0_0 for the southeasterly corner to 80_80 for the northwesterly corner.
}
\examples{
## Test 1
# Define the coordinates of the 4 corners
x1 <- c(0, 1, 1, 0, 0)
y1 <- c(0, 0, 1, 1, 0)
x2 <- c(2, 2, 3, 3, 2)
y2 <- c(1, 2, 2, 1, 1)
# Combine x and y coordinates into matrix
coords1 <- cbind(x1, y1)
coords2 <- cbind(x2, y2)
# Rotate by 20 degrees the matrix coord1
angle <- pi/9 # angle in radians
rotation_mat <- matrix(c(cos(angle), sin(angle), -sin(angle), cos(angle)), nrow = 2)
coords1 <- coords1 \%*\% rotation_mat
# Create SF object
poly1 <- st_sfc(st_polygon(list(coords1)))
poly2 <- st_sfc(st_polygon(list(coords2)))
coordinates_sf <-
st_as_sf(data.frame(
plot_name = c('Plot_001', 'Plot_002'),
geometry = c(poly1, poly2)
))
#Plot
plot(coordinates_sf$geometry)
# Divide the plot into smaller squares
sub_plot <- divid_plot(coordinates_sf = coordinates_sf, plot_name = 'plot_name')
# Plot the plots and the result subplots
par(mfrow = c(1, 2))
plot(coordinates_sf$geometry, main = "Plots")
plot(sub_plot$geometry, main = "Subplots")
# Plot the plots and the result subplots
library(ggplot2)
ggplot(sub_plot) +
geom_sf() +
scale_fill_continuous(type = 'viridis')+
geom_sf_text(aes(label = as.character(sous_plot_name)))
## Test 2
library(plotsdatabase)
# Extract datas
x <- query_plots(locality_name = "Mbalmayo", extract_individuals = TRUE, show_all_coordinates = TRUE)
coordinates_sf <- x$coordinates_sf
sub_plot <- divid_plot(coordinates_sf,'plot_name')
par(mfrow = c(1, 1))
for(i in 1:length(unique(sub_plot$plot_name))) {
print(ggplot(sub_plot \%>\% filter(plot_name == unique(plot_name)[i])) +
geom_sf() +
scale_fill_continuous(type = 'viridis')+
geom_sf_label(aes(label = as.character(sous_plot_name))) +
ggtitle(paste(unique(unique(sub_plot$plot_name)[i]))) )
}
}
\author{
Hugo Leblanc
}
| /man/divid_plot.Rd | no_license | gdauby/bdd_plots_central_africa | R | false | true | 3,043 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions_manip_db.R
\name{divid_plot}
\alias{divid_plot}
\title{Divide 1 ha square plots into 25 squares subplots of 400m² following a regular 5*5 grid}
\usage{
divid_plot(coordinates_sf, plot_name = "plot_name")
}
\arguments{
\item{coordinates_sf}{a spatial object representing the plot geometries, either a \code{SpatialPolygonsDataFrame} or \code{sf} object. Each line should correspond to a single plot.}
\item{plot_name}{the name as character of the column where the plot name are stored. Default value is 'plot_name'.}
}
\value{
A \code{sf} object with the 25 subplots geometries with 2 fields : sous_plot_name and plot_name for each plot.
}
\description{
Divide 1 ha square plots into 25 squares subplots of 400m² following a regular 5*5 grid
}
\details{
The function takes either a \code{SpatialPolygonsDataFrame} or \code{sf} object containing the plot geometries and the plot names. For each plot, it first
identifies the 4 corners, then creates the 25 square subplots following a regular 5*5 grid. The subplots are named using the xy coordinates inside the plot, starting
from 0_0 for the southeasterly corner to 80_80 for the northwesternly.
}
\examples{
## Test 1
# Define the coordinates of the 4 corners
x1 <- c(0, 1, 1, 0, 0)
y1 <- c(0, 0, 1, 1, 0)
x2 <- c(2, 2, 3, 3, 2)
y2 <- c(1, 2, 2, 1, 1)
# Combine x and y coordinates into matrix
coords1 <- cbind(x1, y1)
coords2 <- cbind(x2, y2)
# Rotate by 20 degrees the matrix coord1
angle <- pi/9 # angle in radians
rotation_mat <- matrix(c(cos(angle), sin(angle), -sin(angle), cos(angle)), nrow = 2)
coords1 <- coords1 \%*\% rotation_mat
# Create SF object
poly1 <- st_sfc(st_polygon(list(coords1)))
poly2 <- st_sfc(st_polygon(list(coords2)))
coordinates_sf <-
st_as_sf(data.frame(
plot_name = c('Plot_001', 'Plot_002'),
geometry = c(poly1, poly2)
))
#Plot
plot(coordinates_sf$geometry)
# Divide the plot into smaller squares
sub_plot <- divid_plot(coordinates_sf = coordinates_sf, plot_name = 'plot_name')
# Plot the plots and the result subplots
par(mfrow = c(1, 2))
plot(coordinates_sf$geometry, main = "Plots")
plot(sub_plot$geometry, main = "Subplots")
# Plot the plots and the result subplots
library(ggplot2)
ggplot(sub_plot) +
geom_sf() +
scale_fill_continuous(type = 'viridis')+
geom_sf_text(aes(label = as.character(sous_plot_name)))
## Test 2
library(plotsdatabase)
# Extract datas
x <- query_plots(locality_name = "Mbalmayo", extract_individuals = TRUE, show_all_coordinates = TRUE)
coordinates_sf <- x$coordinates_sf
sub_plot <- divid_plot(coordinates_sf,'plot_name')
par(mfrow = c(1, 1))
for(i in 1:length(unique(sub_plot$plot_name))) {
print(ggplot(sub_plot \%>\% filter(plot_name == unique(plot_name)[i])) +
geom_sf() +
scale_fill_continuous(type = 'viridis')+
geom_sf_label(aes(label = as.character(sous_plot_name))) +
ggtitle(paste(unique(unique(sub_plot$plot_name)[i]))) )
}
}
\author{
Hugo Leblanc
}
|
# DAPSm regression tests on the packaged toyData2 example.
# Each test first fits a logistic propensity-score model of treatment Z on
# the confounders X1-X4 and stores the fitted values on the data.
test_that("DAPSm with fixed weight for toyData2", {
  data('toyData2')
  toyData2$prop.scores <- glm(Z ~ X1 + X2 + X3 + X4, family = binomial,
                              data = toyData2)$fitted.values
  # with a user-fixed weight, DAPSest should return that weight unchanged
  daps <- DAPSest(toyData2, out.col = 2, trt.col = 1, caliper = 0.3,
                  weight = 0.7, coords.columns = c(4, 5),
                  pairsRet = TRUE, cov.cols = 6:9, cutoff = 0.1,
                  coord_dist = TRUE, caliper_type = 'DAPS',
                  matching_algorithm = 'greedy')
  expect_equal(daps$weight, 0.7)
})
test_that("DAPSm with fast optimal for toyData2", {
  data('toyData2')
  toyData2$prop.scores <- glm(Z ~ X1 + X2 + X3 + X4, family = binomial,
                              data = toyData2)$fitted.values
  # weight = 'optimal' searches for the smallest balancing weight; the
  # expected values below are regression anchors for this dataset
  daps <- DAPSest(toyData2, out.col = 2, trt.col = 1, caliper = 0.3,
                  weight = 'optimal', coords.columns = c(4, 5), quiet = TRUE,
                  pairsRet = TRUE, cov.cols = 6:9, cutoff = 0.15,
                  coord_dist = TRUE, caliper_type = 'DAPS', w_tol = 0.05,
                  matching_algorithm = 'greedy')
  expect_equal(daps$weight, 0.515625)
  expect_equal(as.numeric(abs(daps$est - 1.2868) < 0.001), 1)
})
test_that("DAPSm with extensive optimal for toyData2", {
  data('toyData2')
  toyData2$prop.scores <- glm(Z ~ X1 + X2 + X3 + X4, family = binomial,
                              data = toyData2)$fitted.values
  # extensive search: compute balance over a grid of 40 weights, then let
  # DAPSchoiceModel pick the smallest weight satisfying the balance cutoff
  bal <- CalcDAPSWeightBalance(toyData2, weights = seq(0, 1, length.out = 40),
                               cov.cols = 6:9, trt.col = 1,
                               coords.columns = c(4, 5), caliper = 0.3,
                               matching_algorithm = 'greedy')
  # NOTE(review): this call passes `toyData`, not `toyData2` — possibly a
  # typo; confirm which dataset is intended
  daps <- DAPSchoiceModel(toyData, trt.col = 1, balance = bal$balance,
                          cutoff = 0.15, pairs = bal$pairs,
                          weights = seq(0, 1, length.out = 40))
  expect_equal(abs(daps$weight - 0.2307692) < 0.001, TRUE)
  expect_equal(daps$num_match, 55)
  # NOTE(review): bare print() looks like leftover debugging output
  print(abs(daps$est - 2.53958) < 0.1)
  expect_equal(as.numeric(abs(daps$est - 2.5) < 1), 1)
})
| /tests/testthat/tests.R | no_license | gpapadog/DAPSm | R | false | false | 2,056 | r | test_that("DAPSm with fixed weight for toyData2", {
data('toyData2')
toyData2$prop.scores <- glm(Z ~ X1 + X2 + X3 + X4, family = binomial,
data = toyData2)$fitted.values
daps <- DAPSest(toyData2, out.col = 2, trt.col = 1, caliper = 0.3,
weight = 0.7, coords.columns = c(4, 5),
pairsRet = TRUE, cov.cols = 6:9, cutoff = 0.1,
coord_dist = TRUE, caliper_type = 'DAPS',
matching_algorithm = 'greedy')
expect_equal(daps$weight, 0.7)
})
test_that("DAPSm with fast optimal for toyData2", {
data('toyData2')
toyData2$prop.scores <- glm(Z ~ X1 + X2 + X3 + X4, family = binomial,
data = toyData2)$fitted.values
daps <- DAPSest(toyData2, out.col = 2, trt.col = 1, caliper = 0.3,
weight = 'optimal', coords.columns = c(4, 5), quiet = TRUE,
pairsRet = TRUE, cov.cols = 6:9, cutoff = 0.15,
coord_dist = TRUE, caliper_type = 'DAPS', w_tol = 0.05,
matching_algorithm = 'greedy')
expect_equal(daps$weight, 0.515625)
expect_equal(as.numeric(abs(daps$est - 1.2868) < 0.001), 1)
})
test_that("DAPSm with extensive optimal for toyData2", {
data('toyData2')
toyData2$prop.scores <- glm(Z ~ X1 + X2 + X3 + X4, family = binomial,
data = toyData2)$fitted.values
bal <- CalcDAPSWeightBalance(toyData2, weights = seq(0, 1, length.out = 40),
cov.cols = 6:9, trt.col = 1,
coords.columns = c(4, 5), caliper = 0.3,
matching_algorithm = 'greedy')
daps <- DAPSchoiceModel(toyData, trt.col = 1, balance = bal$balance,
cutoff = 0.15, pairs = bal$pairs,
weights = seq(0, 1, length.out = 40))
expect_equal(abs(daps$weight - 0.2307692) < 0.001, TRUE)
expect_equal(daps$num_match, 55)
print(abs(daps$est - 2.53958) < 0.1)
expect_equal(as.numeric(abs(daps$est - 2.5) < 1), 1)
})
|
#############################################################
### R-code for master thesis 1, 22.03.2017
### Written by Albert Kwame Osei-Owusu
#############################################################
### Initial stuff ###
# NOTE(review): rm(list=ls()) wipes the whole workspace — fine for a personal
# script, but it will delete objects of anyone who sources this file.
rm(list=ls())
## setting path to library
.libPaths("/Users/albertosei-owusu/Desktop/R Packages")
# load packages
#library(grid)
#library(gridExtra)
#library(cowplot)
pack<-c("car","sandwich","ggplot2","lmtest","ggrepel","RColorBrewer","plm",
        "dplyr","mgcv","foreign","xtable","AER","stargazer",
        "ggrepel","lfe","gridExtra","cowplot")
# require() returns FALSE instead of erroring when a package is missing, so
# a failed load here goes unnoticed until a function call fails later
lapply(pack, require, character.only=T)
# specify the path of the folder containing the dataset
path <- "/Users/albertosei-owusu/Desktop/Data/Gravity/"
setwd(path)
##specify the path of the folder containing the results of regressions
results="/Users/albertosei-owusu/Desktop/Data/Gravity/Tables"
#############################################################
### 1. Preliminaries ###
#############################################################
## 1.a Load dataset and create dummies
# CEPII gravity dataset (Stata format) read via foreign::read.dta
dat = read.dta(paste0(path,"col_regfile09.dta"))
## 2.a Run regression
# ISO3 codes of the 15 ECOWAS member states
ecowasset = c("BEN", "BFA", "CPV","CIV", "GMB", "GHA", "GIN", "GNB",
              "LBR","MLI","NGA", "SEN", "SLE", "TGO","NER")
##Change working directory
results="/Users/albertosei-owusu/Desktop/Data/Gravity/Tables"
path=setwd(results)
#############################################################
### 2. Average trade creation and diversion ###
#############################################################
# Note. It might be a good idea to consider a subset of the
# data. E.g. choose only the data from the 1985-2000 period
## 2.a Run regression
# bothinE = 1 when exporter AND importer are ECOWAS members after 1975
# (trade creation); oneinE = 1 when only the importer is a member
# (import diversion).
dat$bothinE = ifelse(dat$iso_o %in% ecowasset &
                     dat$iso_d %in% ecowasset &
                     dat$year >= 1975, 1, 0)
dat$oneinE = ifelse(!(dat$iso_o %in% ecowasset) &
                    dat$iso_d %in% ecowasset &
                    dat$year >= 1975, 1, 0)
# NOTE(review): `comesa` is never defined in this file — these two lines will
# error unless a COMESA member vector is created elsewhere; confirm.
dat$bothinC = ifelse(dat$iso_o %in% comesa &
                     dat$iso_d %in% comesa &
                     dat$year >= 1982, 1, 0)
dat$oneinC = ifelse(!(dat$iso_o %in% comesa) &
                    dat$iso_d %in% comesa &
                    dat$year >= 1982, 1, 0)
##Change working directory
results="/Users/albertosei-owusu/Desktop/Data/Gravity/Tables"
path=setwd(results)
#
# Cross-sectional OLS gravity regressions, one per selected year, on
# positive trade flows only (zeros are dropped by the log specification).
ols1 = lm(log(flow)~log(gdp_o)+log(gdp_d)+log(distw)+
            contig+comlang_off+col_hist+bothinE+oneinE,
          data = subset(dat, year == 1985 & flow > 0))
summary(ols1)
ols2 = lm(log(flow)~log(gdp_o)+log(gdp_d)+log(distw)+
            contig+comlang_off+col_hist+bothinE+oneinE,
          data = subset(dat, year == 1990 & flow > 0))
summary(ols2)
ols3 = lm(log(flow)~log(gdp_o)+log(gdp_d)+log(distw)+
            contig+comlang_off+col_hist+bothinE+oneinE,
          data = subset(dat, year == 1995 & flow > 0))
summary(ols3)
ols4 = lm(log(flow)~log(gdp_o)+log(gdp_d)+log(distw)+
            contig+comlang_off+col_hist+bothinE+oneinE,
          data = subset(dat, year == 2000 & flow > 0))
summary(ols4)
ols5 = lm(log(flow)~log(gdp_o)+log(gdp_d)+log(distw)+
            contig+comlang_off+col_hist+bothinE+oneinE,
          data = subset(dat, year == 2005 & flow > 0))
summary(ols5)
## Presenting results for selected years
# Text-format regression tables written into the results directory
stargazer(ols1, ols2,ols3,ols4,ols5, header=FALSE, type='text',
          title = "Table 1 : Simple Cross-sectional OLS estimation",
          no.space = TRUE,
          align = TRUE,
          column.labels=c("1985","1990","1995","2000","2005"),
          dep.var.labels=c("Trade flow"),
          covariate.labels=c("Exporter income","Importer income","Distance",
                             "Border","Common language",
                             "Colonial history","ECOWAS","OneinE"),
          out="ols_model1.txt",
          omit.stat= c("adj.rsq", "f", "ser"))
stargazer(ols1, ols2,ols3,ols4,ols5, header=FALSE, type='text',
          title = "Table 2 : Simple Cross-sectional OLS estimation",
          no.space = TRUE,
          align = TRUE,
          column.labels=c("1985","1990","1995","2000","2005"),
          dep.var.labels=c("Coefficients"),
          out="ols_model2.txt",
          omit.stat= c("adj.rsq", "f", "ser"))
# A) OLS with importer, exporter and year fixed effects
m1 = lm(log(flow)~factor(iso_o)+factor(iso_d)+factor(year)+
          log(distw)+col_cur+contig+comlang_off+col_hist+
          bothinE + oneinE,
        data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m1)
## double demeaning
# felm absorbs the fixed effects listed after "|" instead of estimating
# explicit dummies, which is much faster for high-dimensional FE
m11 = felm(log(flow)~log(gdp_o)+log(gdp_d)+log(distw)+
             col_cur+contig+comlang_off+col_hist+
             bothinE + oneinE | year + iso_o + iso_d,
           data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m11)
# B) OLS with time varying importer, exporter fixed effects
# and year fixed effects
# Generate time varying importer/exporter dummies
dat$iso_o_yr = paste0(dat$iso_o, dat$year)
dat$iso_d_yr = paste0(dat$iso_d, dat$year)
m2 = lm(log(flow)~factor(iso_o_yr)+factor(iso_d_yr)+factor(year)+
          log(distw) + contig + comlang_off + col_hist +
          + bothinE + oneinE,
        data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m2)
# NOTE(review): the following m2 is an exact duplicate of the call above and
# simply re-estimates the same model
m2 = lm(log(flow)~factor(iso_o_yr)+factor(iso_d_yr)+factor(year)+
          log(distw) + contig + comlang_off + col_hist +
          + bothinE + oneinE,
        data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m2)
## double demeaning
# NOTE(review): m22 is defined twice with different sample windows
# (1970:2000 then 1970:1990); the second definition overwrites the first
m22 = felm(log(flow)~log(distw)+
             + contig + comlang_off + col_hist +
             + bothinE + oneinE | year + iso_o_yr + iso_d_yr,
           data = subset(dat, flow>0 & year %in% 1970:2000))
summary(m22)
m22 = felm(log(flow)~log(distw)+
             + contig + comlang_off + col_hist +
             + bothinE + oneinE | year + iso_o_yr + iso_d_yr,
           data = subset(dat, flow>0 & year %in% 1970:1990))
summary(m22)
# C) Within with year fixed effects
# generate country pair
dat$pairid = paste0(dat$iso_o, dat$iso_d)
# NOTE(review): m3 is also defined twice (the second adds country dummies)
# and the second definition overwrites the first
m3 = plm(log(flow)~ factor(year) +
           log(distw) + contig + comlang_off + col_hist +
           bothinE + oneinE,
         model = "within",
         index=c("pairid", "year"),
         data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m3)
m3 = plm(log(flow)~ factor(year) + factor(iso_o) + factor(iso_d) +
           log(distw) + contig + comlang_off + col_hist +
           bothinE + oneinE,
         model = "within",
         index=c("pairid", "year"),
         data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m3)
# D) Within with year fixed effects and time varying country fixed effects
# Note: this takes forever to run
m4 = plm(log(flow)~ factor(iso_o_yr)+factor(iso_d_yr)+factor(year)+
           log(distw) + contig + comlang_off + col_hist +
           bothinE + oneinE,
         model = "within",
         index=c("pairid", "year"),
         data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m4)
## double demeaning
# NOTE(review): m44 is defined twice (the second drops the gravity controls);
# the second definition overwrites the first
m44 = felm(log(flow)~ log(distw) + contig + comlang_off + col_hist +
             bothinE + oneinE | year + iso_o_yr + iso_d_yr,
           data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m44)
m44 = felm(log(flow)~ log(distw) +
             bothinE + oneinE | year + iso_o_yr + iso_d_yr,
           data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m44)
# E) Within with year fixed effects and GDP controls
m5 = plm(log(flow)~ factor(year)+ log(gdp_o)+log(gdp_d)+
           log(distw) + contig + comlang_off + col_hist +
           bothinE + oneinE,
         model = "within",
         index=c("pairid", "year"),
         data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m5)
##Change working directory
path=setwd(results)
## 2.b Present results
# Comparison tables of the panel models; keep= restricts the displayed
# coefficients to the ECOWAS dummies
stargazer(m1, m3, m44, header=FALSE, type='text',
          title = "Table 1: Gravity models of trade",
          no.space = TRUE,
          keep = c("bothinE","oneinE"),
          out="model2.txt",
          omit.stat= c("adj.rsq", "f", "ser"))
stargazer(m1,m3,m5, header=FALSE, type='text',
          title = "Table 2 : Panel regressions",
          no.space = TRUE,
          align = TRUE,
          column.labels=c("OLS with imp,exp and year FE",
                          "Within with year FE",
                          "Within with year FE and GDP controls"),
          dep.var.labels=c("Coefficients"),
          keep = c("bothinE","oneinE"),
          out="model3.txt",
          omit.stat= c("adj.rsq", "f", "ser"))
stargazer(m1,m11,m22,m3,m5, header=FALSE, type='text',
          title = "Table 3 : Regressions",
          no.space = TRUE,
          align = TRUE,
          dep.var.labels=c("Coefficients"),
          keep=c( "gdp_o","gdp_d",
                  "distw","col_cur","contig", "comlang_off", "col_hist",
                  "bothinE","oneinE"),
          out="model4.txt",
          omit.stat= c("adj.rsq", "f", "ser"))
##Change working directory
results="/Users/albertosei-owusu/Desktop/Data/Gravity/Tables"
path=setwd(results)
## 2.b Present results
# Same models again with shorter column labels, written to different files
stargazer(m1, m3, m44, header=FALSE, type='text',
          title = "Table 1: Gravity models of trade",
          align = TRUE,
          column.labels=c("imp,exp and year FE",
                          "year FE",
                          "year FE and time varying country FE"),
          dep.var.labels=c("Coefficients"),
          keep = c("bothinE","oneinE"),
          out="models.txt",
          omit.stat= c("adj.rsq", "f", "ser"))
stargazer(m1,m3,m5, header=FALSE, type='text',
          title = "Table 2 : Panel regressions",
          no.space = TRUE,
          align = TRUE,
          column.labels=c("imp,exp and year FE",
                          "year FE",
                          "year FE and GDP controls"),
          dep.var.labels=c("Coefficients"),
          keep = c("bothinE","oneinE"),
          out="model1.txt",
          omit.stat= c("adj.rsq", "f", "ser"))
#############################################################
### 3. Evolution of trade creation and diversion ###
#############################################################
## 3.a Introduce interaction terms
# Year-by-year ECOWAS effects via year x dummy interactions
m6 = plm(log(flow)~ log(distw) + contig + comlang_off + col_hist +
           factor(year) +
           factor(year):bothinE +
           factor(year):oneinE,
         model = "within",
         index=c("pairid", "year"),
         data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m6)
# NOTE(review): dep.var.labels "Property sales prices" looks copy-pasted
# from an unrelated project — the dependent variable here is log trade flow
stargazer(m6, title="Results",
          dep.var.labels="Property sales prices",
          style="aer",
          align=TRUE,
          type = "text")
## 3.b Plot results
# NOTE(review): the coefficient index ranges below are hard-coded positions
# of the interaction terms in coef(m6) — fragile; confirm against summary(m6)
plot(1975:2005, (coef(m6)[36:66]+coef(m6)[5:35]), type = "b",
     xlab = "year",
     ylab = "bothinE coefficient",
     main = "Evolution of the bothinE coefficient")
plot(1975:2005, (coef(m6)[67:97]+coef(m6)[5:35]), type = "b",
     xlab = "year",
     ylab = "oneinE coefficient",
     main = "Evolution of the oneinE coefficient")
#############################################################
###            4. Export diversion                        ###
#############################################################
## 4.a Generate ECOWAS extra-export dummy: origin is an ECOWAS member,
## destination is not, from 1975 (treaty year) onward.
dat$oneinE1 = ifelse(dat$iso_o %in% ecowasset &
                       !(dat$iso_d %in% ecowasset) &
                       dat$year >= 1975, 1, 0)
# Run regression
# E) Within with year fixed effects and GDP controls
m7 = plm(log(flow)~ factor(year)+ log(gdp_o)+log(gdp_d)+
           log(distw) + contig + comlang_off + col_hist +
           bothinE + oneinE + oneinE1,
         model = "within",
         index=c("pairid", "year"),
         data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m7)
## 4.b Descriptive series: total ECOWAS exports to the rest of the world
# (sums flow over all ECOWAS-origin, non-ECOWAS-destination pairs by year)
eco1 = select(subset(dat, iso_o %in% ecowasset &
                       !(dat$iso_d %in% ecowasset)),
              year, flow)
eco2 = aggregate(flow ~ ., data = eco1, sum)
# Building the plot (using the ggplot2 package); the vertical line marks
# the 1975 ECOWAS treaty.
ggplot(eco2, aes(year, log(flow))) +
  geom_line() +
  geom_point() +
  ylab("trade flow") +
  geom_vline(xintercept = 1975) +
  ggtitle("ECOWAS exports to ROW")
######################################################
# F) OLS with importer, exporter and year fixed effects,
#    now including the export-diversion dummy oneinE1.
m8 = lm(log(flow)~factor(iso_o)+factor(iso_d)+factor(year)+
          log(distw) + contig + comlang_off + col_hist +
          bothinE + oneinE + oneinE1 ,
        data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m8)
# G) OLS with time varying importer, exporter fixed effects
# and year fixed effects
# Generate time varying importer/exporter dummies (e.g. "GHA1975")
dat$iso_o_yr = paste0(dat$iso_o, dat$year)
dat$iso_d_yr = paste0(dat$iso_d, dat$year)
m9 = lm(log(flow)~factor(iso_o_yr)+factor(iso_d_yr)+factor(year)+
          log(distw) + bothinE + oneinE + oneinE1 ,
        data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m9)
## double demeaning via lfe::felm (fixed effects absorbed after "|")
m99 = felm(log(flow)~log(distw) + contig + comlang_off + col_hist +
             bothinE + oneinE + oneinE1 | year + iso_o_yr + iso_d_yr,
           data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m99)
# NOTE(review): m99 is reassigned here; the second, sparser specification
# (distance only) overwrites the first and is what later stargazer calls see.
m99 = felm(log(flow)~log(distw)+
             bothinE + oneinE + oneinE1 | year + iso_o_yr + iso_d_yr,
           data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m99)
# H) Within with year fixed effects
# generate country pair identifier used as the panel index
dat$pairid = paste0(dat$iso_o, dat$iso_d)
m10 = plm(log(flow)~ factor(year) +
            log(distw) + contig + comlang_off + col_hist +
            bothinE + oneinE + oneinE1 ,
          model = "within",
          index=c("pairid", "year"),
          data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m10)
## Change working directory so stargazer's `out=` file lands in Tables/
results="/Users/albertosei-owusu/Desktop/Data/Gravity/Tables"
# NOTE(review): setwd() returns the previous directory, so `path` now holds
# the old working directory rather than `results`.
path=setwd(results)
## Present results: creation (bothinE), import diversion (oneinE) and
## export diversion (oneinE1) across the four estimators above.
stargazer(m8,m99,m10,m7, header=FALSE, type='text',
          title = "Table 3 : Panel regressions",
          no.space = TRUE,
          align = TRUE,
          column.labels=c("imp,exp and year","vimp,vexp and year",
                          "year FE",
                          "year FE and GDP controls"),
          dep.var.labels=c("Coefficients"),
          keep = c("bothinE","oneinE", "oneinE1"),
          out="model3.txt",
          omit.stat= c("adj.rsq", "f", "ser"))
## Introduce interaction terms: year-specific effects for all three
## dummies (creation, import diversion, export diversion).
m11 = plm(log(flow)~ log(distw) + col_cur + contig + comlang_off + col_hist +
            factor(year) +
            factor(year):bothinE+
            factor(year):oneinE+
            factor(year):oneinE1,
          model = "within",
          index=c("pairid", "year"),
          data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m11)
## 3.b Plot results
## Three side-by-side panels, one per dummy. The coefficient index ranges
## (year FE at [6:36], bothinE:year [37:67], oneinE:year [68:98],
## oneinE1:year [99:129]) are hard-coded to m11's exact formula; changing
## any regressor silently shifts them.
par(mfrow=c(1,3))
plot(1975:2005, (coef(m11)[37:67]+coef(m11)[6:36]), type = "b",
     xlab = "year",
     ylab = "bothinE coefficient",
     main = "Evolution of the bothinE coefficient")
plot(1975:2005, (coef(m11)[68:98]+ coef(m11)[6:36]), type = "b",
     xlab = "year",
     ylab = "oneinE coefficient",
     main = "Evolution of the oneinE coefficient")
plot(1975:2005, (coef(m11)[99:129]+ coef(m11)[6:36]), type = "b",
     xlab = "year",
     ylab = "oneinE1 coefficient",
     main = "Evolution of the onein1E coefficient")
### alt: collect the three yearly coefficient series in one data frame
dat4 <- data.frame( year=c(1975:2005),
                    bothinE = c(coef(m11)[37:67]+coef(m11)[6:36]),
                    oneinE = c(coef(m11)[68:98]+coef(m11)[6:36]),
                    oneinE1 =c(coef(m11)[99:129]+coef(m11)[6:36]))
## transposing dataframe (interactive inspection only)
dat5 <- t(dat4)
View(dat5)
## Multiple-line plot of the trade creation and diversion series from dat4.
## FIX 1: the original wrapped plot() inside lines(); plot() draws the first
## series itself and returns NULL, so lines(plot(...)) is wrong.
## FIX 2: type = "bl" is not a valid plot type ("p", "l", "b", ...); "l"
## draws the intended line for the oneinE1 series.
plot(dat4$year, dat4$bothinE, type = "l",
     xlab = "year", ylim=c(0.864,2.792),
     ylab = "coefficients",
     main = "Evolution of trade creation and trade diversion",
     col= "red")
lines(dat4$year,dat4$oneinE,type="l",col="blue")
lines(dat4$year,dat4$oneinE1,type="l",col="black")
## NOTE(review): red draws the bothinE (both-members) series but is labelled
## "Import diversion", while blue (oneinE, imports into ECOWAS) is labelled
## "Trade creation" -- confirm the colour/label pairing against the thesis.
legend("topleft",
       border="black",col=c("red","blue","black") ,
       lty=c(1,1,1),  # one solid line per series (was length-2, recycled)
       legend=c("Import diversion","Trade creation", "Export Diversion"),
       bg ="white")
# alt placement (running both calls overlays two legends on the same plot)
legend("bottomright",
       border="black",col=c("red","blue","black") ,
       lty=c(1,1,1),
       legend=c("Import diversion","Trade creation", "Export diversion"),
       bg ="white")
#############################################################
###           5. The Average RTA effect                   ###
#############################################################
## 5.a
# RTA dummy
## Regional trade agreements: pooled OLS with GDP controls and the `rta`
## dummy from the CEPII data.
m14 = lm(log(flow)~log(gdp_o)+log(gdp_d)+
           log(distw)+contig+comlang_off+
           col_hist+col_cur+rta,
         data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m14)
# J) OLS with importer, exporter and year fixed effects
# NOTE(review): this reassigns m14, so the pooled specification above is
# overwritten and unavailable afterwards.
m14 = lm(log(flow)~factor(iso_o)+factor(iso_d)+factor(year)+
           log(distw)+contig+comlang_off+
           col_hist+col_cur+rta,
         data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m14)
# K) OLS with time varying importer, exporter fixed effects
# and year fixed effects
# Generate time varying importer/exporter dummies
dat$iso_o_yr = paste0(dat$iso_o, dat$year)
dat$iso_d_yr = paste0(dat$iso_d, dat$year)
m15 = lm(log(flow)~factor(iso_o_yr)+factor(iso_d_yr)+factor(year)+
           log(distw)+contig+comlang_off+
           col_hist+col_cur+rta,
         data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m15)
# H) Within with year fixed effects
# generate country pair identifier used as the panel index
dat$pairid = paste0(dat$iso_o, dat$iso_d)
m16 = plm(log(flow)~ factor(year) +
            log(distw)+contig+comlang_off+
            col_hist+col_cur+rta,
          model = "within",
          index=c("pairid", "year"),
          data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m16)
# I) Within with year fixed effects and time varying country fixed effects
# Note: this takes forever to run (thousands of iso_o_yr/iso_d_yr dummies
# on top of the pair demeaning)
m17 = plm(log(flow)~ factor(iso_o_yr)+factor(iso_d_yr)+factor(year)+
            log(distw)+contig+comlang_off+
            col_hist+col_cur+rta,
          model = "within",
          index=c("pairid", "year"),
          data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m17)
## Present results
## NOTE(review): despite sitting in the RTA section, this call tabulates the
## earlier export-diversion models m8/m10/m7 (not m14-m17), and the keep
## patterns "bothin"/"onein" match all three ECOWAS dummies by prefix. It
## also rewrites model2.txt. Confirm this is the intended table.
stargazer(m8,m10,m7, header=FALSE, type='text',
          title = "Table 2 : Panel regressions",
          no.space = TRUE,
          align = TRUE,
          column.labels=c("imp,exp and year FE",
                          "year FE",
                          "year FE and GDP controls"),
          dep.var.labels=c("Coefficients"),
          keep = c("bothin","onein"),
          out="model2.txt",
          omit.stat= c("adj.rsq", "f", "ser"))
# 5.c
##### PPML estimator ####
# PPML regresses flow in *levels* (zeros retained: no flow>0 filter),
# following Santos Silva & Tenreyro; `family = poisson` and
# `family = poisson()` are equivalent, so each duplicated fit below simply
# overwrites the previous identical one.
# A) PPML without importer, exporter and year fixed effects
m20 = glm(flow~ log(gdp_o) + log(gdp_d) +
            log(distw)+col_cur+contig+comlang_off+col_hist+
            bothinE + oneinE,
          data = subset(dat,year %in% 1970:2005), family = poisson())
summary(m20)
# B) PPML with importer, exporter and year fixed effects
m21 = glm(flow~factor(iso_o)+factor(iso_d)+factor(year)+
            log(distw)+col_cur+contig+comlang_off+col_hist+
            bothinE + oneinE,
          data = subset(dat,year %in% 1970:2005), family = poisson)
summary(m21)
# (duplicate of the fit above; result is identical)
m21 = glm(flow~factor(iso_o)+factor(iso_d)+factor(year)+
            log(distw)+col_cur+contig+comlang_off+col_hist+
            bothinE + oneinE,
          data = subset(dat,year %in% 1970:2005), family = poisson())
summary(m21)
# C) PPML with time varying importer, exporter fixed effects
# and year fixed effects
# Generate time varying importer/exporter dummies
dat$iso_o_yr = paste0(dat$iso_o, dat$year)
dat$iso_d_yr = paste0(dat$iso_d, dat$year)
m22 = glm(flow~factor(iso_o_yr)+factor(iso_d_yr)+factor(year)+
            log(distw) + col_cur + contig + comlang_off + col_hist +
            + bothinE + oneinE,
          data = subset(dat,year %in% 1970:2005), family = poisson())
summary(m22)
# (duplicate of the fit above; result is identical)
m22 = glm(flow~factor(iso_o_yr)+factor(iso_d_yr)+factor(year)+
            log(distw) + col_cur + contig + comlang_off + col_hist +
            + bothinE + oneinE,
          data = subset(dat,year %in% 1970:2005), family = poisson)
summary(m22)
# D) Within (country-pair fixed effects) with year fixed effects
# generate country pair identifier used as the panel index
dat$pairid = paste0(dat$iso_o, dat$iso_d)
# FIX: this model was written as glm(), but glm() has no `index` argument
# and its `model` argument is a logical flag (whether to return the model
# frame), so the original call could not run. The comment and the
# `model = "within"` / `index =` arguments show a panel within estimator
# was intended, matching m10/m16 earlier, so fit it with plm().
m23 = plm(log(flow)~ factor(year) +
            log(distw) + col_cur + contig + comlang_off + col_hist +
            bothinE + oneinE,
          model = "within",
          index=c("pairid", "year"),
          data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m23)
## End of script | /Regressions.R | no_license | Allisterh/Master-thesis | R | false | false | 20,598 | r |
#############################################################
### R-code for master thesis 1, 22.03.2017
### Written by Albert Kwame Osei-Owusu
#############################################################
### Initial stuff                                         ###
# NOTE(review): rm(list=ls()) wipes the interactive workspace; fine for a
# standalone script, hostile when sourced from another session.
rm(list=ls())
## setting path to library
.libPaths("/Users/albertosei-owusu/Desktop/R Packages")
# load packages
#library(grid)
#library(gridExtra)
#library(cowplot)
# NOTE(review): "ggrepel" appears twice in this vector (harmless), and
# require() returns FALSE instead of erroring when a package is missing,
# so a failed load here goes unnoticed.
pack<-c("car","sandwich","ggplot2","lmtest","ggrepel","RColorBrewer","plm",
        "dplyr","mgcv","foreign","xtable","AER","stargazer",
        "ggrepel","lfe","gridExtra","cowplot")
lapply(pack, require, character.only=T)
# specify the path of the folder containing the dataset
path <- "/Users/albertosei-owusu/Desktop/Data/Gravity/"
setwd(path)
##specify the path of the folder containing the results of regressions
results="/Users/albertosei-owusu/Desktop/Data/Gravity/Tables"
#############################################################
###                1. Preliminaries                       ###
#############################################################
## 1.a Load dataset and create dummies
# CEPII gravity dataset (Stata format), read via foreign::read.dta
dat = read.dta(paste0(path,"col_regfile09.dta"))
## 2.a Run regression
# ISO3 codes of the 15 ECOWAS member states
ecowasset = c("BEN", "BFA", "CPV","CIV", "GMB", "GHA", "GIN", "GNB",
              "LBR","MLI","NGA", "SEN", "SLE", "TGO","NER")
##Change working directory
results="/Users/albertosei-owusu/Desktop/Data/Gravity/Tables"
# NOTE(review): setwd() returns the previous directory, so `path` is
# reassigned to the old Gravity/ folder here, not to `results`.
path=setwd(results)
#############################################################
###      2. Average trade creation and diversion          ###
#############################################################
# Note. It might be a good idea to consider a subset of the
# data. E.g. choose only the data from the 1985-2000 period
## 2.a Run regression
# bothinE: both partners are ECOWAS members, from the 1975 treaty onward
dat$bothinE = ifelse(dat$iso_o %in% ecowasset &
                       dat$iso_d %in% ecowasset &
                       dat$year >= 1975, 1, 0)
# oneinE: non-member exports to an ECOWAS member (imports into the bloc)
dat$oneinE = ifelse(!(dat$iso_o %in% ecowasset) &
                      dat$iso_d %in% ecowasset &
                      dat$year >= 1975, 1, 0)
# NOTE(review): `comesa` is never defined anywhere in this script, so these
# two assignments will error at runtime; define the COMESA membership
# vector (as done for ecowasset above) or remove these lines.
dat$bothinC = ifelse(dat$iso_o %in% comesa &
                       dat$iso_d %in% comesa &
                       dat$year >= 1982, 1, 0)
dat$oneinC = ifelse(!(dat$iso_o %in% comesa) &
                      dat$iso_d %in% comesa &
                      dat$year >= 1982, 1, 0)
##Change working directory
results="/Users/albertosei-owusu/Desktop/Data/Gravity/Tables"
path=setwd(results)
#
# Cross-sectional OLS gravity regressions, one per selected year, with the
# standard gravity controls plus the ECOWAS dummies (positive flows only).
ols1 = lm(log(flow)~log(gdp_o)+log(gdp_d)+log(distw)+
            contig+comlang_off+col_hist+bothinE+oneinE,
          data = subset(dat, year == 1985 & flow > 0))
summary(ols1)
ols2 = lm(log(flow)~log(gdp_o)+log(gdp_d)+log(distw)+
            contig+comlang_off+col_hist+bothinE+oneinE,
          data = subset(dat, year == 1990 & flow > 0))
summary(ols2)
ols3 = lm(log(flow)~log(gdp_o)+log(gdp_d)+log(distw)+
            contig+comlang_off+col_hist+bothinE+oneinE,
          data = subset(dat, year == 1995 & flow > 0))
summary(ols3)
ols4 = lm(log(flow)~log(gdp_o)+log(gdp_d)+log(distw)+
            contig+comlang_off+col_hist+bothinE+oneinE,
          data = subset(dat, year == 2000 & flow > 0))
summary(ols4)
ols5 = lm(log(flow)~log(gdp_o)+log(gdp_d)+log(distw)+
            contig+comlang_off+col_hist+bothinE+oneinE,
          data = subset(dat, year == 2005 & flow > 0))
summary(ols5)
## Presenting results for selected years
## Table 1: labelled coefficients for the five cross-sectional OLS fits.
stargazer(ols1, ols2,ols3,ols4,ols5, header=FALSE, type='text',
          title = "Table 1 : Simple Cross-sectional OLS estimation",
          no.space = TRUE,
          align = TRUE,
          column.labels=c("1985","1990","1995","2000","2005"),
          dep.var.labels=c("Trade flow"),
          covariate.labels=c("Exporter income","Importer income","Distance",
                             "Border","Common language",
                             "Colonial history","ECOWAS","OneinE"),
          out="ols_model1.txt",
          omit.stat= c("adj.rsq", "f", "ser"))
## Table 2: same models with default covariate names.
stargazer(ols1, ols2,ols3,ols4,ols5, header=FALSE, type='text',
          title = "Table 2 : Simple Cross-sectional OLS estimation",
          no.space = TRUE,
          align = TRUE,
          column.labels=c("1985","1990","1995","2000","2005"),
          dep.var.labels=c("Coefficients"),
          out="ols_model2.txt",
          omit.stat= c("adj.rsq", "f", "ser"))
# A) OLS with importer, exporter and year fixed effects
m1 = lm(log(flow)~factor(iso_o)+factor(iso_d)+factor(year)+
log(distw)+col_cur+contig+comlang_off+col_hist+
bothinE + oneinE,
data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m1)
## double demeaning
m11 = felm(log(flow)~log(gdp_o)+log(gdp_d)+log(distw)+
col_cur+contig+comlang_off+col_hist+
bothinE + oneinE | year + iso_o + iso_d,
data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m11)
# B) OLS with time varying importer, exporter fixed effects
# and year fixed effects
# Generate time varying importer/exporter dummies
dat$iso_o_yr = paste0(dat$iso_o, dat$year)
dat$iso_d_yr = paste0(dat$iso_d, dat$year)
m2 = lm(log(flow)~factor(iso_o_yr)+factor(iso_d_yr)+factor(year)+
log(distw) + contig + comlang_off + col_hist +
+ bothinE + oneinE,
data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m2)
m2 = lm(log(flow)~factor(iso_o_yr)+factor(iso_d_yr)+factor(year)+
log(distw) + contig + comlang_off + col_hist +
+ bothinE + oneinE,
data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m2)
## double demeaning
m22 = felm(log(flow)~log(distw)+
+ contig + comlang_off + col_hist +
+ bothinE + oneinE | year + iso_o_yr + iso_d_yr,
data = subset(dat, flow>0 & year %in% 1970:2000))
summary(m22)
m22 = felm(log(flow)~log(distw)+
+ contig + comlang_off + col_hist +
+ bothinE + oneinE | year + iso_o_yr + iso_d_yr,
data = subset(dat, flow>0 & year %in% 1970:1990))
summary(m22)
# C) Within with year fixed effects
# generate country pair
dat$pairid = paste0(dat$iso_o, dat$iso_d)
m3 = plm(log(flow)~ factor(year) +
log(distw) + contig + comlang_off + col_hist +
bothinE + oneinE,
model = "within",
index=c("pairid", "year"),
data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m3)
m3 = plm(log(flow)~ factor(year) + factor(iso_o) + factor(iso_d) +
log(distw) + contig + comlang_off + col_hist +
bothinE + oneinE,
model = "within",
index=c("pairid", "year"),
data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m3)
# D) Within with year fixed effects and time varying country fixed effects
# Note: this takes forever to run
m4 = plm(log(flow)~ factor(iso_o_yr)+factor(iso_d_yr)+factor(year)+
log(distw) + contig + comlang_off + col_hist +
bothinE + oneinE,
model = "within",
index=c("pairid", "year"),
data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m4)
## double demeaning
m44 = felm(log(flow)~ log(distw) + contig + comlang_off + col_hist +
bothinE + oneinE | year + iso_o_yr + iso_d_yr,
data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m44)
m44 = felm(log(flow)~ log(distw) +
bothinE + oneinE | year + iso_o_yr + iso_d_yr,
data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m44)
# E) Within with year fixed effects and GDP controls
m5 = plm(log(flow)~ factor(year)+ log(gdp_o)+log(gdp_d)+
log(distw) + contig + comlang_off + col_hist +
bothinE + oneinE,
model = "within",
index=c("pairid", "year"),
data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m5)
##Change working directory
path=setwd(results)
## 2.b Present results
stargazer(m1, m3, m44, header=FALSE, type='text',
title = "Table 1: Gravity models of trade",
no.space = TRUE,
keep = c("bothinE","oneinE"),
out="model2.txt",
omit.stat= c("adj.rsq", "f", "ser"))
stargazer(m1,m3,m5, header=FALSE, type='text',
title = "Table 2 : Panel regressions",
no.space = TRUE,
align = TRUE,
column.labels=c("OLS with imp,exp and year FE",
"Within with year FE",
"Within with year FE and GDP controls"),
dep.var.labels=c("Coefficients"),
keep = c("bothinE","oneinE"),
out="model3.txt",
omit.stat= c("adj.rsq", "f", "ser"))
stargazer(m1,m11,m22,m3,m5, header=FALSE, type='text',
title = "Table 3 : Regressions",
no.space = TRUE,
align = TRUE,
dep.var.labels=c("Coefficients"),
keep=c( "gdp_o","gdp_d",
"distw","col_cur","contig", "comlang_off", "col_hist",
"bothinE","oneinE"),
out="model4.txt",
omit.stat= c("adj.rsq", "f", "ser"))
##Change working directory
results="/Users/albertosei-owusu/Desktop/Data/Gravity/Tables"
path=setwd(results)
## 2.b Present results
stargazer(m1, m3, m44, header=FALSE, type='text',
title = "Table 1: Gravity models of trade",
align = TRUE,
column.labels=c("imp,exp and year FE",
"year FE",
"year FE and time varying country FE"),
dep.var.labels=c("Coefficients"),
keep = c("bothinE","oneinE"),
out="models.txt",
omit.stat= c("adj.rsq", "f", "ser"))
stargazer(m1,m3,m5, header=FALSE, type='text',
title = "Table 2 : Panel regressions",
no.space = TRUE,
align = TRUE,
column.labels=c("imp,exp and year FE",
"year FE",
"year FE and GDP controls"),
dep.var.labels=c("Coefficients"),
keep = c("bothinE","oneinE"),
out="model1.txt",
omit.stat= c("adj.rsq", "f", "ser"))
#############################################################
### 3. Evolution of trade creation and diversion ###
#############################################################
## 3.a Introduce interaction terms
m6 = plm(log(flow)~ log(distw) + contig + comlang_off + col_hist +
factor(year) +
factor(year):bothinE +
factor(year):oneinE,
model = "within",
index=c("pairid", "year"),
data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m6)
stargazer(m6, title="Results",
dep.var.labels="Property sales prices",
style="aer",
align=TRUE,
type = "text")
## 3.b Plot results
plot(1975:2005, (coef(m6)[36:66]+coef(m6)[5:35]), type = "b",
xlab = "year",
ylab = "bothinE coefficient",
main = "Evolution of the bothinE coefficient")
plot(1975:2005, (coef(m6)[67:97]+coef(m6)[5:35]), type = "b",
xlab = "year",
ylab = "oneinE coefficient",
main = "Evolution of the oneinE coefficient")
#############################################################
### 4. Export diversion ###
#############################################################
## 4.a Generate ECOWAS extra export dummy
dat$oneinE1 = ifelse(dat$iso_o %in% ecowasset &
!(dat$iso_d %in% ecowasset) &
dat$year >= 1975, 1, 0)
# Run regression
# E) Within with year fixed effects and GDP controls
m7 = plm(log(flow)~ factor(year)+ log(gdp_o)+log(gdp_d)+
log(distw) + contig + comlang_off + col_hist +
bothinE + oneinE + oneinE1,
model = "within",
index=c("pairid", "year"),
data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m7)
## 4.b
# ECOWAS export to ROW
eco1 = select(subset(dat, iso_o %in% ecowasset &
!(dat$iso_d %in% ecowasset)),
year, flow)
eco2 = aggregate(flow ~ ., data = eco1, sum)
# Building the plot (using the ggplot2 package)
ggplot(eco2, aes(year, log(flow))) +
geom_line() +
geom_point() +
ylab("trade flow") +
geom_vline(xintercept = 1975) +
ggtitle("ECOWAS exports to ROW")
######################################################
# F) OLS with importer, exporter and year fixed effects
m8 = lm(log(flow)~factor(iso_o)+factor(iso_d)+factor(year)+
log(distw) + contig + comlang_off + col_hist +
bothinE + oneinE + oneinE1 ,
data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m8)
# G) OLS with time varying importer, exporter fixed effects
# and year fixed effects
# Generate time varying importer/exporter dummies
dat$iso_o_yr = paste0(dat$iso_o, dat$year)
dat$iso_d_yr = paste0(dat$iso_d, dat$year)
m9 = lm(log(flow)~factor(iso_o_yr)+factor(iso_d_yr)+factor(year)+
log(distw) + bothinE + oneinE + oneinE1 ,
data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m9)
## double demeaning
m99 = felm(log(flow)~log(distw) + contig + comlang_off + col_hist +
bothinE + oneinE + oneinE1 | year + iso_o_yr + iso_d_yr,
data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m99)
m99 = felm(log(flow)~log(distw)+
bothinE + oneinE + oneinE1 | year + iso_o_yr + iso_d_yr,
data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m99)
# H) Within with year fixed effects
# generate country pair
dat$pairid = paste0(dat$iso_o, dat$iso_d)
m10 = plm(log(flow)~ factor(year) +
log(distw) + contig + comlang_off + col_hist +
bothinE + oneinE + oneinE1 ,
model = "within",
index=c("pairid", "year"),
data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m10)
##Change working directory
results="/Users/albertosei-owusu/Desktop/Data/Gravity/Tables"
path=setwd(results)
## Present results
stargazer(m8,m99,m10,m7, header=FALSE, type='text',
title = "Table 3 : Panel regressions",
no.space = TRUE,
align = TRUE,
column.labels=c("imp,exp and year","vimp,vexp and year",
"year FE",
"year FE and GDP controls"),
dep.var.labels=c("Coefficients"),
keep = c("bothinE","oneinE", "oneinE1"),
out="model3.txt",
omit.stat= c("adj.rsq", "f", "ser"))
## Introduce interaction terms
m11 = plm(log(flow)~ log(distw) + col_cur + contig + comlang_off + col_hist +
factor(year) +
factor(year):bothinE+
factor(year):oneinE+
factor(year):oneinE1,
model = "within",
index=c("pairid", "year"),
data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m11)
## 3.b Plot results
par(mfrow=c(1,3))
plot(1975:2005, (coef(m11)[37:67]+coef(m11)[6:36]), type = "b",
xlab = "year",
ylab = "bothinE coefficient",
main = "Evolution of the bothinE coefficient")
plot(1975:2005, (coef(m11)[68:98]+ coef(m11)[6:36]), type = "b",
xlab = "year",
ylab = "oneinE coefficient",
main = "Evolution of the oneinE coefficient")
plot(1975:2005, (coef(m11)[99:129]+ coef(m11)[6:36]), type = "b",
xlab = "year",
ylab = "oneinE1 coefficient",
main = "Evolution of the onein1E coefficient")
### alt
dat4 <- data.frame( year=c(1975:2005),
bothinE = c(coef(m11)[37:67]+coef(m11)[6:36]),
oneinE = c(coef(m11)[68:98]+coef(m11)[6:36]),
oneinE1 =c(coef(m11)[99:129]+coef(m11)[6:36]))
## transposing dataframe
dat5 <- t(dat4)
View(dat5)
## Multiple-line plot of the trade creation and diversion series from dat4.
## FIX 1: the original wrapped plot() inside lines(); plot() draws the first
## series itself and returns NULL, so lines(plot(...)) is wrong.
## FIX 2: type = "bl" is not a valid plot type ("p", "l", "b", ...); "l"
## draws the intended line for the oneinE1 series.
plot(dat4$year, dat4$bothinE, type = "l",
     xlab = "year", ylim=c(0.864,2.792),
     ylab = "coefficients",
     main = "Evolution of trade creation and trade diversion",
     col= "red")
lines(dat4$year,dat4$oneinE,type="l",col="blue")
lines(dat4$year,dat4$oneinE1,type="l",col="black")
## NOTE(review): red draws the bothinE (both-members) series but is labelled
## "Import diversion", while blue (oneinE, imports into ECOWAS) is labelled
## "Trade creation" -- confirm the colour/label pairing against the thesis.
legend("topleft",
       border="black",col=c("red","blue","black") ,
       lty=c(1,1,1),  # one solid line per series (was length-2, recycled)
       legend=c("Import diversion","Trade creation", "Export Diversion"),
       bg ="white")
# alt placement (running both calls overlays two legends on the same plot)
legend("bottomright",
       border="black",col=c("red","blue","black") ,
       lty=c(1,1,1),
       legend=c("Import diversion","Trade creation", "Export diversion"),
       bg ="white")
#############################################################
### 5. The Average RTA effect ###
#############################################################
## 5.a
# RTA dummy
## Regional trade agreements
m14 = lm(log(flow)~log(gdp_o)+log(gdp_d)+
log(distw)+contig+comlang_off+
col_hist+col_cur+rta,
data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m14)
# J) OLS with importer, exporter and year fixed effects
m14 = lm(log(flow)~factor(iso_o)+factor(iso_d)+factor(year)+
log(distw)+contig+comlang_off+
col_hist+col_cur+rta,
data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m14)
# K) OLS with time varying importer, exporter fixed effects
# and year fixed effects
# Generate time varying importer/exporter dummies
dat$iso_o_yr = paste0(dat$iso_o, dat$year)
dat$iso_d_yr = paste0(dat$iso_d, dat$year)
m15 = lm(log(flow)~factor(iso_o_yr)+factor(iso_d_yr)+factor(year)+
log(distw)+contig+comlang_off+
col_hist+col_cur+rta,
data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m15)
# H) Within with year fixed effects
# generate country pair
dat$pairid = paste0(dat$iso_o, dat$iso_d)
m16 = plm(log(flow)~ factor(year) +
log(distw)+contig+comlang_off+
col_hist+col_cur+rta,
model = "within",
index=c("pairid", "year"),
data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m16)
# I) Within with year fixed effects and time varying country fixed effects
# Note: this takes forever to run
m17 = plm(log(flow)~ factor(iso_o_yr)+factor(iso_d_yr)+factor(year)+
log(distw)+contig+comlang_off+
col_hist+col_cur+rta,
model = "within",
index=c("pairid", "year"),
data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m17)
## Present results
stargazer(m8,m10,m7, header=FALSE, type='text',
title = "Table 2 : Panel regressions",
no.space = TRUE,
align = TRUE,
column.labels=c("imp,exp and year FE",
"year FE",
"year FE and GDP controls"),
dep.var.labels=c("Coefficients"),
keep = c("bothin","onein"),
out="model2.txt",
omit.stat= c("adj.rsq", "f", "ser"))
# 5.c
##### PPML estimator ####
# A) PPML without importer, exporter and year fixed effects
m20 = glm(flow~ log(gdp_o) + log(gdp_d) +
log(distw)+col_cur+contig+comlang_off+col_hist+
bothinE + oneinE,
data = subset(dat,year %in% 1970:2005), family = poisson())
summary(m20)
# B) PPML with importer, exporter and year fixed effects
m21 = glm(flow~factor(iso_o)+factor(iso_d)+factor(year)+
log(distw)+col_cur+contig+comlang_off+col_hist+
bothinE + oneinE,
data = subset(dat,year %in% 1970:2005), family = poisson)
summary(m21)
m21 = glm(flow~factor(iso_o)+factor(iso_d)+factor(year)+
log(distw)+col_cur+contig+comlang_off+col_hist+
bothinE + oneinE,
data = subset(dat,year %in% 1970:2005), family = poisson())
summary(m21)
# C) PPML with time varying importer, exporter fixed effects
# and year fixed effects
# Generate time varying importer/exporter dummies
dat$iso_o_yr = paste0(dat$iso_o, dat$year)
dat$iso_d_yr = paste0(dat$iso_d, dat$year)
m22 = glm(flow~factor(iso_o_yr)+factor(iso_d_yr)+factor(year)+
log(distw) + col_cur + contig + comlang_off + col_hist +
+ bothinE + oneinE,
data = subset(dat,year %in% 1970:2005), family = poisson())
summary(m22)
m22 = glm(flow~factor(iso_o_yr)+factor(iso_d_yr)+factor(year)+
log(distw) + col_cur + contig + comlang_off + col_hist +
+ bothinE + oneinE,
data = subset(dat,year %in% 1970:2005), family = poisson)
summary(m22)
# D) Within (country-pair fixed effects) with year fixed effects
# generate country pair identifier used as the panel index
dat$pairid = paste0(dat$iso_o, dat$iso_d)
# FIX: this model was written as glm(), but glm() has no `index` argument
# and its `model` argument is a logical flag (whether to return the model
# frame), so the original call could not run. The comment and the
# `model = "within"` / `index =` arguments show a panel within estimator
# was intended, matching m10/m16 earlier, so fit it with plm().
m23 = plm(log(flow)~ factor(year) +
            log(distw) + col_cur + contig + comlang_off + col_hist +
            bothinE + oneinE,
          model = "within",
          index=c("pairid", "year"),
          data = subset(dat, flow>0 & year %in% 1970:2005))
summary(m23)
## End of script |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quicksight_operations.R
\name{quicksight_create_group_membership}
\alias{quicksight_create_group_membership}
\title{Adds an Amazon QuickSight user to an Amazon QuickSight group}
\usage{
quicksight_create_group_membership(
MemberName,
GroupName,
AwsAccountId,
Namespace
)
}
\arguments{
\item{MemberName}{[required] The name of the user that you want to add to the group membership.}
\item{GroupName}{[required] The name of the group that you want to add the user to.}
\item{AwsAccountId}{[required] The ID for the Amazon Web Services account that the group is in.
Currently, you use the ID for the Amazon Web Services account that
contains your Amazon QuickSight account.}
\item{Namespace}{[required] The namespace that you want the user to be a part of.}
}
\description{
Adds an Amazon QuickSight user to an Amazon QuickSight group.
See \url{https://www.paws-r-sdk.com/docs/quicksight_create_group_membership/} for full documentation.
}
\keyword{internal}
| /cran/paws.analytics/man/quicksight_create_group_membership.Rd | permissive | paws-r/paws | R | false | true | 1,046 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quicksight_operations.R
\name{quicksight_create_group_membership}
\alias{quicksight_create_group_membership}
\title{Adds an Amazon QuickSight user to an Amazon QuickSight group}
\usage{
quicksight_create_group_membership(
MemberName,
GroupName,
AwsAccountId,
Namespace
)
}
\arguments{
\item{MemberName}{[required] The name of the user that you want to add to the group membership.}
\item{GroupName}{[required] The name of the group that you want to add the user to.}
\item{AwsAccountId}{[required] The ID for the Amazon Web Services account that the group is in.
Currently, you use the ID for the Amazon Web Services account that
contains your Amazon QuickSight account.}
\item{Namespace}{[required] The namespace that you want the user to be a part of.}
}
\description{
Adds an Amazon QuickSight user to an Amazon QuickSight group.
See \url{https://www.paws-r-sdk.com/docs/quicksight_create_group_membership/} for full documentation.
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ANOVA_exact.R
\name{ANOVA_exact}
\alias{ANOVA_exact}
\alias{ANOVA_exact2}
\title{Simulates an exact dataset (mu, sd, and r represent empirical, not population, mean and covariance matrix) from the design to calculate power}
\usage{
ANOVA_exact(
design_result,
correction = Superpower_options("correction"),
alpha_level = Superpower_options("alpha_level"),
verbose = Superpower_options("verbose"),
emm = Superpower_options("emm"),
emm_model = Superpower_options("emm_model"),
contrast_type = Superpower_options("contrast_type"),
liberal_lambda = Superpower_options("liberal_lambda"),
emm_comp
)
ANOVA_exact2(
design_result,
correction = Superpower_options("correction"),
alpha_level = Superpower_options("alpha_level"),
verbose = Superpower_options("verbose"),
emm = Superpower_options("emm"),
emm_model = Superpower_options("emm_model"),
contrast_type = Superpower_options("contrast_type"),
emm_comp,
liberal_lambda = Superpower_options("liberal_lambda")
)
}
\arguments{
\item{design_result}{Output from the ANOVA_design function}
\item{correction}{Set a correction of violations of sphericity. This can be set to "none", "GG" Greenhouse-Geisser, and "HF" Huynh-Feldt}
\item{alpha_level}{Alpha level used to determine statistical significance}
\item{verbose}{Set to FALSE to not print results (default = TRUE)}
\item{emm}{Set to FALSE to not perform analysis of estimated marginal means}
\item{emm_model}{Set model type ("multivariate", or "univariate") for estimated marginal means}
\item{contrast_type}{Select the type of comparison for the estimated marginal means. Default is pairwise. See ?emmeans::`contrast-methods` for more details on acceptable methods.}
\item{liberal_lambda}{Logical indicator of whether to use the liberal (cohen_f^2\*(num_df+den_df)) or conservative (cohen_f^2\*den_df) calculation of the noncentrality (lambda) parameter estimate. Default is FALSE.}
\item{emm_comp}{Set the comparisons for estimated marginal means comparisons. This is a factor name (a), combination of factor names (a+b), or for simple effects a | sign is needed (a|b)}
}
\value{
Returns dataframe with simulation data (power and effect sizes!), anova results and simple effect results, plot of exact data, and alpha_level. Note: Cohen's f = sqrt(pes/1-pes) and the noncentrality parameter is = f^2*df(error)
\describe{
\item{\code{"dataframe"}}{A dataframe of the simulation result.}
\item{\code{"aov_result"}}{\code{aov} object returned from \code{\link{aov_car}}.}
\item{\code{"aov_result"}}{\code{emmeans} object returned from \code{\link{emmeans}}.}
\item{\code{"main_result"}}{The power analysis results for ANOVA level effects.}
\item{\code{"pc_results"}}{The power analysis results for the pairwise (t-test) comparisons.}
\item{\code{"emm_results"}}{The power analysis results for the estimated marginal means comparisons.}
\item{\code{"manova_results"}}{Default is "NULL". If a within-subjects factor is included, then the power of the multivariate (i.e. MANOVA) analyses will be provided.}
\item{\code{"alpha_level"}}{The alpha level, significance cut-off, used for the power analysis.}
\item{\code{"method"}}{Record of the function used to produce the simulation}
\item{\code{"plot"}}{A plot of the dataframe from the simulation; should closely match the meansplot in \code{\link{ANOVA_design}}}
}
}
\description{
Simulates an exact dataset (mu, sd, and r represent empirical, not population, mean and covariance matrix) from the design to calculate power
}
\section{Functions}{
\itemize{
\item \code{ANOVA_exact2}: An extension of ANOVA_exact that uses the effect sizes calculated from very large sample size empirical simulation. This allows for small sample sizes, where ANOVA_exact cannot, while still accurately estimating power. However, model objects (emmeans and aov) are not included as output, and pairwise (t-test) results are not currently supported.
}}
\section{Warnings}{
Varying the sd or r (e.g., entering multiple values) violates assumptions of homoscedasticity and sphericity respectively
}
\examples{
## Set up a within design with 2 factors, each with 2 levels,
## with correlation between observations of 0.8,
## 40 participants (who do all conditions), and standard deviation of 2
## with a mean pattern of 1, 0, 1, 0, conditions labeled 'condition' and
## 'voice', with names for levels of "cheerful", "sad", and "human", "robot"
design_result <- ANOVA_design(design = "2w*2w", n = 40, mu = c(1, 0, 1, 0),
sd = 2, r = 0.8, labelnames = c("condition", "cheerful",
"sad", "voice", "human", "robot"))
exact_result <- ANOVA_exact(design_result, alpha_level = 0.05)
}
| /man/ANOVA_exact.Rd | permissive | arcaldwell49/Superpower | R | false | true | 4,744 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ANOVA_exact.R
\name{ANOVA_exact}
\alias{ANOVA_exact}
\alias{ANOVA_exact2}
\title{Simulates an exact dataset (mu, sd, and r represent empirical, not population, mean and covariance matrix) from the design to calculate power}
\usage{
ANOVA_exact(
design_result,
correction = Superpower_options("correction"),
alpha_level = Superpower_options("alpha_level"),
verbose = Superpower_options("verbose"),
emm = Superpower_options("emm"),
emm_model = Superpower_options("emm_model"),
contrast_type = Superpower_options("contrast_type"),
liberal_lambda = Superpower_options("liberal_lambda"),
emm_comp
)
ANOVA_exact2(
design_result,
correction = Superpower_options("correction"),
alpha_level = Superpower_options("alpha_level"),
verbose = Superpower_options("verbose"),
emm = Superpower_options("emm"),
emm_model = Superpower_options("emm_model"),
contrast_type = Superpower_options("contrast_type"),
emm_comp,
liberal_lambda = Superpower_options("liberal_lambda")
)
}
\arguments{
\item{design_result}{Output from the ANOVA_design function}
\item{correction}{Set a correction of violations of sphericity. This can be set to "none", "GG" Greenhouse-Geisser, and "HF" Huynh-Feldt}
\item{alpha_level}{Alpha level used to determine statistical significance}
\item{verbose}{Set to FALSE to not print results (default = TRUE)}
\item{emm}{Set to FALSE to not perform analysis of estimated marginal means}
\item{emm_model}{Set model type ("multivariate", or "univariate") for estimated marginal means}
\item{contrast_type}{Select the type of comparison for the estimated marginal means. Default is pairwise. See ?emmeans::`contrast-methods` for more details on acceptable methods.}
\item{liberal_lambda}{Logical indicator of whether to use the liberal (cohen_f^2\*(num_df+den_df)) or conservative (cohen_f^2\*den_df) calculation of the noncentrality (lambda) parameter estimate. Default is FALSE.}
\item{emm_comp}{Set the comparisons for estimated marginal means comparisons. This is a factor name (a), combination of factor names (a+b), or for simple effects a | sign is needed (a|b)}
}
\value{
Returns dataframe with simulation data (power and effect sizes!), anova results and simple effect results, plot of exact data, and alpha_level. Note: Cohen's f = sqrt(pes/(1-pes)) and the noncentrality parameter is = f^2*df(error)
\describe{
\item{\code{"dataframe"}}{A dataframe of the simulation result.}
\item{\code{"aov_result"}}{\code{aov} object returned from \code{\link{aov_car}}.}
\item{\code{"emmeans_result"}}{\code{emmeans} object returned from \code{\link{emmeans}}.}
\item{\code{"main_result"}}{The power analysis results for ANOVA level effects.}
\item{\code{"pc_results"}}{The power analysis results for the pairwise (t-test) comparisons.}
\item{\code{"emm_results"}}{The power analysis results of the pairwise comparison results.}
\item{\code{"manova_results"}}{Default is "NULL". If a within-subjects factor is included, then the power of the multivariate (i.e. MANOVA) analyses will be provided.}
\item{\code{"alpha_level"}}{The alpha level, significance cut-off, used for the power analysis.}
\item{\code{"method"}}{Record of the function used to produce the simulation}
\item{\code{"plot"}}{A plot of the dataframe from the simulation; should closely match the meansplot in \code{\link{ANOVA_design}}}
}
}
\description{
Simulates an exact dataset (mu, sd, and r represent empirical, not population, mean and covariance matrix) from the design to calculate power
}
\section{Functions}{
\itemize{
\item \code{ANOVA_exact2}: An extension of ANOVA_exact that uses the effect sizes calculated from very large sample size empirical simulation. This allows for small sample sizes, where ANOVA_exact cannot, while still accurately estimating power. However, model objects (emmeans and aov) are not included as output, and pairwise (t-test) results are not currently supported.
}}
\section{Warnings}{
Varying the sd or r (e.g., entering multiple values) violates assumptions of homoscedasticity and sphericity respectively
}
\examples{
## Set up a within design with 2 factors, each with 2 levels,
## with correlation between observations of 0.8,
## 40 participants (who do all conditions), and standard deviation of 2
## with a mean pattern of 1, 0, 1, 0, conditions labeled 'condition' and
## 'voice', with names for levels of "cheerful", "sad", and "human", "robot"
design_result <- ANOVA_design(design = "2w*2w", n = 40, mu = c(1, 0, 1, 0),
sd = 2, r = 0.8, labelnames = c("condition", "cheerful",
"sad", "voice", "human", "robot"))
exact_result <- ANOVA_exact(design_result, alpha_level = 0.05)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Proc.reg.reset.R
\name{Proc.reg.reset}
\alias{Proc.reg.reset}
\title{Summary table of the Reset test}
\usage{
Proc.reg.reset(lm)
}
\arguments{
\item{lm}{A lm() object}
}
\value{
returns a summary table of the Reset test
}
\description{
Summary table of the Reset test
}
\examples{
\dontrun{Proc.reg.reset(lm)}
}
| /man/Proc.reg.reset.Rd | permissive | ipveka/Proc-Sas | R | false | true | 410 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Proc.reg.reset.R
\name{Proc.reg.reset}
\alias{Proc.reg.reset}
\title{Summary table of the Reset test}
\usage{
Proc.reg.reset(lm)
}
\arguments{
\item{lm}{A lm() object}
}
\value{
returns a summary table of the Reset test
}
\description{
Summary table of the Reset test
}
\examples{
\dontrun{Proc.reg.reset(lm)}
}
|
## Potential Connectivity Matrices
#From Diane Thompson- for 10-day PLD
library(tidyverse) #version 1.3.0
##Loading in matrices from "Downloaded_Data" folder
library(ncdf4) #version 1.17
# Open the NetCDF file holding the modeled potential-connectivity matrix
# (pairwise probabilities of larval exchange between release sites).
connect_matrix <- nc_open("CORAL-connect_25_historical_pconnect_10-day_noedges2.nc")
print(connect_matrix) #This gives the meta-data for the matrix
#[dst_site, src_site] - dst=destination site, src=source site
connect_data <- ncvar_get(connect_matrix, varid="pconnect") #this is the actual potential connectivity matrix
# Quick inspection of the extracted matrix (rows = destination sites,
# columns = source sites, per the [dst_site, src_site] convention above).
print(connect_data)
dim(connect_data)
head(connect_data)
# Open the companion NetCDF file with longitude, latitude, and release-site
# IDs on the model grid; used below to match release sites to sampling sites.
latlon_matrix <- nc_open("coral_all_release_site_coarse.nc")
print(latlon_matrix)
lon_data <- ncvar_get(latlon_matrix, varid="lon")
print(lon_data)
dim(lon_data)
lon_data[1:10, 1:10]
lat_data <- ncvar_get(latlon_matrix, varid="lat")
print(lat_data)
lat_data[1:10, 1:10]
# Release-site ID for each grid cell; zero entries are dropped later with
# fun.zero.omit(), so zero presumably marks cells with no release site.
release_site_data <- ncvar_get(latlon_matrix, varid="release_site")
print(release_site_data)
dim(release_site_data)
release_site_data[1:10, 1:10]
sort(unique(c(release_site_data)))
range(unique(c(release_site_data))) #2947
#Indexing to identify release sites that match the coordinates of our sampling sites
# GLDEX provides fun.zero.omit(); it must be attached before its first use
# below. (Previously library(GLDEX) was called only after the first
# fun.zero.omit() call, which would fail in a fresh R session.)
library(GLDEX) #version 2.0.0.7
i <- which(c(release_site_data)==1) #setting i as release site=1,
#lumped coordinates into 2947 release sites
c(lat_data)[i] #lat of release site 1
c(lon_data)[i] #lon of release site 1
plot(c(lon_data)[i], c(lat_data)[i])
# Candidate grid cells north of latitude 9
i <- which(c(lat_data) > 9)
c(lat_data)[i]
c(lon_data)[i]
pos_coord <- c(release_site_data)[i]
View(pos_coord) #vector with release sites where latitude is greater than 9
# Candidate grid cells east of longitude 123
i <- which(c(lon_data) > 123)
pos_lon <- c(release_site_data)[i]
View(pos_lon)
pos_lon <- fun.zero.omit(pos_lon)
pos_lon <- sort(pos_lon)
pos_lon <- table(pos_lon)
pos_coord <- fun.zero.omit(pos_coord) #omits zeros from the vector
pos_coord <- sort(pos_coord) #orders from least to greatest
pos_coord <- table(pos_coord) #gives frequency table, so each release site is only listed once
# Check the coordinates of one specific candidate release site
i <- which(c(release_site_data)==2481)
c(lat_data)[i]
c(lon_data)[i]
#Possible matching release sites: 2375, 2401 (pop 7?), 2402, 2426, 2427, 2428, 2454, 2455, 2457,
#2458, 2479, 2480, 2481, 2483 (pop 19?), 2484
#Create pairwise pconnect matrix
#Pop 1- site 2480
#Pop 2- site 2480
#Pop 7- site 2401
#Pop 8- site 2426
#Pop 9- site 2427
#Pop 10- site 2454
#Pop 11- site 2455
#Pop 19- site 2483
#Lower triangle pconnect matrix
#Pop 19 as the destination
# One direction of dispersal for each population pair. The vector is laid
# out column-wise into an 8x8 matrix below, so the connect_data entries end
# up in the positions above the diagonal; each entry is
# connect_data[dst_site, src_site]. Pops 1 and 2 share release site 2480,
# which is why their entries are duplicated.
pconnect_lowertri_vector <- c(NA, NA, NA, NA, NA, NA, NA, NA,
connect_data[2480, 2480], NA, NA, NA, NA, NA, NA, NA,
connect_data[2401, 2480], connect_data[2401, 2480], NA, NA, NA, NA, NA, NA,
connect_data[2426, 2480], connect_data[2426, 2480], connect_data[2426, 2401], NA, NA, NA, NA, NA,
connect_data[2427, 2480], connect_data[2427, 2480], connect_data[2427, 2401], connect_data[2427, 2426], NA, NA, NA, NA,
connect_data[2454, 2480], connect_data[2454, 2480], connect_data[2454, 2401], connect_data[2454, 2426], connect_data[2454, 2427], NA, NA, NA,
connect_data[2455, 2480], connect_data[2455, 2480], connect_data[2455, 2401], connect_data[2455, 2426], connect_data[2455, 2427], connect_data[2455, 2454], NA, NA,
connect_data[2483, 2480], connect_data[2483, 2480], connect_data[2483, 2401], connect_data[2483, 2426], connect_data[2483, 2427], connect_data[2483, 2454], connect_data[2483, 2455], NA)
pconnect_lowertri <- matrix(pconnect_lowertri_vector, nrow=8)
pconnect_lowertri
# Label rows/columns with the sampled population numbers
colnames(pconnect_lowertri) <- c(1, 2, 7, 8, 9, 10, 11, 19)
rownames(pconnect_lowertri) <- c(1, 2, 7, 8, 9, 10, 11, 19)
#Upper triangle pconnect matrix
#Pop 19 as the source
# Same population pairs as pconnect_lowertri_vector, but with the
# connect_data indices swapped, i.e. the opposite direction of dispersal
# for each pair. The two directions are averaged below.
pconnect_uppertri_vector <- c(NA, NA, NA, NA, NA, NA, NA, NA,
connect_data[2480, 2480], NA, NA, NA, NA, NA, NA, NA,
connect_data[2480, 2401], connect_data[2480, 2401], NA, NA, NA, NA, NA, NA,
connect_data[2480, 2426], connect_data[2480, 2426], connect_data[2401, 2426], NA, NA, NA, NA, NA,
connect_data[2480, 2427], connect_data[2480, 2427], connect_data[2401, 2427], connect_data[2426, 2427], NA, NA, NA, NA,
connect_data[2480, 2454], connect_data[2480, 2454], connect_data[2401, 2454], connect_data[2426, 2454], connect_data[2427, 2454], NA, NA, NA,
connect_data[2480, 2455], connect_data[2480, 2455], connect_data[2401, 2455], connect_data[2426, 2455], connect_data[2427, 2455], connect_data[2454, 2455], NA, NA,
connect_data[2480, 2483], connect_data[2480, 2483], connect_data[2401, 2483], connect_data[2426, 2483], connect_data[2427, 2483], connect_data[2454, 2483], connect_data[2455, 2483], NA)
pconnect_uppertri <- matrix(pconnect_uppertri_vector, nrow=8)
pconnect_uppertri
colnames(pconnect_uppertri) <- c(1, 2, 7, 8, 9, 10, 11, 19)
rownames(pconnect_uppertri) <- c(1, 2, 7, 8, 9, 10, 11, 19)
#Compute average pconnect matrix
# Element-wise mean of the two direction vectors: the direction-averaged
# connectivity for each population pair.
pconnect_avg_matrix <- apply(rbind(pconnect_lowertri_vector, pconnect_uppertri_vector), 2, mean)
pconnect_avg_matrix <- matrix(pconnect_avg_matrix, nrow=8)
pconnect_avg_matrix
colnames(pconnect_avg_matrix) <- c(1, 2, 7, 8, 9, 10, 11, 19)
rownames(pconnect_avg_matrix) <- c(1, 2, 7, 8, 9, 10, 11, 19)
#Flip values across diagonal, so they are in the lower triangle of the matrix
# The averaged values currently sit above the diagonal; mirror them into the
# lower triangle so the matrix is symmetric for the Mantel tests below.
library(gdata) #version 2.18.0
lowerTriangle(pconnect_avg_matrix) <- upperTriangle(pconnect_avg_matrix, byrow=TRUE)
##Pairwise matrix with pops 1 and 2 Fst combined and release site 2480 for pop 1#
#Combining pops 1 and 2 as they are about 10 km apart, meaning they are too close together
#for two distinct release sites
#Pop 1- site 2480
#Pop 7- site 2401
#Pop 8- site 2426
#Pop 9- site 2427
#Pop 10- site 2454
#Pop 11- site 2455
#Pop 19- site 2483
#Fst matrix is read in and linearized in "PRBI IBD analysis.R" script
# Drop row/column 2 (pop 2) from the averaged connectivity matrix, and
# mirror the externally created Fst/(1-Fst) matrix so both are symmetric.
pconnect_avg_comb12_matrix <- pconnect_avg_matrix[-2, -2]
upperTriangle(prbi_11_12_comb12_fstlin) <- lowerTriangle(prbi_11_12_comb12_fstlin, byrow=TRUE)
#Mantel test between average pconnect and linearized Fst
library(vegan) #version 2.5-7
mantel(pconnect_avg_comb12_matrix, prbi_11_12_comb12_fstlin)
#Mantel statistic r: -0.4417, p-value: 0.975, 5039 permutations
#Get 95% CI
# Pearson CI computed on the unique (lower-triangle) pairwise values
cor.test(pconnect_avg_comb12_matrix[lower.tri(pconnect_avg_comb12_matrix)],
prbi_11_12_comb12_fstlin[lower.tri(prbi_11_12_comb12_fstlin)])
#-0.734 - -0.012
plot(pconnect_avg_comb12_matrix, prbi_11_12_comb12_fstlin,
xlab="Probability of Larval Dispersal between Populations", ylab="Fst/(1-Fst)",
pch=20)
# Linear model of genetic distance on connectivity.
# NOTE(review): as.numeric() flattens the full matrices, so each pair
# appears in both mirrored halves -- confirm this double-counting is intended.
y <- as.numeric(prbi_11_12_comb12_fstlin)
x <- as.numeric(pconnect_avg_comb12_matrix)
mod_pconnect_comb12_matrix <- lm(y~x)
summary(mod_pconnect_comb12_matrix)
#Adjusted R-squared: 0.175, p-value: 0.003409
cor.test(x, y, method='pearson')
#cor: -0.4416733, p-value: 0.003409
plot(pconnect_avg_comb12_matrix, prbi_11_12_comb12_fstlin,
xlab="Probability of Larval Dispersal between Populations", ylab="Fst/(1-Fst)",
pch=20)
abline(mod_pconnect_comb12_matrix, col="red")
#Partial Mantel test controlling for over water geographic distance
#Load in matrix with over water distances from "Coordinates_Distances" folder
water_distance_comb12 <- read.csv("OverWater_Comb12_Distance.csv")
water_distance_comb12 <- as.matrix(water_distance_comb12)
# Drop the first column (presumably row labels from the CSV) and name
# rows/columns by population number.
water_distance_comb12 <- water_distance_comb12[, -1]
colnames(water_distance_comb12) <- c(1, 7, 8, 9, 10, 11, 19)
rownames(water_distance_comb12) <- c(1, 7, 8, 9, 10, 11, 19)
#Make matrix symmetrical
upperTriangle(water_distance_comb12) <- lowerTriangle(water_distance_comb12, byrow=TRUE)
#Run Partial Mantel test with geographic distance as control
mantel.partial(pconnect_avg_comb12_matrix, prbi_11_12_comb12_fstlin, water_distance_comb12)
#Mantel statistic r: -0.4151, p-value: 0.963, 5039 permutations
#Run Partial Mantel test with pconnect as control
mantel.partial(water_distance_comb12, prbi_11_12_comb12_fstlin, pconnect_avg_comb12_matrix)
#Mantel statistic r: -0.06837, p-value: 0.591, 5039 permutations
#Mantel test excluding pops 8, 9, 10
# Rows/columns 3-5 correspond to pops 8, 9, and 10 in the comb12 ordering
pconnect_avg_comb12_no8_9_10_matrix <- pconnect_avg_comb12_matrix[c(-3, -4, -5), c(-3, -4, -5)]
prbi_11_12_comb12_no8_9_10_fstlin <- prbi_11_12_comb12_fstlin[c(-3, -4, -5), c(-3, -4, -5)]
mantel(pconnect_avg_comb12_no8_9_10_matrix, prbi_11_12_comb12_no8_9_10_fstlin, permutations=999)
#R: -0.6616, p: 0.91667, 23 permutations
#Get 95% CI
cor.test(pconnect_avg_comb12_no8_9_10_matrix[lower.tri(pconnect_avg_comb12_no8_9_10_matrix)],
prbi_11_12_comb12_no8_9_10_fstlin[lower.tri(prbi_11_12_comb12_no8_9_10_fstlin)])
#-0.959 - 0.324
plot(pconnect_avg_comb12_no8_9_10_matrix, prbi_11_12_comb12_no8_9_10_fstlin,
xlab="Probability of Larval Dispersal between Populations", ylab="Fst/(1-Fst)",
pch=20)
##Pairwise matrix with pops 1 and 2 Fst combined and release site 2480 for pop 1 and pop 19 excluded##
# Row/column 7 is pop 19 in the comb12 ordering (1, 7, 8, 9, 10, 11, 19)
pconnect_avg_comb12_no19_matrix <- pconnect_avg_comb12_matrix[-7, -7]
prbi_11_12_comb12_no19_fstlin <- prbi_11_12_comb12_fstlin[-7, -7]
#Mantel test between average pconnect and linearized Fst
mantel(pconnect_avg_comb12_no19_matrix, prbi_11_12_comb12_no19_fstlin)
#Mantel statistic r: -0.3366, p-value: 0.81667, 719 permutations
cor.test(pconnect_avg_comb12_no19_matrix[lower.tri(pconnect_avg_comb12_no19_matrix)],
prbi_11_12_comb12_no19_fstlin[lower.tri(prbi_11_12_comb12_no19_fstlin)])
#-0.724 - 0.212
# NOTE(review): this evaluates to -(CI width)/3.92; standard-error-from-CI
# calculations usually use (upper - lower)/3.92 -- confirm the sign.
(-0.724 - 0.212) / 3.92 #-0.239
plot(pconnect_avg_comb12_no19_matrix, prbi_11_12_comb12_no19_fstlin,
xlab="Probability of Larval Dispersal between Populations", ylab="Fst/(1-Fst)",
pch=20)
# Linear model and Pearson correlation on the flattened (full) matrices
y <- as.numeric(prbi_11_12_comb12_no19_fstlin)
x <- as.numeric(pconnect_avg_comb12_no19_matrix)
mod_pconnect_comb12_no19_matrix <- lm(y~x)
summary(mod_pconnect_comb12_no19_matrix)
#Adjusted R-squared: 0.08165, p-value: 0.06892
cor.test(x, y, method='pearson')
#cor: -0.3366305, p-value: 0.06892
plot(pconnect_avg_comb12_no19_matrix, prbi_11_12_comb12_no19_fstlin,
xlab="Probability of Larval Dispersal between Populations", ylab="Fst/(1-Fst)",
pch=20)
abline(mod_pconnect_comb12_no19_matrix, col="red")
#Partial Mantel test controlling for geographic distance
water_distance_comb12_no19 <- water_distance_comb12[-7, -7]
mantel.partial(pconnect_avg_comb12_no19_matrix, prbi_11_12_comb12_no19_fstlin, water_distance_comb12_no19)
#Mantel statistic r: -0.01532, p-value: 0.52361, 719 permutations
#Partial Mantel controlling for pconnect
mantel.partial(water_distance_comb12_no19, prbi_11_12_comb12_no19_fstlin, pconnect_avg_comb12_no19_matrix)
#Mantel statistic r: 0.4026, p-value: 0.098611, 719 permutations
#Plot showing 19 in different color
# Blank the upper triangle and diagonal IN PLACE so each pair appears once
# in the plots below. NOTE(review): this mutates the matrices used by the
# tests above; re-running earlier code after this point uses NA-filled data.
pconnect_avg_comb12_matrix[upper.tri(pconnect_avg_comb12_matrix, diag=T)] = NA
prbi_11_12_comb12_fstlin[upper.tri(prbi_11_12_comb12_fstlin, diag=T)] = NA
pconnectframe_comb12 <- tibble(PConnect=as.vector(pconnect_avg_comb12_no19_matrix),
GeneticDistance=as.vector(prbi_11_12_comb12_no19_fstlin))
pconnectframe_comb12 <- drop_na(pconnectframe_comb12)
# Row 7 = pop 19's connectivity/genetic distance to every other population
pop19_comb12 <- tibble(PConnect = as.vector(pconnect_avg_comb12_matrix[7,]),
GeneticDistance = as.vector(prbi_11_12_comb12_fstlin[7,]))
pop19_comb12 <- drop_na(pop19_comb12)
#Separate symbol for 8&9, 8&10, 9&10
pop8910 <- tibble(PConnect= as.vector(c(pconnect_avg_comb12_matrix[4, 3], pconnect_avg_comb12_matrix[5, 3],
pconnect_avg_comb12_matrix[5, 4])),
GeneticDistance= as.vector(c(prbi_11_12_comb12_fstlin[4, 3], prbi_11_12_comb12_fstlin[5, 3],
prbi_11_12_comb12_fstlin[5, 4])))
# NOTE(review): the ylab string "Genetic Distance (Fst/(1-Fst)" in both
# plots below is missing a closing parenthesis.
ggplot(data=pconnectframe_comb12, aes(x=PConnect,
y=GeneticDistance)) +
geom_point(size=1.75, color="#7b3294") +
geom_point(data=pop19_comb12, aes(x=PConnect, y=GeneticDistance), color="#008837", size=1.75) +
geom_smooth(data=pop19_comb12, method="lm", se=FALSE, color="#008837") +
theme_bw() +
xlab("Potential Connectivity") +
ylab("Genetic Distance (Fst/(1-Fst)")
#With points and circles
ggplot(data=pconnectframe_comb12, aes(x=PConnect,
y=GeneticDistance)) +
geom_point(size=2.5) +
geom_point(data=pop19_comb12, aes(x=PConnect, y=GeneticDistance), size=3, shape=1) +
geom_smooth(data=pop19_comb12, method="lm", se=FALSE, color="black", linetype="dotted", size=1) +
geom_point(data=pop8910, aes(x=PConnect, y=GeneticDistance), size=3, shape=15) +
theme_bw() +
xlab("Potential Connectivity") +
ylab("Genetic Distance (Fst/(1-Fst)") +
annotate("text", x = 0.038, y = -0.005, label = "8") +
annotate("text", x = 0.023, y = -0.0023, label = "7") +
annotate("text", x = 0.0062, y = 0.0023, label = "9") +
annotate("text", x = 0.0045, y = 0.011, label = "1&2") +
annotate("text", x = -0.0045, y = 0.0075, label = "10") +
annotate("text", x = 0.007, y = 0.009, label = "11") +
theme(axis.text = element_text(size = 10))
##Plot Pop 19 compared to other pops with pops 1 and 2 combined (release site 2480)##
#Make vector with pconnect between pop 19 and other pops
#Pop 1- site 2480
#Pop 7- site 2401
#Pop 8- site 2426
#Pop 9- site 2427
#Pop 10- site 2454
#Pop 11- site 2455
#Pop 19- site 2483
#1st in direction of pop 19 to other pops
#[dst site, src site] would be [other pop, pop 19], so [other site, 2483]
#Order of vector: Pop 1, 7, 8, 9, 10, 11 (pop 19 is the focal population)
pconnect_pop19src_comb12 <- c(connect_data[2480, 2483],connect_data[2401, 2483], connect_data[2426, 2483],
connect_data[2427, 2483], connect_data[2454, 2483], connect_data[2455, 2483])
pconnect_pop19src_comb12
#2nd in direction of other pops to pop 19
pconnect_pop19dst_comb12 <- c(connect_data[2483, 2480], connect_data[2483, 2401], connect_data[2483, 2426],
connect_data[2483, 2427], connect_data[2483, 2454], connect_data[2483, 2455])
pconnect_pop19dst_comb12
# Direction-averaged connectivity between pop 19 and each other population
pconnect_avg_comb12 <- apply(rbind(pconnect_pop19src_comb12, pconnect_pop19dst_comb12), 2, mean)
pconnect_avg_comb12
pop_numbers_comb12 <- c(1, 7, 8, 9, 10, 11)
# Pop 19's linearized Fst to the other populations (drop the self entry)
pop19_comb12_linfst <- as.vector(prbi_11_12_comb12_fstlin[7,])
pop19_comb12_linfst <- pop19_comb12_linfst[-7]
pconnect_avg_comb12_df <- data.frame(pop_numbers_comb12, pconnect_avg_comb12, pop19_comb12_linfst)
pconnect_avg_comb12_df
#Plotting average pconnect vs. lin Fst
mod_pconnect_comb12_avg <- lm(pconnect_avg_comb12_df$pop19_comb12_linfst ~ pconnect_avg_comb12_df$pconnect_avg_comb12)
summary(mod_pconnect_comb12_avg) #Adjusted R-squared:0.900235922, p:0.0024
# NOTE(review): vegan::mantel() expects dissimilarity matrices; these
# arguments are plain vectors -- confirm this call is meaningful.
mantel(pconnect_avg_comb12_df$pconnect_avg_comb12, pconnect_avg_comb12_df$pop19_comb12_linfst)
cor.test(pconnect_avg_comb12_df$pconnect_avg_comb12, pconnect_avg_comb12_df$pop19_comb12_linfst, method = 'pearson')
#cor: -0.9600763, p-value: 0.002359, df=4
plot(pconnect_avg_comb12_df$pconnect_avg_comb12, pconnect_avg_comb12_df$pop19_comb12_linfst, pch=19)
abline(mod_pconnect_comb12_avg, col="red")
# Treat population number as a factor so ggplot colors points discretely
pconnect_avg_comb12_df$pop_numbers_comb12 <- as.factor(pconnect_avg_comb12_df$pop_numbers_comb12)
class(pconnect_avg_comb12_df$pop_numbers_comb12)
ggplot(data=pconnect_avg_comb12_df, aes(x=pconnect_avg_comb12,
y=pop19_comb12_linfst, color=pop_numbers_comb12)) +
geom_point(size=2.5) +
geom_smooth(method="lm", se=FALSE, color="steelblue") +
theme_bw() +
xlab("PConnect") +
ylab("Fst/(1-Fst)")
# Dispersal spread estimate, following the equation in Siegel et al. (2003).
# The current-velocity standard deviation in the IBD region is 0.2004 m/s;
# convert to km/day: multiply by 86400 s/day, divide by 1000 m/km.
m_per_s <- 0.2004
secs_per_day <- 86400
sd_currentvel <- (m_per_s * secs_per_day) / 1000  # 17.31456 km/day
# The 10^0.5 factor presumably reflects the 10-day pelagic larval duration
# -- confirm against Siegel et al. (2003).
pconnect_spread <- 2.238 * sd_currentvel * 10^0.5  # 122.5382 km
# For comparison: applying the 0.5 power to the whole product instead
# gives 19.685 km.
(2.238 * sd_currentvel * 10)^0.5
| /R Code/Potential_Connectivity.R | no_license | pinskylab/Premnas-biaculeatus | R | false | false | 16,058 | r | ## Potential Connectivity Matrices
#From Diane Thompson- for 10-day PLD
library(tidyverse) #version 1.3.0
##Loading in matrices from "Downloaded_Data" folder
library(ncdf4) #version 1.17
connect_matrix <- nc_open("CORAL-connect_25_historical_pconnect_10-day_noedges2.nc")
print(connect_matrix) #This gives the meta-data for the matrix
#[dst_site, src_site] - dst=destination site, src=source site
connect_data <- ncvar_get(connect_matrix, varid="pconnect") #this is the actual potential connectivity matrix
print(connect_data)
dim(connect_data)
head(connect_data)
latlon_matrix <- nc_open("coral_all_release_site_coarse.nc")
print(latlon_matrix)
lon_data <- ncvar_get(latlon_matrix, varid="lon")
print(lon_data)
dim(lon_data)
lon_data[1:10, 1:10]
lat_data <- ncvar_get(latlon_matrix, varid="lat")
print(lat_data)
lat_data[1:10, 1:10]
release_site_data <- ncvar_get(latlon_matrix, varid="release_site")
print(release_site_data)
dim(release_site_data)
release_site_data[1:10, 1:10]
sort(unique(c(release_site_data)))
range(unique(c(release_site_data))) #2947
i <- which(c(release_site_data)==1) #setting i as release site=1,
#lumped coordinates into 2947 release sites
#Indexing to identify release sites that match the coordinates of our sampling sites
c(lat_data)[i] #lat of release site 1
c(lon_data)[i] #lon of release site 1
plot(c(lon_data)[i], c(lat_data)[i])
i <- which(c(lat_data) > 9)
c(lat_data)[i]
c(lon_data)[i]
pos_coord <- c(release_site_data)[i]
View(pos_coord) #vector with release sites where latitude is greater than 9
i <- which(c(lon_data) > 123)
pos_lon <- c(release_site_data)[i]
View(pos_lon)
pos_lon <- fun.zero.omit(pos_lon)
pos_lon <- sort(pos_lon)
pos_lon <- table(pos_lon)
library(GLDEX) #version 2.0.0.7
pos_coord <- fun.zero.omit(pos_coord) #omits zeros from the vector
pos_coord <- sort(pos_coord) #orders from least to greatest
pos_coord <- table(pos_coord) #gives frequency table, so each release site is only listed once
i <- which(c(release_site_data)==2481)
c(lat_data)[i]
c(lon_data)[i]
#Possible matching release sites: 2375, 2401 (pop 7?), 2402, 2426, 2427, 2428, 2454, 2455, 2457,
#2458, 2479, 2480, 2481, 2483 (pop 19?), 2484
#Create pairwise pconnect matrix
#Pop 1- site 2480
#Pop 2- site 2480
#Pop 7- site 2401
#Pop 8- site 2426
#Pop 9- site 2427
#Pop 10- site 2454
#Pop 11- site 2455
#Pop 19- site 2483
#Lower triangle pconnect matrix
#Pop 19 as the destination
pconnect_lowertri_vector <- c(NA, NA, NA, NA, NA, NA, NA, NA,
connect_data[2480, 2480], NA, NA, NA, NA, NA, NA, NA,
connect_data[2401, 2480], connect_data[2401, 2480], NA, NA, NA, NA, NA, NA,
connect_data[2426, 2480], connect_data[2426, 2480], connect_data[2426, 2401], NA, NA, NA, NA, NA,
connect_data[2427, 2480], connect_data[2427, 2480], connect_data[2427, 2401], connect_data[2427, 2426], NA, NA, NA, NA,
connect_data[2454, 2480], connect_data[2454, 2480], connect_data[2454, 2401], connect_data[2454, 2426], connect_data[2454, 2427], NA, NA, NA,
connect_data[2455, 2480], connect_data[2455, 2480], connect_data[2455, 2401], connect_data[2455, 2426], connect_data[2455, 2427], connect_data[2455, 2454], NA, NA,
connect_data[2483, 2480], connect_data[2483, 2480], connect_data[2483, 2401], connect_data[2483, 2426], connect_data[2483, 2427], connect_data[2483, 2454], connect_data[2483, 2455], NA)
pconnect_lowertri <- matrix(pconnect_lowertri_vector, nrow=8)
pconnect_lowertri
colnames(pconnect_lowertri) <- c(1, 2, 7, 8, 9, 10, 11, 19)
rownames(pconnect_lowertri) <- c(1, 2, 7, 8, 9, 10, 11, 19)
#Upper triangle pconnect matrix
#Pop 19 as the source
pconnect_uppertri_vector <- c(NA, NA, NA, NA, NA, NA, NA, NA,
connect_data[2480, 2480], NA, NA, NA, NA, NA, NA, NA,
connect_data[2480, 2401], connect_data[2480, 2401], NA, NA, NA, NA, NA, NA,
connect_data[2480, 2426], connect_data[2480, 2426], connect_data[2401, 2426], NA, NA, NA, NA, NA,
connect_data[2480, 2427], connect_data[2480, 2427], connect_data[2401, 2427], connect_data[2426, 2427], NA, NA, NA, NA,
connect_data[2480, 2454], connect_data[2480, 2454], connect_data[2401, 2454], connect_data[2426, 2454], connect_data[2427, 2454], NA, NA, NA,
connect_data[2480, 2455], connect_data[2480, 2455], connect_data[2401, 2455], connect_data[2426, 2455], connect_data[2427, 2455], connect_data[2454, 2455], NA, NA,
connect_data[2480, 2483], connect_data[2480, 2483], connect_data[2401, 2483], connect_data[2426, 2483], connect_data[2427, 2483], connect_data[2454, 2483], connect_data[2455, 2483], NA)
pconnect_uppertri <- matrix(pconnect_uppertri_vector, nrow=8)
pconnect_uppertri
colnames(pconnect_uppertri) <- c(1, 2, 7, 8, 9, 10, 11, 19)
rownames(pconnect_uppertri) <- c(1, 2, 7, 8, 9, 10, 11, 19)
#Compute average pconnect matrix
pconnect_avg_matrix <- apply(rbind(pconnect_lowertri_vector, pconnect_uppertri_vector), 2, mean)
pconnect_avg_matrix <- matrix(pconnect_avg_matrix, nrow=8)
pconnect_avg_matrix
colnames(pconnect_avg_matrix) <- c(1, 2, 7, 8, 9, 10, 11, 19)
rownames(pconnect_avg_matrix) <- c(1, 2, 7, 8, 9, 10, 11, 19)
#Flip values across diagonal, so they are in the lower triangle of the matrix
library(gdata) #version 2.18.0
lowerTriangle(pconnect_avg_matrix) <- upperTriangle(pconnect_avg_matrix, byrow=TRUE)
##Pairwise matrix with pops 1 and 2 Fst combined and release site 2480 for pop 1#
#Combining pops 1 and 2 as they are about 10 km apart, meaning they are too close together
#for two distinct release sites
#Pop 1- site 2480
#Pop 7- site 2401
#Pop 8- site 2426
#Pop 9- site 2427
#Pop 10- site 2454
#Pop 11- site 2455
#Pop 19- site 2483
#Fst matrix is read in and linearized in "PRBI IBD analysis.R" script
pconnect_avg_comb12_matrix <- pconnect_avg_matrix[-2, -2]
upperTriangle(prbi_11_12_comb12_fstlin) <- lowerTriangle(prbi_11_12_comb12_fstlin, byrow=TRUE)
#Mantel test between average pconnect and linearized Fst
library(vegan) #version 2.5-7
mantel(pconnect_avg_comb12_matrix, prbi_11_12_comb12_fstlin)
#Mantel statistic r: -0.4417, p-value: 0.975, 5039 permutations
#Get 95% CI
cor.test(pconnect_avg_comb12_matrix[lower.tri(pconnect_avg_comb12_matrix)],
prbi_11_12_comb12_fstlin[lower.tri(prbi_11_12_comb12_fstlin)])
#-0.734 - -0.012
plot(pconnect_avg_comb12_matrix, prbi_11_12_comb12_fstlin,
xlab="Probability of Larval Dispersal between Populations", ylab="Fst/(1-Fst)",
pch=20)
y <- as.numeric(prbi_11_12_comb12_fstlin)
x <- as.numeric(pconnect_avg_comb12_matrix)
mod_pconnect_comb12_matrix <- lm(y~x)
summary(mod_pconnect_comb12_matrix)
#Adjusted R-squared: 0.175, p-value: 0.003409
cor.test(x, y, method='pearson')
#cor: -0.4416733, p-value: 0.003409
plot(pconnect_avg_comb12_matrix, prbi_11_12_comb12_fstlin,
xlab="Probability of Larval Dispersal between Populations", ylab="Fst/(1-Fst)",
pch=20)
abline(mod_pconnect_comb12_matrix, col="red")
#Partial Mantel test controlling for over water geographic distance
#Load in matrix with over water distances from "Coordinates_Distances" folder
water_distance_comb12 <- read.csv("OverWater_Comb12_Distance.csv")
water_distance_comb12 <- as.matrix(water_distance_comb12)
water_distance_comb12 <- water_distance_comb12[, -1]
colnames(water_distance_comb12) <- c(1, 7, 8, 9, 10, 11, 19)
rownames(water_distance_comb12) <- c(1, 7, 8, 9, 10, 11, 19)
#Make matrix symmetrical
upperTriangle(water_distance_comb12) <- lowerTriangle(water_distance_comb12, byrow=TRUE)
#Run Partial Mantel test with geographic distance as control
mantel.partial(pconnect_avg_comb12_matrix, prbi_11_12_comb12_fstlin, water_distance_comb12)
#Mantel statistic r: -0.4151, p-value: 0.963, 5039 permutations
#Run Partial Mantel test with pconnect as control
mantel.partial(water_distance_comb12, prbi_11_12_comb12_fstlin, pconnect_avg_comb12_matrix)
#Mantel statistic r: -0.06837, p-value: 0.591, 5039 permutations
#Mantel test excluding pops 8, 9, 10
pconnect_avg_comb12_no8_9_10_matrix <- pconnect_avg_comb12_matrix[c(-3, -4, -5), c(-3, -4, -5)]
prbi_11_12_comb12_no8_9_10_fstlin <- prbi_11_12_comb12_fstlin[c(-3, -4, -5), c(-3, -4, -5)]
mantel(pconnect_avg_comb12_no8_9_10_matrix, prbi_11_12_comb12_no8_9_10_fstlin, permutations=999)
#R: -0.6616, p: 0.91667, 23 permutations
#Get 95% CI
cor.test(pconnect_avg_comb12_no8_9_10_matrix[lower.tri(pconnect_avg_comb12_no8_9_10_matrix)],
prbi_11_12_comb12_no8_9_10_fstlin[lower.tri(prbi_11_12_comb12_no8_9_10_fstlin)])
#-0.959 - 0.324
plot(pconnect_avg_comb12_no8_9_10_matrix, prbi_11_12_comb12_no8_9_10_fstlin,
xlab="Probability of Larval Dispersal between Populations", ylab="Fst/(1-Fst)",
pch=20)
##Pairwise matrix with pops 1 and 2 Fst combined and release site 2480 for pop 1 and pop 19 excluded##
pconnect_avg_comb12_no19_matrix <- pconnect_avg_comb12_matrix[-7, -7]
prbi_11_12_comb12_no19_fstlin <- prbi_11_12_comb12_fstlin[-7, -7]
#Mantel test between average pconnect and linearized Fst
mantel(pconnect_avg_comb12_no19_matrix, prbi_11_12_comb12_no19_fstlin)
#Mantel statistic r: -0.3366, p-value: 0.81667, 719 permutations
cor.test(pconnect_avg_comb12_no19_matrix[lower.tri(pconnect_avg_comb12_no19_matrix)],
prbi_11_12_comb12_no19_fstlin[lower.tri(prbi_11_12_comb12_no19_fstlin)])
#-0.724 - 0.212
(-0.724 - 0.212) / 3.92 #-0.239
plot(pconnect_avg_comb12_no19_matrix, prbi_11_12_comb12_no19_fstlin,
xlab="Probability of Larval Dispersal between Populations", ylab="Fst/(1-Fst)",
pch=20)
y <- as.numeric(prbi_11_12_comb12_no19_fstlin)
x <- as.numeric(pconnect_avg_comb12_no19_matrix)
mod_pconnect_comb12_no19_matrix <- lm(y~x)
summary(mod_pconnect_comb12_no19_matrix)
#Adjusted R-squared: 0.08165, p-value: 0.06892
cor.test(x, y, method='pearson')
#cor: -0.3366305, p-value: 0.06892
plot(pconnect_avg_comb12_no19_matrix, prbi_11_12_comb12_no19_fstlin,
xlab="Probability of Larval Dispersal between Populations", ylab="Fst/(1-Fst)",
pch=20)
abline(mod_pconnect_comb12_no19_matrix, col="red")
#Partial Mantel test controlling for geographic distance
water_distance_comb12_no19 <- water_distance_comb12[-7, -7]
mantel.partial(pconnect_avg_comb12_no19_matrix, prbi_11_12_comb12_no19_fstlin, water_distance_comb12_no19)
#Mantel statistic r: -0.01532, p-value: 0.52361, 719 permutations
#Partial Mantel controlling for pconnect
mantel.partial(water_distance_comb12_no19, prbi_11_12_comb12_no19_fstlin, pconnect_avg_comb12_no19_matrix)
#Mantel statistic r: 0.4026, p-value: 0.098611, 719 permutations
#Plot showing 19 in different color
pconnect_avg_comb12_matrix[upper.tri(pconnect_avg_comb12_matrix, diag=T)] = NA
prbi_11_12_comb12_fstlin[upper.tri(prbi_11_12_comb12_fstlin, diag=T)] = NA
pconnectframe_comb12 <- tibble(PConnect=as.vector(pconnect_avg_comb12_no19_matrix),
GeneticDistance=as.vector(prbi_11_12_comb12_no19_fstlin))
pconnectframe_comb12 <- drop_na(pconnectframe_comb12)
pop19_comb12 <- tibble(PConnect = as.vector(pconnect_avg_comb12_matrix[7,]),
GeneticDistance = as.vector(prbi_11_12_comb12_fstlin[7,]))
pop19_comb12 <- drop_na(pop19_comb12)
#Separate symbol for 8&9, 8&10, 9&10
pop8910 <- tibble(PConnect= as.vector(c(pconnect_avg_comb12_matrix[4, 3], pconnect_avg_comb12_matrix[5, 3],
pconnect_avg_comb12_matrix[5, 4])),
GeneticDistance= as.vector(c(prbi_11_12_comb12_fstlin[4, 3], prbi_11_12_comb12_fstlin[5, 3],
prbi_11_12_comb12_fstlin[5, 4])))
ggplot(data=pconnectframe_comb12, aes(x=PConnect,
y=GeneticDistance)) +
geom_point(size=1.75, color="#7b3294") +
geom_point(data=pop19_comb12, aes(x=PConnect, y=GeneticDistance), color="#008837", size=1.75) +
geom_smooth(data=pop19_comb12, method="lm", se=FALSE, color="#008837") +
theme_bw() +
xlab("Potential Connectivity") +
ylab("Genetic Distance (Fst/(1-Fst)")
#With points and circles
ggplot(data=pconnectframe_comb12, aes(x=PConnect,
y=GeneticDistance)) +
geom_point(size=2.5) +
geom_point(data=pop19_comb12, aes(x=PConnect, y=GeneticDistance), size=3, shape=1) +
geom_smooth(data=pop19_comb12, method="lm", se=FALSE, color="black", linetype="dotted", size=1) +
geom_point(data=pop8910, aes(x=PConnect, y=GeneticDistance), size=3, shape=15) +
theme_bw() +
xlab("Potential Connectivity") +
ylab("Genetic Distance (Fst/(1-Fst)") +
annotate("text", x = 0.038, y = -0.005, label = "8") +
annotate("text", x = 0.023, y = -0.0023, label = "7") +
annotate("text", x = 0.0062, y = 0.0023, label = "9") +
annotate("text", x = 0.0045, y = 0.011, label = "1&2") +
annotate("text", x = -0.0045, y = 0.0075, label = "10") +
annotate("text", x = 0.007, y = 0.009, label = "11") +
theme(axis.text = element_text(size = 10))
##Plot Pop 19 compared to other pops with pops 1 and 2 combined (release site 2480)##
#Make vector with pconnect between pop 19 and other pops
#Pop 1- site 2480
#Pop 7- site 2401
#Pop 8- site 2426
#Pop 9- site 2427
#Pop 10- site 2454
#Pop 11- site 2455
#Pop 19- site 2483
#1st in direction of pop 19 to other pops
#[dst site, src site] would be [other pop, pop 19], so [other site, 2483]
#Order of vector: Pop 1, 7, 8, 9, 10, 11 (pop 19 is the fixed source/destination, not an element)
pconnect_pop19src_comb12 <- c(connect_data[2480, 2483],connect_data[2401, 2483], connect_data[2426, 2483],
connect_data[2427, 2483], connect_data[2454, 2483], connect_data[2455, 2483])
pconnect_pop19src_comb12
#2nd in direction of other pops to pop 19
pconnect_pop19dst_comb12 <- c(connect_data[2483, 2480], connect_data[2483, 2401], connect_data[2483, 2426],
connect_data[2483, 2427], connect_data[2483, 2454], connect_data[2483, 2455])
pconnect_pop19dst_comb12
pconnect_avg_comb12 <- apply(rbind(pconnect_pop19src_comb12, pconnect_pop19dst_comb12), 2, mean)
pconnect_avg_comb12
pop_numbers_comb12 <- c(1, 7, 8, 9, 10, 11)
pop19_comb12_linfst <- as.vector(prbi_11_12_comb12_fstlin[7,])
pop19_comb12_linfst <- pop19_comb12_linfst[-7]
pconnect_avg_comb12_df <- data.frame(pop_numbers_comb12, pconnect_avg_comb12, pop19_comb12_linfst)
pconnect_avg_comb12_df
#Plotting average pconnect vs. lin Fst
mod_pconnect_comb12_avg <- lm(pconnect_avg_comb12_df$pop19_comb12_linfst ~ pconnect_avg_comb12_df$pconnect_avg_comb12)
summary(mod_pconnect_comb12_avg) #Adjusted R-squared:0.900235922, p:0.0024
mantel(pconnect_avg_comb12_df$pconnect_avg_comb12, pconnect_avg_comb12_df$pop19_comb12_linfst)
cor.test(pconnect_avg_comb12_df$pconnect_avg_comb12, pconnect_avg_comb12_df$pop19_comb12_linfst, method = 'pearson')
#cor: -0.9600763, p-value: 0.002359, df=4
plot(pconnect_avg_comb12_df$pconnect_avg_comb12, pconnect_avg_comb12_df$pop19_comb12_linfst, pch=19)
abline(mod_pconnect_comb12_avg, col="red")
pconnect_avg_comb12_df$pop_numbers_comb12 <- as.factor(pconnect_avg_comb12_df$pop_numbers_comb12)
class(pconnect_avg_comb12_df$pop_numbers_comb12)
ggplot(data=pconnect_avg_comb12_df, aes(x=pconnect_avg_comb12,
y=pop19_comb12_linfst, color=pop_numbers_comb12)) +
geom_point(size=2.5) +
geom_smooth(method="lm", se=FALSE, color="steelblue") +
theme_bw() +
xlab("PConnect") +
ylab("Fst/(1-Fst)")
#Dispersal spread estimate using equation from Siegel et al. 2003
#standard deviation of current velocity in IBD region: 0.2004 m/s, need to convert to km/day
sd_currentvel <- (0.2004 * 86400) / 1000 #17.31456 km/day
pconnect_spread <- 2.238 * sd_currentvel * 10^0.5 #122.5382
(2.238 * sd_currentvel * 10)^0.5
#But if the power to 0.5 is applied after, answer is 19.685 km
|
library(ggplot2)
library(NLP)
library(tm)
library(SnowballC)
library(cluster)
library(factoextra)
library(NbClust)
library(wordcloud)
library(plotly)
library(caret)
library(corrplot)
library(reshape2)
library(stringr)
library(cowplot)
rm(list=ls())
#Setting working directory
setwd("/Users/martinberger/Desktop/DPA/T3C_PROJECT/T3C")
#Loading the data
original_train <- read.csv("DATA/train.csv", sep=",", header=T)
# BUG FIX: the test set was previously read from train.csv (copy-paste slip),
# so original_test was an exact duplicate of original_train.
# NOTE(review): confirm DATA/test.csv exists and shares the training layout.
original_test <- read.csv("DATA/test.csv", sep=",", header=T)
#Creating a data frame
train_df <- data.frame(original_train)
train_df<-train_df[,-1]
#A quick glance to the data
summary(train_df)
#Labels distributions
nb_none <-sum(train_df$toxic==0 & train_df$severe_toxic==0 & train_df$obscene==0 & train_df$threat==0 & train_df$insult==0 &train_df$identity_hate==0)
labels <- c("Toxic","Severe Toxic", "Obscene","Threat", "Insult", "Identity Hate", "None")
values <- c(sum(train_df$toxic==1),sum(train_df$severe_toxic==1),sum(train_df$obscene==1),sum(train_df$threat==1),
sum(train_df$insult==1),sum(train_df$identity_hate==1), nb_none)
data <- data.frame(labels, values)
p <- plot_ly(data, x = ~labels, y = ~values,text=values,textposition = 'auto', type = 'bar', name = 'Labels Density')
p
#Which proportion of comments carry a given number of tags?
train_df <-data.frame(train_df, rowSums(train_df[,2:7]))
colnames(train_df)[colnames(train_df)=="rowSums.train_df...2.7.."] <- "label_nb"
# Count how many comments carry 0..6 labels (column 8 is label_nb, the row sum
# of the six label columns added above).
nb_tag_none <- sum(train_df$toxic==0 & train_df$severe_toxic==0 & train_df$obscene==0 & train_df$threat==0 & train_df$insult==0 & train_df$identity_hate==0)
labels <- c("0","1", "2","3", "4", "5", "6")
occurences <- c(nb_tag_none, sum(train_df[,8]==1), sum(train_df[,8]==2), sum(train_df[,8]==3), sum(train_df[,8]==4), sum(train_df[,8]==5), sum(train_df[,8]==6))
# BUG FIX: previously data.frame(labels, values) reused the stale `values`
# vector from the label-density plot; the plotted `occurences` belong in the frame.
data <- data.frame(labels, occurences)
p <- plot_ly(data, x = ~labels, y = ~occurences, text=occurences, textposition = 'auto', type = 'bar', name = 'Labels Density')
p
# Count exclamation marks per comment (used later as an intensity feature).
# BUG FIX: the previous pattern "$" is a regex end-of-string anchor, so
# str_count() returned exactly 1 for every comment instead of counting "!".
# Naming the column directly also removes the fragile rename that matched the
# auto-generated "str_count.train_df.comment_text......" name.
train_df <- data.frame(train_df, nb_of_excl = str_count(train_df$comment_text, fixed("!")))
#Correlation between toxic comments
summary(train_df)
corr_df<-train_df[!train_df$label_nb == 0 ,]
summary(corr_df)
corr_df<-corr_df[,-8]
corr_df<-corr_df[,-1]
summary(corr_df)
nrow(corr_df)
corrplot(cor(corr_df[]), method = "number")
#Creating smaller df depending on label to analyse them
#clean_df <- train_df[train_df$label_nb == 0 ,]
toxic_df <- train_df[train_df$toxic == 1 ,]
severe_toxic_df <- train_df[train_df$severe_toxic == 1 ,]
obscene_df <- train_df[train_df$obscene == 1 ,]
threat_df <- train_df[train_df$threat == 1 ,]
insult_df <- train_df[train_df$insult == 1 ,]
identity_hate_df <- train_df[train_df$identity_hate == 1 ,]
#Creation of a corpus for each label
#clean_corpus <-Corpus(VectorSource(clean_df[,1]))
toxic_corpus <- Corpus(VectorSource(toxic_df[,1]))
severe_toxic_corpus <- Corpus(VectorSource(severe_toxic_df[,1]))
obscene_corpus <- Corpus(VectorSource(obscene_df[,1]))
threat_corpus <- Corpus(VectorSource(threat_df[,1]))
insult_corpus <- Corpus(VectorSource(insult_df[,1]))
identity_hate_corpus <- Corpus(VectorSource(identity_hate_df[,1]))
#Cleaning each corpus
# Apply a standard text-normalisation pipeline to a tm corpus:
# lower-case, strip punctuation, collapse whitespace, drop digits,
# remove English stop words, then stem. Returns the cleaned corpus.
basic_text_cleaner <- function(corpus){
  cleaning_steps <- list(
    function(x) tm_map(x, content_transformer(tolower)),
    function(x) tm_map(x, removePunctuation),
    function(x) tm_map(x, stripWhitespace),
    function(x) tm_map(x, removeNumbers),
    function(x) tm_map(x, removeWords, stopwords("english")),
    function(x) tm_map(x, stemDocument)
  )
  # Apply the transformations in the same order as the original pipeline.
  for (step in cleaning_steps) {
    corpus <- step(corpus)
  }
  corpus
}
#clean_corpus <- basic_text_cleaner(clean_corpus)
toxic_corpus <- basic_text_cleaner(toxic_corpus)
severe_toxic_corpus <- basic_text_cleaner(severe_toxic_corpus)
obscene_corpus <- basic_text_cleaner(obscene_corpus)
threat_corpus <- basic_text_cleaner(threat_corpus)
insult_corpus <- basic_text_cleaner(insult_corpus)
identity_hate_corpus <- basic_text_cleaner(identity_hate_corpus)
#Creation of a DTM for each corpus
#clean_dtm <- DocumentTermMatrix(clean_corpus, control = list(weighting = weightTfIdf))
toxic_dtm <- DocumentTermMatrix(toxic_corpus, control = list(weighting = weightTfIdf))
severe_toxic_dtm <- DocumentTermMatrix(severe_toxic_corpus, control = list(weighting = weightTfIdf))
obscene_dtm <- DocumentTermMatrix(obscene_corpus, control = list(weighting = weightTfIdf))
threat_dtm <- DocumentTermMatrix(threat_corpus, control = list(weighting = weightTfIdf))
insult_dtm <- DocumentTermMatrix(insult_corpus, control = list(weighting = weightTfIdf))
identity_hate_dtm <- DocumentTermMatrix(identity_hate_corpus, control = list(weighting = weightTfIdf))
#Transforming each DTM into a matrix
#clean_matrix <- as.matrix(clean_dtm)
toxic_matrix <- as.matrix(toxic_dtm)
severe_toxic_matrix <- as.matrix(severe_toxic_dtm)
obscene_matrix <- as.matrix(obscene_dtm)
threat_matrix <- as.matrix(threat_dtm)
insult_matrix <- as.matrix(insult_dtm)
identity_hate_matrix <- as.matrix(identity_hate_dtm)
#Creating a wordcloud for each type (term frequencies = column sums of each TF-IDF DTM)
#clean_freq <- colSums(clean_matrix)
#wordcloud(names(clean_freq), clean_freq, max.words=100, rot.per=0.2, colors=brewer.pal(6, "Dark2"))
toxic_freq <- colSums(toxic_matrix)
wordcloud(names(toxic_freq), toxic_freq, max.words=100, rot.per=0.2, colors=brewer.pal(6, "Dark2"))
# BUG FIX: previously colSums(toxic_matrix), which made the severe-toxic cloud
# a silent duplicate of the plain toxic one.
severe_toxic_freq <- colSums(severe_toxic_matrix)
wordcloud(names(severe_toxic_freq), severe_toxic_freq, max.words=100, rot.per=0.2, colors=brewer.pal(6, "Dark2"))
obscene_freq <- colSums(obscene_matrix)
wordcloud(names(obscene_freq), obscene_freq, max.words=100, rot.per=0.2, colors=brewer.pal(6, "Dark2"))
threat_freq <- colSums(threat_matrix)
wordcloud(names(threat_freq), threat_freq, max.words=100, rot.per=0.2, colors=brewer.pal(6, "Dark2"))
insult_freq <- colSums(insult_matrix)
wordcloud(names(insult_freq), insult_freq, max.words=100, rot.per=0.2, colors=brewer.pal(6, "Dark2"))
identity_hate_freq <- colSums(identity_hate_matrix)
wordcloud(names(identity_hate_freq), identity_hate_freq, max.words=100, rot.per=0.2, colors=brewer.pal(6, "Dark2"))
#Size of the vocab for each label.
labels <- c("Toxic","Severe Toxic", "Obscene","Threat", "Insult", "Identity Hate")
values <- c(length(toxic_freq),length(severe_toxic_freq),length(obscene_freq), length(threat_freq), length(insult_freq),length(identity_hate_freq))
data <- data.frame(labels, values)
p <- plot_ly(data, x = ~labels, y = ~values,text=values,textposition = 'auto', type = 'bar', name = 'Labels Density')
p
# Top 20 most frequent (stemmed) words per label.
toxic_top <- head(sort(toxic_freq, decreasing=TRUE), 20)
severe_toxic_top <- head(sort(severe_toxic_freq, decreasing=TRUE), 20)
obscene_top <- head(sort(obscene_freq, decreasing=TRUE), 20)
# BUG FIX: previously sorted obscene_freq, so the "threat" top words were a
# duplicate of the obscene ones.
threat_top <- head(sort(threat_freq, decreasing=TRUE), 20)
insult_top <- head(sort(insult_freq, decreasing=TRUE), 20)
identity_hate_top <- head(sort(identity_hate_freq, decreasing=TRUE), 20)
# Bar plot of the most frequent words for one label.
#
# topten:      named numeric vector of word frequencies (name = word),
#              typically head(sort(freq, decreasing = TRUE), n).
# corpus_name: x-axis label identifying the corpus being plotted.
#
# Prints the plot and returns it invisibly so callers can also capture it.
top_words_plot <- function(topten, corpus_name){
  # Build the plotting frame directly from the vector's names instead of the
  # previous reshape2::melt() + row-name round-trip, which silently breaks if
  # the input loses its names.
  dfplot <- data.frame(word = names(topten), value = as.numeric(topten))
  # Order bars by decreasing frequency rather than alphabetically.
  dfplot$word <- factor(dfplot$word,
                        levels = dfplot$word[order(dfplot$value,
                                                   decreasing = TRUE)])
  fig <- ggplot(dfplot, aes(x = word, y = value)) +
    geom_bar(stat = "identity") +
    xlab(corpus_name) +
    ylab("Count")
  print(fig)
  invisible(fig)
}
top_words_plot(toxic_top,"Toxic Comments Top Words")
top_words_plot(severe_toxic_top, "Severe Toxic Comments Top Words")
top_words_plot(obscene_top, "Obscene Comments Top Words")
top_words_plot(threat_top, "Threat Comments Top Words")
top_words_plot(insult_top, "Insult Comments Top Words")
top_words_plot(identity_hate_top, "Identity Hate Comments Top Words")
#Boxplot of number of exclamation mark in comments depending of the label
# Re-subset per label so each *_df picks up the nb_of_excl column added above.
toxic_df <- train_df[train_df$toxic == 1 ,]
severe_toxic_df <- train_df[train_df$severe_toxic == 1 ,]
obscene_df <- train_df[train_df$obscene == 1 ,]
threat_df <- train_df[train_df$threat == 1 ,]
insult_df <- train_df[train_df$insult == 1 ,]
identity_hate_df <- train_df[train_df$identity_hate == 1 ,]
p1=qplot(y=toxic_df$nb_of_excl, x= 1, geom = "boxplot") + geom_boxplot()
p2=qplot(y=severe_toxic_df$nb_of_excl, x= 1, geom = "boxplot") + geom_boxplot()
p3=qplot(y=obscene_df$nb_of_excl, x= 1, geom = "boxplot") + geom_boxplot()
p4=qplot(y=threat_df$nb_of_excl, x= 1, geom = "boxplot") + geom_boxplot()
p5=qplot(y=insult_df$nb_of_excl, x= 1, geom = "boxplot") + geom_boxplot()
# NOTE(review): p6 omits the "+ geom_boxplot()" the other five panels append
# (the extra layer is redundant anyway) -- confirm the asymmetry is intended.
p6=qplot(y=identity_hate_df$nb_of_excl, x= 1, geom = "boxplot")
plot_grid(p1, p2, p3, p4, p5, p6,labels="AUTO")
# NOTE(review): `df` below is never defined in this script (and no
# variable/value/Label columns exist), so this line errors if run; presumably
# a melted version of train_df was intended -- confirm or remove.
ggplot(data = df, aes(x=variable, y=value)) + geom_boxplot(aes(fill=Label))
#THE END
#THE END
#THE END
library(NLP)
library(tm)
library(SnowballC)
library(cluster)
library(factoextra)
library(NbClust)
library(wordcloud)
library(plotly)
library(caret)
library(corrplot)
library(reshape2)
library(stringr)
library(cowplot)
rm(list=ls())
#Setting working directory
setwd("/Users/martinberger/Desktop/DPA/T3C_PROJECT/T3C")
#Loading the data
original_train <- read.csv("DATA/train.csv", sep=",", header=T)
# BUG FIX: the test set was previously read from train.csv (copy-paste slip),
# so original_test was an exact duplicate of original_train.
# NOTE(review): confirm DATA/test.csv exists and shares the training layout.
original_test <- read.csv("DATA/test.csv", sep=",", header=T)
#Creating a data frame
train_df <- data.frame(original_train)
train_df<-train_df[,-1]
#A quick glance to the data
summary(train_df)
#Labels distributions
nb_none <-sum(train_df$toxic==0 & train_df$severe_toxic==0 & train_df$obscene==0 & train_df$threat==0 & train_df$insult==0 &train_df$identity_hate==0)
labels <- c("Toxic","Severe Toxic", "Obscene","Threat", "Insult", "Identity Hate", "None")
values <- c(sum(train_df$toxic==1),sum(train_df$severe_toxic==1),sum(train_df$obscene==1),sum(train_df$threat==1),
sum(train_df$insult==1),sum(train_df$identity_hate==1), nb_none)
data <- data.frame(labels, values)
p <- plot_ly(data, x = ~labels, y = ~values,text=values,textposition = 'auto', type = 'bar', name = 'Labels Density')
p
#Which proportion of comments carry a given number of tags?
train_df <-data.frame(train_df, rowSums(train_df[,2:7]))
colnames(train_df)[colnames(train_df)=="rowSums.train_df...2.7.."] <- "label_nb"
# Count how many comments carry 0..6 labels (column 8 is label_nb, the row sum
# of the six label columns added above).
nb_tag_none <- sum(train_df$toxic==0 & train_df$severe_toxic==0 & train_df$obscene==0 & train_df$threat==0 & train_df$insult==0 & train_df$identity_hate==0)
labels <- c("0","1", "2","3", "4", "5", "6")
occurences <- c(nb_tag_none, sum(train_df[,8]==1), sum(train_df[,8]==2), sum(train_df[,8]==3), sum(train_df[,8]==4), sum(train_df[,8]==5), sum(train_df[,8]==6))
# BUG FIX: previously data.frame(labels, values) reused the stale `values`
# vector from the label-density plot; the plotted `occurences` belong in the frame.
data <- data.frame(labels, occurences)
p <- plot_ly(data, x = ~labels, y = ~occurences, text=occurences, textposition = 'auto', type = 'bar', name = 'Labels Density')
p
# Count exclamation marks per comment (used later as an intensity feature).
# BUG FIX: the previous pattern "$" is a regex end-of-string anchor, so
# str_count() returned exactly 1 for every comment instead of counting "!".
# Naming the column directly also removes the fragile rename that matched the
# auto-generated "str_count.train_df.comment_text......" name.
train_df <- data.frame(train_df, nb_of_excl = str_count(train_df$comment_text, fixed("!")))
#Correlation between toxic comments
summary(train_df)
corr_df<-train_df[!train_df$label_nb == 0 ,]
summary(corr_df)
corr_df<-corr_df[,-8]
corr_df<-corr_df[,-1]
summary(corr_df)
nrow(corr_df)
corrplot(cor(corr_df[]), method = "number")
#Creating smaller df depending on label to analyse them
#clean_df <- train_df[train_df$label_nb == 0 ,]
toxic_df <- train_df[train_df$toxic == 1 ,]
severe_toxic_df <- train_df[train_df$severe_toxic == 1 ,]
obscene_df <- train_df[train_df$obscene == 1 ,]
threat_df <- train_df[train_df$threat == 1 ,]
insult_df <- train_df[train_df$insult == 1 ,]
identity_hate_df <- train_df[train_df$identity_hate == 1 ,]
#Creation of a corpus for each label
#clean_corpus <-Corpus(VectorSource(clean_df[,1]))
toxic_corpus <- Corpus(VectorSource(toxic_df[,1]))
severe_toxic_corpus <- Corpus(VectorSource(severe_toxic_df[,1]))
obscene_corpus <- Corpus(VectorSource(obscene_df[,1]))
threat_corpus <- Corpus(VectorSource(threat_df[,1]))
insult_corpus <- Corpus(VectorSource(insult_df[,1]))
identity_hate_corpus <- Corpus(VectorSource(identity_hate_df[,1]))
#Cleaning each corpus
# Apply a standard text-normalisation pipeline to a tm corpus:
# lower-case, strip punctuation, collapse whitespace, drop digits,
# remove English stop words, then stem. Returns the cleaned corpus.
basic_text_cleaner <- function(corpus){
  cleaning_steps <- list(
    function(x) tm_map(x, content_transformer(tolower)),
    function(x) tm_map(x, removePunctuation),
    function(x) tm_map(x, stripWhitespace),
    function(x) tm_map(x, removeNumbers),
    function(x) tm_map(x, removeWords, stopwords("english")),
    function(x) tm_map(x, stemDocument)
  )
  # Apply the transformations in the same order as the original pipeline.
  for (step in cleaning_steps) {
    corpus <- step(corpus)
  }
  corpus
}
#clean_corpus <- basic_text_cleaner(clean_corpus)
toxic_corpus <- basic_text_cleaner(toxic_corpus)
severe_toxic_corpus <- basic_text_cleaner(severe_toxic_corpus)
obscene_corpus <- basic_text_cleaner(obscene_corpus)
threat_corpus <- basic_text_cleaner(threat_corpus)
insult_corpus <- basic_text_cleaner(insult_corpus)
identity_hate_corpus <- basic_text_cleaner(identity_hate_corpus)
#Creation of a DTM for each corpus
#clean_dtm <- DocumentTermMatrix(clean_corpus, control = list(weighting = weightTfIdf))
toxic_dtm <- DocumentTermMatrix(toxic_corpus, control = list(weighting = weightTfIdf))
severe_toxic_dtm <- DocumentTermMatrix(severe_toxic_corpus, control = list(weighting = weightTfIdf))
obscene_dtm <- DocumentTermMatrix(obscene_corpus, control = list(weighting = weightTfIdf))
threat_dtm <- DocumentTermMatrix(threat_corpus, control = list(weighting = weightTfIdf))
insult_dtm <- DocumentTermMatrix(insult_corpus, control = list(weighting = weightTfIdf))
identity_hate_dtm <- DocumentTermMatrix(identity_hate_corpus, control = list(weighting = weightTfIdf))
#Transforming each DTM into a matrix
#clean_matrix <- as.matrix(clean_dtm)
toxic_matrix <- as.matrix(toxic_dtm)
severe_toxic_matrix <- as.matrix(severe_toxic_dtm)
obscene_matrix <- as.matrix(obscene_dtm)
threat_matrix <- as.matrix(threat_dtm)
insult_matrix <- as.matrix(insult_dtm)
identity_hate_matrix <- as.matrix(identity_hate_dtm)
#Creating a wordcloud for each type (term frequencies = column sums of each TF-IDF DTM)
#clean_freq <- colSums(clean_matrix)
#wordcloud(names(clean_freq), clean_freq, max.words=100, rot.per=0.2, colors=brewer.pal(6, "Dark2"))
toxic_freq <- colSums(toxic_matrix)
wordcloud(names(toxic_freq), toxic_freq, max.words=100, rot.per=0.2, colors=brewer.pal(6, "Dark2"))
# BUG FIX: previously colSums(toxic_matrix), which made the severe-toxic cloud
# a silent duplicate of the plain toxic one.
severe_toxic_freq <- colSums(severe_toxic_matrix)
wordcloud(names(severe_toxic_freq), severe_toxic_freq, max.words=100, rot.per=0.2, colors=brewer.pal(6, "Dark2"))
obscene_freq <- colSums(obscene_matrix)
wordcloud(names(obscene_freq), obscene_freq, max.words=100, rot.per=0.2, colors=brewer.pal(6, "Dark2"))
threat_freq <- colSums(threat_matrix)
wordcloud(names(threat_freq), threat_freq, max.words=100, rot.per=0.2, colors=brewer.pal(6, "Dark2"))
insult_freq <- colSums(insult_matrix)
wordcloud(names(insult_freq), insult_freq, max.words=100, rot.per=0.2, colors=brewer.pal(6, "Dark2"))
identity_hate_freq <- colSums(identity_hate_matrix)
wordcloud(names(identity_hate_freq), identity_hate_freq, max.words=100, rot.per=0.2, colors=brewer.pal(6, "Dark2"))
#Size of the vocab for each label.
labels <- c("Toxic","Severe Toxic", "Obscene","Threat", "Insult", "Identity Hate")
values <- c(length(toxic_freq),length(severe_toxic_freq),length(obscene_freq), length(threat_freq), length(insult_freq),length(identity_hate_freq))
data <- data.frame(labels, values)
p <- plot_ly(data, x = ~labels, y = ~values,text=values,textposition = 'auto', type = 'bar', name = 'Labels Density')
p
# Top 20 most frequent (stemmed) words per label.
toxic_top <- head(sort(toxic_freq, decreasing=TRUE), 20)
severe_toxic_top <- head(sort(severe_toxic_freq, decreasing=TRUE), 20)
obscene_top <- head(sort(obscene_freq, decreasing=TRUE), 20)
# BUG FIX: previously sorted obscene_freq, so the "threat" top words were a
# duplicate of the obscene ones.
threat_top <- head(sort(threat_freq, decreasing=TRUE), 20)
insult_top <- head(sort(insult_freq, decreasing=TRUE), 20)
identity_hate_top <- head(sort(identity_hate_freq, decreasing=TRUE), 20)
# Bar plot of the most frequent words for one label.
#
# topten:      named numeric vector of word frequencies (name = word),
#              typically head(sort(freq, decreasing = TRUE), n).
# corpus_name: x-axis label identifying the corpus being plotted.
#
# Prints the plot and returns it invisibly so callers can also capture it.
top_words_plot <- function(topten, corpus_name){
  # Build the plotting frame directly from the vector's names instead of the
  # previous reshape2::melt() + row-name round-trip, which silently breaks if
  # the input loses its names.
  dfplot <- data.frame(word = names(topten), value = as.numeric(topten))
  # Order bars by decreasing frequency rather than alphabetically.
  dfplot$word <- factor(dfplot$word,
                        levels = dfplot$word[order(dfplot$value,
                                                   decreasing = TRUE)])
  fig <- ggplot(dfplot, aes(x = word, y = value)) +
    geom_bar(stat = "identity") +
    xlab(corpus_name) +
    ylab("Count")
  print(fig)
  invisible(fig)
}
top_words_plot(toxic_top,"Toxic Comments Top Words")
top_words_plot(severe_toxic_top, "Severe Toxic Comments Top Words")
top_words_plot(obscene_top, "Obscene Comments Top Words")
top_words_plot(threat_top, "Threat Comments Top Words")
top_words_plot(insult_top, "Insult Comments Top Words")
top_words_plot(identity_hate_top, "Identity Hate Comments Top Words")
#Boxplot of number of exclamation mark in comments depending of the label
toxic_df <- train_df[train_df$toxic == 1 ,]
severe_toxic_df <- train_df[train_df$severe_toxic == 1 ,]
obscene_df <- train_df[train_df$obscene == 1 ,]
threat_df <- train_df[train_df$threat == 1 ,]
insult_df <- train_df[train_df$insult == 1 ,]
identity_hate_df <- train_df[train_df$identity_hate == 1 ,]
p1=qplot(y=toxic_df$nb_of_excl, x= 1, geom = "boxplot") + geom_boxplot()
p2=qplot(y=severe_toxic_df$nb_of_excl, x= 1, geom = "boxplot") + geom_boxplot()
p3=qplot(y=obscene_df$nb_of_excl, x= 1, geom = "boxplot") + geom_boxplot()
p4=qplot(y=threat_df$nb_of_excl, x= 1, geom = "boxplot") + geom_boxplot()
p5=qplot(y=insult_df$nb_of_excl, x= 1, geom = "boxplot") + geom_boxplot()
p6=qplot(y=identity_hate_df$nb_of_excl, x= 1, geom = "boxplot")
plot_grid(p1, p2, p3, p4, p5, p6,labels="AUTO")
ggplot(data = df, aes(x=variable, y=value)) + geom_boxplot(aes(fill=Label))
#THE END
#THE END
#THE END |
# =================== Experimental Optimisation Procedure across various scenarios [Parallelized]
# This interactively-run experimental script uses the output from a single forecasting technique
# (specified via a pointer to a particular receipt matrix), as parametrised in a certain way,
# along with using an accompanying delinquency matrix (computed on the now-completed portfolio),
# to assess the portfolio loss across all specified thresholds, using 3 pre-selected delinquency measures (g1, g2, g3).
# The various ways in which this script was run is recorded in the comments below.
# Direct dependencies include: 2.1b, 2.1c, 2.2a, 2.2b
# Interactive dependencies include forecast receipt matrices that are all created experimentally in:
# - Random defaults: 2.2c receipt forecasting
# - Markovian defaults: 2.3 Estimating MLEs for transition rates and subsequent receipt forecasting
# A NOTE ON vec.Consider: This vector is interactively reset in this script as an artificial proxy for having different loan portfolios
# It was previous set to exclude closed cases when forecasting delinquency.
# ====== 0a. Initializing some parameters
# -- toggle the following matrices to specific versions created from data
mat.Instal.Use <- mat.Instal.Treated
mat.IntRate.Use <- mat.IntRates.Treated
vec.Maturity.Use <- vec.Maturity # observed loan tenures
vec.Mat.Use <- vec.Term.Treated # contractual term
# -- Pointer to a specific forecast matrix, selected from a wider experimental basis
#mat.Receipt.Use <- (mat.ReceiptAlt) # untreated receipt matrix
# - Index of experiments: uncomment one
#### RANDOM DEFAULTS TECHNIQUE [defined in 2.2c ]
####### Using Exp distribution on S2 (delinquents)
mat.Receipt.Use <- mat.ReceiptAlt.Treated7j # for v2_5j(i-iii) (repayment probability estimated from full sample; truncation parameter k drawn randomly from Exponential distribution fitted on Max_CDAlt from delinquents-only sample)
#mat.Receipt.Use <- mat.ReceiptAlt.Treated7k # for v2_5k(i-iii) (repayment probability estimated from delinquents-only sample; truncation parameter k drawn randomly from Exponential distribution fitted on Max_CDAlt from delinquents-only sample)
#mat.Receipt.Use <- mat.ReceiptAlt.Treated7l # for v2_5l(i-iii) (repayment probability estimated from write-offs sample; truncation parameter k drawn randomly from Exponential distribution fitted on Max_CDAlt from delinquents-only sample)
####### Using Weibull distribution on S3 (write-offs)
#mat.Receipt.Use <- mat.ReceiptAlt.Treated7d # for v2_5d(i-vi) (repayment probability estimated from full sample; truncation parameter k drawn randomly from Weibull distribution fitted on Max_CDAlt from write-offs-only sample)
#mat.Receipt.Use <- mat.ReceiptAlt.Treated7e # for v2_5e(i-ix) (repayment probability estimated from delinquents-only sample; truncation parameter k drawn randomly from Weibull distribution fitted on Max_CDAlt from write-offs-only sample)
#mat.Receipt.Use <- mat.ReceiptAlt.Treated7f # for v2_5f(i-ix) (repayment probability estimated from write-offs sample; with truncation parameter k drawn randomly from Weibull distribution fitted on Max_CDAlt from write-offs-only sample)
#### MARKOVIAN DEFAULTS TECHNIQUE [defined in 2.3]
#mat.Receipt.Use <- mat.ReceiptAlt.Treated8a # for v2_6a(i-iii) (treated with multi-state Markovian defaults technique with parameter estimates from full sample)
#mat.Receipt.Use <- mat.ReceiptAlt.Treated8b # for v2_6b(i-iii) (treated with multi-state Markovian defaults technique with parameter estimates from delinquents-only sample)
#mat.Receipt.Use <- mat.ReceiptAlt.Treated8c # for v2_6c(i-iii) (treated with multi-state Markovian defaults technique with parameter estimates from write-offs-only sample)
# ====== 0b. Calculate Delinquency Measures up to full contractual term, using forecast receipts
# -- Calculate CD (g1: Contractual Delinquency)
mat.CD.Use <- calculate.CD.forData(mat.Instal.Use, mat.Receipt.Use, sc.Thres, period.term, n, method="base")
# -- Calculate MD/DoD (g2/g3: Macaulay Duration Index (MD) Measure | Degree of Delinquency (DoD) Measure)
calc.results <- calculate.MDoD.forData(mat.Instal.Use, mat.Receipt.Use, vec.LoanAmt, vec.Mat.Use,
n, mat.IntRate.Use, vec.DoD.lambda)
mat.MD.Use <- calc.results$MD
mat.DoD.Use <- calc.results$DoD
rm(calc.results) #an optimization, reduces memory usage
# ====== 0c. Loss assessment, risk profile selection, and iteration parameters for subsequent optimisation
# -- toggle sampling for loss optimisation (i: full sample [Lowest risk]; ii; delinquents-only [Medium risk]; iii: write-offs-only [Highest risk])
# This is an artificial proxy for having portfolios on with different risk profiles, on which we may optimise the recovery decision
vec.Consider <- rep(1,n) # -- i: switches to indicate use full sample
#vec.Consider[which(vec.Del.Ind==0)] <- 0 # -- ii: only consider delinquent (including write-offs) loans, by switching off the rest
#vec.Consider[which(vec.Woff==0)] <- 0 # -- iii: only consider those written-off loans, by switching off the rest
# - script saving options (may overwrite previously saved data if not careful)
inner.name <- "LossThresh" # experiment theme name
it.name <- "v2_5j(i)-excl_closed" #iteration name
plot.name <- paste0(inner.name, it.name) # full name
# -- Iteration Parameter
num.thresholds <-168; #number of delinquency thresholds (essentially the number of times that loss is calculated)
it.vec <- 1:num.thresholds # iteration vector
# - General parameters
it.max <- NROW(it.vec)
first.iter <- T
# -- Interest Rates & Loss Rates: Specification & Calculation
i.alt <- 0.07; #risk-free rate (effective rate)
i_p.alt <- ((1+i.alt)^(1/12) - 1)*12; #risk-free rate (nominal rate)
Arrears.LossRate <- 0.7;
Outstanding.LossRate <- 0.4;
# ====== 1. Select thresholds for each Delinquency Measure
# NOTE: Some of the ranges of chosen thresholds may need to be tweaked, especially for MD and Dod measures,
# depending on the chosen portfolio on which optimisation is performed, as well as the risk level underyling receipt forecasts.
# Failure to tweak may lead to false conclusions and/or local optima in results.
# Discretionary tweaking itself is currently performed on a trial-and-error basis of running the optimisation multiple times using different ranges
# and trying to isolate the 'neighbourhood' where loss optima seemingly occurs.
# -- CD
vec.k.CD <- seq(0, (num.thresholds-1)) # chose all integer-valued thresholds, no tweaking necessary
# -- MD
# Cap the threshold range at the 98.5th percentile of observed MD values (+1)
# so the grid is not stretched by extreme outliers.
max.thres <- max(quantile(mat.MD.Use[!is.na(mat.MD.Use)], 0.985)) + 1
vec.k.MD <- seq(1, ifelse(is.na(max.thres), 5, max(max.thres, 5)),length.out = num.thresholds) # normal selection
# NOTE: the line below deliberately overwrites the "normal selection" above --
# the densified 1-2.5 range is the active tweak for full sample/delinquents.
vec.k.MD <- c(seq(1, 2.5, length.out = 50),seq(2.51, max.thres, length.out = num.thresholds-50)) # for full sample/delinquents
#vec.k.MD <- c(seq(1, 4.5, length.out = 50),seq(4.51, max.thres, length.out = num.thresholds-50)) # for write-offs
#plot(vec.k.MD)
# -- DoD
# Same percentile cap, computed on the DoD matrix (max.thres is reused/overwritten).
max.thres <- max(quantile(mat.DoD.Use[!is.na(mat.DoD.Use)], 0.985)) + 1
vec.k.DoD <- seq(1, ifelse(is.na(max.thres), 5, max(max.thres, 5)),length.out = num.thresholds) # normal selection
# NOTE: as for MD, the "normal selection" above is deliberately overwritten here.
vec.k.DoD <- c(seq(1, 2.5, length.out = 50),seq(2.51, max.thres, length.out = num.thresholds-50)) # for full sample/delinquents
#vec.k.DoD <- c(seq(1, 4.5, length.out = 50),seq(4.51, max.thres, length.out = num.thresholds-50)) # for write-offs
#plot(vec.k.DoD)
# ====== 2. LOSS ASSESSMENT: Iterative function definitions (to be run in parallel)
# - main function for assessing the portfolio loss at a specified threshold (one for each of the g1, g2, g3 delinquency measures)
# Assess the portfolio loss at one set of thresholds (d.CD, d.MD, d.DoD), one threshold per
# delinquency measure (g1: CD, g2: MD, g3: DoD). Returns a three-row data.table with volumes,
# balances, and total PV of losses per measure.
# NOTE: i_p.alt, Arrears.LossRate, Outstanding.LossRate and vec.Consider are resolved from the
# enclosing/global environment, exactly as in the original (triplicated) implementation.
coreJob <- function(vec.Maturity.Use, vec.Mat.Use, mat.Receipt.Use, mat.Instal.Use, mat.IntRate.Use, sc.Thres, period.term, n, vec.LoanAmt,
                    vec.DoD.lambda, it, num.thresholds, d.CD, d.MD, d.DoD, mat.CD.Use, mat.MD.Use, mat.DoD.Use) {
  cat(paste0("\n 1)[",it," of ",num.thresholds,"] Loss assessments .. "),
      file="assesslog.txt", append=TRUE)
  # ---- Internal helpers, shared across the three measures (replaces the former triplicated code)
  # - PV (at origination) of receipts received up to account-level maturity t.vec[i],
  #   discounted monthly at the risk-free nominal rate
  pv.receipts <- function(t.vec) {
    vapply(seq_len(n), function(i) {
      if (t.vec[i] > 0) {
        sum( mat.Receipt.Use[1:t.vec[i], i] * (1+i_p.alt/12)^(-1*1:(t.vec[i])) )
      } else {
        0
      }
    }, numeric(1))
  }
  # - PV of arrears: PV of instalments due up to t.vec[i], less the PV of receipts (rec.pv)
  pv.arrears <- function(rec.pv, t.vec) {
    vapply(seq_len(n), function(i) {
      if (t.vec[i] > 0) {
        sum( mat.Instal.Use[1:t.vec[i], i] * (1+i_p.alt/12)^(-1*1:(t.vec[i])) ) - rec.pv[i]
      } else {
        0
      }
    }, numeric(1))
  }
  # - expected remaining balance at t.vec[i]: PV of outstanding instalments, discounted at each
  #   account's own monthly rate, then discounted back to origination at the risk-free rate
  pv.expbalance <- function(t.vec) {
    vapply(seq_len(n), function(i) {
      if (t.vec[i] < vec.Mat.Use[i]) {
        val <- sum( mat.Instal.Use[(t.vec[i]+1):vec.Mat.Use[i], i] *
                      (1+mat.IntRate.Use[(t.vec[i]+1):vec.Mat.Use[i], i]/12)^(-1*1:(vec.Mat.Use[i]-t.vec[i])) )
      } else {
        val <- 0
      }
      # discount to origination
      val * (1+i_p.alt/12)^(-1*t.vec[i])
    }, numeric(1))
  }
  # - full (g,d)-loss assessment for one delinquency measure; returns a one-row data.table
  assess.measure <- function(measure, measure.name, thres.d, del.mat) {
    # default start times of the first default episode (if multiple exist), given the threshold;
    # -1 indicates a performing loan. Uses custom function default.start.first.v2()
    vec.def.start <- sapply(seq_len(n), default.start.first.v2, thres.d=thres.d, del.mat=del.mat, t=vec.Mat.Use)
    # (g,d)-defaulting and (g,d)-performing account indices, restricted to the considered sample
    def.idx <- which(vec.def.start >= 0 & vec.Consider==1)
    perf.idx <- which(vec.def.start < 0 & vec.Consider==1)
    # final maturity: contractual term, or default time for (g,d)-defaulting accounts,
    # for use in discounting and the other loss calculations
    vec.mat <- copy(vec.Mat.Use)
    vec.mat[def.idx] <- vec.def.start[def.idx]
    vec.rec <- pv.receipts(vec.mat)
    vec.arr <- pv.arrears(vec.rec, vec.mat)
    vec.expb <- pv.expbalance(vec.mat)
    # losses: weighted combination of arrears and expected balance, floored at zero
    vec.loss <- pmax(vec.arr*Arrears.LossRate + vec.expb*Outstanding.LossRate, 0)
    # actual balance [ancillary information]
    vec.bal <- pmax(vec.arr + vec.expb, 0)
    # curate for reporting: blank out balances and zero losses for accounts excluded by sampling
    vec.bal[which(vec.Consider==0)] <- NA
    vec.loss[which(vec.Consider==0)] <- 0
    data.table(Measure=measure, MeasureName=measure.name, Threshold=thres.d,
               Vol_Perf=length(perf.idx), Vol_Def=length(def.idx),
               Bal_Perf = sum(vec.bal[perf.idx], na.rm = T), Bal_Def = sum(vec.bal[def.idx], na.rm = T),
               Loss=sum(vec.loss, na.rm = T))
  }
  # ---------- Concatenate the per-measure assessments for optimisation
  dat.EL.core <- rbind(assess.measure("CD", "g1: CD", d.CD, mat.CD.Use),
                       assess.measure("MD", "g2: MD", d.MD, mat.MD.Use),
                       assess.measure("DoD", "g3: DoD", d.DoD, mat.DoD.Use))
  cat(paste0("\n\t 2)[",it," of ",num.thresholds,"] Loss assessed! "),
      file="assesslog.txt", append=TRUE)
  return (dat.EL.core)
}
# ====== 3. OPTIMISATION PROCEDURE: Iterating across chosen thresholds (g1, g2, g3 delinquency measures)
ptm <- proc.time() #IGNORE: for computation time calculation
# - start a fresh log file (append=FALSE truncates any previous run's log)
cat(paste("New Job: Assessing delinquency and profitability of given portfolio across various thresholds",sep=''),
    file="assesslog.txt", append=FALSE)
# -- parallelization parameters
cl.port<-makeCluster(6) # number of threads to register in the OS for this procedure
registerDoParallel(cl.port)
# using foreach() from foreach package for distributing loop iterations across registered threads: remarkable improvement in run time
# Note: need to import all custom functions used within the loss assessment.
# .inorder=F: row order of results is not guaranteed — the setDT() keying below restores a deterministic order
dat.EL.par <- foreach(it=1:it.max, .combine='rbind', .verbose=T, .inorder=F, .packages ='data.table',
                      .export=c('default.start.first.v2', 'coreJob')) %dopar%
  {
    # one loss assessment per iteration: the it-th threshold from each measure's threshold vector
    dat.EL.core <- coreJob(vec.Maturity.Use=vec.Maturity.Use, vec.Mat.Use=vec.Mat.Use,
                           mat.Receipt.Use=mat.Receipt.Use, mat.Instal.Use=mat.Instal.Use, mat.IntRate.Use=mat.IntRate.Use,
                           sc.Thres=sc.Thres, period.term=period.term, n=n, vec.LoanAmt=vec.LoanAmt, vec.DoD.lambda=vec.DoD.lambda,
                           it=it, num.thresholds=num.thresholds,
                           d.CD=vec.k.CD[it], d.MD=vec.k.MD[it], d.DoD=vec.k.DoD[it],
                           mat.CD.Use=mat.CD.Use, mat.MD.Use=mat.MD.Use, mat.DoD.Use=mat.DoD.Use)
  }
stopCluster(cl.port) # release threads back to the OS
# - last data preparation: key by measure and threshold for fast, ordered lookups
setDT(dat.EL.par, key=c("Measure","Threshold"))
# - zip and save optimisation results to disk
# NOTE(review): pack.ffdf appears to be a custom save helper defined elsewhere in this project — confirm
pack.ffdf(paste0("EL",it.name),dat.EL.par)
# =========== OPTIMISATION RESULTS: Isolated optima and graphs
# -- Balance and Volume graphs across chosen threshold range, using g1-measure [CD]
# just for inspection purposes
ex.g <- "CD"
# - proportionate performing vs. defaulting balances across thresholds
toplot <- gather(dat.EL.par[Measure==ex.g, list(Threshold, Bal_Perf, Bal_Def)], key=Type, value=Value, -Threshold)
label.vec <- c("Defaulting Balance", "Performing Balance")
# FIX: assign the plot to g.Bal — previously the plot was only auto-printed, so the
# ggsave(g.Bal, ...) call below failed on an undefined object
g.Bal <- ggplot(toplot, aes(x=Threshold, group=Type)) + theme_minimal() +
  geom_bar(aes(x = Threshold, y=Value, fill = Type), position="fill", stat="identity") +
  scale_y_continuous(breaks=pretty_breaks(), labels=percent) + theme(legend.position = "bottom") +
  labs(y="Proportionate Balances (%)",x=paste0("Default Threshold (",ex.g,")")) +
  scale_fill_manual(values=c("paleturquoise4","paleturquoise"),labels=label.vec, name="")
g.Bal # display for inspection
# - proportionate performing vs. defaulting volumes across thresholds
toplot <- gather(dat.EL.par[Measure==ex.g,list(Threshold, Vol_Perf, Vol_Def)], key=Type, value=Value, -Threshold)
label.vec <- c("Defaulting Volume", "Performing Volume")
# FIX: likewise assign to g.Vol for the ggsave() call below
g.Vol <- ggplot(toplot, aes(x=Threshold, group=Type)) + theme_minimal() +
  geom_bar(aes(x = Threshold, y=Value, fill = Type), position="fill", stat="identity") +
  scale_y_continuous(breaks=pretty_breaks(), labels=percent) + theme(legend.position = "bottom") +
  labs(y="Proportionate Volume (%)",x=paste0("Default Threshold (",ex.g,")")) +
  scale_fill_manual(values=c("paleturquoise4","paleturquoise"),labels=label.vec, name="")
g.Vol # display for inspection
# save graphs to disk
dpi <- 110
ggsave(g.Bal, file=paste0("EL-Balances-",it.name,".png"),width=600/dpi, height=450/dpi,dpi=dpi)
ggsave(g.Vol, file=paste0("EL-Volumes-",it.name,".png"),width=600/dpi, height=450/dpi,dpi=dpi)
# -- Optimisation results across chosen threshold range, by delinquency measure
# Scatter-plus-line of the PV of losses against thresholds, coloured/shaped by measure,
# with the y-axis labelled in billions
g <- ggplot(dat.EL.par, aes(x=Threshold, y=Loss)) +
  geom_point(aes(colour=MeasureName, shape=MeasureName), size=1.5) +
  geom_line(aes(colour=MeasureName), size=1) +
  theme_minimal() +
  theme(text=element_text(family="Calibri", size=12),
        legend.position="bottom") +
  labs(y="PV of Losses (R)", x="Thresholds (w)") +
  scale_color_economist(name="Delinquency Measure", guide=guide_legend(ncol=2)) +
  scale_shape_manual(values=c(1,16,8),
                     name="Delinquency Measure", guide=guide_legend(ncol=2)) +
  scale_y_continuous(breaks=pretty_breaks(), labels=unit_format(unit="b", scale=1e-9))
g
# -- Optimisation results across chosen threshold range, for the g1_measure [CD] only
# Same layout as the combined plot above, restricted to the CD measure
toplot <- dat.EL.par[Measure==ex.g, list(MeasureName, Threshold, Loss)]
g.CDOnly <- ggplot(toplot, aes(x=Threshold, y=Loss)) +
  geom_point(aes(colour=MeasureName, shape=MeasureName), size=1.5) +
  geom_line(aes(colour=MeasureName), size=1) +
  theme_minimal() +
  theme(text=element_text(family="Calibri", size=12),
        legend.position="bottom") +
  labs(y="PV of Losses (R)", x="Thresholds (w)") +
  scale_color_economist(name="Delinquency Measure", guide=guide_legend(ncol=2)) +
  scale_shape_manual(values=c(1,16,8),
                     name="Delinquency Measure", guide=guide_legend(ncol=2)) +
  scale_y_continuous(breaks=pretty_breaks(), labels=unit_format(unit="b", scale=1e-9))
g.CDOnly
# save graphs to disk
# NOTE: these use a hard-coded 100 dpi, differing from the 110 dpi used for the balance/volume graphs above
ggsave(g, file=paste0(plot.name,".png"),width=600/100, height=450/100,dpi=100)
ggsave(g.CDOnly, file=paste0(plot.name,"-CD.png"),width=600/100, height=450/100,dpi=100)
# -- Loss-optimal thresholds found across chosen threshold range, by delinquency measure
# Prints, for each measure, the minimum loss, the threshold at which it occurs, and its position
# in the threshold vector.
# given: data.table with (at least) columns Measure, MeasureName, Threshold, Loss
minima <- function(given) {
  measures <- c("CD", "MD", "DoD")
  for (m in measures) {
    dat.min <- given[Measure==m, list(MeasureName, Threshold, Loss)]
    # first position of the minimum loss; which.min() is equivalent to the former
    # Position()-based scan but also robust to NA losses
    min.pos <- which.min(dat.min$Loss)
    # prefix every measure after the first with a newline, matching the original output exactly
    cat(paste0(if (m == measures[1]) "" else "\n", m, ": Minimum Loss of"), comma(min(dat.min$Loss)),
        "at threshold d =", dat.min[min.pos, Threshold], "at position", min.pos, "in threshold vector")
  }
}
# - report the loss-optimal thresholds for the completed optimisation run
minima(dat.EL.par)
proc.time() - ptm #IGNORE: computation time taken
| /3.2 Loss Optimisation Procedure [Parallelized].R | permissive | arnobotha/The-loss-optimisation-of-loan-recovery-decision-times-using-forecast-cash-flows | R | false | false | 21,910 | r | # =================== Experimental Optimisation Procedure across various scenarios [Parallelized]
# This interactively-run experimental script uses the output from a single forecasting technique
# (specified via a pointer to a particular receipt matrix), as parametrised in a certain way,
# along with using an accompanying delinquency matrix (computed on the now-completed portfolio),
# to assess the portfolio loss across all specified thresholds, using 3 pre-selected delinquency measures (g1, g2, g3).
# The various ways in which this script was run is recorded in the comments below.
# Direct dependencies include: 2.1b, 2.1c, 2.2a, 2.2b
# Interactive dependencies include forecast receipt matrices that are all created experimentally in:
# - Random defaults: 2.2c receipt forecasting
# - Markovian defaults: 2.3 Estimating MLEs for transition rates and subsequent receipt forecasting
# A NOTE ON vec.Consider: This vector is interactively reset in this script as an artificial proxy for having different loan portfolios
# It was previous set to exclude closed cases when forecasting delinquency.
# ====== 0a. Initializing some parameters
# -- toggle the following matrices to specific versions created from data
mat.Instal.Use <- mat.Instal.Treated
mat.IntRate.Use <- mat.IntRates.Treated
vec.Maturity.Use <- vec.Maturity # observed loan tenures
vec.Mat.Use <- vec.Term.Treated # contractual term
# -- Pointer to a specific forecast matrix, selected from a wider experimental basis
#mat.Receipt.Use <- (mat.ReceiptAlt) # untreated receipt matrix
# - Index of experiments: uncomment one
#### RANDOM DEFAULTS TECHNIQUE [defined in 2.2c ]
####### Using Exp distribution on S2 (delinquents)
mat.Receipt.Use <- mat.ReceiptAlt.Treated7j # for v2_5j(i-iii) (repayment probability estimated from full sample; truncation parameter k drawn randomly from Exponential distribution fitted on Max_CDAlt from delinquents-only sample)
#mat.Receipt.Use <- mat.ReceiptAlt.Treated7k # for v2_5k(i-iii) (repayment probability estimated from delinquents-only sample; truncation parameter k drawn randomly from Exponential distribution fitted on Max_CDAlt from delinquents-only sample)
#mat.Receipt.Use <- mat.ReceiptAlt.Treated7l # for v2_5l(i-iii) (repayment probability estimated from write-offs sample; truncation parameter k drawn randomly from Exponential distribution fitted on Max_CDAlt from delinquents-only sample)
####### Using Weibull distribution on S3 (write-offs)
#mat.Receipt.Use <- mat.ReceiptAlt.Treated7d # for v2_5d(i-vi) (repayment probability estimated from full sample; truncation parameter k drawn randomly from Weibull distribution fitted on Max_CDAlt from write-offs-only sample)
#mat.Receipt.Use <- mat.ReceiptAlt.Treated7e # for v2_5e(i-ix) (repayment probability estimated from delinquents-only sample; truncation parameter k drawn randomly from Weibull distribution fitted on Max_CDAlt from write-offs-only sample)
#mat.Receipt.Use <- mat.ReceiptAlt.Treated7f # for v2_5f(i-ix) (repayment probability estimated from write-offs sample; with truncation parameter k drawn randomly from Weibull distribution fitted on Max_CDAlt from write-offs-only sample)
#### MARKOVIAN DEFAULTS TECHNIQUE [defined in 2.3]
#mat.Receipt.Use <- mat.ReceiptAlt.Treated8a # for v2_6a(i-iii) (treated with multi-state Markovian defaults technique with parameter estimates from full sample)
#mat.Receipt.Use <- mat.ReceiptAlt.Treated8b # for v2_6b(i-iii) (treated with multi-state Markovian defaults technique with parameter estimates from delinquents-only sample)
#mat.Receipt.Use <- mat.ReceiptAlt.Treated8c # for v2_6c(i-iii) (treated with multi-state Markovian defaults technique with parameter estimates from write-offs-only sample)
# ====== 0b. Calculate Delinquency Measures up to full contractual term, using forecast receipts
# -- Calculate CD (g1: Contractual Delinquency)
mat.CD.Use <- calculate.CD.forData(mat.Instal.Use, mat.Receipt.Use, sc.Thres, period.term, n, method="base")
# -- Calculate MD/DoD (g2/g3: Macaulay Duration Index (MD) Measure | Degree of Delinquency (DoD) Measure)
calc.results <- calculate.MDoD.forData(mat.Instal.Use, mat.Receipt.Use, vec.LoanAmt, vec.Mat.Use,
n, mat.IntRate.Use, vec.DoD.lambda)
mat.MD.Use <- calc.results$MD
mat.DoD.Use <- calc.results$DoD
rm(calc.results) #an optimization, reduces memory usage
# ====== 0c. Loss assessment, risk profile selection, and iteration parameters for subsequent optimisation
# -- toggle sampling for loss optimisation (i: full sample [Lowest risk]; ii; delinquents-only [Medium risk]; iii: write-offs-only [Highest risk])
# This is an artificial proxy for having portfolios on with different risk profiles, on which we may optimise the recovery decision
vec.Consider <- rep(1,n) # -- i: switches to indicate use full sample
#vec.Consider[which(vec.Del.Ind==0)] <- 0 # -- ii: only consider delinquent (including write-offs) loans, by switching off the rest
#vec.Consider[which(vec.Woff==0)] <- 0 # -- iii: only consider those written-off loans, by switching off the rest
# - script saving options (may overwrite previously saved data if not careful)
inner.name <- "LossThresh" # experiment theme name
it.name <- "v2_5j(i)-excl_closed" #iteration name
plot.name <- paste0(inner.name, it.name) # full name
# -- Iteration Parameter
num.thresholds <-168; #number of delinquency thresholds (essentially the number of times that loss is calculated)
it.vec <- 1:num.thresholds # iteration vector
# - General parameters
it.max <- NROW(it.vec)
first.iter <- T
# -- Interest Rates & Loss Rates: Specification & Calculation
i.alt <- 0.07; #risk-free rate (effective rate)
i_p.alt <- ((1+i.alt)^(1/12) - 1)*12; #risk-free rate (nominal rate)
Arrears.LossRate <- 0.7;
Outstanding.LossRate <- 0.4;
# ====== 1. Select thresholds for each Delinquency Measure
# NOTE: Some of the ranges of chosen thresholds may need to be tweaked, especially for MD and Dod measures,
# depending on the chosen portfolio on which optimisation is performed, as well as the risk level underyling receipt forecasts.
# Failure to tweak may lead to false conclusions and/or local optima in results.
# Discretionary tweaking itself is currently performed on a trial-and-error basis of running the optimisation multiple times using different ranges
# and trying to isolate the 'neighbourhood' where loss optima seemingly occurs.
# -- CD
vec.k.CD <- seq(0, (num.thresholds-1)) # chose all integer-valued thresholds, no tweaking necessary
# -- MD
max.thres <- max(quantile(mat.MD.Use[!is.na(mat.MD.Use)], 0.985)) + 1
vec.k.MD <- seq(1, ifelse(is.na(max.thres), 5, max(max.thres, 5)),length.out = num.thresholds) # normal selection
vec.k.MD <- c(seq(1, 2.5, length.out = 50),seq(2.51, max.thres, length.out = num.thresholds-50)) # for full sample/delinquents
#vec.k.MD <- c(seq(1, 4.5, length.out = 50),seq(4.51, max.thres, length.out = num.thresholds-50)) # for write-offs
#plot(vec.k.MD)
# -- DoD
max.thres <- max(quantile(mat.DoD.Use[!is.na(mat.DoD.Use)], 0.985)) + 1
vec.k.DoD <- seq(1, ifelse(is.na(max.thres), 5, max(max.thres, 5)),length.out = num.thresholds) # normal selection
vec.k.DoD <- c(seq(1, 2.5, length.out = 50),seq(2.51, max.thres, length.out = num.thresholds-50)) # for full sample/delinquents
#vec.k.DoD <- c(seq(1, 4.5, length.out = 50),seq(4.51, max.thres, length.out = num.thresholds-50)) # for write-offs
#plot(vec.k.DoD)
# ====== 2. LOSS ASSESSMENT: Iterative function definitions (to be run in parallel)
# - main function for assessing the portfolio loss at a specified threshold (one for each of the g1, g2, g3 delinquency measures)
coreJob <- function(vec.Maturity.Use, vec.Mat.Use, mat.Receipt.Use, mat.Instal.Use, mat.IntRate.Use, sc.Thres, period.term, n, vec.LoanAmt,
vec.DoD.lambda, it, num.thresholds, d.CD, d.MD, d.DoD, mat.CD.Use, mat.MD.Use, mat.DoD.Use) {
cat(paste0("\n 1)[",it," of ",num.thresholds,"] Loss assessments .. "),
file="assesslog.txt", append=TRUE)
# ---- Total Loss across given Threshold (d.CD, d.MD, d.DoD)
# - get default start times of first episode (if multiple exist), given threshold d, otherwise return -1 to indicate a performing loan
# uses custom function default.start.first.v2()
vec.default.start_first.CD <- sapply(1:n, default.start.first.v2, thres.d=d.CD, del.mat=mat.CD.Use, t=vec.Mat.Use)
vec.default.start_first.MD <- sapply(1:n, default.start.first.v2, thres.d=d.MD, del.mat=mat.MD.Use, t=vec.Mat.Use)
vec.default.start_first.DoD <- sapply(1:n, default.start.first.v2, thres.d=d.DoD, del.mat=mat.DoD.Use, t=vec.Mat.Use)
# - get (g,d)-defaulting account indices across measure, given current thresholds
def.CD <- which(vec.default.start_first.CD >= 0 & vec.Consider==1)
def.MD <- which(vec.default.start_first.MD >= 0 & vec.Consider==1)
def.DoD <- which(vec.default.start_first.DoD >= 0 & vec.Consider==1)
# - get (g,d)-performing account indices across measure, given current thresholds
perf.CD <- which(vec.default.start_first.CD < 0 & vec.Consider==1)
perf.MD <- which(vec.default.start_first.MD < 0 & vec.Consider==1)
perf.DoD <- which(vec.default.start_first.DoD < 0 & vec.Consider==1)
# - calculate final maturity as either contractual term / maturity or default time, given (g,d)-default
# for use in discounting and other loss calculations
vec.maturity.CD <- copy(vec.Mat.Use)
vec.maturity.CD[def.CD] <- vec.default.start_first.CD[def.CD]
vec.maturity.MD <- copy(vec.Mat.Use)
vec.maturity.MD[def.MD] <- vec.default.start_first.MD[def.MD]
vec.maturity.DoD <- copy(vec.Mat.Use)
vec.maturity.DoD[def.DoD] <- vec.default.start_first.DoD[def.DoD]
# - Calculate NPV of receipts, given maturity and relevant receipts
vec.ReceiptsPV.CD <- sapply(1:n, function(i,r,t) {
if (t[i] > 0) {
val <- sum( r[1:t[i], i] * (1+i_p.alt/12)^(-1*1:(t[i]) ) )
} else {
val <- 0
}
return (val)
}, r=mat.Receipt.Use, t=vec.maturity.CD)
vec.ReceiptsPV.MD <- sapply(1:n, function(i,r,t) {
if (t[i] > 0) {
val <- sum( r[1:t[i], i] * (1+i_p.alt/12)^(-1*1:(t[i]) ) )
} else {
val <- 0
}
return (val)
}, r=mat.Receipt.Use, t=vec.maturity.MD)
vec.ReceiptsPV.DoD <- sapply(1:n, function(i,r,t) {
if (t[i] > 0) {
val <- sum( r[1:t[i], i] * (1+i_p.alt/12)^(-1*1:(t[i]) ) )
} else {
val <- 0
}
return (val)
}, r=mat.Receipt.Use, t=vec.maturity.DoD)
# - calculate NPV of arrears, given maturity, relevant instalments and relevant receipts
vec.ArrearsPV.CD <- sapply(1:n, function(i,ins,r,t) {
if (t[i] > 0) {
val <- sum( ins[1:t[i],i] * (1+i_p.alt/12)^(-1*1:(t[i]) ) ) - r[i]
} else {
val <- 0
}
return (val)
}, ins=mat.Instal.Use, r=vec.ReceiptsPV.CD, t=vec.maturity.CD)
vec.ArrearsPV.MD <- sapply(1:n, function(i,ins,r,t) {
if (t[i] > 0) {
val <- sum( ins[1:t[i],i] * (1+i_p.alt/12)^(-1*1:(t[i]) ) ) - r[i]
} else {
val <- 0
}
return (val)
}, ins=mat.Instal.Use, r=vec.ReceiptsPV.MD, t=vec.maturity.MD)
vec.ArrearsPV.DoD <- sapply(1:n, function(i,ins,r,t) {
if (t[i] > 0) {
val <- sum( ins[1:t[i],i] * (1+i_p.alt/12)^(-1*1:(t[i]) ) ) - r[i]
} else {
val <- 0
}
return (val)
}, ins=mat.Instal.Use, r=vec.ReceiptsPV.DoD, t=vec.maturity.DoD)
# - calculate expected balance, given tenure at (g,d)-default, resulting remaining tenure, instalments, and interest rates
vec.ExpBalance.CD <- sapply(1:n, function(i,ins,intr,t,tt) {
if (t[i] < tt[i]) {
val <- sum( ins[((t[i]+1):tt[i]),i] * (1+intr[((t[i]+1):tt[i]),i]/12)^(-1*1:(tt[i] - t[i]) ) ) ;
} else {
val <- 0
}
# discount to origination
val <- val * (1+i_p.alt/12)^(-1*t[i] )
return (val)
}, ins=mat.Instal.Use, intr=mat.IntRate.Use, t=vec.maturity.CD, tt=vec.Mat.Use)
vec.ExpBalance.MD <- sapply(1:n, function(i,ins,intr,t,tt) {
if (t[i] < tt[i]) {
val <- sum( ins[((t[i]+1):tt[i]),i] * (1+intr[((t[i]+1):tt[i]),i]/12)^(-1*1:(tt[i] - t[i]) ) ) ;
} else {
val <- 0
}
# discount to origination
val <- val * (1+i_p.alt/12)^(-1*t[i] )
return (val)
}, ins=mat.Instal.Use, intr=mat.IntRate.Use, t=vec.maturity.MD, tt=vec.Mat.Use)
vec.ExpBalance.DoD <- sapply(1:n, function(i,ins,intr,t,tt) {
if (t[i] < tt[i]) {
val <- sum( ins[((t[i]+1):tt[i]),i] * (1+intr[((t[i]+1):tt[i]),i]/12)^(-1*1:(tt[i] - t[i]) ) ) ;
} else {
val <- 0
}
# discount to origination
val <- val * (1+i_p.alt/12)^(-1*t[i] )
return (val)
}, ins=mat.Instal.Use, intr=mat.IntRate.Use, t=vec.maturity.DoD, tt=vec.Mat.Use)
# - calculate losses as weighted combination between arrears and expected balance, and associated loss rates
vec.Losses.CD <- pmax(vec.ArrearsPV.CD*Arrears.LossRate + vec.ExpBalance.CD*Outstanding.LossRate, 0)
vec.Losses.MD <- pmax(vec.ArrearsPV.MD*Arrears.LossRate + vec.ExpBalance.MD*Outstanding.LossRate, 0)
vec.Losses.DoD <- pmax(vec.ArrearsPV.DoD*Arrears.LossRate + vec.ExpBalance.DoD*Outstanding.LossRate, 0)
# - calculate actual balance [ancillary information]
vec.bal.CD <- pmax(vec.ArrearsPV.CD + vec.ExpBalance.CD, 0)
vec.bal.MD <- pmax(vec.ArrearsPV.MD + vec.ExpBalance.MD, 0)
vec.bal.DoD <- pmax(vec.ArrearsPV.DoD + vec.ExpBalance.DoD, 0)
# --- curate some vectors for reporting/graphing purposes - remove accounts not to be considered
{
vec.bal.CD[which(vec.Consider==0)] <- NA
vec.bal.MD[which(vec.Consider==0)] <- NA
vec.bal.DoD[which(vec.Consider==0)] <- NA
# - zero the loss if a particular account is not to be considered [sampling]
vec.Losses.CD[which(vec.Consider==0)] <- 0
vec.Losses.MD[which(vec.Consider==0)] <- 0
vec.Losses.DoD[which(vec.Consider==0)] <- 0
}
# ---------- Concatenate relevant information, including profit/loss calculations for optimisation
dat.EL.core <- rbind(data.table(Measure="CD", MeasureName ="g1: CD", Threshold=d.CD,
Vol_Perf=length(perf.CD),Vol_Def=length(def.CD),
Bal_Perf = sum(vec.bal.CD[perf.CD], na.rm = T), Bal_Def = sum(vec.bal.CD[def.CD], na.rm = T),
Loss=sum(vec.Losses.CD, na.rm = T)),
data.table(Measure="MD", MeasureName ="g2: MD", Threshold=d.MD,
Vol_Perf=length(perf.MD),Vol_Def=length(def.MD),
Bal_Perf = sum(vec.bal.MD[perf.MD], na.rm = T), Bal_Def = sum(vec.bal.MD[def.MD], na.rm = T),
Loss=sum(vec.Losses.MD, na.rm = T)),
data.table(Measure="DoD", MeasureName ="g3: DoD", Threshold=d.DoD,
Vol_Perf=length(perf.DoD),Vol_Def=length(def.DoD),
Bal_Perf = sum(vec.bal.DoD[perf.DoD], na.rm = T), Bal_Def = sum(vec.bal.DoD[def.DoD], na.rm = T),
Loss=sum(vec.Losses.DoD, na.rm = T))
)
cat(paste0("\n\t 2)[",it," of ",num.thresholds,"] Loss assessed! "),
file="assesslog.txt", append=TRUE)
return (dat.EL.core)
}
# ====== 3. OPTIMISATION PROCEDURE: Iterating across chosen thresholds (g1, g2, g3 delinquency measures)
ptm <- proc.time() #IGNORE: for computation time calculation
cat(paste("New Job: Assessing delinquency and profitability of given portfolio across various thresholds",sep=''),
file="assesslog.txt", append=FALSE)
# -- parallelization parameters
cl.port<-makeCluster(6) # number of threads to register in the OS for this procedure
registerDoParallel(cl.port)
# using foreach() from foreach package for distributing loop iterations across registered threads: remarkable improvement in run time
# Note: need to import all custom functions used within the loss assessment.
dat.EL.par <- foreach(it=1:it.max, .combine='rbind', .verbose=T, .inorder=F, .packages ='data.table',
.export=c('default.start.first.v2', 'coreJob')) %dopar%
{
dat.EL.core <- coreJob(vec.Maturity.Use=vec.Maturity.Use, vec.Mat.Use=vec.Mat.Use,
mat.Receipt.Use=mat.Receipt.Use, mat.Instal.Use=mat.Instal.Use, mat.IntRate.Use=mat.IntRate.Use,
sc.Thres=sc.Thres, period.term=period.term, n=n, vec.LoanAmt=vec.LoanAmt, vec.DoD.lambda=vec.DoD.lambda,
it=it, num.thresholds=num.thresholds,
d.CD=vec.k.CD[it], d.MD=vec.k.MD[it], d.DoD=vec.k.DoD[it],
mat.CD.Use=mat.CD.Use, mat.MD.Use=mat.MD.Use, mat.DoD.Use=mat.DoD.Use)
}
stopCluster(cl.port) # release threads back to the OS
# - last data preparation
setDT(dat.EL.par, key=c("Measure","Threshold"))
# - zip and save optimisation results to disk
pack.ffdf(paste0("EL",it.name),dat.EL.par)
# =========== OPTIMISATION RESULTS: Isolated optima and graphs
# -- Balance and Volume graphs across chosen threshold range, using g1-measure [CD]
# just for inspection purposes
ex.g <- "CD"
toplot <- gather(dat.EL.par[Measure==ex.g, list(Threshold, Bal_Perf, Bal_Def)], key=Type, value=Value, -Threshold)
label.vec <- c("Defaulting Balance", "Performing Balance")
ggplot(toplot, aes(x=Threshold, group=Type)) + theme_minimal() +
geom_bar(aes(x = Threshold, y=Value, fill = Type), position="fill", stat="identity") +
scale_y_continuous(breaks=pretty_breaks(), labels=percent) + theme(legend.position = "bottom") +
labs(y="Proportionate Balances (%)",x=paste0("Default Threshold (",ex.g,")")) +
scale_fill_manual(values=c("paleturquoise4","paleturquoise"),labels=label.vec, name="")
toplot <- gather(dat.EL.par[Measure==ex.g,list(Threshold, Vol_Perf, Vol_Def)], key=Type, value=Value, -Threshold)
label.vec <- c("Defaulting Volume", "Performing Volume")
ggplot(toplot, aes(x=Threshold, group=Type)) + theme_minimal() +
geom_bar(aes(x = Threshold, y=Value, fill = Type), position="fill", stat="identity") +
scale_y_continuous(breaks=pretty_breaks(), labels=percent) + theme(legend.position = "bottom") +
labs(y="Proportionate Volume (%)",x=paste0("Default Threshold (",ex.g,")")) +
scale_fill_manual(values=c("paleturquoise4","paleturquoise"),labels=label.vec, name="")
# save graphs to disk
dpi <- 110
ggsave(g.Bal, file=paste0("EL-Balances-",it.name,".png"),width=600/dpi, height=450/dpi,dpi=dpi)
ggsave(g.Vol, file=paste0("EL-Volumes-",it.name,".png"),width=600/dpi, height=450/dpi,dpi=dpi)
# -- Optimisation results across chosen threshold range, by delinquency measure
g <- ggplot(dat.EL.par, aes(x=Threshold, y=Loss)) +
geom_point(aes(x=Threshold,y=Loss, color=MeasureName, shape=MeasureName), size=1.5) +
geom_line(aes(x=Threshold, y=Loss, color=MeasureName), size = 1) +
labs(y="PV of Losses (R)",x="Thresholds (w)") + theme_minimal() +
theme(text=element_text(family="Calibri", size=12),
legend.position="bottom") +
scale_color_economist(name="Delinquency Measure",guide = guide_legend(ncol=2)) +
scale_shape_manual(values=c(1,16,8),
name="Delinquency Measure",guide = guide_legend(ncol=2)) +
scale_y_continuous(breaks= pretty_breaks(), labels=unit_format(unit="b", scale=0.000000001))
g
# -- Optimisation results across chosen threshold range, for the g1_measure [CD] only
toplot <- dat.EL.par[Measure==ex.g, list(MeasureName, Threshold, Loss)]
g.CDOnly <- ggplot(toplot, aes(x=Threshold, y=Loss)) +
geom_point(aes(x=Threshold,y=Loss, color=MeasureName, shape=MeasureName), size=1.5) +
geom_line(aes(x=Threshold, y=Loss, color=MeasureName), size = 1) +
labs(y="PV of Losses (R)",x="Thresholds (w)") + theme_minimal() +
theme(text=element_text(family="Calibri", size=12),
legend.position="bottom") +
scale_color_economist(name="Delinquency Measure",guide = guide_legend(ncol=2)) +
scale_shape_manual(values=c(1,16,8),
name="Delinquency Measure",guide = guide_legend(ncol=2)) +
scale_y_continuous(breaks= pretty_breaks(), labels=unit_format(unit="b", scale=0.000000001))
g.CDOnly
# save graphs to disk
ggsave(g, file=paste0(plot.name,".png"),width=600/100, height=450/100,dpi=100)
ggsave(g.CDOnly, file=paste0(plot.name,"-CD.png"),width=600/100, height=450/100,dpi=100)
# -- Loss-optimal thresholds found across chosen threshold range, by delinquency measure
minima <- function(given, measures = c("CD", "MD", "DoD")) {
  # Report, for each delinquency measure, the minimum PV of losses, the
  # threshold at which it is attained, and that threshold's position in the
  # threshold vector. Replaces three verbatim copies of the same lookup with
  # a single loop; output stream is identical to the original version.
  #
  # Args:
  #   given:    data.table with columns Measure, MeasureName, Threshold, Loss
  #   measures: measure codes to report on (defaults to the original three)
  for (i in seq_along(measures)) {
    m <- measures[i]
    dat.min <- given[Measure == m, list(MeasureName, Threshold, Loss)]
    # which.min() returns the first minimum, matching the original
    # Position(function(x) x == min(...)) scan, in a single pass.
    min.pos <- which.min(dat.min$Loss)
    if (i > 1) cat("\n")
    cat(paste0(m, ": Minimum Loss of"), comma(min(dat.min$Loss)),
        "at threshold d =", dat.min[min.pos, Threshold],
        "at position", min.pos, "in threshold vector")
  }
}
minima(dat.EL.par)
proc.time() - ptm #IGNORE: computation time taken
|
# Fit a cross-validated elastic-net model (glmnet) to the grpreg birth-weight
# example data and write the coefficients obtained at several fixed penalty
# values (lambda) to a CSV file.
closeAllConnections()
rm(list=ls())  # NOTE(review): wipes the whole workspace -- only safe as a standalone script
dir = getwd()  # working directory; currently unused below
out = "./elastic_net_weights.csv"  # destination for the coefficient table
library(glmnet)
library(grpreg)
data(Birthwt)  # loads the Birthwt object from grpreg
X <- Birthwt$X    # predictor matrix
Y <- Birthwt$bwt  # response: birth weight
# groups <- c(1,1,1,2,2,2,3,3,4,5,5,6,7,8,8,8)
# 4-fold CV; type.multinomial is presumably ignored for a gaussian response -- TODO confirm
glm_object <- cv.glmnet(x=X, y=Y, nfolds=4, type.measure = "mse", grouped=TRUE, type.multinomial = "grouped")
# Coefficient estimates at a fixed grid of lambda values.
weights <- coef(glm_object, s=c(0.001, 0.01, 0.05, 0.1, 0.2, 0.5, 1))
colnames(weights, do.NULL = FALSE)  # NOTE(review): return value discarded -- this line has no effect
colnames(weights) <- c(0.001, 0.01, 0.05, 0.1, 0.2, 0.5, 1)  # label columns by lambda
write.csv(as.array(weights), file = out)
weights | /Experimental section/Models/Elastic Net/elastic-net.R | no_license | adityagc/Graduate-Research | R | false | false | 532 | r | closeAllConnections()
rm(list=ls())
dir = getwd()
out = "./elastic_net_weights.csv"
library(glmnet)
library(grpreg)
data(Birthwt)
X <- Birthwt$X
Y <- Birthwt$bwt
# groups <- c(1,1,1,2,2,2,3,3,4,5,5,6,7,8,8,8)
glm_object <- cv.glmnet(x=X, y=Y, nfolds=4, type.measure = "mse", grouped=TRUE, type.multinomial = "grouped")
weights <- coef(glm_object, s=c(0.001, 0.01, 0.05, 0.1, 0.2, 0.5, 1))
colnames(weights, do.NULL = FALSE)
colnames(weights) <- c(0.001, 0.01, 0.05, 0.1, 0.2, 0.5, 1)
write.csv(as.array(weights), file = out)
weights |
#' Convert timestamps to frame numbers
#'
#' Given a frame rate, millisecond timestamps are binned into video frame
#' numbers.
#'
#' @param x Vector of timestamps (milliseconds).
#' @param fps Frames per second of the video source. Defaults to 30 Frames
#'   Per Second. The smaller the value, the more likely two events will be
#'   chunked in the same frame.
#' @param tstart Start timestamp. Anything below start will not be converted.
#' @param tend End timestamp. Anything above will be NA. Defaults to max of x
#'   if not set.
#' @param chunked If set to TRUE, will return a time back to you instead of
#'   frame number, but the chunked/cut value corresponding to that frame.
#' @param warn Turn on/off warnings for NAs
#'
#' @examples
#' # sequence of milliseconds
#' x <- seq(1, 1009, 12)
#'
#' # 30 fps video
#' ts2frame(x, fps=30)
#'
#' # first frames are NA until start frame is encountered
#' ts2frame(x, fps=29.97, tstart=333)
#'
#' # compare chunked time to actual time
#' cbind(sprintf("%.2f", ts2frame(x, tstart=100, tend=1000, fps=30, chunked=TRUE)), x)
#' @export
ts2frame <- function(x,
                     fps=30,
                     tstart=0,
                     tend,
                     chunked=FALSE,
                     warn=TRUE)
{
  frame.ms <- 1000 / fps  # duration of a single frame, in milliseconds
  if (missing(tend)) {
    tend <- max(x)
  }
  # Frame break points covering [tstart, tend], padded so tend falls inside
  # the final interval.
  breaks <- seq(tstart, tend + frame.ms - ((tend - tstart) %% frame.ms), frame.ms)
  frames <- findInterval(x, breaks)
  # Timestamps outside [tstart, tend] get no frame.
  frames[x < tstart | x > tend] <- NA
  if (warn && anyNA(frames)) {
    warning(simpleWarning("Found NAs for some frames"))
  }
  if (chunked) {
    breaks[frames]
  } else {
    frames
  }
}
#' Age calculator (months)
#'
#' Calculates age in months, with decimal days, from date of birth up to some
#' reference point.
#'
#' If you're going to use a reference date, make sure the format for both dob
#' and ref are the same. For example, don't use ymd for dob and mdy for ref.
#' You'll get wrong values.
#'
#' @param dob Date of birth string that the function \code{\link{ymd}} and
#'   others like it can understand. Typically in the format "yyyy-mm-dd" or
#'   "yyyy/mm/dd"
#' @param ref Reference date string. Either today's date or some other time
#'   after \code{dob}. Defaults to today.
#' @param lub.fmt Lubridate function for the input dates, such as
#'   \code{\link{ymd}} or any function that returns a \code{POSIXct} format.
#'   Defaults to \code{\link{mdy}}
#' @return Numeric value of age in months
#' @export
#' @examples
#' age_calc("01-10-2013")
#' age_calc(c("05-13-1983", "01-10-2013"), c("05-13-2000", "10-07-2014"))
#' age_calc("2013/01/10", lub.fmt=lubridate::ymd)
age_calc <- function(dob, ref, lub.fmt=lubridate::mdy) {
  # avg_days_month <- 30.436875
  today <- lubridate::ymd(Sys.Date())
  end <- if (missing(ref)) today else lub.fmt(ref)
  start <- lub.fmt(dob)
  # Express the dob -> end interval as whole months plus leftover days.
  span <- lubridate::as.period(
    lubridate::interval(start, end),
    unit = "months")
  # NOTE(review): leftover days are scaled by the length of the *current*
  # month, not the month containing the interval end -- confirm intended.
  as.numeric(span$month + (span$day / lubridate::days_in_month(today)))
}
| /R/unsorted.R | no_license | danniekim/mejr | R | false | false | 3,277 | r |
#' Convert timestamps to frame numbers
#'
#' Provide the frame rate to use to create break points from millisecond time data
#'
#' @param x Vector of timestamps
#' @param fps Frames per second of the video source.
#' Defaults to 30 Frames Per Second. The smaller the value, the more likely
#' two events will be chunked in the same frame.
#' @param tstart Start timestamp. Anything below start will not be converted.
#' @param tend End timestamp. Anything above will be NA. Defaults to max of x if not set.
#' @param chunked If set to TRUE, will return a time back to you instead of frame number,
#' but the chunked/cut value corresponding to that frame.
#' @param warn Turn on/off warnings for NAs
#' @param tstart time to be used as the initial level in a factor. Assumes 0 time.
#'
#' @examples
#' # sequence of milliseconds
#' x <- seq(1, 1009, 12)
#'
#' # 30 fps video
#' ts2frame(x, fps=30)
#'
#' # first frames are NA until start frame is encountered
#' ts2frame(x, fps=29.97, tstart=333)
#'
#' # compare chunked time to actual time
#' cbind(sprintf("%.2f", ts2frame(x, tstart=100, tend=1000, fps=30, chunked=TRUE)), x)
#' @export
ts2frame <- function(x,
fps=30,
tstart=0,
tend,
chunked=FALSE,
warn=TRUE)
{
foa <- 1000 / fps
if (missing(tend)) tend <- max(x)
tinterval <- seq(tstart, tend + foa - ((tend-tstart) %% foa), foa)
f <- findInterval(x, tinterval, rightmost.closed=FALSE, all.inside=FALSE)
f[x < tstart | x > tend] <- NA
if (any(is.na(f)) && warn) warning(simpleWarning("Found NAs for some frames"))
if (chunked) {
return(tinterval[f])
} else {
return(f)
}
}
#' Age calculater (months)
#'
#' Calculates ages in months with decimal days from date of birth until up to some point
#'
#' If you're going to use a reference date, make sure the format for both dob
#' and ref are the same. For example, don't use ymd for dob and mdy for ref.
#' You'll get wrong values.
#'
#' @param dob Date of birth string that the function \code{\link{ymd}} and
#' others like it can understand. Typically in the format "yyyy-mm-dd" or "yyyy/mm/dd"
#' @param ref Reference date string. Either today's date or some other time after
#' \code{dob} Defaults to today.
#' @param lub.fmt Lubridate function for the input dates, such as
#' \code{\link{ymd}} or any function that returns a \code{POSIXct} format.
#' Defaults to \code{\link{mdy}}
#' @return Numeric value of age in months
#' @export
#' @examples
#' age_calc("01-10-2013")
#' age_calc(c("05-13-1983", "01-10-2013"), c("05-13-2000", "10-07-2014"))
#' age_calc("2013/01/10", lub.fmt=lubridate::ymd)
age_calc <- function(dob, ref, lub.fmt=lubridate::mdy) {
# avg_days_month <- 30.436875
today <- lubridate::ymd(Sys.Date())
if (missing(ref)) {
end <- today
} else {
end <- lub.fmt(ref)
}
start <- lub.fmt(dob)
period <- lubridate::as.period(
lubridate::interval(start, end),
unit = "months")
as.numeric(period$month + (period$day / lubridate::days_in_month(today)))
}
|
testlist <- list(cost = structure(c(1.44888560957826e+135, 1.6249392498385e+65, 5.27956628994611e-134, 1.56839475268612e-251, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 5L)), flow = structure(c(3.80768289350145e+125, 8.58414828913381e+155, 3.37787969964034e+43, 2.83184518248624e-19, 7.49487861616974e+223, 8.52929466674086e+86, 2.51852491380534e-303, 3.12954510408264e-253, 2.45741775099414e-215, 6.59159492364721e+70, 2.33952815237705e+77, 3.1674929214459e+282, 1.0709591854537e+63, 7.43876613929257e+191, 8.31920980250172e+78, 1.26747339146319e+161, 5.68076251052666e-141, 9.98610641272026e+182, 232631829426.491, 3.75587249552337e-34, 8.67688084914444e+71, 2.85936996201565e+135, 5.49642980516022e+268, 854537881567133, 1.33507119962914e+95, 2.76994725819545e+63, 4.08029273738449e+275, 4.93486427894025e+289, 1.24604061502336e+294, 3.2125809174767e-185, 9.58716852715016e+39, 6.94657888227078e+275, 3.46330348083089e+199, 3.28318446108869e-286, 6.12239214969922e-296 ), .Dim = c(5L, 7L)))
result <- do.call(epiphy:::costTotCPP,testlist)
str(result) | /epiphy/inst/testfiles/costTotCPP/AFL_costTotCPP/costTotCPP_valgrind_files/1615926978-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 1,101 | r | testlist <- list(cost = structure(c(1.44888560957826e+135, 1.6249392498385e+65, 5.27956628994611e-134, 1.56839475268612e-251, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 5L)), flow = structure(c(3.80768289350145e+125, 8.58414828913381e+155, 3.37787969964034e+43, 2.83184518248624e-19, 7.49487861616974e+223, 8.52929466674086e+86, 2.51852491380534e-303, 3.12954510408264e-253, 2.45741775099414e-215, 6.59159492364721e+70, 2.33952815237705e+77, 3.1674929214459e+282, 1.0709591854537e+63, 7.43876613929257e+191, 8.31920980250172e+78, 1.26747339146319e+161, 5.68076251052666e-141, 9.98610641272026e+182, 232631829426.491, 3.75587249552337e-34, 8.67688084914444e+71, 2.85936996201565e+135, 5.49642980516022e+268, 854537881567133, 1.33507119962914e+95, 2.76994725819545e+63, 4.08029273738449e+275, 4.93486427894025e+289, 1.24604061502336e+294, 3.2125809174767e-185, 9.58716852715016e+39, 6.94657888227078e+275, 3.46330348083089e+199, 3.28318446108869e-286, 6.12239214969922e-296 ), .Dim = c(5L, 7L)))
result <- do.call(epiphy:::costTotCPP,testlist)
str(result) |
#parameters and settings;
#seed
ran.seed <- 1
#Number of new infections per day within the whole ward 58 & 59 is Poisson distributed;
#lambda <- 1 #lambda for the peak season, now assume no seasonality;
n.sim <- 1000
n.pop <- 160000
n.household <-30000 #ask the student for details;
n.person.per.house <- 16/3
n.house.per.latrine <- 10
n.latrine <- n.pop/n.house.per.latrine/n.person.per.house
#parameters of simulation sewage network and latrine points
num.lines <- 1000 #The sewage lines will be set to 1000, 2500, 5000.
num.points <- n.latrine
total.vol <- 2e+10 #mL from Peter and Andrew's rough calculation;
LLOD.test <- 2 #number 10^3 of DNA per 500 mL;
LLOD.uf.test <- 0.025 #number 10^3 of DNA per 40L ultrafiltration samples;
# #low sensitivity;
# LLOD.test.low <- 2
# LLOD.uf.test.low <- 0.025
# high sensitivity;
# LLOD.test.high <- 0.02
# LLOD.uf.test.high <- 0.00025
dir.store <- paste0("./",version,".SimIterative",Sys.Date(),".ssn")
#parameters of transmission;
mu.shed <- 10^8
sigma.shed <- 1
n.days.shed <- 14
#decay and lost parameters;
#low decay rate;
gamma.shape <- 1
gamma.rate <- 0.25
#high decay rate;
# gamma.shape <- 1
# gamma.rate <- 2
#pooling samples;
# n.row <- 4
# n.col <- 5
n.row <- 3
n.col <- 3
n.sample <- 9
n.sample.pooling <- 5
percent.x <- 0.02
percent.y <- 0.02
n.minimum.latrine <- 10
#adaptive sampling settings;
n.days.per.sample <- 7
n.week.per.update <- 12
n.drop <- 1
n.drop.add <- 2
n.update <- 20
n.site <- n.latrine
n.days <- n.update*n.week.per.update*n.days.per.sample + 100
n.days <- 2000
#outbreak
#outbreak geographic size;
outbreak.range <- 0.01
#outbreak area number;
n.outbreak.area <- 5
#outbreak amplification factor;
outbreak.factor <- 10
#outbreak zone changes every 10000 days;
outbreak.days <- 10000
| /v7/v7.param.R | no_license | YWAN446/Adaptive-Sampling-Site-Allocation | R | false | false | 1,771 | r | #parameters and settings;
#seed
ran.seed <- 1
#Number of new infections per day within the whole ward 58 & 59 is Poisson distributed;
#lambda <- 1 #lambda for the peak season, now assume no seasonality;
n.sim <- 1000
n.pop <- 160000
n.household <-30000 #ask the student for details;
n.person.per.house <- 16/3
n.house.per.latrine <- 10
n.latrine <- n.pop/n.house.per.latrine/n.person.per.house
#parameters of simulation sewage network and latrine points
num.lines <- 1000 #The sewage lines will be set to 1000, 2500, 5000.
num.points <- n.latrine
total.vol <- 2e+10 #mL from Peter and Andrew's rough calculation;
LLOD.test <- 2 #number 10^3 of DNA per 500 mL;
LLOD.uf.test <- 0.025 #number 10^3 of DNA per 40L ultrafiltration samples;
# #low sensitivity;
# LLOD.test.low <- 2
# LLOD.uf.test.low <- 0.025
# high sensitivity;
# LLOD.test.high <- 0.02
# LLOD.uf.test.high <- 0.00025
dir.store <- paste0("./",version,".SimIterative",Sys.Date(),".ssn")
#parameters of transmission;
mu.shed <- 10^8
sigma.shed <- 1
n.days.shed <- 14
#decay and lost parameters;
#low decay rate;
gamma.shape <- 1
gamma.rate <- 0.25
#high decay rate;
# gamma.shape <- 1
# gamma.rate <- 2
#pooling samples;
# n.row <- 4
# n.col <- 5
n.row <- 3
n.col <- 3
n.sample <- 9
n.sample.pooling <- 5
percent.x <- 0.02
percent.y <- 0.02
n.minimum.latrine <- 10
#adaptive sampling settings;
n.days.per.sample <- 7
n.week.per.update <- 12
n.drop <- 1
n.drop.add <- 2
n.update <- 20
n.site <- n.latrine
n.days <- n.update*n.week.per.update*n.days.per.sample + 100
n.days <- 2000
#outbreak
#outbreak geographic size;
outbreak.range <- 0.01
#outbreak area number;
n.outbreak.area <- 5
#outbreak amplification factor;
outbreak.factor <- 10
#outbreak zone changes every 10000 days;
outbreak.days <- 10000
|
library(ggplot2)
dat = readLines("newrep.txt")
# ltna load time and number analysis
# ltna: load time and number analysis.
# Parses a captured package-load timing report and plots load time against
# the number of loaded namespaces, labelling each point with the package name.
#
# Args:
#   x:       character vector of report lines (e.g. from readLines())
#   dropstr: regex for report lines to discard before parsing
# Returns:
#   a ggplot object (load time vs. number of loaded namespaces)
ltna <- function(x, dropstr = "built|methods.found") {
  # Drop blank lines and anything matching dropstr.
  bad <- c(which(nchar(x) == 0), grep(dropstr, x))
  if (length(bad) > 0) {
    x <- x[-bad]
  }
  # Timing lines follow each "elapsed" header; the third field is elapsed time.
  pre <- grep("elapsed", x)
  tms <- x[pre + 1]
  allt <- vapply(tms, function(line) scan(text = line, quiet = TRUE)[3], numeric(1))
  # Package names come from the "---- pkg ----" banner lines.
  nms <- grep("----", x, value = TRUE)
  # "time.library" lines are followed by "nattach/nload ..." counts.
  natt <- x[grep("time.library", x) + 1]
  numatt <- vapply(strsplit(natt, "/"), `[`, character(1), 1)
  numlo <- vapply(strsplit(natt, "/"), `[`, character(1), 2)
  numloa <- vapply(strsplit(numlo, " "), `[`, character(1), 1)
  newdf <- data.frame(pkg = gsub(" ----", "", nms),
                      time = allt,
                      nload = as.numeric(numloa),
                      nattach = as.numeric(numatt))
  ggplot(newdf, aes(x = nload, y = time, text = pkg)) + geom_text(aes(label = pkg))
}
ltna(dat) + ylim(4,13)
| /procTimes.R | no_license | vjcitn/stackweight | R | false | false | 791 | r | library(ggplot2)
dat = readLines("newrep.txt")
# ltna load time and number analysis
ltna = function(x, dropstr="built|methods.found") {
zl = which(nchar(x)==0)
bad = c(zl, grep(dropstr, x))
if (length(bad)>0) x = x[-bad]
pre = grep("elapsed", x)
nms = grep("----", x, value=TRUE)
tms = x[pre+1]
sapply(tms, function(x) scan(text=x, quiet=TRUE)[3]) -> allt
libpos = grep("time.library", x)
natt = x[libpos+1]
numatt = sapply(strsplit(natt, "/"), "[", 1)
length(numatt)
numlo = sapply(strsplit(natt, "/"), "[", 2)
numloa = sapply(strsplit(numlo, " "), "[", 1)
newdf = data.frame(pkg=nms, time=allt, nload=as.numeric(numloa), nattach=as.numeric(numatt))
newdf$pkg = gsub(" ----", "", newdf$pkg)
ggplot(newdf, aes(x=nload, y=time, text=pkg)) + geom_text(aes(label=pkg))
}
ltna(dat) + ylim(4,13)
|
# Schedule_Task_Run_R_Script.r
# Demo of running R non-interactively (e.g. from a scheduled task): reads two
# numbers from the command line and prints basic arithmetic on them.
# Invoke as: Rscript Schedule_Task_Run_R_Script.r <x> <y>
argv <- commandArgs(TRUE)
x <- as.numeric(argv[1])  # first positional argument; NA if missing or non-numeric
y <- as.numeric(argv[2])  # second positional argument
cat("x =", x, "\n")
cat("y =", y, "\n")
cat("x + y = ", x + y, "\n")
cat("x - y = ", x - y, "\n")
cat("x * y = ", x * y, "\n")
cat("x / y = ", x / y, "\n") | /R_Learning/Schedule_Task_Run_R_Script.r | no_license | xiangxing98/xiangxing98.github.io | R | false | false | 262 | r | # Schedule_Task_Run_R_Script.r
argv <- commandArgs(TRUE)
x <- as.numeric(argv[1])
y <- as.numeric(argv[2])
cat("x =", x, "\n")
cat("y =", y, "\n")
cat("x + y = ", x + y, "\n")
cat("x - y = ", x - y, "\n")
cat("x * y = ", x * y, "\n")
cat("x / y = ", x / y, "\n") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spatial_tools.R
\name{spatial.clusters}
\alias{spatial.clusters}
\title{Visualize spatial distribution of clusters.}
\usage{
spatial.clusters(object, clusters = NULL, HE.list = NULL,
arrange = T, ncols = NULL, xlim = c(1, 33), ylim = c(1, 35),
cols = NULL, ...)
}
\arguments{
\item{object}{Object of class spaceST.}
\item{clusters}{Integer/numeric vector specifying clusters.}
\item{HE.list}{List of paths to HE images in jpeg format that should be used as a background for the
spatial heatmap.}
\item{arrange}{Arrange plots.}
\item{ncols}{Number of columns in arranged plot table.}
\item{xlim, ylim}{Set limits of x/y axes. [default: xlim = c(1, 33), ylim = c(1, 35)]}
\item{cols}{Set custom cluster colors by specifying a character vector with color codes. You can specify
specific colors for each cluster by naming the character vector with the cluster ids.}
\item{...}{additional parameters passed to geom_point(), defining the attributes of the feature coordinate points.}
}
\value{
Plot of clustered features overlaid on a black-and-white HE image.
}
\description{
Visualize spatial distribution of clusters.
}
| /man/spatial.clusters.Rd | no_license | ludvigla/spaceST | R | false | true | 1,205 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spatial_tools.R
\name{spatial.clusters}
\alias{spatial.clusters}
\title{Visualize spatial distribution of clusters.}
\usage{
spatial.clusters(object, clusters = NULL, HE.list = NULL,
arrange = T, ncols = NULL, xlim = c(1, 33), ylim = c(1, 35),
cols = NULL, ...)
}
\arguments{
\item{object}{Object of class spaceST.}
\item{clusters}{Integer/numeric vector specifying clusters.}
\item{HE.list}{List of paths to HE images in jpeg format that should be used as a background for the
spatial heatmap.}
\item{arrange}{Arrange plots.}
\item{ncols}{Number of columns in arranged plot table.}
\item{xlim, ylim}{Set limits of x/y axes. [default: xlim = c(1, 33), ylim = c(1, 35)]}
\item{cols}{Set custom cluster colors by specifying a character vector with color codes. You can specify
specific colors for each cluster by naming the character vector with the cluster ids.}
\item{...}{additional parameters passed to geom_point(), defining the attributes of the feature coordinate points.}
}
\value{
Plot of clustered features overlaid on a black-and-white HE image.
}
\description{
Visualize spatial distribution of clusters.
}
|
# Copyright 2014-2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Bayesian dynamic diffusion-regression state-space model for computing
# counterfactual predictions in a time series. Uses an MCMC algorithm
# implemented in the \code{bsts} package to compute samples from the posterior
# (smoothing) densities over states and parameters.
#
# Author: kbrodersen@google.com (Kay Brodersen)
# Some model priors are fixed, others can be adjusted through model.args.
# For full flexibility, construct your own bsts model and feed it into
# CausalImpactForBsts().
kLocalLevelPriorSampleSize <- 32
kStaticRegressionExpectedModelSize <- 3
kStaticRegressionExpectedR2 <- 0.8
kStaticRegressionPriorDf <- 50
kDynamicRegressionPriorSampleSize <- 32
ObservationsAreIllConditioned <- function(y) {
  # Decides whether the response series <y> is too ill-conditioned for
  # inference: entirely NA, fewer than 3 observed values, or constant.
  #
  # Args:
  #   y: observed series (numeric vector or single zoo series)
  #
  # Returns:
  #   TRUE if something is wrong with the observations; FALSE otherwise.
  assert_that(!is.null(y), length(y) >= 1)
  observed <- y[!is.na(y)]
  # Nothing but NAs?
  if (length(observed) == 0) {
    warning("Aborting inference due to input series being all NA.")
    return(TRUE)
  }
  # Too few actual observations?
  if (length(observed) < 3) {
    warning("Aborting inference due to fewer than 3 non-NA values in input")
    return(TRUE)
  }
  # No variation at all?
  if (sd(y, na.rm = TRUE) == 0) {
    warning(paste0("Aborting inference due to input series being constant: ",
                   observed[1]))
    return(TRUE)
  }
  return(FALSE)
}
FormatInputForConstructModel <- function(data, model.args) {
# Checks the input arguments supplied to ConstructModel(). Missing arguments
# in \code{model.args} will be filled using \code{.defaults} (see top of file
# impact_analysis.R).
#
# Args:
# data: time series of response variable and covariates
# model.args: list of additional arguments
#
# Returns:
# list of checked and correctly formatted arguments
# Check <data>
assert_that(!is.null(data))
data <- as.zoo(data)
if (is.null(ncol(data))) {
dim(data) <- c(length(data), 1)
}
assert_that(is.numeric(data))
assert_that(nrow(data) > 0)
# If <data> has no names, assign: y, x1, x2, ...
if (is.null(names(data))) {
if (ncol(data) == 1) {
names(data)[1] <- "y"
} else {
names(data) <- c("y", paste0("x", 2:ncol(data) - 1))
}
}
# Check covariates
if (ncol(data) >= 2) {
assert(all(!is.na(data[, -1])), "covariates must not be NA")
}
# (Re-)parse <model.args>, fill gaps using <.defaults>
# (defined in impact_analysis.R)
model.args <- ParseArguments(model.args, .defaults)
# Check those parts of <model.args> that are used in this file
# Check <niter>
assert_that(is.scalar(model.args$niter))
assert_that(is.numeric(model.args$niter))
assert_that(!is.na(model.args$niter))
assert_that(is.wholenumber(model.args$niter))
model.args$niter <- round(model.args$niter)
assert(model.args$niter >= 10,
"must draw, at the very least, 10 MCMC samples; recommending 1000")
if (model.args$niter < 1000) {
warning("Results potentially inaccurate. Consider using more MCMC samples.")
}
# Check <prior.level.sd>
assert_that(is.scalar(model.args$prior.level.sd))
assert_that(is.numeric(model.args$prior.level.sd))
assert_that(!is.na(model.args$prior.level.sd))
assert_that(model.args$prior.level.sd > 0)
# Check <nseasons>
assert_that(is.scalar(model.args$nseasons))
assert_that(is.numeric(model.args$nseasons))
assert_that(!is.na(model.args$nseasons))
assert_that(is.wholenumber(model.args$nseasons))
assert(model.args$nseasons >= 1,
"nseasons cannot be 0; use 1 in order not to have seaonsal components")
# Check <season.duration>
assert_that(is.scalar(model.args$season.duration))
assert_that(is.numeric(model.args$season.duration))
assert_that(!is.na(model.args$season.duration))
assert_that(is.wholenumber(model.args$season.duration))
assert_that(model.args$season.duration >= 1)
# Check <dynamic.regression>
assert_that(is.scalar(model.args$dynamic.regression))
assert_that(is.logical(model.args$dynamic.regression))
assert_that(!is.na(model.args$dynamic.regression))
# Return updated args
return(list(data = data, model.args = model.args))
}
# Tell 'R CMD check' to treat `BstsOptions()` as global variable to avoid
# false positives as long as 'bsts' version 0.7.x is not published.
# TODO(alhauser): remove this when 'bsts' version 0.7.x is published.
if(getRversion() >= "2.15.1") {
utils::globalVariables("BstsOptions")
}
ConstructModel <- function(data, model.args = NULL) {
# Specifies the model and performs inference. Inference means using the data
# to pass from a prior distribution over parameters and states to a posterior
# distribution. In a Bayesian framework, estimating a model means to obtain
# p(parameters | data) from p(data | parameters) and p(parameters). This
# involves multiplying the prior with the likelihood and normalising the
# resulting distribution using the marginal likelihood or model evidence,
# p(data). Computing the evidence poses a virtually intractable
# high-dimensional integration problem which can be turned into an easier
# optimization problem using, for instance, an approximate stochastic
# inference strategy. Here, we use a Markov chain Monte Carlo algorithm, as
# implemented in the \code{bsts} package.
#
# Args:
# data: time series of response variable and optional covariates
# model.args: optional list of additional model arguments
#
# Returns:
# \code{bsts.model}, as returned by \code{bsts()}
# Check and format input
checked <- FormatInputForConstructModel(data, model.args)
data <- checked$data
model.args <- checked$model.args
y <- data[, 1]
# If the series is ill-conditioned, abort inference and return NULL
if (ObservationsAreIllConditioned(y)) {
return(NULL)
}
# Local level
# sigma.guess: standard deviation of the random walk of the level
sdy <- sd(y, na.rm = TRUE)
ss <- list()
sd.prior <- SdPrior(sigma.guess = model.args$prior.level.sd * sdy,
upper.limit = sdy,
sample.size = kLocalLevelPriorSampleSize)
ss <- AddLocalLevel(ss, y, sigma.prior = sd.prior)
# Add seasonal component?
if (model.args$nseasons > 1) {
ss <- AddSeasonal(ss, y,
nseasons = model.args$nseasons,
season.duration = model.args$season.duration)
}
# No regression?
if (ncol(data) == 1) {
# TODO(alhauser): remove this if-else-block, and add bsts version to the
# 'Depends' statement in the 'DESCRIPTION' file when bsts 0.7.x is
# published.
if (utils::packageVersion("bsts") < "0.7.0") {
bsts.model <- bsts(y, state.specification = ss, niter = model.args$niter,
ping = 0, save.prediction.errors = TRUE, seed = 1)
} else {
bsts.model <- bsts(y, state.specification = ss, niter = model.args$niter,
seed = 1, ping = 0,
model.options =
BstsOptions(save.prediction.errors = TRUE))
}
} else {
formula <- paste0(names(data)[1], " ~ .")
# Static regression?
if (!model.args$dynamic.regression) {
if (utils::packageVersion("bsts") < "0.7.0") {
bsts.model <- bsts(formula, data = data, state.specification = ss,
expected.model.size =
kStaticRegressionExpectedModelSize,
expected.r2 = kStaticRegressionExpectedR2,
prior.df = kStaticRegressionPriorDf,
save.prediction.errors = TRUE,
niter = model.args$niter, seed = 1, ping = 0)
} else {
bsts.model <- bsts(formula, data = data, state.specification = ss,
expected.model.size =
kStaticRegressionExpectedModelSize,
expected.r2 = kStaticRegressionExpectedR2,
prior.df = kStaticRegressionPriorDf,
niter = model.args$niter, seed = 1, ping = 0,
model.options =
BstsOptions(save.prediction.errors = TRUE))
}
time(bsts.model$original.series) <- time(data)
# Dynamic regression?
} else {
# Since we have predictor variables in the model, we need to explicitly
# make their coefficients time-varying using AddDynamicRegression(). In
# bsts(), we are therefore not giving a formula but just the response
# variable. We are then using SdPrior to only specify the prior on the
# residual standard deviation.
# prior.mean: precision of random walk of coefficients
sigma.mean.prior <- GammaPrior(prior.mean = 1, a = 4)
ss <- AddDynamicRegression(ss, formula, data = data,
sigma.mean.prior = sigma.mean.prior)
sd.prior <- SdPrior(sigma.guess = model.args$prior.level.sd * sdy,
upper.limit = 0.1 * sdy,
sample.size = kDynamicRegressionPriorSampleSize)
bsts.model <- bsts(y, state.specification = ss, niter = model.args$niter,
expected.model.size = 3, ping = 0, seed = 1,
prior = sd.prior)
}
}
return(bsts.model)
}
| /R/impact_model.R | permissive | chenwendi/CausalImpactLite | R | false | false | 10,309 | r | # Copyright 2014-2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Bayesian dynamic diffusion-regression state-space model for computing
# counterfactual predictions in a time series. Uses an MCMC algorithm
# implemented in the \code{bsts} package to compute samples from the posterior
# (smoothing) densities over states and parameters.
#
# Author: kbrodersen@google.com (Kay Brodersen)
# Some model priors are fixed, others can be adjusted through model.args.
# For full flexibility, construct your own bsts model and feed it into
# CausalImpactForBsts().
kLocalLevelPriorSampleSize <- 32
kStaticRegressionExpectedModelSize <- 3
kStaticRegressionExpectedR2 <- 0.8
kStaticRegressionPriorDf <- 50
kDynamicRegressionPriorSampleSize <- 32
ObservationsAreIllConditioned <- function(y) {
# Checks whether the response variable (i.e., the series of observations for
# the dependent variable y) are ill-conditioned. For example, the series might
# contain too few non-NA values. In such cases, inference will be aborted.
#
# Args:
# y: observed series (numeric vector or single zoo series)
#
# Returns:
# TRUE if something is wrong with the observations; FALSE otherwise.
assert_that(!is.null(y), length(y) >= 1)
ill.conditioned <- FALSE
# All NA?
if (all(is.na(y))) {
warning("Aborting inference due to input series being all NA.")
ill.conditioned <- TRUE
# Fewer than 3 non-NA values?
} else if (length(y[!is.na(y)]) < 3) {
warning("Aborting inference due to fewer than 3 non-NA values in input")
ill.conditioned <- TRUE
# Constant series?
} else if (sd(y, na.rm = TRUE) == 0) {
warning(paste0("Aborting inference due to input series being constant: ",
y[!is.na(y)][1]))
ill.conditioned <- TRUE
}
return(ill.conditioned)
}
FormatInputForConstructModel <- function(data, model.args) {
  # Checks the input arguments supplied to ConstructModel(). Missing arguments
  # in \code{model.args} will be filled using \code{.defaults} (see top of file
  # impact_analysis.R).
  #
  # Args:
  #   data: time series of response variable and covariates
  #   model.args: list of additional arguments
  #
  # Returns:
  #   list of checked and correctly formatted arguments

  # Check <data>
  assert_that(!is.null(data))
  data <- as.zoo(data)
  # A plain vector is promoted to a one-column (response-only) series.
  if (is.null(ncol(data))) {
    dim(data) <- c(length(data), 1)
  }
  assert_that(is.numeric(data))
  assert_that(nrow(data) > 0)
  # If <data> has no names, assign: y, x1, x2, ...
  if (is.null(names(data))) {
    if (ncol(data) == 1) {
      names(data)[1] <- "y"
    } else {
      # Response column "y" plus one name per covariate column. (Clearer than
      # the original `2:ncol(data) - 1`, which relied on operator precedence
      # to yield 1:(ncol(data) - 1); the result is identical.)
      names(data) <- c("y", paste0("x", seq_len(ncol(data) - 1)))
    }
  }
  # Check covariates: unlike the response, covariates must be fully observed.
  if (ncol(data) >= 2) {
    assert(all(!is.na(data[, -1])), "covariates must not be NA")
  }
  # (Re-)parse <model.args>, fill gaps using <.defaults>
  # (defined in impact_analysis.R)
  model.args <- ParseArguments(model.args, .defaults)
  # Check those parts of <model.args> that are used in this file
  # Check <niter>
  assert_that(is.scalar(model.args$niter))
  assert_that(is.numeric(model.args$niter))
  assert_that(!is.na(model.args$niter))
  assert_that(is.wholenumber(model.args$niter))
  model.args$niter <- round(model.args$niter)
  assert(model.args$niter >= 10,
         "must draw, at the very least, 10 MCMC samples; recommending 1000")
  if (model.args$niter < 1000) {
    warning("Results potentially inaccurate. Consider using more MCMC samples.")
  }
  # Check <prior.level.sd>
  assert_that(is.scalar(model.args$prior.level.sd))
  assert_that(is.numeric(model.args$prior.level.sd))
  assert_that(!is.na(model.args$prior.level.sd))
  assert_that(model.args$prior.level.sd > 0)
  # Check <nseasons>
  assert_that(is.scalar(model.args$nseasons))
  assert_that(is.numeric(model.args$nseasons))
  assert_that(!is.na(model.args$nseasons))
  assert_that(is.wholenumber(model.args$nseasons))
  # Fixed typo in the message ("seaonsal" -> "seasonal").
  assert(model.args$nseasons >= 1,
         "nseasons cannot be 0; use 1 in order not to have seasonal components")
  # Check <season.duration>
  assert_that(is.scalar(model.args$season.duration))
  assert_that(is.numeric(model.args$season.duration))
  assert_that(!is.na(model.args$season.duration))
  assert_that(is.wholenumber(model.args$season.duration))
  assert_that(model.args$season.duration >= 1)
  # Check <dynamic.regression>
  assert_that(is.scalar(model.args$dynamic.regression))
  assert_that(is.logical(model.args$dynamic.regression))
  assert_that(!is.na(model.args$dynamic.regression))
  # Return updated args
  return(list(data = data, model.args = model.args))
}
# Tell 'R CMD check' to treat `BstsOptions()` as a global variable, so it does
# not raise a false positive while 'bsts' version 0.7.x remains unpublished.
# TODO(alhauser): remove this when 'bsts' version 0.7.x is published.
if (getRversion() >= "2.15.1") utils::globalVariables("BstsOptions")
ConstructModel <- function(data, model.args = NULL) {
  # Specifies the model and performs inference. Inference means using the data
  # to pass from a prior distribution over parameters and states to a posterior
  # distribution. In a Bayesian framework, estimating a model means to obtain
  # p(parameters | data) from p(data | parameters) and p(parameters). This
  # involves multiplying the prior with the likelihood and normalising the
  # resulting distribution using the marginal likelihood or model evidence,
  # p(data). Computing the evidence poses a virtually intractable
  # high-dimensional integration problem which can be turned into an easier
  # optimization problem using, for instance, an approximate stochastic
  # inference strategy. Here, we use a Markov chain Monte Carlo algorithm, as
  # implemented in the \code{bsts} package.
  #
  # Args:
  #   data: time series of response variable and optional covariates
  #   model.args: optional list of additional model arguments
  #
  # Returns:
  #   \code{bsts.model}, as returned by \code{bsts()}, or NULL when the
  #   response series is too ill-conditioned to fit a model
  # Check and format input
  checked <- FormatInputForConstructModel(data, model.args)
  data <- checked$data
  model.args <- checked$model.args
  # By convention, column 1 is the response; any further columns are covariates.
  y <- data[, 1]
  # If the series is ill-conditioned, abort inference and return NULL
  if (ObservationsAreIllConditioned(y)) {
    return(NULL)
  }
  # Local level
  # sigma.guess: standard deviation of the random walk of the level
  sdy <- sd(y, na.rm = TRUE)
  ss <- list()
  sd.prior <- SdPrior(sigma.guess = model.args$prior.level.sd * sdy,
                      upper.limit = sdy,
                      sample.size = kLocalLevelPriorSampleSize)
  ss <- AddLocalLevel(ss, y, sigma.prior = sd.prior)
  # Add seasonal component?
  if (model.args$nseasons > 1) {
    ss <- AddSeasonal(ss, y,
                      nseasons = model.args$nseasons,
                      season.duration = model.args$season.duration)
  }
  # No regression?
  if (ncol(data) == 1) {
    # TODO(alhauser): remove this if-else-block, and add bsts version to the
    # 'Depends' statement in the 'DESCRIPTION' file when bsts 0.7.x is
    # published.
    # seed = 1 fixes the MCMC RNG so that repeated fits give identical draws;
    # bsts < 0.7.0 takes save.prediction.errors directly, newer versions wrap
    # it in BstsOptions().
    if (utils::packageVersion("bsts") < "0.7.0") {
      bsts.model <- bsts(y, state.specification = ss, niter = model.args$niter,
                         ping = 0, save.prediction.errors = TRUE, seed = 1)
    } else {
      bsts.model <- bsts(y, state.specification = ss, niter = model.args$niter,
                         seed = 1, ping = 0,
                         model.options =
                           BstsOptions(save.prediction.errors = TRUE))
    }
  } else {
    formula <- paste0(names(data)[1], " ~ .")
    # Static regression?
    if (!model.args$dynamic.regression) {
      if (utils::packageVersion("bsts") < "0.7.0") {
        bsts.model <- bsts(formula, data = data, state.specification = ss,
                           expected.model.size =
                             kStaticRegressionExpectedModelSize,
                           expected.r2 = kStaticRegressionExpectedR2,
                           prior.df = kStaticRegressionPriorDf,
                           save.prediction.errors = TRUE,
                           niter = model.args$niter, seed = 1, ping = 0)
      } else {
        bsts.model <- bsts(formula, data = data, state.specification = ss,
                           expected.model.size =
                             kStaticRegressionExpectedModelSize,
                           expected.r2 = kStaticRegressionExpectedR2,
                           prior.df = kStaticRegressionPriorDf,
                           niter = model.args$niter, seed = 1, ping = 0,
                           model.options =
                             BstsOptions(save.prediction.errors = TRUE))
      }
      # Carry the time index of the input data over to the series that is
      # stored inside the fitted model object.
      time(bsts.model$original.series) <- time(data)
    # Dynamic regression?
    } else {
      # Since we have predictor variables in the model, we need to explicitly
      # make their coefficients time-varying using AddDynamicRegression(). In
      # bsts(), we are therefore not giving a formula but just the response
      # variable. We are then using SdPrior to only specify the prior on the
      # residual standard deviation.
      # prior.mean: precision of random walk of coefficients
      sigma.mean.prior <- GammaPrior(prior.mean = 1, a = 4)
      ss <- AddDynamicRegression(ss, formula, data = data,
                                 sigma.mean.prior = sigma.mean.prior)
      sd.prior <- SdPrior(sigma.guess = model.args$prior.level.sd * sdy,
                          upper.limit = 0.1 * sdy,
                          sample.size = kDynamicRegressionPriorSampleSize)
      bsts.model <- bsts(y, state.specification = ss, niter = model.args$niter,
                         expected.model.size = 3, ping = 0, seed = 1,
                         prior = sd.prior)
    }
  }
  return(bsts.model)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/f_03_to_model_data.r
\name{f_to_model_data}
\alias{f_to_model_data}
\title{Data transformation: keep model variables and expand categorical variables (internal use)}
\usage{
f_to_model_data(formula, data, id_name, time_name)
}
\arguments{
\item{formula}{Surv(entry_time,exit_time,outcome)~loglin(loglin_var1,..,loglin_varn)+\cr
lin(lin_var1,..,lin_varm)+strata(strat_var1,...strat_varp)}
\item{data}{data set}
\item{id_name}{name of variable containing the names of subjects}
\item{time_name}{name of the time variable}
}
\value{
data set described below
}
\description{
Transform the data set in a closed form n_row | id_name | n_pe | entry_name | exit_name | outcome | time | \cr
linear_covariates | loglinear_covariates.\cr
If a variable is categorical, it is expanded into pure logical indicator variables, one per category (excluding the reference category)
}
\examples{
\dontrun{f_to_model_data(formula,data,id_name='patientids',time_name='time')}
}
| /man/f_to_model_data.Rd | no_license | KartikeyaSethi/rERR | R | false | true | 1,040 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/f_03_to_model_data.r
\name{f_to_model_data}
\alias{f_to_model_data}
\title{Data transformation: keep model variables and expand categorical variables (internal use)}
\usage{
f_to_model_data(formula, data, id_name, time_name)
}
\arguments{
\item{formula}{Surv(entry_time,exit_time,outcome)~loglin(loglin_var1,..,loglin_varn)+\cr
lin(lin_var1,..,lin_varm)+strata(strat_var1,...strat_varp)}
\item{data}{data set}
\item{id_name}{name of variable containing the names of subjects}
\item{time_name}{name of the time variable}
}
\value{
data set described below
}
\description{
Transform the data set in a closed form n_row | id_name | n_pe | entry_name | exit_name | outcome | time | \cr
linear_covariates | loglinear_covariates.\cr
If a variable is categorical, it is expanded into pure logical indicator variables, one per category (excluding the reference category)
}
\examples{
\dontrun{f_to_model_data(formula,data,id_name='patientids',time_name='time')}
}
|
# uci car data
# Pull the UCI "car evaluation" dataset from the book's mirror.
uciCar <- read.table("http://www.win-vector.com/dfiles/car.data.csv", sep=",", header=T);
# uci formatted german bank data
# Raw Statlog German credit data: no header row, so the 21 generic V1..V21
# column names are replaced with descriptive ones just below.
d <- read.table('http://archive.ics.uci.edu/ml/machine-learning-databases/statlog/german/german.data', stringsAsFactors=F,header=F)
colnames(d) <- c(
   'Status.of.existing.checking.account', 'Duration.in.month', 'Credit.history', 'Purpose',
   'Credit.amount', 'Savings account/bonds',
   'Present.employment.since',
   'Installment.rate.in.percentage.of.disposable.income',
   'Personal.status.and.sex', 'Other.debtors/guarantors',
   'Present.residence.since', 'Property', 'Age.in.years',
   'Other.installment.plans', 'Housing',
   'Number.of.existing.credits.at.this.bank', 'Job',
   'Number.of.people.being.liable.to.provide.maintenance.for',
   'Telephone', 'foreign.worker', 'Good.Loan'
)
# Lookup table translating the German-credit attribute codes (A11, A40, ...)
# into the human-readable descriptions from the UCI documentation; consumed
# by the decoding loop that follows.
mapping <- list(
   'A11'='... < 0 DM',
   'A12'='0 <= ... < 200 DM',
   'A13'='... >= 200 DM / salary assignments for at least 1 year',
   'A14'='no checking account',
   'A30'='no credits taken/all credits paid back duly',
   'A31'='all credits at this bank paid back duly',
   'A32'='existing credits paid back duly till now',
   'A33'='delay in paying off in the past',
   'A34'='critical account/other credits existing (not at this bank)',
   'A40'='car (new)',
   'A41'='car (used)',
   'A42'='furniture/equipment',
   'A43'='radio/television',
   'A44'='domestic appliances',
   'A45'='repairs',
   'A46'='education',
   'A47'='(vacation - does not exist?)',
   'A48'='retraining',
   'A49'='business',
   'A410'='others',
   'A61'='... < 100 DM',
   'A62'='100 <= ... < 500 DM',
   'A63'='500 <= ... < 1000 DM',
   'A64'='.. >= 1000 DM',
   'A65'='unknown/ no savings account',
   'A71'='unemployed',
   'A72'='... < 1 year',
   'A73'='1 <= ... < 4 years',
   'A74'='4 <= ... < 7 years',
   'A75'='.. >= 7 years',
   'A91'='male : divorced/separated',
   'A92'='female : divorced/separated/married',
   'A93'='male : single',
   'A94'='male : married/widowed',
   'A95'='female : single',
   'A101'='none',
   'A102'='co-applicant',
   'A103'='guarantor',
   'A121'='real estate',
   'A122'='if not A121 : building society savings agreement/life insurance',
   'A123'='if not A121/A122 : car or other, not in attribute 6',
   'A124'='unknown / no property',
   'A141'='bank',
   'A142'='stores',
   'A143'='none',
   'A151'='rent',
   'A152'='own',
   'A153'='for free',
   'A171'='unemployed/ unskilled - non-resident',
   'A172'='unskilled - resident',
   'A173'='skilled employee / official',
   'A174'='management/ self-employed/highly qualified employee/ officer',
   'A191'='none',
   'A192'='yes, registered under the customers name',
   'A201'='yes',
   'A202'='no'
)
# Decode every character column: translate the raw UCI codes (A11, A40, ...)
# to their human-readable labels via `mapping` and convert to a factor.
for (col in seq_len(ncol(d))) {
  if (is.character(d[[col]])) {
    d[[col]] <- as.factor(as.character(mapping[d[[col]]]))
  }
}
# explore
# Cross-tabulate loan purpose against the raw outcome code and compute the
# share of rows falling under the "GoodLoan" label.
t <- table(d$Purpose,d$Good.Loan)
# NOTE(review): in the UCI Statlog (German credit) data, outcome code 1 = good
# and 2 = bad, so labelling the first column "BadLoan" appears to swap the
# classes -- which would explain why the ratio below comes out as 0.3 rather
# than the dataset's documented 0.7 good-loan share. Confirm before relying
# on these labels.
colnames(t) <- c("BadLoan", "GoodLoan")
df <- data.frame(t)
colnames(df) = c("Type", "LoanOutcome", "Freq")
total = sum(df$Freq)
bad = sum(df[df$LoanOutcome=="BadLoan",]$Freq)
good = sum(df[df$LoanOutcome=="GoodLoan",]$Freq)
good/total # 0.3!
# http://stackoverflow.com/questions/7715723/sourcing-r-script-over-https
# Download an R script to a temporary file and source() it. The external
# 'wget' binary is used because base download.file() fails on some HTTPS
# hosts (see the StackOverflow link above).
wget.and.source <- function(url) {
  fname <- tempfile()
  # Remove the temp file even if download.file() or source() errors out;
  # previously it was only unlinked on the success path, leaking the file.
  on.exit(unlink(fname), add = TRUE)
  download.file(url, fname, method="wget")
  source(fname)
  invisible(NULL)
}
# fails with SSL error
# wget.and.source("http://github.com/WinVector/zmPDSwR/blob/master/PUMS/phsample.RData?raw=true")
# Manual fallback: the RData file was downloaded by hand; load() puts its
# objects into the global environment. NOTE(review): hard-coded, user-specific
# path -- adjust to your own download location.
load("~/Downloads/phsample.RData")
| /R/ch2.R | no_license | drsnyder/practical-data-science | R | false | false | 3,570 | r | # uci car data
# Pull the UCI "car evaluation" dataset from the book's mirror.
uciCar <- read.table("http://www.win-vector.com/dfiles/car.data.csv", sep=",", header=T);
# uci formatted german bank data
# Raw Statlog German credit data: no header row, so the 21 generic V1..V21
# column names are replaced with descriptive ones just below.
d <- read.table('http://archive.ics.uci.edu/ml/machine-learning-databases/statlog/german/german.data', stringsAsFactors=F,header=F)
colnames(d) <- c(
   'Status.of.existing.checking.account', 'Duration.in.month', 'Credit.history', 'Purpose',
   'Credit.amount', 'Savings account/bonds',
   'Present.employment.since',
   'Installment.rate.in.percentage.of.disposable.income',
   'Personal.status.and.sex', 'Other.debtors/guarantors',
   'Present.residence.since', 'Property', 'Age.in.years',
   'Other.installment.plans', 'Housing',
   'Number.of.existing.credits.at.this.bank', 'Job',
   'Number.of.people.being.liable.to.provide.maintenance.for',
   'Telephone', 'foreign.worker', 'Good.Loan'
)
# Lookup table translating the German-credit attribute codes (A11, A40, ...)
# into the human-readable descriptions from the UCI documentation; consumed
# by the decoding loop that follows.
mapping <- list(
   'A11'='... < 0 DM',
   'A12'='0 <= ... < 200 DM',
   'A13'='... >= 200 DM / salary assignments for at least 1 year',
   'A14'='no checking account',
   'A30'='no credits taken/all credits paid back duly',
   'A31'='all credits at this bank paid back duly',
   'A32'='existing credits paid back duly till now',
   'A33'='delay in paying off in the past',
   'A34'='critical account/other credits existing (not at this bank)',
   'A40'='car (new)',
   'A41'='car (used)',
   'A42'='furniture/equipment',
   'A43'='radio/television',
   'A44'='domestic appliances',
   'A45'='repairs',
   'A46'='education',
   'A47'='(vacation - does not exist?)',
   'A48'='retraining',
   'A49'='business',
   'A410'='others',
   'A61'='... < 100 DM',
   'A62'='100 <= ... < 500 DM',
   'A63'='500 <= ... < 1000 DM',
   'A64'='.. >= 1000 DM',
   'A65'='unknown/ no savings account',
   'A71'='unemployed',
   'A72'='... < 1 year',
   'A73'='1 <= ... < 4 years',
   'A74'='4 <= ... < 7 years',
   'A75'='.. >= 7 years',
   'A91'='male : divorced/separated',
   'A92'='female : divorced/separated/married',
   'A93'='male : single',
   'A94'='male : married/widowed',
   'A95'='female : single',
   'A101'='none',
   'A102'='co-applicant',
   'A103'='guarantor',
   'A121'='real estate',
   'A122'='if not A121 : building society savings agreement/life insurance',
   'A123'='if not A121/A122 : car or other, not in attribute 6',
   'A124'='unknown / no property',
   'A141'='bank',
   'A142'='stores',
   'A143'='none',
   'A151'='rent',
   'A152'='own',
   'A153'='for free',
   'A171'='unemployed/ unskilled - non-resident',
   'A172'='unskilled - resident',
   'A173'='skilled employee / official',
   'A174'='management/ self-employed/highly qualified employee/ officer',
   'A191'='none',
   'A192'='yes, registered under the customers name',
   'A201'='yes',
   'A202'='no'
)
# Decode every character column: translate the raw UCI codes (A11, A40, ...)
# to their human-readable labels via `mapping` and convert to a factor.
for (col in seq_len(ncol(d))) {
  if (is.character(d[[col]])) {
    d[[col]] <- as.factor(as.character(mapping[d[[col]]]))
  }
}
# explore
# Cross-tabulate loan purpose against the raw outcome code and compute the
# share of rows falling under the "GoodLoan" label.
t <- table(d$Purpose,d$Good.Loan)
# NOTE(review): in the UCI Statlog (German credit) data, outcome code 1 = good
# and 2 = bad, so labelling the first column "BadLoan" appears to swap the
# classes -- which would explain why the ratio below comes out as 0.3 rather
# than the dataset's documented 0.7 good-loan share. Confirm before relying
# on these labels.
colnames(t) <- c("BadLoan", "GoodLoan")
df <- data.frame(t)
colnames(df) = c("Type", "LoanOutcome", "Freq")
total = sum(df$Freq)
bad = sum(df[df$LoanOutcome=="BadLoan",]$Freq)
good = sum(df[df$LoanOutcome=="GoodLoan",]$Freq)
good/total # 0.3!
# http://stackoverflow.com/questions/7715723/sourcing-r-script-over-https
# Download an R script to a temporary file and source() it. The external
# 'wget' binary is used because base download.file() fails on some HTTPS
# hosts (see the StackOverflow link above).
wget.and.source <- function(url) {
  fname <- tempfile()
  # Remove the temp file even if download.file() or source() errors out;
  # previously it was only unlinked on the success path, leaking the file.
  on.exit(unlink(fname), add = TRUE)
  download.file(url, fname, method="wget")
  source(fname)
  invisible(NULL)
}
# fails with SSL error
# wget.and.source("http://github.com/WinVector/zmPDSwR/blob/master/PUMS/phsample.RData?raw=true")
# Manual fallback: the RData file was downloaded by hand; load() puts its
# objects into the global environment. NOTE(review): hard-coded, user-specific
# path -- adjust to your own download location.
load("~/Downloads/phsample.RData")
|
#' @title Set up R project
#' @description This function allows you to set up your R project structure with ease
#'
#' @param data Create folder called data? TRUE or FALSE
#' @param output Create folder called output? TRUE or FALSE
#' @param plots Create folder called plots in output? TRUE or FALSE
#' @param stan Use stan in this project? Create folder called stan? TRUE or FALSE
#' @param text Will this project contain text? Create folder called text? TRUE or FALSE
#' If TRUE and output is TRUE a folder output/text will be created as well
#' @param drake Use drake structure for this project? TRUE or FALSE
#' @param rfiles Optional. A character vector with the names of r files to add
#' @param readme Add README.Rmd to project? TRUE or FALSE
#'
#' @return Creates structure based on settings
#'
#' @import usethis
#' @import here
#'
#' @export
#'
#' @examples
#' # create_project(stan = TRUE, rfiles = c("01_load", "02_analyze"))
create_project <- function(
  data = TRUE,
  output = TRUE,
  plots = TRUE,
  text = TRUE,
  stan = FALSE,
  drake = TRUE,
  rfiles = NULL,
  readme = TRUE
){
  # Create a directory (relative to the project root, located via here::here)
  # unless it already exists, and report what was done.
  create_dir <- function(rel) {
    path <- do.call(here::here, as.list(rel))
    if (!dir.exists(path)) {
      dir.create(path)
      message("-> created ", paste(rel, collapse = "/"), "/")
    }
  }
  # Create an (empty) file unless it already exists, and report what was done.
  create_file <- function(path, label) {
    if (!file.exists(path)) {
      file.create(path)
      message("-> created ", label)
    }
  }
  if (readme) {
    if (!file.exists("README.Rmd")) {
      usethis::use_readme_rmd(open = FALSE)
      message("-> created README.Rmd")
    }
  }
  create_dir("R")
  if (data) {
    create_dir("data")
  }
  if (output) {
    # BUGFIX: the original also printed "-> created output/data" here, but no
    # such folder was ever created; the misleading message is removed.
    create_dir("output")
  }
  if (stan) {
    create_dir("stan")
  }
  if (text) {
    create_dir("text")
  }
  if (text && output) {
    create_dir(c("output", "text"))
  }
  if (plots && output) {
    create_dir(c("output", "plots"))
  }
  if (drake) {
    # BUGFIX: the original overwrote the user-supplied `rfiles` with drake's
    # scaffolding names, so custom scripts were silently dropped whenever
    # drake = TRUE (the default). The drake files now live in their own
    # variable and the user's `rfiles` are handled below.
    drake.files <- c("packages", "functions", "plan")
    for (x in drake.files) {
      create_file(here::here("R", paste0(x, ".R")), paste0(x, ".R"))
    }
    create_file(here::here("make.R"), "make.R")
  }
  if (!is.null(rfiles)) {
    for (x in rfiles) {
      create_file(here::here("R", paste0(x, ".R")), paste0(x, ".R"))
    }
  }
}
| /R/create_project.R | permissive | nschiett/fishproject | R | false | false | 2,909 | r | #' @title Set up R project
#' @description This function allows you to set up your R project structure with ease
#'
#' @param data Create folder called data? TRUE or FALSE
#' @param output Create folder called output? TRUE or FALSE
#' @param plots Create folder called plots in output? TRUE or FALSE
#' @param stan Use stan in this project? Create folder called stan? TRUE or FALSE
#' @param text Will this project contain text? Create folder called text? TRUE or FALSE
#' If TRUE and output is TRUE a folder output/text will be created as well
#' @param drake Use drake structure for this project? TRUE or FALSE
#' @param rfiles Optional. A character vector with the names of r files to add
#' @param readme Add README.Rmd to project? TRUE or FALSE
#'
#' @return Creates structure based on settings
#'
#' @import usethis
#' @import here
#'
#' @export
#'
#' @examples
#' # create_project(stan = TRUE, rfiles = c("01_load", "02_analyze"))
create_project <- function(
  data = TRUE,
  output = TRUE,
  plots = TRUE,
  text = TRUE,
  stan = FALSE,
  drake = TRUE,
  rfiles = NULL,
  readme = TRUE
){
  # Create a directory (relative to the project root, located via here::here)
  # unless it already exists, and report what was done.
  create_dir <- function(rel) {
    path <- do.call(here::here, as.list(rel))
    if (!dir.exists(path)) {
      dir.create(path)
      message("-> created ", paste(rel, collapse = "/"), "/")
    }
  }
  # Create an (empty) file unless it already exists, and report what was done.
  create_file <- function(path, label) {
    if (!file.exists(path)) {
      file.create(path)
      message("-> created ", label)
    }
  }
  if (readme) {
    if (!file.exists("README.Rmd")) {
      usethis::use_readme_rmd(open = FALSE)
      message("-> created README.Rmd")
    }
  }
  create_dir("R")
  if (data) {
    create_dir("data")
  }
  if (output) {
    # BUGFIX: the original also printed "-> created output/data" here, but no
    # such folder was ever created; the misleading message is removed.
    create_dir("output")
  }
  if (stan) {
    create_dir("stan")
  }
  if (text) {
    create_dir("text")
  }
  if (text && output) {
    create_dir(c("output", "text"))
  }
  if (plots && output) {
    create_dir(c("output", "plots"))
  }
  if (drake) {
    # BUGFIX: the original overwrote the user-supplied `rfiles` with drake's
    # scaffolding names, so custom scripts were silently dropped whenever
    # drake = TRUE (the default). The drake files now live in their own
    # variable and the user's `rfiles` are handled below.
    drake.files <- c("packages", "functions", "plan")
    for (x in drake.files) {
      create_file(here::here("R", paste0(x, ".R")), paste0(x, ".R"))
    }
    create_file(here::here("make.R"), "make.R")
  }
  if (!is.null(rfiles)) {
    for (x in rfiles) {
      create_file(here::here("R", paste0(x, ".R")), paste0(x, ".R"))
    }
  }
}
|
## Matrix-inversion caching utilities (Coursera ProgrammingAssignment2).
## makeCacheMatrix() builds a wrapper object holding a matrix together with
## getters/setters for the matrix and for a cached copy of its inverse;
## cacheSolve() returns the inverse, computing and caching it on first use.
## Create a special "matrix" object that can cache its inverse.
## Returns a list of four accessor functions closing over the matrix `x`
## and its cached inverse:
##   set(y)         -- replace the stored matrix and drop the stale cache
##   get()          -- return the stored matrix
##   setInverse(i)  -- store a computed inverse in the cache
##   getInverse()   -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inverse <- NULL
  set <- function(y) {
    # Store the new matrix in the enclosing environment and invalidate the
    # cached inverse. (The original used `inverse <- NULL`, a local
    # assignment, so a stale inverse survived matrix replacement, and
    # assigned the matrix to `m`, leaking it to the global environment.)
    x <<- y
    inverse <<- NULL
  }
  # Return the stored matrix itself (the original returned an undefined
  # free variable `m`, which errored unless set() had leaked it globally).
  get <- function() x
  setInverse <- function(inv) {
    inverse <<- inv
  }
  getInverse <- function() inverse
  list(
    set = set, get = get,
    setInverse = setInverse,
    getInverse = getInverse
  )
}
## Return the inverse of a matrix if it exists
## Compute it and cache it if it doesn't, and then return it
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## The cached inverse is returned when present; otherwise the inverse is
## computed with solve(), stored back into the cache, and returned.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: compute, remember, and fall through to the single return.
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  }
  cached
}
| /cachematrix.R | no_license | cristisor/ProgrammingAssignment2 | R | false | false | 1,004 | r | ## Put comments here that give an overall description of what your
## functions do
## The function returns a list with the properties to set and get the
## initial input, and to set and get the cached inverse
## Create a special "matrix" object that can cache its inverse.
## Returns a list of four accessor functions closing over the matrix `x`
## and its cached inverse:
##   set(y)         -- replace the stored matrix and drop the stale cache
##   get()          -- return the stored matrix
##   setInverse(i)  -- store a computed inverse in the cache
##   getInverse()   -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inverse <- NULL
  set <- function(y) {
    # Store the new matrix in the enclosing environment and invalidate the
    # cached inverse. (The original used `inverse <- NULL`, a local
    # assignment, so a stale inverse survived matrix replacement, and
    # assigned the matrix to `m`, leaking it to the global environment.)
    x <<- y
    inverse <<- NULL
  }
  # Return the stored matrix itself (the original returned an undefined
  # free variable `m`, which errored unless set() had leaked it globally).
  get <- function() x
  setInverse <- function(inv) {
    inverse <<- inv
  }
  getInverse <- function() inverse
  list(
    set = set, get = get,
    setInverse = setInverse,
    getInverse = getInverse
  )
}
## Return the inverse of a matrix if it exists
## Compute it and cache it if it doesn't, and then return it
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## The cached inverse is returned when present; otherwise the inverse is
## computed with solve(), stored back into the cache, and returned.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: compute, remember, and fall through to the single return.
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  }
  cached
}
|
# Marginal-likelihood / Bayes-factor computation for 12 candidate models.
# Relies on objects created by the two source()d files below: XX (a list of
# design matrices), k1, p, Vcr, rankMatrix(), as.Symmetric.matrix(), fmarg()
# and cuhre() -- presumably defined in utility_functions.R / data.R and the
# packages they load (TODO confirm).
rm(list=ls())
set.seed(2016)
options(digits=15)
source('utility_functions.R')
source('data.R')
V=Vcr
G = k1- rankMatrix(V)[1] # number of islands
diag.V = diag(V)
# Time-stamped output folder for this run.
# NOTE(review): paste0() has no `sep` argument; `sep = ""` is simply pasted
# on as another (empty) string here, so the call only works by accident.
ts = format(Sys.time(), "%d%m%y_%H%M%S")
folderName = paste0("marginal_", ts, sep = "")
dir.create(path = folderName)
start.time=Sys.time()
M=12
# Number of columns of each candidate design matrix.
# NOTE(review): rr is never used again in this script.
rr=rep(0, M)
for(i in 1:M) rr[i]=dim(XX[[i]])[2]
#==========================================
# Marginal densities
#==========================================
marg_num1=rep(0,12)
# Hyperparameters, presumably read by fmarg() via the global environment --
# confirm, as they are not passed to cuhre() explicitly.
a.e=0.01; b.e=0.01
a.s=0.01; b.s=0.01
for( i in 1:M)
{
  # i=7
  # Split the design matrix into the first k1 rows (XT) and the rest (XS).
  r=dim(XX[[i]])[2]
  X=as.matrix(XX[[i]])
  XT=as.matrix(X[1:k1,])
  XS=as.matrix(X[(k1+1):p,])
  # B is the hat (projection) matrix onto the column space of XT.
  B=as.Symmetric.matrix(XT %*% solve(t(XT)%*% XT) %*% t(XT))
  eigB=eigen(B)
  # Keep eigenvectors with eigenvalue ~1 (the projection directions).
  # The i == 1 branch applies an extra t(), presumably because only a single
  # eigenvector survives and the subset drops to a plain vector -- confirm.
  if(i == 1)
  {
    K=as.matrix(t(eigB$vectors))
    K=as.matrix(t(K[eigB$values >0.9, ]))
    KT=K[,1:k1 ]
  }
  if(i != 1)
  {
    K=as.matrix(t(eigB$vectors))
    K=as.matrix(K[eigB$values >0.9, ])
    KT=K[,1:k1 ]
  }
  # Complementary projection onto the orthogonal complement of col(XT).
  Bc=diag(k1)-B
  eigBc=eigen(Bc)
  L=t(eigBc$vectors)
  L=L[eigBc$values >0.9, ]
  LT=L[,1:k1 ]
  V.curl=L%*%V%*%t(L)
  topaste=paste0('Model ',i)
  print(topaste)
  # Two-dimensional numerical integration (cubature) of the unnormalised
  # marginal density fmarg over (1e-10, 20] x (1e-10, 20].
  numint=cuhre(ndim=2, ncomp=1, integrand=fmarg, XT = XT, L = L, V.curl = V.curl,
               lower=rep(10^(-10),2), upper=rep(20,2),
               rel.tol=10^(-10), abs.tol = 0,
               flags=list(verbose=0, final=0, pseudo.random=0, mersenne.seed=NULL),
               min.eval=0, max.eval=20000)
  marg_num1[i]=numint$value
  print(marg_num1[i])
} # end of for(i in 1:M)
# Bayes factors relative to the best-scoring model, plus a CSV summary.
BF_num1=marg_num1/max(marg_num1)
bf.out=cbind(marg_num1, BF_num1)
dimnames(bf.out)=list(as.character(c(1:12)), c('marg_cuhre',' BF_cuhre'))
print(bf.out)
fname6=paste(folderName, "/marginal_densities.csv", sep = "")
write.csv(bf.out, file=fname6, quote = F,row.names=T)
| /marginal.R | no_license | soumenstat89/multiplesources | R | false | false | 1,728 | r |
# Marginal-likelihood / Bayes-factor computation for 12 candidate models.
# Relies on objects created by the two source()d files below: XX (a list of
# design matrices), k1, p, Vcr, rankMatrix(), as.Symmetric.matrix(), fmarg()
# and cuhre() -- presumably defined in utility_functions.R / data.R and the
# packages they load (TODO confirm).
rm(list=ls())
set.seed(2016)
options(digits=15)
source('utility_functions.R')
source('data.R')
V=Vcr
G = k1- rankMatrix(V)[1] # number of islands
diag.V = diag(V)
# Time-stamped output folder for this run.
# NOTE(review): paste0() has no `sep` argument; `sep = ""` is simply pasted
# on as another (empty) string here, so the call only works by accident.
ts = format(Sys.time(), "%d%m%y_%H%M%S")
folderName = paste0("marginal_", ts, sep = "")
dir.create(path = folderName)
start.time=Sys.time()
M=12
# Number of columns of each candidate design matrix.
# NOTE(review): rr is never used again in this script.
rr=rep(0, M)
for(i in 1:M) rr[i]=dim(XX[[i]])[2]
#==========================================
# Marginal densities
#==========================================
marg_num1=rep(0,12)
# Hyperparameters, presumably read by fmarg() via the global environment --
# confirm, as they are not passed to cuhre() explicitly.
a.e=0.01; b.e=0.01
a.s=0.01; b.s=0.01
for( i in 1:M)
{
  # i=7
  # Split the design matrix into the first k1 rows (XT) and the rest (XS).
  r=dim(XX[[i]])[2]
  X=as.matrix(XX[[i]])
  XT=as.matrix(X[1:k1,])
  XS=as.matrix(X[(k1+1):p,])
  # B is the hat (projection) matrix onto the column space of XT.
  B=as.Symmetric.matrix(XT %*% solve(t(XT)%*% XT) %*% t(XT))
  eigB=eigen(B)
  # Keep eigenvectors with eigenvalue ~1 (the projection directions).
  # The i == 1 branch applies an extra t(), presumably because only a single
  # eigenvector survives and the subset drops to a plain vector -- confirm.
  if(i == 1)
  {
    K=as.matrix(t(eigB$vectors))
    K=as.matrix(t(K[eigB$values >0.9, ]))
    KT=K[,1:k1 ]
  }
  if(i != 1)
  {
    K=as.matrix(t(eigB$vectors))
    K=as.matrix(K[eigB$values >0.9, ])
    KT=K[,1:k1 ]
  }
  # Complementary projection onto the orthogonal complement of col(XT).
  Bc=diag(k1)-B
  eigBc=eigen(Bc)
  L=t(eigBc$vectors)
  L=L[eigBc$values >0.9, ]
  LT=L[,1:k1 ]
  V.curl=L%*%V%*%t(L)
  topaste=paste0('Model ',i)
  print(topaste)
  # Two-dimensional numerical integration (cubature) of the unnormalised
  # marginal density fmarg over (1e-10, 20] x (1e-10, 20].
  numint=cuhre(ndim=2, ncomp=1, integrand=fmarg, XT = XT, L = L, V.curl = V.curl,
               lower=rep(10^(-10),2), upper=rep(20,2),
               rel.tol=10^(-10), abs.tol = 0,
               flags=list(verbose=0, final=0, pseudo.random=0, mersenne.seed=NULL),
               min.eval=0, max.eval=20000)
  marg_num1[i]=numint$value
  print(marg_num1[i])
} # end of for(i in 1:M)
# Bayes factors relative to the best-scoring model, plus a CSV summary.
BF_num1=marg_num1/max(marg_num1)
bf.out=cbind(marg_num1, BF_num1)
dimnames(bf.out)=list(as.character(c(1:12)), c('marg_cuhre',' BF_cuhre'))
print(bf.out)
fname6=paste(folderName, "/marginal_densities.csv", sep = "")
write.csv(bf.out, file=fname6, quote = F,row.names=T)
|
# Auto-generated example script for webchem::etox_basic(), extracted from the
# package's Rd examples. The `## Not run:` block is kept unevaluated because
# it performs live web queries against the ETOX database.
library(webchem)
### Name: etox_basic
### Title: Get basic information from a ETOX ID
### Aliases: etox_basic
### ** Examples
## Not run: 
##D id <- get_etoxid('Triclosan', match = 'best')
##D etox_basic(id$etoxid)
##D 
##D # Retrieve CAS for multiple inputs
##D ids <- c("20179", "9051")
##D out <- etox_basic(ids)
##D out
##D 
##D # extract ec numbers
##D sapply(out, function(y) y$ec)
## End(Not run)
| /data/genthat_extracted_code/webchem/examples/etox_basic.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 411 | r | library(webchem)
# Auto-generated example script for webchem::etox_basic(), extracted from the
# package's Rd examples. The `## Not run:` block is kept unevaluated because
# it performs live web queries against the ETOX database.
### Name: etox_basic
### Title: Get basic information from a ETOX ID
### Aliases: etox_basic
### ** Examples
## Not run: 
##D id <- get_etoxid('Triclosan', match = 'best')
##D etox_basic(id$etoxid)
##D 
##D # Retrieve CAS for multiple inputs
##D ids <- c("20179", "9051")
##D out <- etox_basic(ids)
##D out
##D 
##D # extract ec numbers
##D sapply(out, function(y) y$ec)
## End(Not run)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cv.R
\name{exh.train.cv}
\alias{exh.train.cv}
\title{Cross-validation of a panel}
\usage{
exh.train.cv(
data,
predictors,
response,
fixed.predictors = NULL,
id = NULL,
levels = base::levels(as.factor(data[[response]])),
na.rm = FALSE,
nreps = 10,
k = 10,
stratified = TRUE,
verbose = FALSE,
working.dir = NULL,
java.keep.files = TRUE,
...
)
}
\arguments{
\item{data}{the dataset}
\item{predictors}{the name of the columns to be tested as predictors in the panel. May or may not be used.}
\item{response}{the binary response column name}
\item{fixed.predictors}{predictors to force into the panel}
\item{id}{the name of a column storing unique IDs for the observations}
\item{levels}{the values of \code{response} to use as negative and positive data}
\item{na.rm}{whether to omit rows with \code{NA} observations}
\item{nreps}{number of cross-validation repeats}
\item{k}{number of folds of the cross-validation (use \code{k = nrow(data)} for leave-one-out CV)}
\item{stratified}{whether to keep the balance of positive/negative samples in all the CV folds}
\item{verbose}{enables additional output for debugging}
\item{working.dir, java.keep.files}{same as for \code{\link{exh.train}}}
\item{...}{further arguments passed to \code{\link{exh.train}}. \code{constrain.on}, \code{min.constr}, \code{panels.of.num} or \code{limit.java.threads} are probably the most useful ones.}
}
\description{
Calls exh.train in nreps repeats of k-fold cross-validation to evaluate performance
}
\examples{
data(aSAH, package="pROC")
exh.train.cv(aSAH, c("age", "s100b", "ndka"), "outcome")
}
| /man/exh.train.cv.Rd | no_license | xrobin/PanelomiX | R | false | true | 1,675 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cv.R
\name{exh.train.cv}
\alias{exh.train.cv}
\title{Cross-validation of a panel}
\usage{
exh.train.cv(
data,
predictors,
response,
fixed.predictors = NULL,
id = NULL,
levels = base::levels(as.factor(data[[response]])),
na.rm = FALSE,
nreps = 10,
k = 10,
stratified = TRUE,
verbose = FALSE,
working.dir = NULL,
java.keep.files = TRUE,
...
)
}
\arguments{
\item{data}{the dataset}
\item{predictors}{the name of the columns to be tested as predictors in the panel. May or may not be used.}
\item{response}{the binary response column name}
\item{fixed.predictors}{predictors to force into the panel}
\item{id}{the name of a column storing unique IDs for the observations}
\item{levels}{the values of \code{response} to use as negative and positive data}
\item{na.rm}{whether to omit rows with \code{NA} observations}
\item{nreps}{number of cross-validation repeats}
\item{k}{number of folds of the cross-validation (use \code{k = nrow(data)} for leave-one-out CV)}
\item{stratified}{whether to keep the balance of positive/negative samples in all the CV folds}
\item{verbose}{enables additional output for debugging}
\item{working.dir, java.keep.files}{same as for \code{\link{exh.train}}}
\item{...}{further arguments passed to \code{\link{exh.train}}. \code{constrain.on}, \code{min.constr}, \code{panels.of.num} or \code{limit.java.threads} are probably the most useful ones.}
}
\description{
Calls exh.train in nreps repeats of k-fold cross-validation to evaluate performance
}
\examples{
data(aSAH, package="pROC")
exh.train.cv(aSAH, c("age", "s100b", "ndka"), "outcome")
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.