content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions.R
\name{ROSBoost.R2}
\alias{ROSBoost.R2}
\title{ROSBoost variant of AdaBoost.R2}
\usage{
ROSBoost.R2(
form,
train,
test,
t_final = 100,
power = 2,
perc.O = 1.5,
rel.thr = 0.9,
coef = 1.5,
...
)
}
\arguments{
\item{form}{The model formula.}
\item{train}{A data.frame with the training data.}
\item{test}{A data.frame with the test data.}
\item{t_final}{The number of maximum boosting iterations. Default is 100.}
\item{power}{Type of loss function, e.g. linear (1), squared (2). Default is 2.}
\item{perc.O}{Percentage for Oversampling via Random Oversampling, i.e. percentage of extreme cases to be generated. Default is 1.5.}
\item{rel.thr}{Relevance threshold. Default is 0.9.}
\item{coef}{Coefficient used in boxplot statistics, which is used to create the relevance function. Default is 1.5.}
\item{...}{Dots are passed to rpart}
}
\value{
Returns a vector with the predictions made by ROSBoost.R2.
}
\description{
ROSBoost variant of AdaBoost.R2
}
\examples{
data(Boston,package="MASS")
idx <- sample(1:nrow(Boston),nrow(Boston)*0.75)
form <- medv ~ .
train <- Boston[idx,]
test <- Boston[-idx,]
preds <- ROSBoost.R2(form,train,test)
}
| /man/ROSBoost.R2.Rd | no_license | nunompmoniz/ReBoost | R | false | true | 1,259 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions.R
\name{ROSBoost.R2}
\alias{ROSBoost.R2}
\title{ROSBoost variant of AdaBoost.R2}
\usage{
ROSBoost.R2(
form,
train,
test,
t_final = 100,
power = 2,
perc.O = 1.5,
rel.thr = 0.9,
coef = 1.5,
...
)
}
\arguments{
\item{form}{The model formula.}
\item{train}{A data.frame with the training data.}
\item{test}{A data.frame with the test data.}
\item{t_final}{The number of maximum boosting iterations. Default is 100.}
\item{power}{Type of loss function, e.g. linear (1), squared (2). Default is 2.}
\item{perc.O}{Percentage for Oversampling via Random Oversampling, i.e. percentage of extreme cases to be generated. Default is 1.5.}
\item{rel.thr}{Relevance threshold. Default is 0.9.}
\item{coef}{Coefficient used in boxplot statistics, which is used to create the relevance function. Default is 1.5.}
\item{...}{Dots are passed to rpart}
}
\value{
Returns a vector with the predictions made by ROSBoost.R2.
}
\description{
ROSBoost variant of AdaBoost.R2
}
\examples{
data(Boston,package="MASS")
idx <- sample(1:nrow(Boston),nrow(Boston)*0.75)
form <- medv ~ .
train <- Boston[idx,]
test <- Boston[-idx,]
preds <- ROSBoost.R2(form,train,test)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{is.empty}
\alias{is.empty}
\title{Check if an object is empty (has length 0)}
\usage{
is.empty(x)
}
\arguments{
\item{x}{the object}
}
\description{
Check if an object is empty (has length 0)
}
\details{
A \code{NULL} value, a zero-length vector, or a zero-length list all have
length zero; such an object is called empty.
}
| /man/is.empty.Rd | permissive | renkun-ken/rlist | R | false | true | 393 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{is.empty}
\alias{is.empty}
\title{Check if an object is empty (has length 0)}
\usage{
is.empty(x)
}
\arguments{
\item{x}{the object}
}
\description{
Check if an object is empty (has length 0)
}
\details{
A \code{NULL} value, a zero-length vector, or a zero-length list all have
length zero; such an object is called empty.
}
|
################################################################################
# Server logic of the Shiny app
#
# Author: Lathan Liou
# Created: Fri Sep 18 09:57:20 2020 ------------------------------
################################################################################
source("src/server-mods.R")
# 1. Shiny ----
library(shiny)
library(shinyWidgets) #custom widgets, allows for shinydashboard elements
library(shinycssloaders) #custom loading icons
library(shinyjs) #improved user exp
library(shinyBS) #custom widgets
library(bsplus)
# library(shinyalert)
library(shinyFeedback) #for user feedback messages
library(tippy) #for hovers
# library(highcharter) #for animated plots
library(plotly)
library(waiter) #for loading screen
library(sever) #for waiting screen
library(knitr)
library(shinydashboard)
library(shinydashboardPlus)
# library(shinyanimate)
# 2. Data Manipulation
library(tidyverse)
library(dplyr)
library(lubridate)
# library(reactable)
#make sure github dev version is installed
# devtools::install_github("https://github.com/dsrobertson/onlineFDR")
library(onlineFDR)
#for alg recommendation feature
demodata <- read_csv("powerFDRdata.csv") %>%
mutate(pi.vec = round(pi.vec, 2))
#for hover functionality
with_tooltip <- function(value, tooltip, ...) {
div(style = "text-decoration: underline; text-decoration-style: dotted; cursor: help",
tippy(value, tooltip, ...))
}
`%!in%` = Negate(`%in%`)
# Server logic of the Shiny app.
#
# Wires the uploaded p-value data into one module per online-FDR procedure
# (LOND, LORD, SAFFRON, ADDIS sync/async, alpha-investing and the *star
# asynchronous variants), and drives the "get started" page, which looks up
# the highest-power procedure in pre-computed simulation results (`demodata`).
server <- function(input, output, session) {
  sever()
  Sys.sleep(0.5)   # brief pause so the loading screen is visible
  waiter_hide()

  # Load in data: the validated, parsed CSV upload (reactive).
  in_data <- reactive({
    req(input$file)
    ext <- tools::file_ext(input$file$name)
    # NOTE(review): file_ext() returns only the bare extension (e.g. "csv"),
    # so the MIME-type entries below can never match; kept for safety.
    shiny::validate(need(ext %in% c(
      'text/csv',
      'text/comma-separated-values',
      'text/tab-separated-values',
      'text/plain',
      'csv',
      'tsv'),
      "Please upload a csv file!"))
    data <- read_csv(input$file$datapath) %>%
      dplyr::mutate(across(any_of("date"), ~as.Date(.x, format = "%m/%d/%y")))
  })

  # Warn the user when the uploaded file has an unsupported extension.
  observeEvent(input$file, {
    ext <- tools::file_ext(input$file$name)
    if (ext %!in% c(
      'text/csv',
      'text/comma-separated-values',
      'text/tab-separated-values',
      'text/plain',
      'csv',
      'tsv')) {
      # FIX: showNotification()'s type must be one of "default", "message",
      # "warning", "error"; the abbreviation "err" only worked via partial
      # matching.
      shiny::showNotification("Your file format is not supported. Please upload a CSV file!", type = "error",
                              duration = NULL)
    }
  })

  #### LOND ####
  LOND_result <- callModule(LONDServer, id = "inputLOND", data = in_data)
  callModule(LONDcountServer, "LONDcount", LOND_result)
  callModule(LONDplotServer, "LONDplot", LOND_result)

  #### LORD ####
  LORD_result <- callModule(LORDServer, id = "inputLORD", data = in_data)
  callModule(LORDcountServer, "LORDcount", LORD_result)
  callModule(LORDplotServer, "LORDplot", LORD_result)
  # gray out inputs conditionally
  shinyjs::onclick("advanced2",
                   shinyjs::toggle(id = "advanced2", anim = TRUE))

  #### SAFFRON ####
  SAFFRON_result <- callModule(SAFFRONServer, id = "inputSAFFRON", data = in_data)
  callModule(SAFFRONcountServer, "SAFFRONcount", SAFFRON_result)
  callModule(SAFFRONplotServer, "SAFFRONplot", SAFFRON_result)

  #### ADDIS Sync ####
  ADDIS_result <- callModule(ADDISServer, id = "inputADDIS", data = in_data)
  callModule(ADDIScountServer, "ADDIScount", ADDIS_result)
  callModule(ADDISplotServer, "ADDISplot", ADDIS_result)

  #### Alpha-Investing ####
  alphainvesting_result <- callModule(alphainvestingServer, id = "inputalphainvesting", data = in_data)
  callModule(alphainvestingcountServer, "alphainvestcount", alphainvesting_result)
  callModule(alphainvestingplotServer, "alphainvestplot", alphainvesting_result)

  #### ADDIS Async ####
  ADDISa_result <- callModule(ADDISServer, id = "inputADDISa", data = in_data)
  # BUG FIX: these two calls previously passed the synchronous ADDIS_result,
  # so the async panel silently displayed the synchronous results.
  callModule(ADDIScountServer, "ADDISacount", ADDISa_result)
  callModule(ADDISplotServer, "ADDISaplot", ADDISa_result)

  #### LONDstar ####
  LONDSTAR_result <- callModule(LONDSTARServer, id = "inputLONDSTAR", data = in_data)
  callModule(LONDSTARcountServer, "LONDSTARcount", LONDSTAR_result)
  callModule(LONDSTARplotServer, "LONDSTARplot", LONDSTAR_result)

  #### LORDstar ####
  LORDSTAR_result <- callModule(LORDSTARServer, id = "inputLORDSTAR", data = in_data)
  callModule(LORDSTARcountServer, "LORDSTARcount", LORDSTAR_result)
  callModule(LORDSTARplotServer, "LORDSTARplot", LORDSTAR_result)

  #### SAFFRONstar ####
  SAFFRONSTAR_result <- callModule(SAFFRONSTARServer, id = "inputSAFFRONSTAR", data = in_data)
  callModule(SAFFRONSTARcountServer, "SAFFRONSTARcount", SAFFRONSTAR_result)
  callModule(SAFFRONSTARplotServer, "SAFFRONSTARplot", SAFFRONSTAR_result)

  #### get started page ####
  observe({
    toggle(id = "novice", condition = input$checkbox)
  })

  # Simulation results matching the user's scenario, sorted by decreasing power.
  filter_data <- reactive({
    size = as.numeric(input$size)
    boundstat = ifelse(input$bound == "Bounded", 1, 0)
    out <- demodata %>%
      filter(n == size,
             bound == boundstat,
             pi.vec == input$prop) %>%
      select(-c(pi.vec, n, bound)) %>%
      arrange(desc(power))
  })
  output$demores <- renderText({
    paste(filter_data() %>%
            head(1) %>%
            pull(procedure), "has the highest power.")
  })
  # output$saffronwarn <- renderText({
  #   if(input$size == 1000 & input$prop > 0.5) {
  #     paste("Using SAFFRON may overestimate the FDR.")
  #   }
  # })
  output$addiswarn <- renderText({
    # Scalar conditions: use && / || with explicit grouping (the original
    # relied on & binding tighter than |, which gave the same grouping).
    if ((input$size == 100 && input$prop == 0.4) ||
        (input$size == 1000 && input$prop < 0.5 && input$prop > 0.2)) {
      paste("Using ADDIS on a dataset > 100,000 may be too slow. Using onlineFDR::ADDIS() is recommended. ")
    }
  })
}
| /server.R | no_license | latlio/onlineFDRstream | R | false | false | 5,713 | r | ################################################################################
# Server logic of the Shiny app
#
# Author: Lathan Liou
# Created: Fri Sep 18 09:57:20 2020 ------------------------------
################################################################################
source("src/server-mods.R")
# 1. Shiny ----
library(shiny)
library(shinyWidgets) #custom widgets, allows for shinydashboard elements
library(shinycssloaders) #custom loading icons
library(shinyjs) #improved user exp
library(shinyBS) #custom widgets
library(bsplus)
# library(shinyalert)
library(shinyFeedback) #for user feedback messages
library(tippy) #for hovers
# library(highcharter) #for animated plots
library(plotly)
library(waiter) #for loading screen
library(sever) #for waiting screen
library(knitr)
library(shinydashboard)
library(shinydashboardPlus)
# library(shinyanimate)
# 2. Data Manipulation
library(tidyverse)
library(dplyr)
library(lubridate)
# library(reactable)
#make sure github dev version is installed
# devtools::install_github("https://github.com/dsrobertson/onlineFDR")
library(onlineFDR)
#for alg recommendation feature
# Pre-computed power/FDR simulation results used by the "get started" page to
# recommend a procedure; pi.vec (proportion of non-null hypotheses) is rounded
# to 2 decimals so it matches the slider input exactly.
demodata <- read_csv("powerFDRdata.csv") %>%
  mutate(pi.vec = round(pi.vec, 2))
#for hover functionality
# Wrap `value` in a dotted-underline <div> that shows `tooltip` on hover;
# extra arguments are forwarded to tippy().
with_tooltip <- function(value, tooltip, ...) {
  div(style = "text-decoration: underline; text-decoration-style: dotted; cursor: help",
      tippy(value, tooltip, ...))
}
# Negated %in%: TRUE for elements NOT present in the right-hand table.
`%!in%` = Negate(`%in%`)
# Server logic of the Shiny app.
#
# Wires the uploaded p-value data into one module per online-FDR procedure
# (LOND, LORD, SAFFRON, ADDIS sync/async, alpha-investing and the *star
# asynchronous variants), and drives the "get started" page, which looks up
# the highest-power procedure in pre-computed simulation results (`demodata`).
server <- function(input, output, session) {
  sever()
  Sys.sleep(0.5)   # brief pause so the loading screen is visible
  waiter_hide()

  # Load in data: the validated, parsed CSV upload (reactive).
  in_data <- reactive({
    req(input$file)
    ext <- tools::file_ext(input$file$name)
    # NOTE(review): file_ext() returns only the bare extension (e.g. "csv"),
    # so the MIME-type entries below can never match; kept for safety.
    shiny::validate(need(ext %in% c(
      'text/csv',
      'text/comma-separated-values',
      'text/tab-separated-values',
      'text/plain',
      'csv',
      'tsv'),
      "Please upload a csv file!"))
    data <- read_csv(input$file$datapath) %>%
      dplyr::mutate(across(any_of("date"), ~as.Date(.x, format = "%m/%d/%y")))
  })

  # Warn the user when the uploaded file has an unsupported extension.
  observeEvent(input$file, {
    ext <- tools::file_ext(input$file$name)
    if (ext %!in% c(
      'text/csv',
      'text/comma-separated-values',
      'text/tab-separated-values',
      'text/plain',
      'csv',
      'tsv')) {
      # FIX: showNotification()'s type must be one of "default", "message",
      # "warning", "error"; the abbreviation "err" only worked via partial
      # matching.
      shiny::showNotification("Your file format is not supported. Please upload a CSV file!", type = "error",
                              duration = NULL)
    }
  })

  #### LOND ####
  LOND_result <- callModule(LONDServer, id = "inputLOND", data = in_data)
  callModule(LONDcountServer, "LONDcount", LOND_result)
  callModule(LONDplotServer, "LONDplot", LOND_result)

  #### LORD ####
  LORD_result <- callModule(LORDServer, id = "inputLORD", data = in_data)
  callModule(LORDcountServer, "LORDcount", LORD_result)
  callModule(LORDplotServer, "LORDplot", LORD_result)
  # gray out inputs conditionally
  shinyjs::onclick("advanced2",
                   shinyjs::toggle(id = "advanced2", anim = TRUE))

  #### SAFFRON ####
  SAFFRON_result <- callModule(SAFFRONServer, id = "inputSAFFRON", data = in_data)
  callModule(SAFFRONcountServer, "SAFFRONcount", SAFFRON_result)
  callModule(SAFFRONplotServer, "SAFFRONplot", SAFFRON_result)

  #### ADDIS Sync ####
  ADDIS_result <- callModule(ADDISServer, id = "inputADDIS", data = in_data)
  callModule(ADDIScountServer, "ADDIScount", ADDIS_result)
  callModule(ADDISplotServer, "ADDISplot", ADDIS_result)

  #### Alpha-Investing ####
  alphainvesting_result <- callModule(alphainvestingServer, id = "inputalphainvesting", data = in_data)
  callModule(alphainvestingcountServer, "alphainvestcount", alphainvesting_result)
  callModule(alphainvestingplotServer, "alphainvestplot", alphainvesting_result)

  #### ADDIS Async ####
  ADDISa_result <- callModule(ADDISServer, id = "inputADDISa", data = in_data)
  # BUG FIX: these two calls previously passed the synchronous ADDIS_result,
  # so the async panel silently displayed the synchronous results.
  callModule(ADDIScountServer, "ADDISacount", ADDISa_result)
  callModule(ADDISplotServer, "ADDISaplot", ADDISa_result)

  #### LONDstar ####
  LONDSTAR_result <- callModule(LONDSTARServer, id = "inputLONDSTAR", data = in_data)
  callModule(LONDSTARcountServer, "LONDSTARcount", LONDSTAR_result)
  callModule(LONDSTARplotServer, "LONDSTARplot", LONDSTAR_result)

  #### LORDstar ####
  LORDSTAR_result <- callModule(LORDSTARServer, id = "inputLORDSTAR", data = in_data)
  callModule(LORDSTARcountServer, "LORDSTARcount", LORDSTAR_result)
  callModule(LORDSTARplotServer, "LORDSTARplot", LORDSTAR_result)

  #### SAFFRONstar ####
  SAFFRONSTAR_result <- callModule(SAFFRONSTARServer, id = "inputSAFFRONSTAR", data = in_data)
  callModule(SAFFRONSTARcountServer, "SAFFRONSTARcount", SAFFRONSTAR_result)
  callModule(SAFFRONSTARplotServer, "SAFFRONSTARplot", SAFFRONSTAR_result)

  #### get started page ####
  observe({
    toggle(id = "novice", condition = input$checkbox)
  })

  # Simulation results matching the user's scenario, sorted by decreasing power.
  filter_data <- reactive({
    size = as.numeric(input$size)
    boundstat = ifelse(input$bound == "Bounded", 1, 0)
    out <- demodata %>%
      filter(n == size,
             bound == boundstat,
             pi.vec == input$prop) %>%
      select(-c(pi.vec, n, bound)) %>%
      arrange(desc(power))
  })
  output$demores <- renderText({
    paste(filter_data() %>%
            head(1) %>%
            pull(procedure), "has the highest power.")
  })
  # output$saffronwarn <- renderText({
  #   if(input$size == 1000 & input$prop > 0.5) {
  #     paste("Using SAFFRON may overestimate the FDR.")
  #   }
  # })
  output$addiswarn <- renderText({
    # Scalar conditions: use && / || with explicit grouping (the original
    # relied on & binding tighter than |, which gave the same grouping).
    if ((input$size == 100 && input$prop == 0.4) ||
        (input$size == 1000 && input$prop < 0.5 && input$prop > 0.2)) {
      paste("Using ADDIS on a dataset > 100,000 may be too slow. Using onlineFDR::ADDIS() is recommended. ")
    }
  })
}
|
# Detect per-site disease alerts with a percentile-threshold algorithm.
#
# For every surveillance site, the `percentile_value`-th percentile of the
# historical weekly case counts (`occurence`) defines an alert threshold; a
# site is flagged "alert" for a week when the count reaches the threshold for
# `week_length` consecutive weeks (current week plus week_length - 1 lags).
#
# Arguments (defaults reference Shiny `input`, so they only resolve in-app):
#   data             data.table with columns sites, code ("YYYY_WW"), deb_sem
#                    (week start date), occurence, and 0/1 facies columns
#                    (East, South, High_land, Fringe, excepted_East,
#                    excepted_High_land).
#   week_length      look-back window in weeks (1-4).
#   percentile_value percentile (0-100) defining the per-site threshold.
#   disease          "Malaria", "Diarrhea" or "ILI"; selects the pre-computed
#                    percentile-rank CSV used for map circle radii.
#
# Returns a list:
#   percentile_alerte                    site/week alert status over the window
#   percentile_alerte_currentweek        same, latest complete week only
#   propsite_alerte_percentile           weekly share of sites in alert (all)
#   propsite_alerte_percentile_byfacies  same, split by epidemiological facies
#   mydata                               the enriched input data
#
# Side effect: writes the enriched table to "PaluConfAlert.csv" in the
# working directory. Note `data` is a data.table, so `:=` updates below also
# modify it by reference in the caller.
calculate_percentile=function(data=mydata,
                              week_length=input$comet_map,
                              percentile_value=input$Centile_map,
                              disease=input$diseases
)
{
  # Most recent week present in the data and its "YYYY_WW" code.
  max_deb_sem= max(as.Date(data$deb_sem,origin="1970-01-01"))
  max_code=paste0(year(max_deb_sem),"_",isoweek(max_deb_sem))
  # Build the vector of week-start dates for the look-back window
  # (1 to 4 weeks before today).
  if (as.numeric(week_length)==4 )
  {
    week1 = Sys.Date()-1*7
    week2 = Sys.Date()-2*7
    week3 = Sys.Date()-3*7
    week4 = Sys.Date()-4*7
    week_range = c(week1,week2,week3,week4)
  }
  if (as.numeric(week_length)==3 )
  {
    week1 = Sys.Date()-1*7
    week2 = Sys.Date()-2*7
    week3 = Sys.Date()-3*7
    week_range = c(week1,week2,week3)
  }
  if (as.numeric(week_length)==2 )
  {
    week1 = Sys.Date()-1*7
    week2 = Sys.Date()-2*7
    week_range =c(week1,week2)
  }
  if (as.numeric(week_length)==1)
  {
    week1 = Sys.Date()-1*7
    week_range = week1
  }
  cat('retrieve epidemiological weeks corresponding to parameters...')
  # Convert each date to a zero-padded "YYYY_WW" epidemiological-week code.
  code_range = unique(unlist(lapply(week_range,function(i) paste0( year(i),"_",
                                                                   ifelse(isoweek(i)<10,paste0("0",isoweek(i)),isoweek(i)) ) )))[1:week_length]
  cat('DONE\n')
  cat("code range defined by user encompasses:",code_range,"\n")
  cat('calculate ',percentile_value,'-th percentile for all the data except for the current week...')
  setkey(data,deb_sem)
  if (max_code==paste0(year(Sys.Date()),"_",isoweek(Sys.Date())) ) {
    #if max_date == current week then exclude this current week
    #from calculation of alert , otherwise include
    mypercentile=data[as.Date(deb_sem,origin="1970-01-01")<max_deb_sem,
                      quantile(occurence,probs=percentile_value/100,na.rm=T),by="sites"]
  } else {
    mypercentile=data[,quantile(occurence,probs=percentile_value/100,na.rm=T),by="sites"]
  }
  setnames(mypercentile,old="V1",new="n_percentile")
  cat("DONE\n")
  cat("merge",percentile_value,"-percentile with all the data (selected disease)...")
  # Attach each site's threshold (n_percentile) to every row of that site.
  setkey(data,sites);setkey(mypercentile,sites)
  data=merge(data,mypercentile,by.x="sites",by.y="sites")
  cat("DONE\n")
  cat("initialize alert by cleaning NA's values...","\n")
  # Treat missing weekly counts as 0 for the simple 0/1 `alert` indicator.
  data[,occurence_cleaned:=ifelse(is.na(occurence)==T,0,occurence)]
  cat("detection of alerts for all weeks during which selected disease cases exceed",percentile_value,"%...")
  data[, alert:=ifelse(occurence_cleaned>n_percentile,1,0)]
  cat("DONE\n")
  #new algo for detecting alert:
  data[,deb_sem:=as.Date(deb_sem,origin="1970-01-01")]
  cat("reorder date of the week by decreasing order, by sites...")
  setorder(data,sites,deb_sem)
  cat("DONE\n")
  #NEW ALGORITHM TO OUTPUT ALERT:
  # lg() shifts a vector one position (lag-1), padding with NA; applied
  # per site so lags never cross site boundaries.
  lg = function (x) c( NA, x[-length(x)])
  data[,mylag1:=lg(occurence),by="sites"]
  data[,mylag2:=lg(mylag1),by="sites"]
  data[,mylag3:=lg(mylag2),by="sites"]
  # alert_statusK: "alert" when the current count and the K-1 preceding weeks
  # all reach the site threshold (for K > 1 the current count must also be
  # strictly positive).
  data[,alert_status4:=ifelse(occurence>=n_percentile & occurence>0 & mylag1>=n_percentile & mylag2>=n_percentile & mylag3>=n_percentile, "alert","normal")]
  data[,alert_status3:=ifelse(occurence>=n_percentile & occurence>0 & mylag1>=n_percentile & mylag2>=n_percentile,"alert","normal")]
  data[,alert_status2:=ifelse(occurence>=n_percentile & occurence>0 & mylag1>=n_percentile ,"alert","normal")]
  data[,alert_status1:=ifelse(occurence>=n_percentile,"alert","normal")]
  #chosen week length to be renamed:
  # Keep only the variant the user requested under the generic name.
  setnames(data,paste0("alert_status",week_length),"alert_status")
  #END OF NEW ALGORITHM
  cat('recode alert_status into NA for those without data...')
  data[is.na(occurence)==T,alert_status:=NA]
  cat('DONE\n')
  cat('calculate radius per site for percentile algorithm alert...')
  ##################### new algo to determine radius of circle (outdated!)
  # TODO: depending on the percentile value
  # if no data then set radius to 5
  # if alert then set radius to 15
  # if not normal then 15*(weighted sum of # cases)
  # the idea is to differentiate sites by their number of cases while still
  # capping the radius at a max size of 15
  # data[,nbsite_alerte:=1.0]; data[,nbsite_normal:=1.0];
  # data[,myradius:=1.0]
  # data[alert_status=="alert", myradius:=15.0]
  # data[alert_status=="normal",sum_occurence_week:=sum(occurence,na.rm=T),by="code"]
  # data[alert_status=="normal", myradius:=15.0*occurence/sum_occurence_week,by="sites,code"]
  # #set a minimum value if less than 2.5 in radius (for visibility purpose):
  # data[alert_status=="normal" & is.na(myradius)==F, myradius:=ifelse(myradius<2.5,2.5,myradius),by="sites,code"]
  #
  # data[alert_status %in% NA | myradius %in% NA , myradius:=5.0]
  cat('DONE\n')
  cat("selected sites for:",code_range,"with alert status\n")
  percentile_alerte=data[code %in% code_range,list(sites,code,alert_status,deb_sem)]
  cat('DONE\n')
  ##################### new algo(June 2016) to determine radius of circle
  ##################### radius of circle depends on percentile rank #######
  # Circle radius derives from the site's pre-computed percentile rank;
  # sqrt dampens the spread, +1 guarantees a non-zero radius.
  if (disease=="Malaria") {
    Malaria_rank=fread("percentile_rank/Malaria_rank.csv")
    percentile_alerte=merge(percentile_alerte,
                            Malaria_rank[,list(code,sites,perc_rank)],
                            by.x=c("code","sites"),
                            by.y=c("code","sites"))
    percentile_alerte[,myradius:=round(sqrt(perc_rank))+1]
    percentile_alerte[,perc_rank:=NULL]
  }
  if (disease=="Diarrhea") {
    Diarrhea_rank=fread("percentile_rank/Diarrhea_rank.csv")
    percentile_alerte=merge(percentile_alerte,
                            Diarrhea_rank[,list(code,sites,perc_rank)],
                            by.x=c("code","sites"),
                            by.y=c("code","sites"))
    percentile_alerte[,myradius:=round(sqrt(perc_rank))+1]
    percentile_alerte[,perc_rank:=NULL]
  }
  if (disease=="ILI") {
    ILI_rank=fread("percentile_rank/ILI_rank.csv")
    percentile_alerte=merge(percentile_alerte,
                            ILI_rank[,list(code,sites,perc_rank)],
                            by.x=c("code","sites"),
                            by.y=c("code","sites"))
    percentile_alerte[,myradius:=round(sqrt(perc_rank))+1]
    percentile_alerte[,perc_rank:=NULL]
  }
  ##############################################
  # Latest complete week: if the data already contain the current week, step
  # back one week; otherwise use the most recent week code as-is.
  if (max_code==paste0(year(Sys.Date()),"_",isoweek(Sys.Date())) ) {
    #if max_date == current week then exclude this current week
    #from calculation of alert , otherwise include
    mycode=paste0(year(max_deb_sem-7),"_",isoweek(max_deb_sem-7))
    percentile_alerte_currentweek=percentile_alerte[code==mycode,]
  } else {
    percentile_alerte_currentweek=percentile_alerte[code==max_code,]
  }
  cat("DONE\n")
  # print(head(percentile_alerte));print(tail(percentile_alerte))
  # print(max_code); print(paste0(year(Sys.Date()),"_",isoweek(Sys.Date())))
  # print(percentile_alerte_currentweek);Sys.sleep(25)
  cat("calculate weekly prop of sites in alert (all)...")
  # Weekly proportion = (# distinct sites in "alert") / (# distinct reporting
  # sites); weeks with no alerting site get prop = 0 after the left join.
  Nbsite_beyond=data[is.na(occurence)==F & alert_status=="alert",length(unique(sites)),by="code"]
  setnames(Nbsite_beyond,"V1","eff_beyond")
  Nbsite_withdata=data[is.na(occurence)==F,length(unique(sites)),by="code"]
  setnames(Nbsite_withdata,"V1","eff_total")
  propsite_alerte_percentile=merge(x=Nbsite_withdata,
                                   y=Nbsite_beyond,
                                   by.x="code",by.y="code",all.x=T)
  propsite_alerte_percentile[,prop:=ifelse(is.na(eff_beyond/eff_total)==T,0.0,eff_beyond/eff_total)]
  cat("DONE\n")
  cat("merge with deb_sem and sites to reorder time series (all)...\n")
  propsite_alerte_percentile=merge(propsite_alerte_percentile,
                                   data[,list(code,deb_sem,sites,alert_status,East,
                                              South,High_land,Fringe,excepted_East,
                                              excepted_High_land)],
                                   by.x="code",by.y="code")
  rm(Nbsite_withdata);rm(Nbsite_beyond);gc()
  cat('DONE\n')
  #################new method to handle facies ###################################
  # Repeat the weekly alert-proportion computation within each facies
  # (sites whose facies indicator column equals 1) and stack the per-facies
  # results into one long data.table tagged by a `facies` column.
  list_facies= c("East","South","High_land","Fringe","excepted_East","excepted_High_land")
  datalist_facies=list()
  for ( f in list_facies)
  {
    cat("calculate weekly prop of sites in alert using percentile in ",f,"...")
    Nbsite_beyond=data[is.na(occurence)==F & alert_status=="alert" & get(f)==1,length(unique(sites)),by=c("code",f)]
    setnames(Nbsite_beyond,"V1","eff_beyond")
    Nbsite_withdata=data[is.na(occurence)==F & get(f)==1,length(unique(sites)),by=c("code",f)]
    setnames(Nbsite_withdata,"V1","eff_total")
    myfacies=merge(x=Nbsite_withdata,
                   y=Nbsite_beyond,
                   by.x=c("code",f),
                   by.y=c("code",f),all.x=T)
    myfacies[,prop:=ifelse(is.na(eff_beyond/eff_total)==T,0.0,eff_beyond/eff_total)]
    datalist_facies[[f]]=myfacies
    #append to a single and unique dataframe:
    if ( f==list_facies[1])
    {
      propsite_alerte_percentile_byfacies= datalist_facies[[f]]
      # merge with deb_sem:
      propsite_alerte_percentile_byfacies=merge(propsite_alerte_percentile_byfacies,
                                                data[get(f)==1,list(code,deb_sem)],
                                                by.x="code",by.y="code")
      # Drop the indicator column named by `f` (string), plus the raw counts.
      propsite_alerte_percentile_byfacies[,f:=NULL,with=F]
      propsite_alerte_percentile_byfacies[,eff_total:=NULL]
      propsite_alerte_percentile_byfacies[,eff_beyond:=NULL]
      propsite_alerte_percentile_byfacies[,facies:=f]
    } else {
      tmp= datalist_facies[[f]]
      # merge with deb_sem:
      tmp=merge(tmp,data[get(f)==1,list(code,deb_sem)],by.x="code",by.y="code")
      tmp[,f:=NULL,with=F]
      tmp[,eff_total:=NULL]
      tmp[,eff_beyond:=NULL]
      tmp[,facies:=f]
      propsite_alerte_percentile_byfacies=rbind(propsite_alerte_percentile_byfacies,tmp)
      rm(tmp);gc()
    }
    cat("DONE\n")
  }
  # Persist the enriched table. NOTE(review): writes into the current working
  # directory -- confirm this is the intended location in deployment.
  write.table(data,"PaluConfAlert.csv",sep=",",row.names = F)
  return (list(percentile_alerte=percentile_alerte,
               percentile_alerte_currentweek=percentile_alerte_currentweek,
               propsite_alerte_percentile=propsite_alerte_percentile,
               propsite_alerte_percentile_byfacies=propsite_alerte_percentile_byfacies,
               mydata=data
  ))
}
| /algorithms/percentile.R | no_license | RanaivosonHerimanitra/Sentinel | R | false | false | 10,742 | r | calculate_percentile=function(data=mydata,
week_length=input$comet_map,
percentile_value=input$Centile_map,
disease=input$diseases
)
{
max_deb_sem= max(as.Date(data$deb_sem,origin="1970-01-01"))
max_code=paste0(year(max_deb_sem),"_",isoweek(max_deb_sem))
if (as.numeric(week_length)==4 )
{
week1 = Sys.Date()-1*7
week2 = Sys.Date()-2*7
week3 = Sys.Date()-3*7
week4 = Sys.Date()-4*7
week_range = c(week1,week2,week3,week4)
}
if (as.numeric(week_length)==3 )
{
week1 = Sys.Date()-1*7
week2 = Sys.Date()-2*7
week3 = Sys.Date()-3*7
week_range = c(week1,week2,week3)
}
if (as.numeric(week_length)==2 )
{
week1 = Sys.Date()-1*7
week2 = Sys.Date()-2*7
week_range =c(week1,week2)
}
if (as.numeric(week_length)==1)
{
week1 = Sys.Date()-1*7
week_range = week1
}
cat('retrieve epidemiological weeks corresponding to parameters...')
code_range = unique(unlist(lapply(week_range,function(i) paste0( year(i),"_",
ifelse(isoweek(i)<10,paste0("0",isoweek(i)),isoweek(i)) ) )))[1:week_length]
cat('DONE\n')
cat("code range defined by user encompasses:",code_range,"\n")
cat('calculate ',percentile_value,'-th percentile for all the data except for the current week...')
setkey(data,deb_sem)
if (max_code==paste0(year(Sys.Date()),"_",isoweek(Sys.Date())) ) {
#if max_date == current week then exclude this current week
#from calculation of alert , otherwise include
mypercentile=data[as.Date(deb_sem,origin="1970-01-01")<max_deb_sem,
quantile(occurence,probs=percentile_value/100,na.rm=T),by="sites"]
} else {
mypercentile=data[,quantile(occurence,probs=percentile_value/100,na.rm=T),by="sites"]
}
setnames(mypercentile,old="V1",new="n_percentile")
cat("DONE\n")
cat("merge",percentile_value,"-percentile with all the data (selected disease)...")
setkey(data,sites);setkey(mypercentile,sites)
data=merge(data,mypercentile,by.x="sites",by.y="sites")
cat("DONE\n")
cat("initialize alert by cleaning NA's values...","\n")
data[,occurence_cleaned:=ifelse(is.na(occurence)==T,0,occurence)]
cat("detection of alerts for all weeks during which selected disease cases exceed",percentile_value,"%...")
data[, alert:=ifelse(occurence_cleaned>n_percentile,1,0)]
cat("DONE\n")
#new algo for detecting alert:
data[,deb_sem:=as.Date(deb_sem,origin="1970-01-01")]
cat("reorder date of the week by decreasing order, by sites...")
setorder(data,sites,deb_sem)
cat("DONE\n")
#NEW ALGORITHM TO OUTPUT ALERT:
lg = function (x) c( NA, x[-length(x)])
data[,mylag1:=lg(occurence),by="sites"]
data[,mylag2:=lg(mylag1),by="sites"]
data[,mylag3:=lg(mylag2),by="sites"]
data[,alert_status4:=ifelse(occurence>=n_percentile & occurence>0 & mylag1>=n_percentile & mylag2>=n_percentile & mylag3>=n_percentile, "alert","normal")]
data[,alert_status3:=ifelse(occurence>=n_percentile & occurence>0 & mylag1>=n_percentile & mylag2>=n_percentile,"alert","normal")]
data[,alert_status2:=ifelse(occurence>=n_percentile & occurence>0 & mylag1>=n_percentile ,"alert","normal")]
data[,alert_status1:=ifelse(occurence>=n_percentile,"alert","normal")]
#chosen week length to be renamed:
setnames(data,paste0("alert_status",week_length),"alert_status")
#END OF NEW ALGORITHM
cat('recode alert_status into NA for those without data...')
data[is.na(occurence)==T,alert_status:=NA]
cat('DONE\n')
cat('calculate radius per site for percentile algorithm alert...')
##################### new algo to determine radius of circle (outdated!)
# TODO: en fonction de valeur du percentile
# if no data then set radius to 5
# if alert then set radius to 15
# if not normal then 15*(weighted sum of # cases)
# l'idée c'est de différencier les sites selon nombre de cas selon les sites mais de se
#fixer un max size de 15
# data[,nbsite_alerte:=1.0]; data[,nbsite_normal:=1.0];
# data[,myradius:=1.0]
# data[alert_status=="alert", myradius:=15.0]
# data[alert_status=="normal",sum_occurence_week:=sum(occurence,na.rm=T),by="code"]
# data[alert_status=="normal", myradius:=15.0*occurence/sum_occurence_week,by="sites,code"]
# #set a minimum value if less than 2.5 in radius (for visibility purpose):
# data[alert_status=="normal" & is.na(myradius)==F, myradius:=ifelse(myradius<2.5,2.5,myradius),by="sites,code"]
#
# data[alert_status %in% NA | myradius %in% NA , myradius:=5.0]
cat('DONE\n')
cat("selected sites for:",code_range,"with alert status\n")
percentile_alerte=data[code %in% code_range,list(sites,code,alert_status,deb_sem)]
cat('DONE\n')
##################### new algo(juin 2016) to determine radius of circle
##################### radius of circle depends on percentile rank #######
if (disease=="Malaria") {
Malaria_rank=fread("percentile_rank/Malaria_rank.csv")
percentile_alerte=merge(percentile_alerte,
Malaria_rank[,list(code,sites,perc_rank)],
by.x=c("code","sites"),
by.y=c("code","sites"))
percentile_alerte[,myradius:=round(sqrt(perc_rank))+1]
percentile_alerte[,perc_rank:=NULL]
}
if (disease=="Diarrhea") {
Diarrhea_rank=fread("percentile_rank/Diarrhea_rank.csv")
percentile_alerte=merge(percentile_alerte,
Diarrhea_rank[,list(code,sites,perc_rank)],
by.x=c("code","sites"),
by.y=c("code","sites"))
percentile_alerte[,myradius:=round(sqrt(perc_rank))+1]
percentile_alerte[,perc_rank:=NULL]
}
if (disease=="ILI") {
ILI_rank=fread("percentile_rank/ILI_rank.csv")
percentile_alerte=merge(percentile_alerte,
ILI_rank[,list(code,sites,perc_rank)],
by.x=c("code","sites"),
by.y=c("code","sites"))
percentile_alerte[,myradius:=round(sqrt(perc_rank))+1]
percentile_alerte[,perc_rank:=NULL]
}
##############################################
if (max_code==paste0(year(Sys.Date()),"_",isoweek(Sys.Date())) ) {
#if max_date == current week then exclude this current week
#from calculation of alert , otherwise include
mycode=paste0(year(max_deb_sem-7),"_",isoweek(max_deb_sem-7))
percentile_alerte_currentweek=percentile_alerte[code==mycode,]
} else {
percentile_alerte_currentweek=percentile_alerte[code==max_code,]
}
cat("DONE\n")
# print(head(percentile_alerte));print(tail(percentile_alerte))
# print(max_code); print(paste0(year(Sys.Date()),"_",isoweek(Sys.Date())))
# print(percentile_alerte_currentweek);Sys.sleep(25)
cat("calculate weekly prop of sites in alert (all)...")
Nbsite_beyond=data[is.na(occurence)==F & alert_status=="alert",length(unique(sites)),by="code"]
setnames(Nbsite_beyond,"V1","eff_beyond")
Nbsite_withdata=data[is.na(occurence)==F,length(unique(sites)),by="code"]
setnames(Nbsite_withdata,"V1","eff_total")
propsite_alerte_percentile=merge(x=Nbsite_withdata,
y=Nbsite_beyond,
by.x="code",by.y="code",all.x=T)
propsite_alerte_percentile[,prop:=ifelse(is.na(eff_beyond/eff_total)==T,0.0,eff_beyond/eff_total)]
cat("DONE\n")
cat("merge with deb_sem and sites to reorder time series (all)...\n")
propsite_alerte_percentile=merge(propsite_alerte_percentile,
data[,list(code,deb_sem,sites,alert_status,East,
South,High_land,Fringe,excepted_East,
excepted_High_land)],
by.x="code",by.y="code")
rm(Nbsite_withdata);rm(Nbsite_beyond);gc()
cat('DONE\n')
#################new method to handle facies ###################################
list_facies= c("East","South","High_land","Fringe","excepted_East","excepted_High_land")
datalist_facies=list()
for ( f in list_facies)
{
cat("calculate weekly prop of sites in alert using percentile in ",f,"...")
Nbsite_beyond=data[is.na(occurence)==F & alert_status=="alert" & get(f)==1,length(unique(sites)),by=c("code",f)]
setnames(Nbsite_beyond,"V1","eff_beyond")
Nbsite_withdata=data[is.na(occurence)==F & get(f)==1,length(unique(sites)),by=c("code",f)]
setnames(Nbsite_withdata,"V1","eff_total")
myfacies=merge(x=Nbsite_withdata,
y=Nbsite_beyond,
by.x=c("code",f),
by.y=c("code",f),all.x=T)
myfacies[,prop:=ifelse(is.na(eff_beyond/eff_total)==T,0.0,eff_beyond/eff_total)]
datalist_facies[[f]]=myfacies
#append to a single and unique dataframe:
if ( f==list_facies[1])
{
propsite_alerte_percentile_byfacies= datalist_facies[[f]]
# merge with deb_sem:
propsite_alerte_percentile_byfacies=merge(propsite_alerte_percentile_byfacies,
data[get(f)==1,list(code,deb_sem)],
by.x="code",by.y="code")
propsite_alerte_percentile_byfacies[,f:=NULL,with=F]
propsite_alerte_percentile_byfacies[,eff_total:=NULL]
propsite_alerte_percentile_byfacies[,eff_beyond:=NULL]
propsite_alerte_percentile_byfacies[,facies:=f]
} else {
tmp= datalist_facies[[f]]
# merge with deb_sem:
tmp=merge(tmp,data[get(f)==1,list(code,deb_sem)],by.x="code",by.y="code")
tmp[,f:=NULL,with=F]
tmp[,eff_total:=NULL]
tmp[,eff_beyond:=NULL]
tmp[,facies:=f]
propsite_alerte_percentile_byfacies=rbind(propsite_alerte_percentile_byfacies,tmp)
rm(tmp);gc()
}
cat("DONE\n")
}
write.table(data,"PaluConfAlert.csv",sep=",",row.names = F)
return (list(percentile_alerte=percentile_alerte,
percentile_alerte_currentweek=percentile_alerte_currentweek,
propsite_alerte_percentile=propsite_alerte_percentile,
propsite_alerte_percentile_byfacies=propsite_alerte_percentile_byfacies,
mydata=data
))
}
|
# ===========================================================================
#
# Differentially expressed genes annotation application
#
# The code is written based on:
# empirical Bayes and moderated t-tests
#
# ===========================================================================
#
# Author: Ran D (413677671@qq.com)
#
# Program Features:
#   Read and merge Affymetrix transcripts and perform quality control
#   Normalize the transcripts (RMA)
#   Identify DE transcripts by fitting a linear regression model (moderated t-test)
#   Produce the visualization results
#
# ========================
# --- Setup: install/load Bioconductor packages and read the raw CEL files ---
source("http://bioconductor.org/biocLite.R")
biocLite()
library(GEOquery)
library(simpleaffy)
library(GenomicRanges)
library(oligoClasses)
library(oligo)
library(oligoData)
# Read CEL files from ./newdata
# The normal human data accessions range from GSM2350873 to GSM2352692 (60 normal; 1760 SLE patients)
celFiles <- list.celfiles('./newdata', full.names=TRUE)
rawData <- read.celfiles(celFiles)
geneCore <- rma(rawData, target = "core")
## Two RMA normalization levels are used to calculate expression:
## "core" (transcript clusters) and "probeset"
genePS <- rma(rawData, target = "probeset")
genePS.matrix = exprs(genePS)
geneCore.matrix = exprs(geneCore)
featureData(genePS) <- getNetAffx(genePS, "probeset")
featureData(geneCore) <- getNetAffx(geneCore, "transcript")
ph = rawData@phenoData
# Transcript-level ("core") data is preferred for this analysis
#QC: diagnostic plots of the normalized expression sets
#biocLite("RColorBrewer")
library(RColorBrewer)
cols <- brewer.pal(8, "Set1") #Set color palette
# Box plots of the RMA-normalized data (genePS / geneCore)
#biocLite("affyPLM")
library(affyPLM)
# NOTE: setwd() side effects below — all output paths are relative to ./outcome
dir.create("outcome")
setwd("./outcome")
dir.create("QC")
setwd("QC")
svg(file="boxplot_genePS.svg",width=12,height=8)
boxplot(genePS, col=cols)
dev.off()
svg(file="boxplotgeneCore.svg",width=12,height=8)
boxplot(geneCore, col=cols)
dev.off()
# Intensity distribution histograms
svg(file="hist_genePS.svg",width=12,height=8)
hist(genePS, col=cols)
dev.off()
svg(file="hist_geneCore.svg",width=12,height=8)
hist(geneCore, col=cols)
dev.off()
#GG plot
pmexp = pm(rawData)   # perfect-match probe intensities; NOTE(review): appears unused in this script
sampleNames = vector()
#biocLite("ggplot2")
library(ggplot2)
logs = vector()
dir.create("RawIndensity")
setwd("./RawIndensity")
# Per-array pseudo-images of the raw probe intensities
######################CHANGE###############################################################################
# NOTE(review): hard-coded 240 arrays — confirm this matches length(celFiles)
for (i in 1:240)
{
name = paste("sample",i,".svg",sep="")
svg(file=name,width=12,height=8)
image(rawData[,i],main=ph@data$index[i])
dev.off()
}
# Clustering Dendrogram (hierarchical clustering of samples, maximum distance)
setwd("..")
svg(file="Clustering_Dendrogram_for_GenePS.svg",width=12,height=8)
eset <- exprs(genePS)
distance <- dist(t(eset),method="maximum")
clusters <- hclust(distance)
plot(clusters, cex.axis=0.25)
dev.off()
svg(file="Clustering_Dendrogram_for_GeneCore.svg",width=12,height=8)
eset <- exprs(geneCore)
distance <- dist(t(eset),method="maximum")
clusters <- hclust(distance)
plot(clusters, cex.axis=0.25)
dev.off()
# Probe-level QC via affyPLM (kept commented out — expensive to run)
#celfiles.qc <- fitPLM(genePS)
# Visualization
#image(celfiles.qc, which=1, add.legend=TRUE)
# The chip data may contain some artificial error
#image(celfiles.qc, which=4, add.legend=TRUE)
# Box plots are also available for the package affyPLM
# RLE (Relative Log Expression) should be close to zero without artificial error
#RLE(getNetAffx(genePS, "probeset"), main="RLE")
# Most medians for NUSE (Normalised Unscaled Standard Errors) are supposed to be 1.
#NUSE(celfiles.qc, main="NUSE")
# Filtering features exhibiting little variation or low signal
#biocLite("pd.hta.2.0")
library(pd.hta.2.0)
# Load the "main"-category transcript annotation shipped with pd.hta.2.0
load(paste0(path.package("pd.hta.2.0"), "/extdata/netaffxTranscript.rda"))
transcriptAnnot <- pData(netaffxTranscript)
transcriptAnnot <- transcriptAnnot[transcriptAnnot$category == 'main', ]
transcriptMain <- transcriptAnnot$transcriptclusterid
length(transcriptMain)
# 67528 main transcripts
# Load the "main"-type probeset annotation
load(paste0(path.package("pd.hta.2.0"), "/extdata/netaffxProbeset.rda"))
probesetAnnot <- pData(netaffxProbeset)
probesetAnnot <- probesetAnnot[probesetAnnot$probesettype == 'main', ]
probesetMain <- probesetAnnot$probesetid
length(probesetMain)
# 911590 main probes
# NOTE(review): the genePS filter result below is immediately overwritten by the
# geneCore call, so the probeset-level filtering is discarded and genePS.matrix
# stays unfiltered (apart from the main-probeset subset) — confirm intent.
filtered <- nsFilter(genePS, require.entrez=FALSE, remove.dupEntrez=FALSE)
filtered <- nsFilter(geneCore, require.entrez=FALSE, remove.dupEntrez=FALSE)
# Features removed by the two filtering steps (low variance / low signal)
filtered$filter.log$numLowVar #Number of features filtered for low variance
#
filtered$filter.log$feature.exclude #Number of features filtered for low signal
#
geneCore.matrix <- exprs(filtered$eset)
# keep only "main"-category features
geneCore.matrix <- geneCore.matrix[rownames(geneCore.matrix) %in% transcriptMain,]
genePS.matrix <- genePS.matrix[rownames(genePS.matrix) %in% probesetMain,]
#DE Genes: moderated t-test via limma
library(limma)
######################CHANGE###############################################################################
#ph@data[ ,2] = c("normal","normal","normal","normal","normal","SLE","SLE","SLE","SLE","SLE")
# NOTE(review): assumes exactly 240 arrays, the first 60 normal and the next
# 180 SLE, in CEL-file order — confirm against the sample sheet.
ph@data[ ,2] = rep(c("normal","SLE"),c(60,180))
colnames(ph@data)[2]="source"
ph@data
groups = ph@data$source
f = factor(groups,levels=c("normal","SLE"))
# design matrix without intercept: one column per group
design = model.matrix(~ 0 + f)
colnames(design) = c("normal","SLE")
fit <- lmFit(geneCore.matrix, design)
# contrast SLE vs normal; NOTE(review): the contrast name "SLE_nolmal" is a typo
# for "SLE_normal" — left unchanged because downstream code may reference it
contrast.matrix <- makeContrasts(SLE_nolmal = SLE - normal, levels=design)
# The estimates of the fit for the first 10 probe sets
fit$coefficients[1:10,]
normal_fits <- contrasts.fit(fit, contrast.matrix)
normal_ebFit <- eBayes(normal_fits)
normal_ebFitDiverged <- eBayes(fit)   # eBayes on the raw (non-contrast) fit
# The names of dataset
names(normal_ebFit)
normal_ebFit$coefficients[1:10,]
# The t-statistics and p-values of the moderated t-test for the first 10 probe sets
normal_ebFit$t[1:10,]
normal_ebFit$p.value[1:10,]
# Generating a Volcano plot
setwd("..")
dir.create("DEGenes")
setwd("./DEGenes")
name = "Volcano.svg"
svg(file=name,width=12,height=8)
volcanoplot(normal_ebFit,coef=1,highlight=10)
dev.off()
# Creating probe IDs of DE genes for functional analysis
options(digits=1)   # affects printed output only
tab <- topTable(normal_ebFit,coef=1,number=1000,adjust.method="fdr",lfc=1)
tab
# NOTE(review): an earlier comment mentioned an adj.P.Val threshold of 0.001,
# but no explicit p-value filter is applied here beyond topTable defaults.
topgenes=tab
# split into up- and down-regulated genes by log fold change
topups = topgenes[topgenes[, "logFC"] >= 1, ]
topups
topdowns = topgenes[topgenes[, "logFC"] <= -1, ]
topdowns
#DE down&low
IDs.up = rownames(topups)
IDs.down = rownames(topdowns)
DEresults = decideTests(normal_ebFit,method='global',adjust.method="BH",p.value=0.05,lfc=1)
DEresultsDiverged = decideTests(normal_ebFitDiverged,method='global',adjust.method="BH",p.value=0.05,lfc=1)
DEresults
DEresultsDiverged
save.image("myfile")   # snapshot of the whole workspace
topdowns
topups
# Write DE tables for lfc thresholds 5, 1, 2, 4, 3 (info table + bare IDs).
# NOTE(review): the first ID file is named "DEIDs.txt" without the "_ifc5"
# suffix, unlike all the others — confirm downstream consumers expect this.
DE <- topTable(normal_ebFit, number=100000, coef=1,adjust.method="BH", lfc=5)
write.table(DE,row.names=TRUE,col.names=TRUE,quote=TRUE,file="./DEIDsinfo_ifc5.txt")
write.table(DE,row.names=FALSE,col.names=FALSE,quote=FALSE,file="./DEIDs.txt")
DE <- topTable(normal_ebFit, number=100000, coef=1,adjust.method="BH", lfc=1)
write.table(DE,row.names=TRUE,col.names=TRUE,quote=TRUE,file="./DEIDsinfo_ifc1.txt")
write.table(DE,row.names=FALSE,col.names=FALSE,quote=FALSE,file="./DEIDs_ifc1.txt")
DE <- topTable(normal_ebFit, number=100000, coef=1,adjust.method="BH", lfc=2)
write.table(DE,row.names=TRUE,col.names=TRUE,quote=TRUE,file="./DEIDsinfo_ifc2.txt")
write.table(DE,row.names=FALSE,col.names=FALSE,quote=FALSE,file="./DEIDs_ifc2.txt")
DE <- topTable(normal_ebFit, number=100000, coef=1,adjust.method="BH", lfc=4)
write.table(DE,row.names=TRUE,col.names=TRUE,quote=TRUE,file="./DEIDsinfo_ifc4.txt")
write.table(DE,row.names=FALSE,col.names=FALSE,quote=FALSE,file="./DEIDs_ifc4.txt")
DE <- topTable(normal_ebFit, number=100000, coef=1,adjust.method="BH", lfc=3)
write.table(DE,row.names=TRUE,col.names=TRUE,quote=TRUE,file="./DEIDsinfo_ifc3.txt")
write.table(DE,row.names=FALSE,col.names=FALSE,quote=FALSE,file="./DEIDs_ifc3.txt")
DE_ifc5 <- topTable(normal_ebFit, number=100000, coef=1, lfc=5)
# Count DE genes at each lfc threshold
nrow(topTable(normal_ebFit, coef=1, number=10000, lfc=5))
nrow(topTable(normal_ebFit, coef=1, number=10000, lfc=4))
nrow(topTable(normal_ebFit, coef=1, number=10000, lfc=3))
nrow(topTable(normal_ebFit, coef=1, number=10000, lfc=2))
nrow(topTable(normal_ebFit, coef=1, number=10000, lfc=1))
dim(topups)
dim(topdowns)
# Heat maps of the DE transcripts (down- and up-regulated plotted separately).
# NOTE(review): rows of topups/topdowns not present in the filtered
# geneCore.matrix would yield NA rows here — confirm the row sets agree.
geneCore.matrix.up = geneCore.matrix[(rownames(topups)),]
geneCore.matrix.down = geneCore.matrix[(rownames(topdowns)),]
geneCore.matrix.up
geneCore.matrix.down
sampleNames = vector()
featureNames = vector()
heatlogs = vector()
###################CHANGE################################################################################################3
if (dim(topdowns)[1]!=0)
{
  # build long-format (sample, feature, intensity) data for ggplot
  for (i in 1:dim(geneCore.matrix.down)[2])
  {
    sampleNames = c(sampleNames,rep(colnames(geneCore.matrix.down)[i],dim(topdowns)[1]))
    featureNames = c(featureNames,rownames(geneCore.matrix.down[1:dim(topdowns)[1],]))
    heatlogs = c(heatlogs,geneCore.matrix.down[1:dim(topdowns)[1],i])
  }
  heatData = data.frame(norm_logInt=heatlogs,sampleName=sampleNames,featureName=featureNames)
  # BUG FIX: this file was named "Heat_Map_GeneCore_up.svg" and was later
  # overwritten by the up-regulated plot; it now gets its own file.
  svg(file="Heat_Map_GeneCore_down.svg",width=12,height=8)
  dataHeat = ggplot(heatData, aes(sampleName,featureName))
  # BUG FIX: inside an if-block a ggplot object is not auto-printed, which left
  # the SVG empty; print() renders it explicitly.
  print(dataHeat + geom_tile(aes(fill=norm_logInt)) + scale_fill_gradient(low="green", high="red"))
  dev.off()
}
if(dim(topups)[1]!=0)
{
  # BUG FIX: reset the accumulators — previously the up-regulated heat map also
  # contained every row already collected for the down-regulated one.
  sampleNames = vector()
  featureNames = vector()
  heatlogs = vector()
  for (i in 1:dim(geneCore.matrix.up)[2])
  {
    sampleNames = c(sampleNames,rep(colnames(geneCore.matrix.up)[i],dim(topups)[1]))
    featureNames = c(featureNames,rownames(geneCore.matrix.up[1:dim(topups)[1],]))
    heatlogs = c(heatlogs,geneCore.matrix.up[1:dim(topups)[1],i])
  }
  heatData = data.frame(norm_logInt=heatlogs,sampleName=sampleNames,featureName=featureNames)
  svg(file="Heat_Map_GeneCore_up.svg",width=12,height=8)
  dataHeat = ggplot(heatData, aes(sampleName,featureName))
  print(dataHeat + geom_tile(aes(fill=norm_logInt)) + scale_fill_gradient(low="green", high="red"))
  dev.off()
}
# Venn diagrams of the decideTests() results (contrast fit vs raw fit)
svg(file="Venn_GeneCore_normal_SLE.svg",width=12,height=8)
vennDiagram(DEresults)
dev.off()
svg(file="Venn_GeneCore_ori.svg",width=12,height=8)
vennDiagram(DEresultsDiverged)
dev.off()
| /DE-IDENTIFICATION.R | no_license | dr413677671/A-combanation-method-of-moderated-t-test-and-radom-forest-algorithm-in-micro-array | R | false | false | 10,111 | r | # ===========================================================================
#
# Differentlt expressed genes annotation application
#
# The code are written based on:
# empritical bayes and moderated t-test
#
# ===========================================================================
#
# Author: Ran D (413677671@qq.com)
#
# Program Features:
# Read and merged affy-metrix transcripts and do the quality control
# Normalized the DE transcipts (RMA)
# Identified DE transcripts by fitting a linear regression model (moderated t-test)
# Retrieved the visualized results
#
# ========================
source("http://bioconductor.org/biocLite.R")
biocLite()
library(GEOquery)
library(simpleaffy)
library(GenomicRanges)
library(oligoClasses)
library(oligo)
library(oligoData)
#Read File
#The normal human data accessories are ranged from GSM2350873 to GSM2352692(60 normal; 1760 SLe patients;)
celFiles <- list.celfiles('./newdata', full.names=TRUE)
rawData <- read.celfiles(celFiles)
geneCore <- rma(rawData, target = "core")
##This two normalization methods to caculating Expression
genePS <- rma(rawData, target = "probeset")
genePS.matrix = exprs(genePS)
geneCore.matrix = exprs(geneCore)
featureData(genePS) <- getNetAffx(genePS, "probeset")
featureData(geneCore) <- getNetAffx(geneCore, "transcript")
ph = rawData@phenoData
# Transcript Better for the research
#QC
#biocLite("RColorBrewer")
library(RColorBrewer)
cols <- brewer.pal(8, "Set1") #Set color panal
#Illustated box plots from the raw data befor been normalized
#biocLite("affyPLM")
library(affyPLM)
dir.create("outcome")
setwd("./outcome")
dir.create("QC")
setwd("QC")
svg(file="boxplot_genePS.svg",width=12,height=8)
boxplot(genePS, col=cols)
dev.off()
svg(file="boxplotgeneCore.svg",width=12,height=8)
boxplot(geneCore, col=cols)
dev.off()
#Illustrate intensity graphs
svg(file="hist_genePS.svg",width=12,height=8)
hist(genePS, col=cols)
dev.off()
svg(file="hist_geneCore.svg",width=12,height=8)
hist(geneCore, col=cols)
dev.off()
#GG plot
pmexp = pm(rawData)
sampleNames = vector()
#biocLite("ggplot2")
library(ggplot2)
logs = vector()
dir.create("RawIndensity")
setwd("./RawIndensity")
#Created microarray pictures
######################CHANGE###############################################################################
for (i in 1:240)
{
name = paste("sample",i,".svg",sep="")
svg(file=name,width=12,height=8)
image(rawData[,i],main=ph@data$index[i])
dev.off()
}
# Clustering Dendrogram
setwd("..")
svg(file="Clustering_Dendrogram_for_GenePS.svg",width=12,height=8)
eset <- exprs(genePS)
distance <- dist(t(eset),method="maximum")
clusters <- hclust(distance)
plot(clusters, cex.axis=0.25)
dev.off()
svg(file="Clustering_Dendrogram_for_GeneCore.svg",width=12,height=8)
eset <- exprs(geneCore)
distance <- dist(t(eset),method="maximum")
clusters <- hclust(distance)
plot(clusters, cex.axis=0.25)
dev.off()
# Get probe indensity from CEL files
#celfiles.qc <- fitPLM(genePS)
# Visualization
#image(celfiles.qc, which=1, add.legend=TRUE)
# The chip data may contain some artificial error
#image(celfiles.qc, which=4, add.legend=TRUE)
# Box plots are also available for the package affyPLM
# RLE (Relative Log Expression ) should be close to zero without artificial error
#RLE(getNetAffx(genePS, "probeset"), main="RLE")
# Most mideans for NUSE (Normalised Unscaled Standard Errors are supposed to be 1.
#NUSE(celfiles.qc, main="NUSE")
#Filtering features exhibiting little variation or low signal
#biocLite("pd.hta.2.0")
library(pd.hta.2.0)
# Load Main Transcript
load(paste0(path.package("pd.hta.2.0"), "/extdata/netaffxTranscript.rda"))
transcriptAnnot <- pData(netaffxTranscript)
transcriptAnnot <- transcriptAnnot[transcriptAnnot$category == 'main', ]
transcriptMain <- transcriptAnnot$transcriptclusterid
length(transcriptMain)
# 67528 Main Transcript
# Load Main Probeset
load(paste0(path.package("pd.hta.2.0"), "/extdata/netaffxProbeset.rda"))
probesetAnnot <- pData(netaffxProbeset)
probesetAnnot <- probesetAnnot[probesetAnnot$probesettype == 'main', ]
probesetMain <- probesetAnnot$probesetid
length(probesetMain)
# 911590 Main Probes
filtered <- nsFilter(genePS, require.entrez=FALSE, remove.dupEntrez=FALSE)
filtered <- nsFilter(geneCore, require.entrez=FALSE, remove.dupEntrez=FALSE)
#Show the features filtered by the two steps(exhibiting little viriation and low signal)
filtered$filter.log$numLowVar #Show the number of filtered features with low viriance
#
filtered$filter.log$feature.exclude #Show the number of filtered features with low signal
#
geneCore.matrix <- exprs(filtered$eset)
geneCore.matrix <- geneCore.matrix[rownames(geneCore.matrix) %in% transcriptMain,]
genePS.matrix <- genePS.matrix[rownames(genePS.matrix) %in% probesetMain,]
#DE Genes
library(limma)
######################CHANGE###############################################################################
#ph@data[ ,2] = c("normal","normal","normal","normal","normal","SLE","SLE","SLE","SLE","SLE")
ph@data[ ,2] = rep(c("normal","SLE"),c(60,180))
colnames(ph@data)[2]="source"
ph@data
groups = ph@data$source
f = factor(groups,levels=c("normal","SLE"))
design = model.matrix(~ 0 + f)
colnames(design) = c("normal","SLE")
fit <- lmFit(geneCore.matrix, design)
contrast.matrix <- makeContrasts(SLE_nolmal = SLE - normal, levels=design)
# The estimates of the fit for the first 10 probe sets
fit$coefficients[1:10,]
normal_fits <- contrasts.fit(fit, contrast.matrix)
normal_ebFit <- eBayes(normal_fits)
normal_ebFitDiverged <- eBayes(fit)
# The names of dataset
names(normal_ebFit)
normal_ebFit$coefficients[1:10,]
# The t-statistics and p-values of the moderated t-test for the first 10 probe sets
normal_ebFit$t[1:10,]
normal_ebFit$p.value[1:10,]
# Generating a Volcano plot
setwd("..")
dir.create("DEGenes")
setwd("./DEGenes")
name = "Volcano.svg"
svg(file=name,width=12,height=8)
volcanoplot(normal_ebFit,coef=1,highlight=10)
dev.off()
# Creating probe IDs of DE genes for functional analysis
options(digits=1)
tab <- topTable(normal_ebFit,coef=1,number=1000,adjust.method="fdr",lfc=1)
tab
# P-values (last column of tab called adj.P.Val) below a threshold (in this example the threshold is set at 0.001)
topgenes=tab
topups = topgenes[topgenes[, "logFC"] >= 1, ]
topups
topdowns = topgenes[topgenes[, "logFC"] <= -1, ]
topdowns
#DE down&low
IDs.up = rownames(topups)
IDs.down = rownames(topdowns)
DEresults = decideTests(normal_ebFit,method='global',adjust.method="BH",p.value=0.05,lfc=1)
DEresultsDiverged = decideTests(normal_ebFitDiverged,method='global',adjust.method="BH",p.value=0.05,lfc=1)
DEresults
DEresultsDiverged
save.image("myfile")
topdowns
topups
DE <- topTable(normal_ebFit, number=100000, coef=1,adjust.method="BH", lfc=5)
write.table(DE,row.names=TRUE,col.names=TRUE,quote=TRUE,file="./DEIDsinfo_ifc5.txt")
write.table(DE,row.names=FALSE,col.names=FALSE,quote=FALSE,file="./DEIDs.txt")
DE <- topTable(normal_ebFit, number=100000, coef=1,adjust.method="BH", lfc=1)
write.table(DE,row.names=TRUE,col.names=TRUE,quote=TRUE,file="./DEIDsinfo_ifc1.txt")
write.table(DE,row.names=FALSE,col.names=FALSE,quote=FALSE,file="./DEIDs_ifc1.txt")
DE <- topTable(normal_ebFit, number=100000, coef=1,adjust.method="BH", lfc=2)
write.table(DE,row.names=TRUE,col.names=TRUE,quote=TRUE,file="./DEIDsinfo_ifc2.txt")
write.table(DE,row.names=FALSE,col.names=FALSE,quote=FALSE,file="./DEIDs_ifc2.txt")
DE <- topTable(normal_ebFit, number=100000, coef=1,adjust.method="BH", lfc=4)
write.table(DE,row.names=TRUE,col.names=TRUE,quote=TRUE,file="./DEIDsinfo_ifc4.txt")
write.table(DE,row.names=FALSE,col.names=FALSE,quote=FALSE,file="./DEIDs_ifc4.txt")
DE <- topTable(normal_ebFit, number=100000, coef=1,adjust.method="BH", lfc=3)
write.table(DE,row.names=TRUE,col.names=TRUE,quote=TRUE,file="./DEIDsinfo_ifc3.txt")
write.table(DE,row.names=FALSE,col.names=FALSE,quote=FALSE,file="./DEIDs_ifc3.txt")
DE_ifc5 <- topTable(normal_ebFit, number=100000, coef=1, lfc=5)
nrow(topTable(normal_ebFit, coef=1, number=10000, lfc=5))
nrow(topTable(normal_ebFit, coef=1, number=10000, lfc=4))
nrow(topTable(normal_ebFit, coef=1, number=10000, lfc=3))
nrow(topTable(normal_ebFit, coef=1, number=10000, lfc=2))
nrow(topTable(normal_ebFit, coef=1, number=10000, lfc=1))
dim(topups)
dim(topdowns)
# Heat map
geneCore.matrix.up = geneCore.matrix[(rownames(topups)),]
geneCore.matrix.down = geneCore.matrix[(rownames(topdowns)),]
geneCore.matrix.up
geneCore.matrix.down
sampleNames = vector()
featureNames = vector()
heatlogs = vector()
###################CHANGE################################################################################################3
if (dim(topdowns)[1]!=0)
{
for (i in 1:dim(geneCore.matrix.down)[2])
{
sampleNames = c(sampleNames,rep(colnames(geneCore.matrix.down)[i],dim(topdowns)[1]))
featureNames = c(featureNames,rownames(geneCore.matrix.down[1:dim(topdowns)[1],]))
heatlogs = c(heatlogs,geneCore.matrix.down[1:dim(topdowns)[1],i])
}
heatData = data.frame(norm_logInt=heatlogs,sampleName=sampleNames,featureName=featureNames)
svg(file="Heat_Map_GeneCore_up.svg",width=12,height=8)
dataHeat = ggplot(heatData, aes(sampleName,featureName))
dataHeat + geom_tile(aes(fill=norm_logInt)) + scale_fill_gradient(low="green", high="red")
dev.off()
}
if(dim(topups)[1]!=0)
{
for (i in 1:dim(geneCore.matrix.up)[2])
{
sampleNames = c(sampleNames,rep(colnames(geneCore.matrix.up)[i],dim(topups)[1]))
featureNames = c(featureNames,rownames(geneCore.matrix.up[1:dim(topups)[1],]))
heatlogs = c(heatlogs,geneCore.matrix.up[1:dim(topups)[1],i])
}
heatData = data.frame(norm_logInt=heatlogs,sampleName=sampleNames,featureName=featureNames)
svg(file="Heat_Map_GeneCore_up.svg",width=12,height=8)
dataHeat = ggplot(heatData, aes(sampleName,featureName))
dataHeat + geom_tile(aes(fill=norm_logInt)) + scale_fill_gradient(low="green", high="red")
dev.off()
}
# Illustrate the venn plot for genes
svg(file="Venn_GeneCore_normal_SLE.svg",width=12,height=8)
vennDiagram(DEresults)
dev.off()
svg(file="Venn_GeneCore_ori.svg",width=12,height=8)
vennDiagram(DEresultsDiverged)
dev.off()
|
# Read the MS1 spectra of an mzML/mzXML/netCDF file.
#
# filename - path to the raw file; a ".cdf" extension selects the netCDF backend.
#
# Returns a list with:
#   Mat      - data.frame (scans, index, mz, intensity) of all retained peaks
#   spectrum - list of per-scan data.frames (mz, intensity); rows with
#              intensity <= 1e-6 are dropped
#   times    - retention time of each MS1 scan
LoadData <- function(filename)
{
  library(mzR)
  splitname <- strsplit(filename,"\\.")[[1]]
  if(tolower(splitname[length(splitname)]) == "cdf")
  {
    msobj <- openMSfile(filename,backend="netCDF")
  }else{
    msobj <- openMSfile(filename)
  }
  # BUG FIX: close the handle even if an error occurs below (previously the
  # close() at the end was skipped on error, leaking the file handle)
  on.exit(close(msobj), add = TRUE)
  peakInfo <- peaks(msobj)
  headerInfo <- header(msobj)
  # keep MS level 1 scans only
  whMS1 <- which(headerInfo$msLevel==1)
  peakInfo <- peakInfo[whMS1]
  peakInfo <- lapply(peakInfo, function(spectrum) {
    keep <- spectrum[,2] > 1e-6
    output <- as.data.frame(spectrum[keep,,drop = FALSE])
    colnames(output) <- c('mz','intensity')
    return(output)
  })
  # flatten all scans into one table: (scan number, row index within scan, mz, intensity)
  peakNum <- unlist(lapply(peakInfo,nrow))
  index <- unlist(lapply(peakNum,function(PN){1:PN}))
  scans <- unlist(lapply(1:length(peakNum),function(s){rep(s,peakNum[s])}))
  Mat <- cbind(scans,index, do.call(rbind,peakInfo))
  scanTime <- headerInfo$retentionTime[whMS1]
  return(list(Mat=Mat,spectrum=peakInfo,times=scanTime))
}
# Collect candidate ions along one direction of an ROI.
#
# spectrum  - list of per-scan data.frames (mz, intensity)
# roi.scans - vector of scan numbers covered by the ROI
# roi.mzs   - c(lower, upper) m/z bounds of the ROI window
# positions - indices into roi.scans, in the order they should be visited
# missp     - the walk stops after more than `missp` consecutive scans with no hit
#
# Returns list(mat=, inds=): per-position lists (skipped positions stay NULL)
# of the matched spectrum rows and their (scan, row) index pairs.
collect_direction <- function(spectrum, roi.scans, roi.mzs, positions, missp){
  mat <- list()
  inds <- list()
  b <- 0  # consecutive-miss counter
  for (s in positions){
    if (b>missp){break}
    scan <- roi.scans[s]
    inc <- findInterval(roi.mzs,spectrum[[scan]][,1])
    if (inc[2]<=inc[1]){
      b <- b+1
      next}
    inc <- (inc[1]+1):inc[2]
    # drop ions whose intensity was zeroed by a previous PIC (already consumed)
    inc <- inc[spectrum[[scan]][inc,2]>0]
    if (length(inc)<1) {
      b <- b+1
      next
    }
    inds[[s]] <- cbind(rep(scan,length(inc)),inc)
    mat[[s]] <- spectrum[[scan]][inc,]
    b <- 0
  }
  list(mat=mat, inds=inds)
}
# Locate a region of interest around a reference scan/ion.
# Walks left and right from the reference scan, collecting every ion inside
# the m/z window until more than `missp` consecutive scans have no match
# (the two directional walks were previously duplicated inline).
#
# Returns list(roi.scans = first/last scan with a hit,
#              roi.inds  = (scan, row) indices of the collected ions,
#              roi.mat   = the collected (mz, intensity) rows, ordered by scan).
locateROI <- function(spectrum,ref.scan,roi.scans,roi.mzs,missp){
  roi.scans <- roi.scans[1]:roi.scans[2]
  p <- match(ref.scan,roi.scans)
  left <- collect_direction(spectrum, roi.scans, roi.mzs, p:1, missp)
  right <- collect_direction(spectrum, roi.scans, roi.mzs, p:length(roi.scans), missp)
  # merge the two walks; they overlap only at position p, where both collected
  # identical rows (spectrum is not modified in between)
  roi.mat <- left$mat
  roi.inds <- left$inds
  for (s in seq_along(right$mat)){
    if (!is.null(right$mat[[s]])){
      roi.mat[[s]] <- right$mat[[s]]
      roi.inds[[s]] <- right$inds[[s]]
    }
  }
  # rbind skips NULL holes; list index order keeps rows sorted by scan
  roi.mat <- do.call(rbind,roi.mat)
  roi.inds <- do.call(rbind,roi.inds)
  roi.scans <- c(roi.inds[1,1],roi.inds[nrow(roi.inds),1])
  return(list(roi.scans=roi.scans, roi.inds=roi.inds, roi.mat=roi.mat))
}
# One forecasting/selection pass over PIC positions, in the order of idx_seq.
# At each scan a Brown's double-exponential-smoothing forecast is made from the
# intensities accepted so far; among the candidate ions of that scan the one
# closest to the forecast is taken (and zeroed in `spectrum` so it cannot be
# reused); scans with no candidate are filled with the forecast itself.
refine_direction <- function(idx_seq,PIC.scans,PIC.intensi,ref.inte,select.ind,select.mat,spectrum,fst){
  # S1/S2: first/second smoothing series, seeded with the reference intensity
  S1 <- S2 <- ref.inte
  for (j in idx_seq){
    scan <- PIC.scans[j]
    # double exponential smoothing forecast of the next intensity
    tt <- length(S1)
    at <- 2*S1[tt]-S2[tt]
    bt <- fst/(1-fst)*(S1[tt]-S2[tt])
    fore.intensi <- max(0,at+bt)
    this.scan <- which(select.ind[,1]==scan)
    if (length(this.scan)>1){
      # several candidate ions in this scan: keep the one closest to the forecast
      intensi <- select.mat[this.scan,2]
      p <- this.scan[which.min(abs(intensi-fore.intensi))]
      PIC.intensi[j] <- select.mat[p,2]
      spectrum[[select.ind[p,1]]][select.ind[p,2],2] <- 0  # mark ion as consumed
    } else if(length(this.scan)==1){
      p <- this.scan
      PIC.intensi[j] <- select.mat[p,2]
      spectrum[[select.ind[p,1]]][select.ind[p,2],2] <- 0
    }else{
      # no candidate: fill the gap with the forecast
      PIC.intensi[j] <- fore.intensi
    }
    # update both smoothing series with the accepted intensity
    S1 <- c(S1,fst*PIC.intensi[j]+(1-fst)*S1[tt])
    S2 <- c(S2,fst*S1[tt+1]+(1-fst)*S2[tt])
  }
  list(PIC.intensi=PIC.intensi,spectrum=spectrum)
}
# Assign one intensity per PIC scan by exponential-smoothing forecasting,
# sweeping forward and backward from the reference scan.
# (The two sweeps were previously duplicated inline and both labelled
# "right side" — the second is in fact the left/backward side.)
ionRefine <- function(PIC.scans,ref.scan,ref.inte,select.ind,select.mat,spectrum,fst){
  PIC.intensi <- rep(0,length(PIC.scans))
  start.point <- which(PIC.scans==ref.scan)
  # forward pass: reference scan towards later scans
  fwd <- refine_direction(start.point:length(PIC.scans),PIC.scans,PIC.intensi,ref.inte,select.ind,select.mat,spectrum,fst)
  PIC.intensi <- fwd$PIC.intensi
  spectrum <- fwd$spectrum
  # backward pass: reference scan towards earlier scans
  bwd <- refine_direction(start.point:1,PIC.scans,PIC.intensi,ref.inte,select.ind,select.mat,spectrum,fst)
  PIC.intensi <- bwd$PIC.intensi
  spectrum <- bwd$spectrum
  return(list(spectrum=spectrum,PIC.scans=PIC.scans,PIC.intensi=PIC.intensi))
}
# Extract pure ion chromatograms (PICs) from an LC-MS file.
#
# filename  - path to an mzML/mzXML/CDF file (read via LoadData)
# roi_range - full m/z width of the region of interest around each seed ion
# level     - minimum intensity for a seed ion / detected peak
# min_snr   - minimum signal-to-noise ratio passed to peak detection
# peakwidth - c(min, max) expected chromatographic peak width in seconds
# fst       - smoothing factor of the exponential-smoothing forecast
# missp     - max number of consecutive scans without a matching ion
#
# Returns list(Info = per-PIC summary matrix, PICs = list of (rt, intensity)
# matrices, rt = retention times of all MS1 scans).
getPIC = function(filename,roi_range=0.1,level=500,min_snr=3,peakwidth=c(5,60),fst=0.3,missp=5){
  library(Ckmeans.1d.dp)
  library(data.table)
  # prepare output
  PICs <- list()
  Info <- NULL
  # load data
  data <- LoadData(filename)
  mat <- data$Mat
  rtlist <- data$times
  spectrum <- data$spectrum
  rm(data)
  # convert peak widths from seconds to scan counts, using rtlist[2]-rtlist[1]
  # as the scan interval (assumes evenly spaced scans — TODO confirm)
  min_width <- round(peakwidth[1]/(rtlist[2]-rtlist[1]))
  max_width <- round(peakwidth[2]/(rtlist[2]-rtlist[1])/2)
  mzrange <- roi_range/2
  # set seeds: every ion at or above `level`
  mat <- mat[mat[,'intensity']>=level,]
  # NOTE(review): ascending sort — the weakest seeds are processed first;
  # confirm this is intended (decreasing=TRUE would start from the strongest)
  mat <- mat[order(mat[,'intensity']),]
  for (i in 1:nrow(mat)) {
    ref.scan <- as.numeric(mat[i,'scans'])
    ref.index <- as.numeric(mat[i,'index'])
    ref.inte <- spectrum[[ref.scan]][ref.index,2]
    ref.mz <- spectrum[[ref.scan]][ref.index,1]
    # skip seeds already consumed by an earlier PIC (intensity zeroed below level)
    if (length(ref.inte)<1){next}
    if (ref.inte<level){next}
    # set range of roi
    roi.scans <- c(max(1,ref.scan-max_width),min(length(rtlist),ref.scan+max_width))
    roi.mzs <- c(ref.mz-mzrange,ref.mz+mzrange)
    roi.mat <- NULL
    roi.inds <-NULL
    # locate roi
    roi <- locateROI(spectrum,ref.scan,roi.scans,roi.mzs,missp)
    roi.scans <- roi$roi.scans
    roi.inds <- roi$roi.inds
    roi.mat <- roi$roi.mat
    rm(roi)
    # discard ROIs shorter than the minimum peak width (seed is consumed)
    if (roi.scans[2]-roi.scans[1]<min_width){
      spectrum[[ref.scan]][ref.index,2] <- 0
      next}
    # calculate m/z difference
    mzdiff <- (roi.mat[,1]-ref.mz)^2
    # 1-D k-means on the squared m/z deviation; keep the cluster nearest the seed
    r_kmeans <- Ckmeans.1d.dp(mzdiff, k=c(1,5))
    mincenter <- min(r_kmeans$centers)
    tClu <- which(r_kmeans$centers==mincenter)
    sel <- which(r_kmeans$cluster==tClu)
    if (length(sel)<min_width){next}
    select.mat <- roi.mat[sel,]
    select.ind <- roi.inds[sel,]
    # refine by exponential smoothing forecasting
    PIC.scans <- roi.scans[1]:roi.scans[2]
    pic <- ionRefine(PIC.scans,ref.scan,ref.inte,select.ind,select.mat,spectrum,fst)
    spectrum <- pic$spectrum
    PIC.scans <- pic$PIC.scans
    PIC.intensi <- pic$PIC.intensi
    rm(pic)
    # peak detection; NOTE(review): peaks_detection() is not defined in this
    # file — presumably provided elsewhere in the package, confirm.
    r_peak_detection <- peaks_detection(PIC.intensi,min_snr,level,missp)
    # NOTE(review): "$signal" here vs "$signals" below — "$" partial matching
    # on a list makes "$signal" resolve to "signals" when no exact element
    # exists, but the inconsistent names should be confirmed. Also note that
    # mainPeak is computed before the empty-peakIndex check.
    mainPeak <- which.max(r_peak_detection$signal)
    if (length(r_peak_detection$peakIndex)==0){
      next
    }
    if (r_peak_detection$signals[mainPeak]<level){
      next
    }
    # collect information of the PIC
    peak_rt <- rtlist[PIC.scans[r_peak_detection$peakIndex[mainPeak]]]
    peak_snr <- r_peak_detection$snr[mainPeak]
    peak_scale <- r_peak_detection$peakScale[mainPeak]
    rtmin <- rtlist[PIC.scans[1]]
    rtmax <- rtlist[PIC.scans[length(PIC.scans)]]
    # m/z relative standard deviation in ppm
    mz_rsd <- sd(select.mat[,1])/mean(select.mat[,1])*1000000
    output <- c(ref.mz,
                min(select.mat[,1]),
                max(select.mat[,1]),
                peak_rt,
                rtmin,
                rtmax,
                max(PIC.intensi),
                peak_snr,
                peak_scale,
                mz_rsd)
    # output PIC.i
    Info <- rbind(Info,output)
    PIC.i <- cbind(rtlist[PIC.scans],PIC.intensi)
    colnames(PIC.i) <- c('rt','intensity')
    PICs <- c(PICs,list(PIC.i))
  }
  colnames(Info) <- c("mz","mzmin","mzmax","rt","rtmin","rtmax","maxo","snr","scale","rsd")
  index <- 1:nrow(Info)
  Info <- cbind(index,Info)
  gc()
  return(list(Info=Info,PICs=PICs,rt=rtlist))
}
| /R/getPIC.R | no_license | tentrillion/KPIC2 | R | false | false | 7,703 | r | LoadData <- function(filename)
{
library(mzR)
splitname <- strsplit(filename,"\\.")[[1]]
if(tolower(splitname[length(splitname)]) == "cdf")
{
msobj <- openMSfile(filename,backend="netCDF")
}else{
msobj <- openMSfile(filename)
}
peakInfo <- peaks(msobj)
headerInfo <- header(msobj)
whMS1 <- which(headerInfo$msLevel==1)
peakInfo <- peakInfo[whMS1]
peakInfo <- lapply(peakInfo, function(spectrum) {
keep <- spectrum[,2] > 1e-6
output <- as.data.frame(spectrum[keep,,drop = FALSE])
colnames(output) <- c('mz','intensity')
return(output)
})
peakNum <- unlist(lapply(peakInfo,nrow))
index <- unlist(lapply(peakNum,function(PN){1:PN}))
scans <- unlist(lapply(1:length(peakNum),function(s){rep(s,peakNum[s])}))
Mat <- cbind(scans,index, do.call(rbind,peakInfo))
scanTime <- headerInfo$retentionTime[whMS1]
close(msobj)
return(list(Mat=Mat,spectrum=peakInfo,times=scanTime))
}
locateROI <- function(spectrum,ref.scan,roi.scans,roi.mzs,missp){
roi.mat <- list()
roi.inds <- list()
roi.scans <- roi.scans[1]:roi.scans[2]
p <- match(ref.scan,roi.scans)
# left direction
b <- 0
for (s in p:1){
if (b>missp){break}
scan <- roi.scans[s]
inc <- findInterval(roi.mzs,spectrum[[scan]][,1])
if (inc[2]<=inc[1]){
b <- b+1
next}
inc <- (inc[1]+1):inc[2]
inc <- inc[spectrum[[scan]][inc,2]>0]
if (length(inc)<1) {
b <- b+1
next
} # check if used
scan.inds <- cbind(rep(scan,length(inc)),inc)
scan.mat <- spectrum[[scan]][inc,]
roi.mat[[s]] <- scan.mat
roi.inds[[s]] <- scan.inds
b <- 0
}
# right direction
b <- 0
for (s in p:length(roi.scans)){
if (b>missp){break}
scan <- roi.scans[s]
inc <- findInterval(roi.mzs,spectrum[[scan]][,1])
if (inc[2]<=inc[1]){
b <- b+1
next}
inc <- (inc[1]+1):inc[2]
inc <- inc[spectrum[[scan]][inc,2]>0]
if (length(inc)<1) {
b <- b+1
next
} # check if used
scan.inds <- cbind(rep(scan,length(inc)),inc)
scan.mat <- spectrum[[scan]][inc,]
roi.mat[[s]] <- scan.mat
roi.inds[[s]] <- scan.inds
b <- 0
}
roi.mat <- do.call(rbind,roi.mat)
roi.inds <- do.call(rbind,roi.inds)
roi.scans <- c(roi.inds[1,1],roi.inds[nrow(roi.inds),1])
return(list(roi.scans=roi.scans, roi.inds=roi.inds, roi.mat=roi.mat))
}
ionRefine <- function(PIC.scans,ref.scan,ref.inte,select.ind,select.mat,spectrum,fst){
  ## Assign one intensity per scan of a PIC by walking outwards from the
  ## reference scan, using Brown's double ("2'") exponential smoothing to
  ## forecast the expected intensity at each step.
  ##
  ## PIC.scans  : scan numbers covered by the PIC
  ## ref.scan   : seed scan number (an element of PIC.scans)
  ## ref.inte   : seed intensity, used to initialise both smoothers
  ## select.ind : matrix of (scan, within-scan index) for candidate centroids
  ## select.mat : matrix of (m/z, intensity) for the same candidates
  ## spectrum   : list of per-scan centroid matrices; every centroid chosen
  ##              here is zeroed in place so it cannot seed another PIC
  ## fst        : smoothing weight w (expected in (0,1) -- see w/(1-w) below)
  ## Returns list(spectrum = updated spectrum, PIC.scans, PIC.intensi).
  ## Note: j == start.point is visited by both loops; the second pass simply
  ## re-derives the same value from the re-initialised smoothers.
  PIC.intensi <- rep(0,length(PIC.scans))
  start.point <- which(PIC.scans==ref.scan)
  # right side: from the reference scan towards the end of the PIC
  S1 <- S2 <- ref.inte
  for (j in start.point:length(PIC.scans)){
    scan <- PIC.scans[j]
    # 2' exponential smoothing forecast of this scan's intensity
    tt <- length(S1)
    w <- fst
    at <- 2*S1[tt]-S2[tt]
    bt <- w/(1-w)*(S1[tt]-S2[tt])
    fore.intensi <- max(0,at+bt)
    this.scan <- which(select.ind[,1]==scan)
    if (length(this.scan)>1){
      # several candidates in this scan: keep the one closest to the forecast
      intensi <- select.mat[this.scan,2]
      p <- this.scan[which.min(abs(intensi-fore.intensi))]
      PIC.intensi[j] <- select.mat[p,2]
      spectrum[[select.ind[p,1]]][select.ind[p,2],2] <- 0
    } else if(length(this.scan)==1){
      p <- this.scan
      PIC.intensi[j] <- select.mat[p,2]
      spectrum[[select.ind[p,1]]][select.ind[p,2],2] <- 0
    }else{
      # no candidate in this scan: fall back to the forecast itself
      PIC.intensi[j] <- fore.intensi
    }
    # 2' exponential smoothing update with the accepted intensity
    S1 <- c(S1,w*PIC.intensi[j]+(1-w)*S1[tt])
    S2 <- c(S2,w*S1[tt+1]+(1-w)*S2[tt])
  }
  # left side: from the reference scan back towards the start of the PIC
  # (the original comment here wrongly said "right side" again)
  S1 <- S2 <- ref.inte
  for (j in start.point:1){
    scan <- PIC.scans[j]
    # 2' exponential smoothing forecast
    tt <- length(S1)
    w <- fst
    at <- 2*S1[tt]-S2[tt]
    bt <- w/(1-w)*(S1[tt]-S2[tt])
    fore.intensi <- max(0,at+bt)
    this.scan <- which(select.ind[,1]==scan)
    if (length(this.scan)>1){
      intensi <- select.mat[this.scan,2]
      p <- this.scan[which.min(abs(intensi-fore.intensi))]
      PIC.intensi[j] <- select.mat[p,2]
      spectrum[[select.ind[p,1]]][select.ind[p,2],2] <- 0
    } else if(length(this.scan)==1){
      p <- this.scan
      PIC.intensi[j] <- select.mat[p,2]
      spectrum[[select.ind[p,1]]][select.ind[p,2],2] <- 0
    }else{
      PIC.intensi[j] <- fore.intensi
    }
    # 2' exponential smoothing
    S1 <- c(S1,w*PIC.intensi[j]+(1-w)*S1[tt])
    S2 <- c(S2,w*S1[tt+1]+(1-w)*S2[tt])
  }
  return(list(spectrum=spectrum,PIC.scans=PIC.scans,PIC.intensi=PIC.intensi))
}
getPIC = function(filename,roi_range=0.1,level=500,min_snr=3,peakwidth=c(5,60),fst=0.3,missp=5){
  ## Extract pure ion chromatograms (PICs) from one raw MS data file.
  ##
  ## filename  : path accepted by LoadData()
  ## roi_range : full m/z width of a region of interest (ROI)
  ## level     : minimum seed / peak intensity
  ## min_snr   : minimum signal-to-noise ratio passed to peaks_detection()
  ## peakwidth : c(min,max) expected chromatographic peak width, in seconds
  ## fst       : smoothing factor forwarded to ionRefine()
  ## missp     : maximum run of missing scans tolerated inside a PIC
  ## Returns   : list(Info = PIC descriptor matrix (NULL when nothing found),
  ##             PICs = list of rt/intensity matrices, rt = retention times)
  library(Ckmeans.1d.dp)   # NOTE: attaches packages globally (historic behavior kept)
  library(data.table)
  # prepare output
  PICs <- list()
  Info <- NULL
  # load data
  data <- LoadData(filename)
  mat <- data$Mat
  rtlist <- data$times
  spectrum <- data$spectrum
  rm(data)
  # convert peak widths (seconds) to scan counts, assuming a constant scan rate
  min_width <- round(peakwidth[1]/(rtlist[2]-rtlist[1]))
  max_width <- round(peakwidth[2]/(rtlist[2]-rtlist[1])/2)
  mzrange <- roi_range/2
  # set seeds: all centroids above `level`, visited from weakest to strongest
  mat <- mat[mat[,'intensity']>=level,]
  mat <- mat[order(mat[,'intensity']),]
  # seq_len() is safe when the filtered matrix has zero rows (1:0 would not be)
  for (i in seq_len(nrow(mat))) {
    ref.scan <- as.numeric(mat[i,'scans'])
    ref.index <- as.numeric(mat[i,'index'])
    ref.inte <- spectrum[[ref.scan]][ref.index,2]
    ref.mz <- spectrum[[ref.scan]][ref.index,1]
    # skip seeds already consumed (zeroed) by a previously extracted PIC
    if (length(ref.inte)<1){next}
    if (ref.inte<level){next}
    # set range of roi
    roi.scans <- c(max(1,ref.scan-max_width),min(length(rtlist),ref.scan+max_width))
    roi.mzs <- c(ref.mz-mzrange,ref.mz+mzrange)
    roi.mat <- NULL
    roi.inds <-NULL
    # locate roi
    roi <- locateROI(spectrum,ref.scan,roi.scans,roi.mzs,missp)
    roi.scans <- roi$roi.scans
    roi.inds <- roi$roi.inds
    roi.mat <- roi$roi.mat
    rm(roi)
    # check roi length: too short to hold the narrowest expected peak
    if (roi.scans[2]-roi.scans[1]<min_width){
      spectrum[[ref.scan]][ref.index,2] <- 0
      next}
    # calculate m/z difference
    mzdiff <- (roi.mat[,1]-ref.mz)^2
    # kmeans cluster: keep the m/z cluster closest to the seed
    r_kmeans <- Ckmeans.1d.dp(mzdiff, k=c(1,5))
    mincenter <- min(r_kmeans$centers)
    tClu <- which(r_kmeans$centers==mincenter)
    sel <- which(r_kmeans$cluster==tClu)
    if (length(sel)<min_width){next}
    select.mat <- roi.mat[sel,]
    select.ind <- roi.inds[sel,]
    # refine by exponential smoothing forecasting
    PIC.scans <- roi.scans[1]:roi.scans[2]
    pic <- ionRefine(PIC.scans,ref.scan,ref.inte,select.ind,select.mat,spectrum,fst)
    spectrum <- pic$spectrum
    PIC.scans <- pic$PIC.scans
    PIC.intensi <- pic$PIC.intensi
    rm(pic)
    # peak detection; bail out early when nothing was detected
    r_peak_detection <- peaks_detection(PIC.intensi,min_snr,level,missp)
    if (length(r_peak_detection$peakIndex)==0){
      next
    }
    # strongest detected peak; use the full element name `signals`
    # (the original `$signal` relied on R's partial matching for lists)
    mainPeak <- which.max(r_peak_detection$signals)
    if (r_peak_detection$signals[mainPeak]<level){
      next
    }
    # collect infomation of PIC
    peak_rt <- rtlist[PIC.scans[r_peak_detection$peakIndex[mainPeak]]]
    peak_snr <- r_peak_detection$snr[mainPeak]
    peak_scale <- r_peak_detection$peakScale[mainPeak]
    rtmin <- rtlist[PIC.scans[1]]
    rtmax <- rtlist[PIC.scans[length(PIC.scans)]]
    # relative standard deviation of the m/z values, in ppm
    mz_rsd <- sd(select.mat[,1])/mean(select.mat[,1])*1000000
    output <- c(ref.mz,
                min(select.mat[,1]),
                max(select.mat[,1]),
                peak_rt,
                rtmin,
                rtmax,
                max(PIC.intensi),
                peak_snr,
                peak_scale,
                mz_rsd)
    # output PIC.i
    Info <- rbind(Info,output)
    PIC.i <- cbind(rtlist[PIC.scans],PIC.intensi)
    colnames(PIC.i) <- c('rt','intensity')
    PICs <- c(PICs,list(PIC.i))
  }
  # no PIC survived the filters: return gracefully instead of failing on
  # colnames(NULL)
  if (is.null(Info)){
    gc()
    return(list(Info=NULL,PICs=PICs,rt=rtlist))
  }
  colnames(Info) <- c("mz","mzmin","mzmax","rt","rtmin","rtmax","maxo","snr","scale","rsd")
  index <- 1:nrow(Info)
  Info <- cbind(index,Info)
  gc()
  return(list(Info=Info,PICs=PICs,rt=rtlist))
}
|
# hist() Answers
# (teaching answer key: each task comment is followed by its solution)
# read in the safi data, then do the tasks below
safi <- read.csv("data/SAFI_clean.csv",
                 na = c("", "NULL", "NA"),
                 stringsAsFactors = FALSE) # for R version >= 4.0, stringsAsFactors = FALSE is the default
# Make a histogram with the hist() function of the years_liv variable in the safi data
hist(safi$years_liv)
# Change the number of bins in the histogram to 20
# (note: `breaks` is a suggestion -- hist() may round it to "pretty" cut points)
hist(safi$years_liv, breaks = 20)
# Fill in the bars with gray; remember you can use color names in quotes as one way to specify colors.
hist(safi$years_liv, breaks=20, col="gray")
# Change the title and axis labels of the plot to more reasonable values
hist(safi$years_liv, breaks=20, col="gray",
     main="Distribution of Years Lived in House",
     xlab="Years", ylab="Number of Respondents")
# Now make a histogram on your own of another variable in the safi dataset, and change the number of bins.
# Remember, histograms are for continuous variables.
# For categorical variables, you use a bar plot.
hist(safi$no_membrs, breaks=5)
# Bonus: barplot() is the function for a bar plot. But you need to count how many of each value exists first.
# You can do this with the table function:
table(safi$village)
barplot(table(safi$village))
| /exercises/part3/hist_answers.R | permissive | han-tun/r-online-2020 | R | false | false | 1,270 | r | # hist() Answers
# read in the safi data, then do the tasks below
safi <- read.csv("data/SAFI_clean.csv",
na = c("", "NULL", "NA"),
stringsAsFactors = FALSE) # for R version >= 4.0, stringsAsFactors = FALSE is the default
# Make a histogram with the hist() function of the years_liv variable in the safi data
hist(safi$years_liv)
# Change the number of bins in the histogram to 20
hist(safi$years_liv, breaks = 20)
# Fill in the bars with gray; remember you can use color names in quotes as one way to specify colors.
hist(safi$years_liv, breaks=20, col="gray")
# Change the title and axis labels of the plot to more reasonable values
hist(safi$years_liv, breaks=20, col="gray",
main="Distribution of Years Lived in House",
xlab="Years", ylab="Number of Respondents")
# Now make a histogram on your own of another variable in the safi dataset, and change the number of bins.
# Remember, histograms are for continuous variables.
# For categorical variables, you use a bar plot.
hist(safi$no_membrs, breaks=5)
# Bonus: barplot() is the function for a bar plot. But you need to count how many of each value exists first.
# You can do this with the table function:
table(safi$village)
barplot(table(safi$village))
|
## Bar plots and pie charts describing respondents' economic satisfaction.
## Expects the data frames `giov`, `giov_laur_ita` and `giov_dipl_ita` to be
## in the workspace; titles/legends are intentionally in Italian.
# set barplots beside each other in a 2x2 grid
par(mfrow= c(2,2))
# define frequency table for first barplot
freq<-table(giov$SITEC)
# first barplot
barplot(freq,main="Valutazione del livello di soddisfazione economica \n laureati e diplomati (ultimi 12 mesi)",
        legend=TRUE,args.legend=list(cex=0.60,x="topright"),adj=0.5,
        ylab="frequenze assolute",col=c("darkred", "red", "lightblue", "blue"),
        cex.main=0.8, las=1,cex.names = 0.6,cex.axis=0.7,cex.lab=0.6, ylim=c(0,2500), horiz=FALSE)
# define frequency table for second barplot
freq1<-table(giov$SITE)
# second barplot
barplot(freq1,names.arg = c("molto peggiorata", "un pò peggiorata", "quasi inalterata", "un pò migliorata",
                            "molto migliorata") ,
        main="Valutazione della situazione economica familiare \n rispetto all'anno precedente",
        cex.main=0.8,las=2,ylab="frequenze assolute ",
        legend.text=c("molto peggiorata", "un pò peggiorata", "quasi inalterata", "un pò migliorata","molto migliorata"),
        args.legend=list(cex=0.60,x="topright"), cex.names = 0.65,col=c("darkred", "red", "yellow", "green", "darkgreen"),ylim=c(0,3500),cex.lab=0.6,cex.axis=0.7, horiz=FALSE)
# define frequency table for third barplot
freq2<-table(giov$RISEC)
# third barplot (the original comment mislabelled this "second")
# FIX: the original call passed `las` twice (las=1.5 and las=1) -- a
# duplicated argument, and 1.5 is not a valid `las` value; only las=1 is kept.
barplot(freq2, names.arg = c("ass. insufficenti", "scarse", "adeguate", "ottime"),
        main="Valutazione delle risorse \n economiche familiari (ultimi 12 mesi)",cex.main=0.8,
        legend.text=c("assolutamente \n insufficenti","scarse","adeguate","ottime"),
        args.legend=list(cex=0.50,x="topright"), ylab="frequenze assolute",col=c("darkred", "darkorange", "green", "darkgreen"), cex.names = 0.6,las=1,cex.axis=0.7,cex.lab=0.6, ylim=c(0,3500), horiz=FALSE)
# set piecharts beside each other
par(mfcol=c(1,2))
# first piechart: graduates, shares expressed in percent
pct<-round(table(na.omit(giov_laur_ita$SITEC))/length(giov_laur_ita$FUTUASP)*100,2)
et<-paste("",pct,"%",sep=" ")
pie(pct,labels=et,
    main="Valutazione del livello di soddisfazione \n economica laureati (ultimi 12 mesi)",
    cex.main=0.8,
    col=c("darkred", "red", "lightblue", "blue"))
legend(x=0.3,y=-0.8, legend=c("per niente", "poco", "abbastanza", "molto"),pch=22,border="grey", col=c("darkred", "red", "lightblue", "blue"),cex=0.6)
# second piechart: high-school graduates
pct1<-round(table(na.omit(giov_dipl_ita$SITEC))/length(giov_dipl_ita$FUTUASP)*100,2)
et<-paste("",pct1,"%",sep=" ")
pie(pct1,labels=et,
    main="Valutazione del livello di soddisfazione \n economica diplomati (ultimi 12 mesi)",
    cex.main=0.8,
    col=c("darkred", "red", "lightblue", "blue"))
| /3.Monovariate Analysis/b_economic_satisfaction.r | no_license | IacopoTesti/Data_Science_for_Sustainability | R | false | false | 2,571 | r | # set barplot besides
par(mfrow= c(2,2))
# define frequence table for first barplot
freq<-table(giov$SITEC)
# first barplot
barplot(freq,main="Valutazione del livello di soddisfazione economica \n laureati e diplomati (ultimi 12 mesi)",
legend=TRUE,args.legend=list(cex=0.60,x="topright"),adj=0.5,
ylab="frequenze assolute",col=c("darkred", "red", "lightblue", "blue"),
cex.main=0.8, las=1,cex.names = 0.6,cex.axis=0.7,cex.lab=0.6, ylim=c(0,2500), horiz=FALSE)
# define frequence table for second barplot
freq1<-table(giov$SITE)
# second barplot
barplot(freq1,names.arg = c("molto peggiorata", "un pò peggiorata", "quasi inalterata", "un pò migliorata",
"molto migliorata") ,
main="Valutazione della situazione economica familiare \n rispetto all'anno precedente",
cex.main=0.8,las=2,ylab="frequenze assolute ",
legend.text=c("molto peggiorata", "un pò peggiorata", "quasi inalterata", "un pò migliorata","molto migliorata"),
args.legend=list(cex=0.60,x="topright"), cex.names = 0.65,col=c("darkred", "red", "yellow", "green", "darkgreen"),ylim=c(0,3500),cex.lab=0.6,cex.axis=0.7, horiz=FALSE)
# define frequence table for third barplot
freq2<-table(giov$RISEC)
# second barplot
barplot(freq2, names.arg = c("ass. insufficenti", "scarse", "adeguate", "ottime"),
main="Valutazione delle risorse \n economiche familiari (ultimi 12 mesi)",cex.main=0.8,
legend.text=c("assolutamente \n insufficenti","scarse","adeguate","ottime"),
args.legend=list(cex=0.50,x="topright"), ylab="frequenze assolute",col=c("darkred", "darkorange", "green", "darkgreen"), las=1.5,cex.names = 0.6,las=1,cex.axis=0.7,cex.lab=0.6, ylim=c(0,3500), horiz=FALSE)
# set piecharts besides
par(mfcol=c(1,2))
# first piechart
pct<-round(table(na.omit(giov_laur_ita$SITEC))/length(giov_laur_ita$FUTUASP)*100,2)
et<-paste("",pct,"%",sep=" ")
pie(pct,labels=et,
main="Valutazione del livello di soddisfazione \n economica laureati (ultimi 12 mesi)",
cex.main=0.8,
col=c("darkred", "red", "lightblue", "blue"))
legend(x=0.3,y=-0.8, legend=c("per niente", "poco", "abbastanza", "molto"),pch=22,border="grey", col=c("darkred", "red", "lightblue", "blue"),cex=0.6)
# second piechart
pct1<-round(table(na.omit(giov_dipl_ita$SITEC))/length(giov_dipl_ita$FUTUASP)*100,2)
et<-paste("",pct1,"%",sep=" ")
pie(pct1,labels=et,
main="Valutazione del livello di soddisfazione \n economica diplomati (ultimi 12 mesi)",
cex.main=0.8,
col=c("darkred", "red", "lightblue", "blue"))
|
options(digits = 4)
model_results <- data.frame(
LASSO = min(lasso_caret_best$results$RMSE),
XGB = min(xgb_caret$results$RMSE),
XGB_LASSO = min(xgb_caret_lasso$results$RMSE),
RF = min(ranger_rf$results$RMSE),
RF_LASSO = min(ranger_rf_lasso$results$RMSE)
)
## FINDING THE HOLD OUT SET JUST TO SEE AN ACTUAL SUBMISSION SCORE NOT FOR TRAINING!!!
df_real <- read.csv('data/kc_house_data.csv')
df_real <- as.data.table(df_real[,c('id','price')])
df_real <- df_real %>%
group_by(id) %>%
summarise(price = mean(price))
df_test_id <- as.data.table(df_test[,'id'])
df_test_real <- merge(df_test_id, df_real, by.x = "V1", by.y = "id")
colnames(df_test_real) <- c('id','price')
## GATHERING PREDICTIONS
predictions_df <- as.data.frame(df_test_id$V1)
predictions_df <- cbind(predictions_df, prediction_lasso)
predictions_df <- cbind(predictions_df, prediction_rf)
predictions_df <- cbind(predictions_df, prediction_rf_lasso)
predictions_df <- cbind(predictions_df, prediction_xgb)
predictions_df <- cbind(predictions_df, prediction_xgb_lasso)
colnames(predictions_df) <- c('id','LASSO','RF','RF_LASSO','XGB','XGB_LASSO')
predictions_df <- merge(as.data.table(predictions_df),as.data.table(df_test_real), by.x='id',by.y='id')
# Average of Three
predictions_df$MEAN_XGB_LASSO_RF <- rowMeans(subset(predictions_df, select = c(XGB, LASSO, RF)), na.rm = TRUE)
# Weighted Average
weight <- c(2.5,1,0.5)
predictions_df$MEAN_XGB_LASSO_RF_weighted <- apply(subset(predictions_df, select = c(XGB, LASSO, RF)), 1, function(d) weighted.mean(d, weight, na.rm = TRUE))
## NEW METRICS
model_results_test <- data.frame(matrix(c(
RMSE(predictions_df$LASSO,predictions_df$price),
RMSE(predictions_df$RF,predictions_df$price),
RMSE(predictions_df$RF_LASSO,predictions_df$price),
RMSE(predictions_df$XGB,predictions_df$price),
RMSE(predictions_df$XGB_LASSO,predictions_df$price),
RMSE(predictions_df$MEAN_XGB_LASSO_RF,predictions_df$price),
RMSE(predictions_df$MEAN_XGB_LASSO_RF_weighted,predictions_df$price),
MAE(predictions_df$LASSO,predictions_df$price),
MAE(predictions_df$RF,predictions_df$price),
MAE(predictions_df$RF_LASSO,predictions_df$price),
MAE(predictions_df$XGB,predictions_df$price),
MAE(predictions_df$XGB_LASSO,predictions_df$price),
MAE(predictions_df$MEAN_XGB_LASSO_RF,predictions_df$price),
MAE(predictions_df$MEAN_XGB_LASSO_RF_weighted,predictions_df$price),
MAPE(predictions_df$LASSO,predictions_df$price),
MAPE(predictions_df$RF,predictions_df$price),
MAPE(predictions_df$RF_LASSO,predictions_df$price),
MAPE(predictions_df$XGB,predictions_df$price),
MAPE(predictions_df$XGB_LASSO,predictions_df$price),
MAPE(predictions_df$MEAN_XGB_LASSO_RF,predictions_df$price),
MAPE(predictions_df$MEAN_XGB_LASSO_RF_weighted,predictions_df$price)),
nrow = 7,
ncol = 3))
colnames(model_results_test) <- c('RMSE','MAE','MAPE')
rownames(model_results_test) <- c('LASSO','RF','RF_LASSO','XGB','XGB_LASSO','AVG_RF_XGB_LASSO','W_AVG_RF_XGB_LASSO')
write.csv(predictions_df[,c('id','MEAN_XGB_LASSO_RF')], 'submission_stavros_tsentemeidis.csv',row.names = F)
| /scripts/final_metrics.R | no_license | stsentemeidis/Machine_Learning_House_Prices_Seattle | R | false | false | 3,152 | r |
options(digits = 4)
model_results <- data.frame(
LASSO = min(lasso_caret_best$results$RMSE),
XGB = min(xgb_caret$results$RMSE),
XGB_LASSO = min(xgb_caret_lasso$results$RMSE),
RF = min(ranger_rf$results$RMSE),
RF_LASSO = min(ranger_rf_lasso$results$RMSE)
)
## FINDING THE HOLD OUT SET JUST TO SEE AN ACTUAL SUBMISSION SCORE NOT FOR TRAINING!!!
df_real <- read.csv('data/kc_house_data.csv')
df_real <- as.data.table(df_real[,c('id','price')])
df_real <- df_real %>%
group_by(id) %>%
summarise(price = mean(price))
df_test_id <- as.data.table(df_test[,'id'])
df_test_real <- merge(df_test_id, df_real, by.x = "V1", by.y = "id")
colnames(df_test_real) <- c('id','price')
## GATHERING PREDICTIONS
predictions_df <- as.data.frame(df_test_id$V1)
predictions_df <- cbind(predictions_df, prediction_lasso)
predictions_df <- cbind(predictions_df, prediction_rf)
predictions_df <- cbind(predictions_df, prediction_rf_lasso)
predictions_df <- cbind(predictions_df, prediction_xgb)
predictions_df <- cbind(predictions_df, prediction_xgb_lasso)
colnames(predictions_df) <- c('id','LASSO','RF','RF_LASSO','XGB','XGB_LASSO')
predictions_df <- merge(as.data.table(predictions_df),as.data.table(df_test_real), by.x='id',by.y='id')
# Average of Three
predictions_df$MEAN_XGB_LASSO_RF <- rowMeans(subset(predictions_df, select = c(XGB, LASSO, RF)), na.rm = TRUE)
# Weighted Average
weight <- c(2.5,1,0.5)
predictions_df$MEAN_XGB_LASSO_RF_weighted <- apply(subset(predictions_df, select = c(XGB, LASSO, RF)), 1, function(d) weighted.mean(d, weight, na.rm = TRUE))
## NEW METRICS
model_results_test <- data.frame(matrix(c(
RMSE(predictions_df$LASSO,predictions_df$price),
RMSE(predictions_df$RF,predictions_df$price),
RMSE(predictions_df$RF_LASSO,predictions_df$price),
RMSE(predictions_df$XGB,predictions_df$price),
RMSE(predictions_df$XGB_LASSO,predictions_df$price),
RMSE(predictions_df$MEAN_XGB_LASSO_RF,predictions_df$price),
RMSE(predictions_df$MEAN_XGB_LASSO_RF_weighted,predictions_df$price),
MAE(predictions_df$LASSO,predictions_df$price),
MAE(predictions_df$RF,predictions_df$price),
MAE(predictions_df$RF_LASSO,predictions_df$price),
MAE(predictions_df$XGB,predictions_df$price),
MAE(predictions_df$XGB_LASSO,predictions_df$price),
MAE(predictions_df$MEAN_XGB_LASSO_RF,predictions_df$price),
MAE(predictions_df$MEAN_XGB_LASSO_RF_weighted,predictions_df$price),
MAPE(predictions_df$LASSO,predictions_df$price),
MAPE(predictions_df$RF,predictions_df$price),
MAPE(predictions_df$RF_LASSO,predictions_df$price),
MAPE(predictions_df$XGB,predictions_df$price),
MAPE(predictions_df$XGB_LASSO,predictions_df$price),
MAPE(predictions_df$MEAN_XGB_LASSO_RF,predictions_df$price),
MAPE(predictions_df$MEAN_XGB_LASSO_RF_weighted,predictions_df$price)),
nrow = 7,
ncol = 3))
colnames(model_results_test) <- c('RMSE','MAE','MAPE')
rownames(model_results_test) <- c('LASSO','RF','RF_LASSO','XGB','XGB_LASSO','AVG_RF_XGB_LASSO','W_AVG_RF_XGB_LASSO')
write.csv(predictions_df[,c('id','MEAN_XGB_LASSO_RF')], 'submission_stavros_tsentemeidis.csv',row.names = F)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_runtiming_samples.R
\name{get_runtiming_samples}
\alias{get_runtiming_samples}
\title{Estimate parameters of hierarchical logistic regression using MCMC routine.}
\usage{
get_runtiming_samples(runtiming_data, file_dir, niter = 1e+05, ncores = 3)
}
\arguments{
\item{runtiming_data}{Output of \code{\link{prepare_runtiming_data}}.}
\item{niter}{The number of MCMC iterations. 100k will take roughly 6 minutes whereas 500k will take 30 minutes.}
\item{ncores}{The number of cores for parallel chains.}
}
\description{
The sonar species apportionment model uses species priors that are weakly informed by a logistic curve. To estimate these parameters, a hierarchical logistic regression model is used where the non-expanded visual counts for each day are used as input data. Regression coefficients are treated as hierarchically distributed. Since the timing of the chum and king runs for the Chena and Salcha rivers tend to be similar each year, the coefficient pairs (intercepts for both rivers, slopes for both rivers) are assumed to be MVN, allowing the inclusion of a correlation parameter. If one river's sonar season is compromised, this allows the borrowing of information from the other river.
}
\author{
Matt Tyres and Jordy Bernard.
}
| /man/get_runtiming_samples.Rd | no_license | jBernardADFG/ChenaSalchaTowerSonar | R | false | true | 1,329 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_runtiming_samples.R
\name{get_runtiming_samples}
\alias{get_runtiming_samples}
\title{Estimate parameters of hierarchical logistic regression using MCMC routine.}
\usage{
get_runtiming_samples(runtiming_data, file_dir, niter = 1e+05, ncores = 3)
}
\arguments{
\item{runtiming_data}{Output of \code{\link{prepare_runtiming_data}}.}
\item{niter}{The number of MCMC iterations. 100k will take roughly 6 minutes whereas 500k will take 30 minutes.}
\item{ncores}{The number of cores for parallel chains.}
}
\description{
The sonar species apportionment model uses species priors that are weakly informed by a logistic curve. To estimate these parameters, a hierarchical logistic regression model is used where the non-expanded visual counts for each day are used as input data. Regression coefficients are treated as hierarchically distributed. Since the timing of the chum and king runs for the Chena and Salcha rivers tend to be similar each year, the coefficient pairs (intercepts for both rivers, slopes for both rivers) are assumed to be MVN, allowing the inclusion of a correlation parameter. If one river's sonar season is compromised, this allows the borrowing of information from the other river.
}
\author{
Matt Tyres and Jordy Bernard.
}
|
# This is the server logic for a Shiny web application.
library(shiny)
library(dplyr)
library(ggplot2)
# load dataset from the current directory
UMLibrarySalarly2002_2014 <- read.csv("UMLibrarySalary2002_2014.csv")
shinyServer(function(input, output) {
output$salaryPlot <- renderPlot({
# calculating and plotting based on input$appointment_title from ui.R
salarydata <- UMLibrarySalarly2002_2014 %>%
filter(APPOINTMENT.TITLE == input$appointment_title) %>%
group_by(Year) %>%
mutate(count = n())
# draw the plot for trends of salary change for the selected appointment title
ggplot(data = salarydata, aes(x = as.factor(Year), y = APPT.ANNUAL.FTR)) +
geom_boxplot(aes(fill=count)) +
scale_fill_continuous(low = "#edf3f8", high = "#528cbc",
name = "Number of individuals" ) +
stat_summary(fun.y=mean, geom="point", shape=5, size=4) +
ggtitle("Trends of Salary Changes of the Selected Appointment Title\nUniversity of Michigan Library, Ann Arbor, MI 2002 - 2014")+
theme_bw() +
theme(plot.title = element_text(size = 14, face = "bold"),
axis.title.y = element_text(size = 14),
axis.title.x = element_text(size = 14)) +
scale_x_discrete("Year") +
scale_y_continuous("Median Salary ($)")
})
})
| /server.R | no_license | YeLibrarian/DataProduct_projectUMLibrarySalary | R | false | false | 1,474 | r |
# This is the server logic for a Shiny web application.
library(shiny)
library(dplyr)
library(ggplot2)
# load dataset from the current directory
UMLibrarySalarly2002_2014 <- read.csv("UMLibrarySalary2002_2014.csv")
shinyServer(function(input, output) {
output$salaryPlot <- renderPlot({
# calculating and plotting based on input$appointment_title from ui.R
salarydata <- UMLibrarySalarly2002_2014 %>%
filter(APPOINTMENT.TITLE == input$appointment_title) %>%
group_by(Year) %>%
mutate(count = n())
# draw the plot for trends of salary change for the selected appointment title
ggplot(data = salarydata, aes(x = as.factor(Year), y = APPT.ANNUAL.FTR)) +
geom_boxplot(aes(fill=count)) +
scale_fill_continuous(low = "#edf3f8", high = "#528cbc",
name = "Number of individuals" ) +
stat_summary(fun.y=mean, geom="point", shape=5, size=4) +
ggtitle("Trends of Salary Changes of the Selected Appointment Title\nUniversity of Michigan Library, Ann Arbor, MI 2002 - 2014")+
theme_bw() +
theme(plot.title = element_text(size = 14, face = "bold"),
axis.title.y = element_text(size = 14),
axis.title.x = element_text(size = 14)) +
scale_x_discrete("Year") +
scale_y_continuous("Median Salary ($)")
})
})
|
## CACHING AN INVERSE MATRIX
# Finalized 01/19/18
# For JHU Course 2, Week 2
## This function creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix in a list of four accessor closures that share the matrix
  # `x` and a memoised copy of its inverse via their enclosing environment.
  inv_cache <- NULL
  set <- function(y) {
    x <<- y              # replace the stored matrix in the enclosing scope
    inv_cache <<- NULL   # any previously cached inverse is now stale
  }
  get <- function() {
    x
  }
  setinverse <- function(solve) {
    # store a caller-computed inverse for later retrieval
    inv_cache <<- solve
  }
  getinverse <- function() {
    inv_cache
  }
  # the returned list is the object's public interface
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## This function computes the inverse of the special "matrix" returned by `makeCacheMatrix` above.
# If the inverse has already been calculated (and the matrix has not changed), then `cacheSolve` retrieves the inverse
# from the cache.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix held in `x` (an object produced by
  ## makeCacheMatrix), computing it with solve() only on a cache miss and
  ## storing the fresh result back into `x` for subsequent calls.
  cached <- x$getinverse()
  if (is.null(cached)) {
    # cache miss: compute, memoise, and return the new inverse
    fresh <- solve(x$get(), ...)
    x$setinverse(fresh)
    return(fresh)
  }
  # cache hit: announce it and hand back the stored inverse
  message("getting cached data, I'll retrieve that for you right away!")
  cached
}
## UNIT TESTS (BASED ON BELOW LINK)
# Interactive smoke test: run line by line and inspect the printed results.
# https://asitarrives.wordpress.com/2014/10/18/understanding-lexical-scoping-in-r-great-guidance-for-community-ta-in-coursera/
bmatrix = makeCacheMatrix(matrix(c(2,3,4,5), nrow=2, ncol=2)) #Example input
bmatrix$get() # Returns original matrix
cacheSolve(bmatrix) # Computes, caches, and returns matrix inverse
bmatrix$getinverse() # Returns matrix inverse
cacheSolve(bmatrix) # Returns cached matrix inverse (also prints the "cached data" message)
bmatrix$set(matrix(c(1,7,93,62), nrow=2, ncol=2)) # Modify existing matrix (this clears the cache)
cacheSolve(bmatrix) # Computes, caches, and returns new matrix inverse
bmatrix$get() # Returns matrix
bmatrix$getinverse() # Returns matrix inverse
## OTHER RESOURCES CONSULTED
# Great Guide - https://github.com/lgreski/datasciencectacontent/blob/master/markdown/rprog-breakingDownMakeVector.md
# Basic superassignment explanation - https://stat.ethz.ch/pipermail/r-help/2011-April/275905.html
| /cachematrix.R | no_license | adamgoren/ProgrammingAssignment2 | R | false | false | 2,896 | r | ## CACHING AN INVERSE MATRIX
# Finalized 01/19/18
# For JHU Course 2, Week 2
## This function creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
i <- NULL #initialized as an object to be used later
set <- function(y) {
x <<- y # superassignment to the ENCLOSING environment (e.g. PARENT or GLOBAL)
i <<- NULL #clears any previous cache which could have been created before from makeCacheMatrix()
}
get <- function() x #value is retrieved from PARENT ENVIRONMENT, because x is not defined locally within get()
setinverse <- function(solve) i <<- solve #uses the solve function to calculate the inverse matrix
getinverse <- function() i #get!
list(set = set, get = get, #assigns each function as an element within a list(), returns to PARENT ENV
setinverse = setinverse, #name vectors to enable use of $ in cacheSolve() as an EXTRACTOR
getinverse = getinverse)
}
## This function computes the inverse of the special "matrix" returned by `makeCacheMatrix` above.
# If the inverse has already been calculated (and the matrix has not changed), then `cacheSolve` retrieves the inverse
# from the cache.
cacheSolve <- function(x, ...) {
i <- x$getinverse() #attemps to retrieve an inverse from the object passed in as the argument
if(!is.null(i)) { #checks to see if the result is NULL. If result is NOT NULL, we have a valid cache to return
message("getting cached data, I'll retrieve that for you right away!")
return(i)
}
data <- x$get() #if result IS NULL, we execute this piece of code. this line gets matrix from input object
i <- solve(data, ...) #calculates the solved inverse matrix
x$setinverse(i) #uses setinverse() on input object to set the inverse in the input object
i #returns value of the inverse to the parent env by printing the object
}
## UNIT TESTS (BASED ON BELOW LINK)
# https://asitarrives.wordpress.com/2014/10/18/understanding-lexical-scoping-in-r-great-guidance-for-community-ta-in-coursera/
bmatrix = makeCacheMatrix(matrix(c(2,3,4,5), nrow=2, ncol=2)) #Example input
bmatrix$get() # Returns original matrix
cacheSolve(bmatrix) # Computes, caches, and returns matrix inverse
bmatrix$getinverse() # Returns matrix inverse
cacheSolve(bmatrix) # Returns cached matrix inverse using previously computed
bmatrix$set(matrix(c(1,7,93,62), nrow=2, ncol=2)) # Modify existing matrix
cacheSolve(bmatrix) # Computes, caches, and returns new matrix inverse
bmatrix$get() # Returns matrix
bmatrix$getinverse() # Returns matrix inverse
## OTHER RESOURCES CONSULTED
# Great Guide - https://github.com/lgreski/datasciencectacontent/blob/master/markdown/rprog-breakingDownMakeVector.md
# Basic superassignment explanation - https://stat.ethz.ch/pipermail/r-help/2011-April/275905.html
|
/getDiccionarioPaisesEn.R | no_license | ellibrorojo/percepcion_cine_audiencia_espanola | R | false | false | 337 | r | ||
#!/usr/bin/Rscript
## Compare HGT vs no-HGT simulation output side by side:
##   left  panel: mean code distance over time for both runs
##   right panel: every trajectory column's mean amino-acid distance
## Input layout: column 1 = time step, columns 2-81 = per-code trajectories,
## column 82 = mean code distance.
library("RColorBrewer")
data <- read.table("datahgt.txt",sep=",")
datan <- read.table("datanohgt.txt",sep=",")
# shared y-limit for the left panel (max() over both runs replaces the
# original two-branch if/else, which computed the same value)
MAX <- max(data[,82], datan[,82])
par(mfrow=c(1,2))
plot(datan[,1],datan[,82],col="red",main="Mean Code Distance Over Time",xlab="Time Steps",ylab="Mean Code Distance",ylim=c(0,MAX))
points(data[,1],data[,82],col="blue")
colors = c("blue","red")
names = c("HGT","No HGT")
legend("topright",names,text.col=colors,bty="n")
# column-wise minima/maxima over the trajectory columns; si / sin record the
# column whose minimum is the overall smallest (first such column on ties,
# matching the original strict-inequality scan)
traj.cols <- 2:81
hgt.mins <- vapply(traj.cols, function(k) min(data[,k]), numeric(1))
smallest <- min(hgt.mins)
si <- traj.cols[which.min(hgt.mins)]
biggest <- max(vapply(traj.cols, function(k) max(data[,k]), numeric(1)))
smallest
# BUG FIX: the original update step read `min(data[,i])` instead of
# `min(datan[,i])` here, so smallestn/sin could be computed from the wrong run
nohgt.mins <- vapply(traj.cols, function(k) min(datan[,k]), numeric(1))
smallestn <- min(nohgt.mins)
sin <- traj.cols[which.min(nohgt.mins)]
biggestn <- max(vapply(traj.cols, function(k) max(datan[,k]), numeric(1)))
smallestn
# common upper limit for the right panel
biggest <- max(biggest, biggestn)
# start the right panel with whichever run attains the smaller minimum so the
# y-range covers both
if(smallest <= smallestn)
  plot(data[,1],data[,si],type = "l",col="blue",main="Mean Amino Acid Distance Over Time",xlab="Time Steps",ylab="Mean Amino Acid Distance Between Codon Neighbors",ylim=c(smallest,biggest)) else
  plot(datan[,1],datan[,sin],type = "l",col="red",main="Mean Amino Acid Distance Over Time",xlab="Time Steps",ylab="Mean Amino Acid Distance Between Codon Neighbors",ylim=c(smallestn,biggest))
# overlay the remaining trajectories; setdiff() skips the si/sin column as the
# original pair of loops did, and also behaves correctly when si or sin sits
# at either end of the range (2:(si-1) with si==2 would have drawn column 1)
for (k in setdiff(traj.cols, si)) {
  lines(data[,1],data[,k],col="blue")
}
for (k in setdiff(traj.cols, sin)) {
  lines(datan[,1],datan[,k],col="red")
}
legend("topright",names,text.col=colors,bty="n")
| /plots.R | no_license | acollins-hed/PDT-HGT | R | false | false | 1,981 | r | #!/usr/bin/Rscript
library("RColorBrewer")
data <- read.table("datahgt.txt",sep=",")
datan <- read.table("datanohgt.txt",sep=",")
if(max(data[,82])>=max(datan[,82]))
MAX = max(data[,82]) else
MAX = max(datan[,82])
par(mfrow=c(1,2))
plot(datan[,1],datan[,82],col="red",main="Mean Code Distance Over Time",xlab="Time Steps",ylab="Mean Code Distance",ylim=c(0,MAX))
points(data[,1],data[,82],col="blue")
colors = c("blue","red")
names = c("HGT","No HGT")
legend("topright",names,text.col=colors,bty="n")
for(i in 2:81){
if(i == 2){
smallest = min(data[,i])
biggest = max(data[,i])
si = i
} else {
if(smallest > min(data[,i])){
smallest = min(data[,i])
si = i
}
if(biggest < max(data[,i]))
biggest = max(data[,i])
}
}
smallest
for(i in 2:81){
if(i == 2){
smallestn = min(datan[,i])
biggestn = max(datan[,i])
sin = i
} else {
if(smallestn > min(datan[,i])){
smallestn = min(data[,i])
sin = i
}
if(biggestn < max(datan[,i]))
biggestn = max(datan[,i])
}
}
smallestn
if(biggest < biggestn)
biggest = biggestn
if(smallest <= smallestn)
plot(data[,1],data[,si],type = "l",col="blue",main="Mean Amino Acid Distance Over Time",xlab="Time Steps",ylab="Mean Amino Acid Distance Between Codon Neighbors",ylim=c(smallest,biggest)) else
plot(datan[,1],datan[,sin],type = "l",col="red",main="Mean Amino Acid Distance Over Time",xlab="Time Steps",ylab="Mean Amino Acid Distance Between Codon Neighbors",ylim=c(smallestn,biggest))
for(i in 2:(si-1)){
lines(data[,1],data[,i],col="blue")
}
for(i in (si+1):81){
lines(data[,1],data[,i],col="blue")
}
for(i in 2:(sin-1)){
lines(datan[,1],datan[,i],col="red")
}
for(i in (sin+1):81){
lines(datan[,1],datan[,i],col="red")
}
legend("topright",names,text.col=colors,bty="n")
|
# Global objects shared by the Shiny app's ui.R and server.R.
library(shiny)
library(leaflet)
library(dplyr)
library(tidyr)
library(tidyverse)
# recommendation table, read once at app start-up from the app directory
df <- read.csv("top_10_vertival_4944.csv",header = TRUE,stringsAsFactors = FALSE)
| /app/global.R | no_license | Sapphirine/Product-Recommendation-Yelp | R | false | false | 175 | r |
library(shiny)
library(leaflet)
library(dplyr)
library(tidyr)
library(tidyverse)
df <- read.csv("top_10_vertival_4944.csv",header = TRUE,stringsAsFactors = FALSE)
|
#==========================================================================================#
#==========================================================================================#
# Leave these commands at the beginning. They will refresh the session. #
#------------------------------------------------------------------------------------------#
rm(list=ls())
graphics.off()
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
# Here is the user defined variable section. #
#------------------------------------------------------------------------------------------#
#----- Paths.  NOTE(review): the quoted "this*"/"that*" values are template ---------------#
#      placeholders, presumably substituted by a driver shell script before the file is    #
#      run -- confirm before executing this file directly.                                 #
here = "thispath" # Current directory.
there = "thatpath" # Directory where analyses/history are
srcdir = "thisrscpath" # Source directory.
outroot = "thisoutroot" # Directory for figures
#------------------------------------------------------------------------------------------#
#----- Time options. ----------------------------------------------------------------------#
monthbeg = thismontha # First month to use
yearbeg = thisyeara # First year to consider
yearend = thisyearz # Maximum year to consider
reload.data = TRUE # Should I reload partially loaded data?
sasmonth.short = c(2,5,8,11) # Months for SAS plots (short runs)
sasmonth.long = 5 # Months for SAS plots (long runs)
nyears.long = 15 # Runs longer than this are considered long runs.
n.density = 256 # Number of density points
#------------------------------------------------------------------------------------------#
#----- Name of the simulations. -----------------------------------------------------------#
myplaces = c("thispoly")
#------------------------------------------------------------------------------------------#
#----- Plot options. ----------------------------------------------------------------------#
outform = thisoutform # Formats for output file. Supported formats are:
# - "X11" - for printing on screen
# - "eps" - for postscript printing
# - "png" - for PNG printing
# - "pdf" - for PDF printing
depth = 96 # PNG resolution, in pixels per inch
paper = "letter" # Paper size, to define the plot shape
ptsz = 17 # Font size.
lwidth = 2.5 # Line width
plotgrid = TRUE # Should I plot the grid in the background?
sasfixlimits = FALSE # Use a fixed scale for size and age-structure
# plots? (FALSE will set a suitable scale for
# each plot)
fcgrid = TRUE # Include a grid on the filled contour plots?
ncolsfc = 80 # Target number of colours for filled contour.
hovgrid = TRUE # Include a grid on the Hovmoller plots?
legwhere = "topleft" # Where should I place the legend?
inset = 0.01 # Inset between legend and edge of plot region.
scalleg = 0.40 # Expand y limits by this relative amount to fit
# the legend
cex.main = 0.8 # Scale coefficient for the title
theta = 315. # Azimuth for perspective projection
phi = 30. # Vertical angle for perspective projection
ltheta = -210. # Azimuth angle for light
shade = 0.125 # Shade intensity
expz = 0.5 # Expansion factor for Z axis
cexmin = 0.5 # Minimum "head" size of the lollipop
cexmax = 3.0 # Maximum "head" size of the lollipop
ylnudge = 0.05 # Nudging factor for ylimit
ptype = "l" # Type of plot
ptyped = "p" # Type of plot
ptypeb = "o" # Type of plot
drought.mark = mydroughtmark # Put a background to highlight droughts?
drought.yeara = mydroughtyeara # First year that has drought
drought.yearz = mydroughtyearz # Last year that has drought
months.drought = mymonthsdrought # Months with drought
ibackground = mybackground # Background settings (check load_everything.r)
#------------------------------------------------------------------------------------------#
#------ Miscellaneous settings. -----------------------------------------------------------#
slz.min = -5.0 # The deepest depth that trees access water.
idbh.type = myidbhtype # Type of DBH class
# 1 -- Every 10 cm until 100cm; > 100cm
# 2 -- 0-10; 10-20; 20-35; 35-50; 50-70; > 70 (cm)
# 3 -- 0-10; 10-35; 35-55; > 55 (cm)
klight = myklight # Weighting factor for maximum carbon balance
corr.growth.storage = mycorrection # Correction factor to be applied to growth and
# storage respiration
#------------------------------------------------------------------------------------------#
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
# NO NEED TO CHANGE ANYTHING BEYOND THIS POINT UNLESS YOU ARE DEVELOPING THE CODE... #
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
#----- Loading some packages and scripts.  This sourced file presumably defines the -------#
#      non-base helpers used below (plotsize, pretty.time, locations, qapply, ...) --------#
#      TODO confirm against load.everything.r. --------------------------------------------#
source(file.path(srcdir,"load.everything.r"))
#------------------------------------------------------------------------------------------#
#----- Set how many formats we must output (outform may be a vector, e.g. c("png","pdf")). #
outform = tolower(outform)
nout = length (outform)
#------------------------------------------------------------------------------------------#
#----- Avoid unnecessary and extremely annoying beeps from locator(). ---------------------#
options(locatorBell=FALSE)
#------------------------------------------------------------------------------------------#
#----- Load observations (RData file shipped alongside the source scripts). ---------------#
obsrfile = paste(srcdir,"LBA_MIP.v8.RData",sep="/")
load(file=obsrfile)
#------------------------------------------------------------------------------------------#
#----- Define plot window size from the paper format chosen in the settings section. ------#
size = plotsize(proje=FALSE,paper=paper)
#------------------------------------------------------------------------------------------#
#---- Create the main output directory in case there is none. -----------------------------#
if (! file.exists(outroot)) dir.create(outroot)
#------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------#
# Big place loop starts here... #
#------------------------------------------------------------------------------------------#
for (place in myplaces){
#----- Retrieve default information about this place and set up some variables. --------#
thispoi = locations(where=place,here=there,yearbeg=yearbeg,yearend=yearend
,monthbeg=monthbeg)
inpref = thispoi$pathin
outmain = paste(outroot,place,sep="/")
outpref = paste(outmain,"monthly",sep="/")
lieu = thispoi$lieu
iata = thispoi$iata
suffix = thispoi$iata
yeara = thispoi$yeara
yearz = thispoi$yearz
meszz = thispoi$monz
#---------------------------------------------------------------------------------------#
#----- Create the directories in case they don't exist. --------------------------------#
if (! file.exists(outmain)) dir.create(outmain)
if (! file.exists(outpref)) dir.create(outpref)
#---------------------------------------------------------------------------------------#
#----- Decide how frequently the cohort-level variables should be saved. ---------------#
if (yearend - yearbeg + 1 <= nyears.long){
sasmonth = sasmonth.short
emean.line = TRUE
}else{
sasmonth = sasmonth.long
emean.line = FALSE
}#end if
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Find the total number of months that can be loaded this time. #
#---------------------------------------------------------------------------------------#
ntimes = (yearz-yeara-1)*12+meszz+(12-monthbeg+1)
#---------------------------------------------------------------------------------------#
   #----- Print a banner to entertain the user. -------------------------------------------#
cat(" + Post-processing output from ",lieu,"...","\n")
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Make the RData file name, then we check whether we must read the files again #
# or use the stored RData. #
#---------------------------------------------------------------------------------------#
path.data = paste(here,place,"rdata_month",sep="/")
if (! file.exists(path.data)) dir.create(path.data)
ed22.rdata = paste(path.data,paste(place,"RData",sep="."),sep="/")
if (reload.data && file.exists(ed22.rdata)){
#----- Load the modelled dataset. ---------------------------------------------------#
cat(" - Loading previous session...","\n")
load(ed22.rdata)
tresume = datum$ntimes + 1
datum = update.monthly( new.ntimes = ntimes
, old.datum = datum
, montha = monthbeg
, yeara = yeara
, inpref = inpref
, slz.min = slz.min
)#end update.monthly
}else{
cat(" - Starting new session...","\n")
tresume = 1
datum = create.monthly( ntimes = ntimes
, montha = monthbeg
, yeara = yeara
, inpref = inpref
, slz.min = slz.min
)#end create.monthly
}#end if
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Make a list with the time span of each drought so we can plot rectangles showing #
# the drought. #
#---------------------------------------------------------------------------------------#
drought = list()
year = drought.yeara
ndrought = length(months.drought)
n = 0
overyear = months.drought[1] > months.drought[ndrought]
for (year in seq(from=drought.yeara,to=drought.yearz-as.integer(overyear),by=1)){
n = n + 1
#----- Define the beginning and the end of the drought. -----------------------------#
month.start = months.drought[1]
month.end = 1 + (months.drought[ndrought] %% 12)
year.start = year
year.end = year + as.integer(month.end == 1) + 1
drought.whena = chron(dates=paste(month.start,1,year.start,sep="/"))
drought.whenz = chron(dates=paste(month.end ,1,year.end ,sep="/"))
drought[[n]] = c(drought.whena,drought.whenz)
}#end for
#----- ndrought becomes the number of blocks with drought. -----------------------------#
ndrought = length(drought)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Check whether we have anything to update. #
#---------------------------------------------------------------------------------------#
complete = tresume > ntimes
#---------------------------------------------------------------------------------------#
#----- Copy some dimensions to scalars. ------------------------------------------------#
nzg = datum$nzg
nzs = datum$nzs
ndcycle = datum$ndcycle
isoilflg = datum$isoilflg
slz = datum$slz
slxsand = datum$slxsand
slxclay = datum$slxclay
ntext = datum$ntext
soil.prop = datum$soil.prop
dslz = datum$dslz
soil.depth = datum$soil.depth
soil.dry = datum$soil.dry
soil.poro = datum$soil.poro
ka = datum$ka
kz = datum$kz
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Loop over all times in case there is anything new to be read. #
#---------------------------------------------------------------------------------------#
if (! complete){
#------------------------------------------------------------------------------------#
# This function will read the files. #
#------------------------------------------------------------------------------------#
datum = read.q.files(datum=datum,ntimes=ntimes,tresume=tresume,sasmonth=sasmonth)
#------------------------------------------------------------------------------------#
#------ Save the data to the R object. ----------------------------------------------#
cat(" + Saving data to ",basename(ed22.rdata),"...","\n")
save(datum,file=ed22.rdata)
#------------------------------------------------------------------------------------#
}#end if (! complete)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Define a suitable scale for those time series that uses datum$tomonth... #
#---------------------------------------------------------------------------------------#
whenplot6 = pretty.time(datum$tomonth,n=6)
whenplot8 = pretty.time(datum$tomonth,n=8)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Define a suitable scale for diurnal cycle... #
#---------------------------------------------------------------------------------------#
thisday = seq(from=0,to=ndcycle,by=1) * 24 / ndcycle
uplot = list()
uplot$levels = c(0,4,8,12,16,20,24)
uplot$n = 7
uplot$scale = "hours"
uplot$padj = rep(0,times=uplot$n)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Define a suitable scale for soil profile layers... #
#---------------------------------------------------------------------------------------#
znice = -pretty.log(-slz,n=8)
znice = sort(c(znice,slz[1],slz[nzg]))
sel = znice >= slz[1] & znice <= slz[nzg]
znice = znice[sel]
zat = -log(-znice)
nznice = length(znice)
znice = sprintf("%.2f",znice)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Define a suitable scale for monthly means... #
#---------------------------------------------------------------------------------------#
montmont = seq(from=1,to=12,by=1)
mplot = list()
mplot$levels = montmont
mplot$labels = capwords(mon2mmm(montmont))
mplot$n = 12
mplot$scale = "months"
mplot$padj = rep(0,times=mplot$n)
#---------------------------------------------------------------------------------------#
#----- Make some shorter versions of some variables. -----------------------------------#
mfac = datum$month
emean = datum$emean
emsqu = datum$emsqu
qmean = datum$qmean
qmsqu = datum$qmsqu
szpft = datum$szpft
lu = datum$lu
patch = datum$patch
cohort = datum$cohort
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Find the mean and standard deviation. For variables for which we did not track #
# the mean sum of squares, the standard deviation is just the standard deviation of the #
# means, otherwise we convert the mean sum of squares: #
# ____________________ _____________________________________ #
# / SUM_i[X_i - Xm]^2 / / SUM_i[X_i^2] \ 1 #
# sigma = \ / ------------------ = \ / | ------------ - Xm^2 | --------- #
# \/ N - 1 \/ \ N / 1 - 1/N #
# #
   # srnorm1 is the square root of 1 / (1 - 1/N) #
# Find the standard deviation. #
#---------------------------------------------------------------------------------------#
cat (" - Finding the monthly means...","\n")
srnorm1 = sqrt(1./(1. - 1. / datum$montable))
srnorm1[!is.finite(srnorm1)] = 0.
mmean = list()
msdev = list()
for (vname in names(emean)){
if (vname %in% names(emsqu)){
has.emsqu = any(is.finite(emsqu[[vname]]))
}else{
has.emsqu = FALSE
}#end if
#------------------------------------------------------------------------------------#
# Soil variables are multi-dimensional. Use qapply. Otherwise, check whether #
# the mean sum of squares is available or not. #
#------------------------------------------------------------------------------------#
if (vname %in% c("soil.temp","soil.water","soil.mstpot","soil.extracted")){
mmean[[vname]] = qapply(X=emean[[vname]], INDEX=mfac, DIM=1, FUN=mean, na.rm=TRUE)
msdev[[vname]] = qapply(X=emean[[vname]], INDEX=mfac, DIM=1, FUN=sd , na.rm=TRUE)
}else if (has.emsqu){
mmean[[vname]] = tapply(X=emean[[vname]], INDEX=mfac, FUN=mean, na.rm=TRUE)
mmsqu = tapply(X=emsqu[[vname]], INDEX=mfac, FUN=mean, na.rm=TRUE)
msdev[[vname]] = sqrt ( mmsqu - mmean[[vname]]^ 2 ) * srnorm1
}else{
mmean[[vname]] = tapply(X=emean[[vname]], INDEX=mfac, FUN=mean, na.rm=TRUE)
msdev[[vname]] = tapply(X=emean[[vname]], INDEX=mfac, FUN=sd , na.rm=TRUE)
}#end if
#------------------------------------------------------------------------------------#
#----- Fix the bad data. ------------------------------------------------------------#
bad.mmean = ! is.finite(mmean[[vname]])
bad.msdev = ! is.finite(msdev[[vname]])
mmean[[vname]][bad.mmean] = NA
msdev[[vname]][bad.msdev] = 0.
#------------------------------------------------------------------------------------#
}#end for
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Here we find the Mean diurnal cycle for each month, then compute the standard #
# deviation, similar to the monthly mean. #
#---------------------------------------------------------------------------------------#
cat (" - Aggregating the monthly mean of the diurnal cycle...","\n")
umean = list()
usdev = list()
for (vname in names(qmean)){
#------------------------------------------------------------------------------------#
# Soil variables are multi-dimensional. Use qapply. Otherwise, check whether #
# the mean sum of squares is available or not. #
#------------------------------------------------------------------------------------#
if (vname %in% names(qmsqu)){
umean[[vname]] = qapply(X=qmean[[vname]], INDEX=mfac, DIM=1,FUN=mean, na.rm=TRUE)
umsqu = qapply(X=qmsqu[[vname]], INDEX=mfac, DIM=1,FUN=mean, na.rm=TRUE)
usdev[[vname]] = sqrt ( umsqu - umean[[vname]]^ 2 ) * srnorm1
}else{
umean[[vname]] = qapply(X=qmean[[vname]], INDEX=mfac, DIM=1,FUN=mean, na.rm=TRUE)
usdev[[vname]] = qapply(X=qmean[[vname]], INDEX=mfac, DIM=1,FUN=sd , na.rm=TRUE)
}#end if
#------------------------------------------------------------------------------------#
#----- Fix the bad data. ------------------------------------------------------------#
bad.umean = ! is.finite(umean[[vname]])
bad.usdev = ! is.finite(usdev[[vname]])
umean[[vname]][bad.umean] = NA
usdev[[vname]][bad.usdev] = 0.
#------------------------------------------------------------------------------------#
}#end for
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Estimate NPP and NEE standard deviation. #
#---------------------------------------------------------------------------------------#
usdev$nee = sqrt(usdev$cflxca^2 + usdev$cflxst^2 )
usdev$reco = sqrt(usdev$plant.resp^2 + usdev$het.resp^2 )
usdev$evap = sqrt(usdev$wflxgc^2 + usdev$wflxlc^2 + usdev$wflxwc^2 )
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Remove all elements of the DBH/PFT class that do not have a single valid cohort #
# at any given time. #
#---------------------------------------------------------------------------------------#
empty = is.na(szpft$nplant) | szpft$nplant == 0
for (vname in names(szpft)) szpft[[vname]][empty] = NA
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Replace the mortality and recruitment exponential rates by the "interests" rates. #
#---------------------------------------------------------------------------------------#
szpft$mort = 100. * (1.0 - exp(- szpft$mort ) )
szpft$dimort = 100. * (1.0 - exp(- szpft$dimort ) )
szpft$ncbmort = 100. * (1.0 - exp(- szpft$ncbmort ) )
szpft$recrpft = 100. * ( exp( szpft$recr ) - 1.0)
szpft$agb.mort = 100. * (1.0 - exp(- szpft$agb.mort ) )
szpft$agb.dimort = 100. * (1.0 - exp(- szpft$agb.dimort ) )
szpft$agb.ncbmort = 100. * (1.0 - exp(- szpft$agb.ncbmort ) )
szpft$agb.recrpft = 100. * ( exp( szpft$agb.recr ) - 1.0)
szpft$bsa.mort = 100. * (1.0 - exp(- szpft$bsa.mort ) )
szpft$bsa.dimort = 100. * (1.0 - exp(- szpft$bsa.dimort ) )
szpft$bsa.ncbmort = 100. * (1.0 - exp(- szpft$bsa.ncbmort ) )
szpft$bsa.recrpft = 100. * ( exp( szpft$bsa.recr ) - 1.0)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Find the patch density function for all patch-level data. #
#---------------------------------------------------------------------------------------#
cat (" - Finding the distribution function of patch properties...","\n")
patchpdf = list()
for (pp in 1:nplotpatch){
this = plotpatch[[pp]]
vname = this$vnam
col.scheme = get(this$col.scheme)(n=ncolsfc)
emean.area = patch$area
emean.vname = patch[[vname]]
mmean.area = tapply(X=emean.area ,INDEX=mfac,FUN=unlist)
mmean.vname = tapply(X=emean.vname,INDEX=mfac,FUN=unlist)
#----- Find the range for which we find the density function. -----------------------#
low.vname = min(unlist(emean.vname),na.rm=TRUE)
high.vname = max(unlist(emean.vname),na.rm=TRUE)
#------------------------------------------------------------------------------------#
#----- Find the density function for each time. -------------------------------------#
edfun.now = mapply( FUN = density.safe
, x = emean.vname
, weights = emean.area
, MoreArgs = list(n=n.density,from=low.vname,to=high.vname)
)#end mapply
mdfun.now = mapply( FUN = density.safe
, x = mmean.vname
, weights = mmean.area
, MoreArgs = list(n=n.density,from=low.vname,to=high.vname)
)#end mapply
#------------------------------------------------------------------------------------#
#----- Save the density function. ---------------------------------------------------#
edfun = list()
edfun$x = chron(datum$when)
edfun$y = seq(from=low.vname,to=high.vname,length.out=n.density)
edfun$z = t(sapply(X=edfun.now["y",],FUN=cbind))
#------------------------------------------------------------------------------------#
#----- Save the density function. ---------------------------------------------------#
mdfun = list()
mdfun$x = montmont
mdfun$y = seq(from=low.vname,to=high.vname,length.out=n.density)
mdfun$z = t(sapply(X=mdfun.now["y",],FUN=cbind))
#------------------------------------------------------------------------------------#
#----- Remove tiny values (even with log scale values can be very hard to see. ------#
bye = is.finite(edfun$z) & edfun$z < 1.e-6 * max(unlist(edfun$z),na.rm=TRUE)
edfun$z[bye] = NA
#------------------------------------------------------------------------------------#
#----- Remove tiny values (even with log scale values can be very hard to see. ------#
bye = is.finite(mdfun$z) & mdfun$z < 1.e-6 * max(unlist(mdfun$z),na.rm=TRUE)
mdfun$z[bye] = NA
#------------------------------------------------------------------------------------#
patchpdf[[vname]] = list(edensity=edfun,mdensity=mdfun)
}#end for
#---------------------------------------------------------------------------------------#
#----- Find which PFTs, land uses and transitions we need to consider ------------------#
pftave = apply( X = szpft$agb[,ndbh+1,]
, MARGIN = 2
, FUN = mean
, na.rm = TRUE
)#end apply
luave = apply( X = lu$agb
, MARGIN = 2
, FUN = mean
, na.rm = TRUE
)#end apply
distave = apply(X=lu$dist,MARGIN=c(2,3),FUN=mean)
selpft = is.finite(pftave ) & pftave > 0.
sellu = is.finite(luave ) & luave > 0.
seldist = is.finite(distave) & distave > 0.
n.selpft = sum(selpft )
n.sellu = sum(sellu )
n.seldist = sum(seldist)
#---------------------------------------------------------------------------------------#
#=======================================================================================#
#=======================================================================================#
#=======================================================================================#
# Plotting section begins here... #
#---------------------------------------------------------------------------------------#
cat (" - Plotting figures...","\n")
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Time series by PFT. #
#---------------------------------------------------------------------------------------#
for (v in 1:ntspftdbh){
thistspft = tspftdbh[[v]]
vnam = thistspft$vnam
description = thistspft$desc
unit = thistspft$e.unit
plog = thistspft$plog
plotit = thistspft$pft
#----- Check whether the user wants to have this variable plotted. ------------------#
if (plotit && any(selpft)){
#---------------------------------------------------------------------------------#
# Check whether the time series directory exists. If not, create it. #
#---------------------------------------------------------------------------------#
outdir = paste(outpref,"tspft",sep="/")
if (! file.exists(outdir)) dir.create(outdir)
cat(" + ",description," time series for all PFTs...","\n")
#----- Load variable -------------------------------------------------------------#
thisvar = szpft[[vnam]][,ndbh+1,]
if (plog){
#----- Eliminate non-positive values in case it is a log plot. ----------------#
thisvar[thisvar <= 0] = NA
}#end if
#---------------------------------------------------------------------------------#
#----- Loop over output formats. -------------------------------------------------#
for (o in 1:nout){
#----- Open file. -------------------------------------------------------------#
fichier = paste(outdir,"/",vnam,"-",suffix,".",outform[o],sep="")
if(outform[o] == "x11"){
X11(width=size$width,height=size$height,pointsize=ptsz)
}else if(outform[o] == "png"){
png(filename=fichier,width=size$width*depth,height=size$height*depth
,pointsize=ptsz,res=depth)
}else if(outform[o] == "eps"){
postscript(file=fichier,width=size$width,height=size$height
,pointsize=ptsz,paper=size$paper)
}else if(outform[o] == "pdf"){
pdf(file=fichier,onefile=FALSE
,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
}#end if
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# Find the limit, make some room for the legend, and in case the field is #
# a constant, nudge the limits so the plot command will not complain. #
#------------------------------------------------------------------------------#
xlimit = pretty.xylim(u=as.numeric(datum$tomonth),fracexp=0.0,is.log=FALSE)
ylimit = pretty.xylim(u=thisvar[,selpft] ,fracexp=0.0,is.log=plog )
if (plog){
xylog = "y"
ydrought = c( exp(ylimit[1] * sqrt(ylimit[1]/ylimit[2]))
, exp(ylimit[2] * sqrt(ylimit[2]/ylimit[1]))
)#end c
}else{
xylog = ""
ydrought = c(ylimit[1] - 0.5 * diff(ylimit), ylimit[2] + 0.5 * diff(ylimit))
}#end if
#------------------------------------------------------------------------------#
#----- Plot settings. ---------------------------------------------------------#
letitre = paste(description,lieu,sep=" - ")
ley = desc.unit(desc=description,unit=unit)
cols = pft$colour[selpft]
legs = pft$name [selpft]
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# Split the plot into two windows. #
#------------------------------------------------------------------------------#
par(par.user)
layout(mat=rbind(2,1),heights=c(5,1))
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# First plot: legend. #
#------------------------------------------------------------------------------#
par(mar=c(0.1,4.6,0.1,2.1))
plot.new()
plot.window(xlim=c(0,1),ylim=c(0,1))
legend( x = "bottom"
, inset = 0.0
, legend = legs
, col = cols
, lwd = lwidth
, ncol = min(pretty.box(n.selpft)$ncol,3)
, title = expression(bold("Plant Functional Type"))
)#end legend
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# Main plot. #
#------------------------------------------------------------------------------#
par(mar=c(4.1,4.6,4.1,2.1))
plot.new()
plot.window(xlim=xlimit,ylim=ylimit,log=xylog)
axis(side=1,at=whenplot8$levels,labels=whenplot8$labels,padj=whenplot8$padj)
axis(side=2,las=1)
box()
title(main=letitre,xlab="Year",ylab=ley,cex.main=0.7)
if (drought.mark){
for (n in 1:ndrought){
rect(xleft = drought[[n]][1],ybottom = ydrought[1]
,xright = drought[[n]][2],ytop = ydrought[2]
,col = grid.colour,border=NA)
}#end for
}#end if
#----- Plot grid. -------------------------------------------------------------#
if (plotgrid){
abline(v=whenplot8$levels,h=axTicks(side=2),col=grid.colour,lty="solid")
}#end if
#----- Plot lines. ------------------------------------------------------------#
for (n in 1:(npft+1)){
if (selpft[n]){
lines(datum$tomonth,thisvar[,n],type="l",col=pft$colour[n],lwd=lwidth)
}#end if
}#end for
#------------------------------------------------------------------------------#
#----- Close the device. ------------------------------------------------------#
if (outform[o] == "x11"){
locator(n=1)
dev.off()
}else{
dev.off()
}#end if
dummy=clean.tmp()
#------------------------------------------------------------------------------#
} #end for outform
}#end if (tseragbpft)
} #end for tseries
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Time series by DBH, by PFT. #
#---------------------------------------------------------------------------------------#
#----- Find the PFTs to plot. ----------------------------------------------------------#
#----- Find the PFTs to plot: those with at least one plant at any time. ---------------#
pftuse = which(apply(X=szpft$nplant,MARGIN=3,FUN=sum,na.rm=TRUE) > 0)
# Drop index npft+1, which holds the all-PFT total rather than an actual PFT.
pftuse = pftuse[pftuse != (npft+1)]
for (v in 1:ntspftdbh){
# Unpack the settings for this variable (name, label, units, log flag, plot flag).
thistspftdbh = tspftdbh[[v]]
vnam = thistspftdbh$vnam
description = thistspftdbh$desc
unit = thistspftdbh$e.unit
plog = thistspftdbh$plog
plotit = thistspftdbh$pftdbh
#----- Load variable ----------------------------------------------------------------#
thisvar = szpft[[vnam]]
if (plog){
# Log-scale plot: non-positive values are undefined on a log axis, discard them.
xylog="y"
badlog = is.finite(thisvar) & thisvar <= 0
thisvar[badlog] = NA
}else{
xylog=""
}#end if
#----- Check whether the user wants to have this variable plotted. ------------------#
if (plotit && length(pftuse) > 0 && any(is.finite(thisvar))){
#---------------------------------------------------------------------------------#
#     Check whether the time series directory exists.  If not, create it.         #
#  (One sub-directory per variable underneath "tsdbh".)                           #
#---------------------------------------------------------------------------------#
outdir = paste(outpref,"tsdbh",sep="/")
if (! file.exists(outdir)) dir.create(outdir)
outvar = paste(outdir,vnam,sep="/")
if (! file.exists(outvar)) dir.create(outvar)
#---------------------------------------------------------------------------------#
cat("      +",description,"time series for DBH class...","\n")
#---------------------------------------------------------------------------------#
#     Find the limit, make some room for the legend, and in case the field is a   #
# constant, nudge the limits so the plot command will not complain.               #
#---------------------------------------------------------------------------------#
xlimit = pretty.xylim(u=as.numeric(datum$tomonth),fracexp=0.0,is.log=FALSE)
ylimit = pretty.xylim(u=thisvar[,,pftuse] ,fracexp=0.0,is.log=plog )
if (plog){
xylog = "y"
# Drought rectangles must extend beyond the data range; in log space the
# padding is multiplicative rather than additive.
ydrought = c( exp(ylimit[1] * sqrt(ylimit[1]/ylimit[2]))
, exp(ylimit[2] * sqrt(ylimit[2]/ylimit[1]))
)#end c
}else{
xylog = ""
ydrought = c(ylimit[1] - 0.5 * diff(ylimit), ylimit[2] + 0.5 * diff(ylimit))
}#end if
#---------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------#
#      Loop over plant functional types.  One figure per PFT, with one line per   #
# DBH class.                                                                      #
#---------------------------------------------------------------------------------#
for (p in pftuse){
pftlab = paste("pft-",sprintf("%2.2i",p),sep="")
cat("        - ",pft$name[p],"\n")
#----- Loop over output formats. ----------------------------------------------#
for (o in 1:nout){
#----- Open file. ----------------------------------------------------------#
fichier = paste(outvar,"/",vnam,"-",pftlab,"-",suffix,".",outform[o],sep="")
if(outform[o] == "x11"){
X11(width=size$width,height=size$height,pointsize=ptsz)
}else if(outform[o] == "png"){
png(filename=fichier,width=size$width*depth,height=size$height*depth
,pointsize=ptsz,res=depth)
}else if(outform[o] == "eps"){
postscript(file=fichier,width=size$width,height=size$height
,pointsize=ptsz,paper=size$paper)
}else if(outform[o] == "pdf"){
pdf(file=fichier,onefile=FALSE
,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
}#end if
#---------------------------------------------------------------------------#
#----- Plot annotation. ---------------------------------------------------#
letitre = paste(description,pft$name[p],lieu,sep=" - ")
ley = desc.unit(desc=description,unit=unit)
#---------------------------------------------------------------------------#
#---------------------------------------------------------------------------#
#     Split the plot into two windows: legend strip at the bottom, main     #
# panel on top (layout matrix rbind(2,1) draws panel 1 last, at bottom).    #
#---------------------------------------------------------------------------#
par(par.user)
layout(mat=rbind(2,1),heights=c(5,1))
#---------------------------------------------------------------------------#
#---------------------------------------------------------------------------#
#     First plot: legend.                                                   #
#---------------------------------------------------------------------------#
par(mar=c(0.1,4.6,0.1,2.1))
plot.new()
plot.window(xlim=c(0,1),ylim=c(0,1))
legend( x = "bottom"
, inset = 0.0
, bg = background
, legend = dbhnames
, col = dbhcols
, ncol = min(pretty.box(ndbh+1)$ncol,3)
, title = expression(bold("DBH class"))
, lwd = lwidth
)#end legend
#---------------------------------------------------------------------------#
#---------------------------------------------------------------------------#
#     Main plot.                                                            #
#---------------------------------------------------------------------------#
par(mar=c(4.1,4.6,4.1,2.1))
plot.new()
plot.window(xlim=xlimit,ylim=ylimit,log=xylog)
axis(side=1,at=whenplot8$levels,labels=whenplot8$labels,padj=whenplot8$padj)
axis(side=2,las=1)
box()
title(main=letitre,xlab="Year",ylab=ley,cex.main=0.7)
# Shade drought periods (drawn first so lines appear on top of them).
if (drought.mark){
for (n in 1:ndrought){
rect(xleft = drought[[n]][1],ybottom = ydrought[1]
,xright = drought[[n]][2],ytop = ydrought[2]
,col = grid.colour,border=NA)
}#end for
}#end if
#----- Plot grid. ----------------------------------------------------------#
if (plotgrid){
abline(v=whenplot8$levels,h=axTicks(side=2),col=grid.colour,lty="solid")
}#end if
#----- Plot lines (one line per DBH class, plus the all-DBH total). --------#
for (d in seq(from=1,to=ndbh+1,by=1)){
lines(datum$tomonth,thisvar[,d,p],type="l",col=dbhcols[d],lwd=lwidth)
}#end for
#---------------------------------------------------------------------------#
#----- Close the device. ---------------------------------------------------#
if (outform[o] == "x11"){
locator(n=1)
dev.off()
}else{
dev.off()
}#end if
dummy=clean.tmp()
#---------------------------------------------------------------------------#
}#end for outform
#------------------------------------------------------------------------------#
}#end for (p in pftuse)
#---------------------------------------------------------------------------------#
}#end if (plotit && length(pftuse) > 0)
#------------------------------------------------------------------------------------#
} #end for (v in 1:ntspftdbh)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Plot the comparison between observations and model. #
#---------------------------------------------------------------------------------------#
#----- Compare the monthly-mean time series between model output and observations. -----#
cat(" + Comparisons of time series (model vs. observations)...","\n")
for (cc in seq_len(ncompmodel)){
#----- Retrieve variable information from the list. ---------------------------------#
compnow     = compmodel[[cc]]
vname       = compnow$vnam
description = compnow$desc
unit        = compnow$unit
lcolours    = compnow$colour
llwd        = compnow$lwd
ltype       = compnow$type
plog        = compnow$plog
legpos      = compnow$legpos
plotit      = compnow$emean
#------------------------------------------------------------------------------------#
#     Check whether there are observations for this particular site.  A few sites    #
# share a tower data set, so map their IATA codes onto the observation object name.  #
#------------------------------------------------------------------------------------#
if (iata %in% c("mao","bdf")){
obsnow = "obs.m34"
}else if(iata == "stm"){
obsnow = "obs.s67"
}else if(iata == "rao"){
obsnow = "obs.pdg"
}else if(iata == "jpr"){
obsnow = "obs.fns"
}else if(iata == "btr"){
obsnow = "obs.s77"
}else{
obsnow = paste("obs.",iata,sep="")
}#end if
#------------------------------------------------------------------------------------#
#      Last check to see if we should plot it or not: the observation object must    #
# exist in the workspace, and the modelled period must overlap the observed one.     #
#------------------------------------------------------------------------------------#
plotit = plotit && obsnow %in% ls()
if (plotit){
thisobs = get(obsnow)
obswhen = thisobs$tomonth
sel     = datum$tomonth >= min(obswhen) & datum$tomonth <= max(obswhen)
plotit  = any(sel)
}#end if
#------------------------------------------------------------------------------------#
#      Enter here only if there is any overlap of time between observations and      #
# model.                                                                             #
#------------------------------------------------------------------------------------#
if (plotit){
#---------------------------------------------------------------------------------#
#      Copy the observations to a scratch variable.                               #
#---------------------------------------------------------------------------------#
mnvar   = paste("emean",vname,sep=".")
obsmean = thisobs[[mnvar]]
#---------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------#
#      Check whether the time series directory exists.  If not, create it.        #
#---------------------------------------------------------------------------------#
outdir = paste(outpref,"compemean",sep="/")
if (! file.exists(outdir)) dir.create(outdir)
cat("   - ",description," comparison...","\n")
#---------------------------------------------------------------------------------#
#----- Restrict the model series to the period covered by the observations. ------#
thiswhen = datum$tomonth [sel]
thismean = emean[[vname]][sel]
#---------------------------------------------------------------------------------#
#----- Find the plot range (nudged so constant fields do not break plot.window). -#
xlimit = range(thiswhen)
ylimit = pretty.xylim(u=c(thismean,obsmean),fracexp=0.0,is.log=FALSE)
#---------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------#
#      Find the nice scale for time.                                              #
#---------------------------------------------------------------------------------#
whenplote = pretty.time(obswhen,n=8)
#---------------------------------------------------------------------------------#
#----- Plot annotation. ----------------------------------------------------------#
letitre = paste(description," - ",lieu,"\n","Monthly mean",sep="")
ley     = desc.unit(desc=description,unit=unit)
#---------------------------------------------------------------------------------#
#----- Loop over formats. --------------------------------------------------------#
for (o in seq_len(nout)){
fichier = paste(outdir,"/",vname,".",outform[o],sep="")
if(outform[o] == "x11"){
X11(width=size$width,height=size$height,pointsize=ptsz)
}else if(outform[o] == "png"){
png(filename=fichier,width=size$width*depth,height=size$height*depth
,pointsize=ptsz,res=depth)
}else if(outform[o] == "eps"){
postscript(file=fichier,width=size$width,height=size$height
,pointsize=ptsz,paper=size$paper)
}else if(outform[o] == "pdf"){
pdf(file=fichier,onefile=FALSE
,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
}#end if
#----- Split window into two (legend strip at bottom, main panel above). ------#
par(par.user)
layout(mat=rbind(2,1),heights=c(5,1))
#------------------------------------------------------------------------------#
#----- First plot: the legend. ------------------------------------------------#
par(mar=c(0.1,4.6,0.1,2.1))
plot.new()
plot.window(xlim=c(0,1),ylim=c(0,1))
legend(x="bottom",inset=0.0,legend=c("Model","Observation")
,col=lcolours,lwd=llwd,cex=cex.ptsz,pch=16,xpd=TRUE)
#------------------------------------------------------------------------------#
#----- Second panel: the actual plot. -----------------------------------------#
par(mar=c(4.1,4.6,4.1,2.1))
plot.new()
plot.window(xlim=xlimit,ylim=ylimit,log=plog)
if (plotgrid){
abline(v=whenplote$levels,h=axTicks(side=2),col=grid.colour,lty="solid")
}#end if
points(x=thiswhen,y=thismean,col=lcolours[1],lwd=llwd[1],type=ltype
,pch=16,cex=1.0)
points(x=obswhen,y=obsmean ,col=lcolours[2],lwd=llwd[2],type=ltype
,pch=16,cex=1.0)
box()
axis(side=1,at=whenplote$levels,labels=whenplote$labels,padj=whenplote$padj)
axis(side=2,las=1)
title(main=letitre,xlab="Time",ylab=ley,cex.main=cex.main)
#------------------------------------------------------------------------------#
#----- Close plot. ------------------------------------------------------------#
if (outform[o] == "x11"){
locator(n=1)
dev.off()
}else{
dev.off()
}#end if
dummy = clean.tmp()
#------------------------------------------------------------------------------#
}#end for outform
#---------------------------------------------------------------------------------#
}#end if plotit
#------------------------------------------------------------------------------------#
}#end for (cc in seq_len(ncompmodel))
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Plot the comparison between observations and model. #
#---------------------------------------------------------------------------------------#
#----- Compare the mean annual cycle (monthly means) between model and observations. ---#
cat(" + Comparisons of monthly means (model vs. observations)...","\n")
for (cc in seq_len(ncompmodel)){
#----- Retrieve variable information from the list. ---------------------------------#
compnow     = compmodel[[cc]]
vname       = compnow$vnam
description = compnow$desc
unit        = compnow$unit
plotsd      = compnow$plotsd
lcolours    = compnow$colour
errcolours  = compnow$errcol
angle       = compnow$angle
dens        = compnow$dens
llwd        = compnow$lwd
shwd        = compnow$shwd
ltype       = compnow$type
plog        = compnow$plog
legpos      = compnow$legpos
plotit      = compnow$mmean
#------------------------------------------------------------------------------------#
#     Check whether there are observations for this particular site.  A few sites    #
# share a tower data set, so map their IATA codes onto the observation object name.  #
#------------------------------------------------------------------------------------#
if (iata %in% c("mao","bdf")){
obsnow = "obs.m34"
}else if(iata == "stm"){
obsnow = "obs.s67"
}else if(iata == "rao"){
obsnow = "obs.pdg"
}else if(iata == "jpr"){
obsnow = "obs.fns"
}else if(iata == "btr"){
obsnow = "obs.s77"
}else{
obsnow = paste("obs.",iata,sep="")
}#end if
# Only plot when the observation object actually exists in the workspace.
plotit = plotit && obsnow %in% ls()
if (plotit){
#---------------------------------------------------------------------------------#
#      Copy the observations to a scratch variable.                               #
#---------------------------------------------------------------------------------#
thisobs = get(obsnow)
mnvar   = paste("mmean",vname,sep=".")
sdvar   = paste("msdev",vname,sep=".")
obsmean = thisobs[[mnvar]]
obssdev = thisobs[[sdvar]]
#---------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------#
#      Some observations do not have enough measurements to make a full year.  If #
# this is the case, then we must split the observations into smaller intervals so #
# the polygon works.  In case no observation is available, make the vectors NULL  #
# so we will not plot observations at all.                                        #
#---------------------------------------------------------------------------------#
if (all(is.na(obsmean+obssdev))){
obs.x     = NULL
obs.ylow  = NULL
obs.yhigh = NULL
}else{
#------ Find the periods with continous data. ---------------------------------#
ok        = is.finite(obsmean+obssdev)
obs.x     = montmont[ok]
obs.ylow  = obsmean [ok] - obssdev[ok]
obs.yhigh = obsmean [ok] + obssdev[ok]
#------------------------------------------------------------------------------#
}#end if
#---------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------#
#      Check whether the time series directory exists.  If not, create it.        #
#---------------------------------------------------------------------------------#
outdir = paste(outpref,"compmmean",sep="/")
if (! file.exists(outdir)) dir.create(outdir)
cat("   - ",description," comparison...","\n")
#---------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------#
#      Define the number of layers.  Some variables have no standard deviation in #
# the model, so make it zero if this is the case.                                 #
#---------------------------------------------------------------------------------#
thismean = mmean[[vname]]
if (length(msdev[[vname]]) == 0){
thissdev = 0. * thismean
}else{
thissdev = msdev[[vname]]
}#end if
mod.x     = montmont
mod.ylow  = thismean - thissdev
mod.yhigh = thismean + thissdev
#---------------------------------------------------------------------------------#
#----- Find the plot range (include the SD envelopes only when they are drawn). --#
xlimit = range(montmont)
if (plotsd){
ylimit = c(mod.ylow,mod.yhigh,obs.ylow,obs.yhigh)
}else{
ylimit = c(thismean,obsmean)
}#end if
ylimit = pretty.xylim(u=ylimit,fracexp=0.0,is.log=FALSE)
#---------------------------------------------------------------------------------#
#------ Plot annotation. ---------------------------------------------------------#
letitre = paste(description," - ",lieu,"\n","Monthly mean",sep="")
ley     = desc.unit(desc=description,unit=unit)
#---------------------------------------------------------------------------------#
#----- Loop over formats. --------------------------------------------------------#
for (o in seq_len(nout)){
fichier = paste(outdir,"/",vname,".",outform[o],sep="")
if(outform[o] == "x11"){
X11(width=size$width,height=size$height,pointsize=ptsz)
}else if(outform[o] == "png"){
png(filename=fichier,width=size$width*depth,height=size$height*depth
,pointsize=ptsz,res=depth)
}else if(outform[o] == "eps"){
postscript(file=fichier,width=size$width,height=size$height
,pointsize=ptsz,paper=size$paper)
}else if(outform[o] == "pdf"){
pdf(file=fichier,onefile=FALSE
,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
}#end if
#----- Split window into two (legend strip at bottom, main panel above). ------#
par(par.user)
layout(mat=rbind(2,1),heights=c(5,1))
#------------------------------------------------------------------------------#
#----- First plot: the legend. ------------------------------------------------#
par(mar=c(0.1,4.6,0.1,2.1))
plot.new()
plot.window(xlim=c(0,1),ylim=c(0,1))
if (plotsd){
legend(x="bottom",inset=0.0,legend=c("Model","Observation")
,fill=errcolours,angle=angle,density=dens,lwd=llwd,col=lcolours
,bg=background,title="Shaded areas = 1 SD",cex=cex.ptsz
,xpd=TRUE,pch=16)
}else{
legend(x="bottom",inset=0.0,legend=c("Model","Observation")
,col=lcolours,lwd=llwd,cex=cex.ptsz,xpd=TRUE,pch=16)
}#end if
#------------------------------------------------------------------------------#
#----- Second panel: the actual plot. -----------------------------------------#
par(mar=c(4.1,4.6,4.1,2.1))
plot.new()
plot.window(xlim=xlimit,ylim=ylimit,log=plog)
if (plotgrid){
abline(v=mplot$levels,h=axTicks(side=2),col=grid.colour,lty="solid")
}#end if
#------------------------------------------------------------------------------#
#      Shaded 1-SD envelopes.  Finite points only; model and observation        #
# polygons are concatenated with an NA separator so both are drawn in a        #
# single polygon() call.                                                       #
#------------------------------------------------------------------------------#
if (plotsd){
if (is.null(obs.x)){
mod.x.poly = c(mod.x,rev(mod.x))
mod.y.poly = c(mod.ylow,rev(mod.yhigh))
mod.keep   = is.finite(mod.y.poly)
err.x      = mod.x.poly[mod.keep]
err.y      = mod.y.poly[mod.keep]
polygon(x=err.x,y=err.y,col=errcolours[1],angle=angle[1],density=dens[1]
,lty="solid",lwd=shwd[1])
}else{
mod.x.poly = c(mod.x,rev(mod.x))
mod.y.poly = c(mod.ylow,rev(mod.yhigh))
mod.keep   = is.finite(mod.y.poly)
obs.x.poly = c(obs.x,rev(obs.x))
obs.y.poly = c(obs.ylow,rev(obs.yhigh))
obs.keep   = is.finite(obs.y.poly)
err.x      = c(mod.x.poly[mod.keep],NA,obs.x.poly[obs.keep])
err.y      = c(mod.y.poly[mod.keep],NA,obs.y.poly[obs.keep])
polygon(x=err.x,y=err.y,col=errcolours,angle=angle,density=dens
,lty="solid",lwd=shwd)
}#end if
}#end if
points(x=montmont,y=thismean,col=lcolours[1],lwd=llwd[1],type=ltype
,pch=16,cex=1.0)
points(x=montmont,y=obsmean ,col=lcolours[2],lwd=llwd[2],type=ltype
,pch=16,cex=1.0)
box()
title(main=letitre,xlab="Time",ylab=ley,cex.main=cex.main)
axis(side=1,at=mplot$levels,labels=mplot$labels,padj=mplot$padj)
axis(side=2,las=1)
#----- Close the device. ------------------------------------------------------#
if (outform[o] == "x11"){
locator(n=1)
dev.off()
}else{
dev.off()
}#end if
dummy = clean.tmp()
} #end for outform
}#end if plotit
}#end for (cc in seq_len(ncompmodel))
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Plot the comparison between observations and model. #
#---------------------------------------------------------------------------------------#
#----- Compare the mean diurnal cycle (one panel per month) with observations. ---------#
cat(" * Comparisons of mean diurnal cycle (model vs. observations)...","\n")
for (cc in seq_len(ncompmodel)){
#----- Retrieve variable information from the list. ---------------------------------#
compnow     = compmodel[[cc]]
vname       = compnow$vnam
description = compnow$desc
unit        = compnow$unit
plotsd      = compnow$plotsd
lcolours    = compnow$colour
errcolours  = compnow$errcol
angle       = compnow$angle
dens        = compnow$dens
llwd        = compnow$lwd
shwd        = compnow$shwd
ltype       = compnow$type
plog        = compnow$plog
legpos      = compnow$legpos
plotit      = compnow$qmean
#------------------------------------------------------------------------------------#
#     Check whether there are observations for this particular site.  A few sites    #
# share a tower data set, so map their IATA codes onto the observation object name.  #
#------------------------------------------------------------------------------------#
if (iata %in% c("mao","bdf")){
obsnow = "obs.m34"
}else if(iata == "stm"){
obsnow = "obs.s67"
}else if(iata == "rao"){
obsnow = "obs.pdg"
}else if(iata == "jpr"){
obsnow = "obs.fns"
}else if(iata == "btr"){
obsnow = "obs.s77"
}else{
obsnow = paste("obs.",iata,sep="")
}#end if
# Only plot when the observation object actually exists in the workspace.
plotit = plotit && obsnow %in% ls()
if (plotit){
#---------------------------------------------------------------------------------#
#      Copy the observations to a scratch variable.                               #
#---------------------------------------------------------------------------------#
thisobs = get(obsnow)
mnvar   = paste("qmean",vname,sep=".")
sdvar   = paste("qsdev",vname,sep=".")
obsmean = thisobs[[mnvar]]
obssdev = thisobs[[sdvar]]
#----- Append 1st hour after the last so the cycle closes at the right edge. -----#
obsmean = cbind(obsmean,obsmean[,1])
obssdev = cbind(obssdev,obssdev[,1])
#---------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------#
#      Some observations do not have enough measurements to make a full year.  If #
# this is the case, then we must split the observations into smaller intervals so #
# the polygon works.  In case no observation is available, make the vectors NULL  #
# so we will not plot observations at all.                                        #
#---------------------------------------------------------------------------------#
obs.ylow  = obsmean - obssdev
obs.yhigh = obsmean + obssdev
#---------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------#
#      Check whether the time series directory exists.  If not, create it.        #
#---------------------------------------------------------------------------------#
outdir = paste(outpref,"compdcyc",sep="/")
if (! file.exists(outdir)) dir.create(outdir)
outtheme = paste(outdir,vname,sep="/")
if (! file.exists(outtheme)) dir.create(outtheme)
cat("   + ",description," comparison...","\n")
#---------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------#
#      Define the number of layers.  Some variables have no standard deviation in #
# the model, so make it zero if this is the case.  We also append the last hour   #
# before the first one so 00 UTC appears in the left.                             #
#---------------------------------------------------------------------------------#
thismean = umean[[vname]]
thismean = cbind(thismean[,ndcycle],thismean)
if (length(usdev[[vname]]) == 0){
thissdev = 0. * thismean
}else{
thissdev = usdev[[vname]]
thissdev = cbind(thissdev[,ndcycle],thissdev)
}#end if
mod.ylow  = thismean - thissdev
mod.yhigh = thismean + thissdev
#---------------------------------------------------------------------------------#
#----- Find the plot range (include the SD envelopes only when they are drawn). --#
xlimit = range(thisday)
if (plotsd){
ylimit = c(mod.ylow,mod.yhigh,obs.ylow,obs.yhigh)
}else{
ylimit = c(thismean,obsmean)
}#end if
ylimit = pretty.xylim(u=ylimit,fracexp=0.0,is.log=FALSE)
#---------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------#
#      Loop over all months, one figure per month.                                #
#---------------------------------------------------------------------------------#
for (pmon in 1:12){
cmon    = sprintf("%2.2i",pmon)
namemon = mlist[pmon]
#------------------------------------------------------------------------------#
#      Plot annotation.                                                        #
#------------------------------------------------------------------------------#
cat("     > ",description," time series - ",namemon,"...","\n")
letitre = paste(description," - ",lieu,"\n"
,"Mean diurnal cycle - ",namemon,sep="")
ley     = desc.unit(desc=description,unit=unit)
#------------------------------------------------------------------------------#
#----- Loop over formats. -----------------------------------------------------#
for (o in seq_len(nout)){
fichier = paste(outtheme,"/",vname,"-",cmon,".",outform[o]
,sep="")
if(outform[o] == "x11"){
X11(width=size$width,height=size$height,pointsize=ptsz)
}else if(outform[o] == "png"){
png(filename=fichier,width=size$width*depth,height=size$height*depth
,pointsize=ptsz,res=depth)
}else if(outform[o] == "eps"){
postscript(file=fichier,width=size$width,height=size$height
,pointsize=ptsz,paper=size$paper)
}else if(outform[o] == "pdf"){
pdf(file=fichier,onefile=FALSE
,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
}#end if
#----- Split window into two (legend strip at bottom, main panel above). ---#
par(par.user)
layout(mat=rbind(2,1),heights=c(5,1))
#---------------------------------------------------------------------------#
#----- First plot: the legend. ---------------------------------------------#
par(mar=c(0.1,4.6,0.1,2.1))
plot.new()
plot.window(xlim=c(0,1),ylim=c(0,1))
if (plotsd){
legend(x="bottom",inset=0.0,legend=c("Model","Observation")
,fill=errcolours,angle=angle,density=dens,lwd=llwd,col=lcolours
,bg=background,title="Shaded areas = 1 SD",cex=cex.ptsz,xpd=TRUE
,pch=16)
}else{
legend(x="bottom",inset=0.0,legend=c("Model","Observation")
,col=lcolours,lwd=llwd,cex=cex.ptsz,xpd=TRUE,pch=16)
}#end if
#---------------------------------------------------------------------------#
#----- Second panel: the actual plot. --------------------------------------#
par(mar=c(4.1,4.6,4.1,2.1))
plot.new()
plot.window(xlim=xlimit,ylim=ylimit,log=plog)
if (plotgrid){
abline(v=uplot$levels,h=axTicks(side=2),col=grid.colour,lty="solid")
}#end if
#---------------------------------------------------------------------------#
#      Shaded 1-SD envelopes for this month.  Keep finite points only; the  #
# model and observation polygons are joined with an NA separator so both    #
# are drawn in a single polygon() call.                                     #
#---------------------------------------------------------------------------#
if (plotsd){
mod.x.now     = thisday
mod.ylow.now  = mod.ylow [pmon,]
mod.yhigh.now = mod.yhigh[pmon,]
#------ Find the periods with continous data. ---------------------------#
ok = is.finite(obs.ylow[pmon,]) & is.finite(obs.yhigh[pmon,])
if (any(ok)){
obs.x.now     = thisday  [ok]
obs.ylow.now  = obs.ylow [pmon,ok]
obs.yhigh.now = obs.yhigh[pmon,ok]
}else{
obs.x.now     = NULL
obs.ylow.now  = NULL
obs.yhigh.now = NULL
}#end if
#------------------------------------------------------------------------#
if (is.null(obs.x.now)){
mod.x.poly = c(mod.x.now,rev(mod.x.now))
mod.y.poly = c(mod.ylow.now,rev(mod.yhigh.now))
mod.keep   = is.finite(mod.y.poly)
err.x      = mod.x.poly[mod.keep]
err.y      = mod.y.poly[mod.keep]
polygon(x=err.x,y=err.y,col=errcolours[1],angle=angle[1]
,density=dens[1],lty="solid",lwd=shwd[1])
}else{
mod.x.poly = c(mod.x.now,rev(mod.x.now))
mod.y.poly = c(mod.ylow.now,rev(mod.yhigh.now))
mod.keep   = is.finite(mod.y.poly)
obs.x.poly = c(obs.x.now,rev(obs.x.now))
obs.y.poly = c(obs.ylow.now,rev(obs.yhigh.now))
obs.keep   = is.finite(obs.y.poly)
err.x      = c(mod.x.poly[mod.keep],NA,obs.x.poly[obs.keep])
err.y      = c(mod.y.poly[mod.keep],NA,obs.y.poly[obs.keep])
polygon(x=err.x,y=err.y,col=errcolours,angle=angle,density=dens
,lty="solid",lwd=shwd)
}#end if
}#end if
points(x=thisday,y=thismean[pmon,],col=lcolours[1]
,lwd=llwd[1],type=ltype,pch=16,cex=1.0)
points(x=thisday,y=obsmean[pmon,],col=lcolours[2]
,lwd=llwd[2],type=ltype,pch=16,cex=1.0)
box()
title(main=letitre,xlab="Time",ylab=ley,cex.main=cex.main)
axis(side=1,at=uplot$levels,labels=uplot$labels,padj=uplot$padj)
axis(side=2,las=1)
#----- Close the device. ---------------------------------------------------#
if (outform[o] == "x11"){
locator(n=1)
dev.off()
}else{
dev.off()
}#end if
dummy = clean.tmp()
#---------------------------------------------------------------------------#
} #end for outform
}#end for pmon
}#end if plotit
}#end for (cc in seq_len(ncompmodel))
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Time series by LU. #
#---------------------------------------------------------------------------------------#
for (v in 1:ntslu){
# Unpack the settings for this land-use variable (name, label, units, flags).
thistslu = tslu[[v]]
vnam = thistslu$vnam
description = thistslu$desc
unit = thistslu$unit
plog = thistslu$plog
plotit = thistslu$plt
#----- Check whether the user wants to have this variable plotted. ------------------#
if (plotit && any(sellu)){
#---------------------------------------------------------------------------------#
#      Check whether the time series directory exists.  If not, create it.        #
#---------------------------------------------------------------------------------#
outdir = paste(outpref,"tslu",sep="/")
if (! file.exists(outdir)) dir.create(outdir)
cat("      +",description,"time series for all LUs...","\n")
#----- Load variable -------------------------------------------------------------#
thisvar = lu[[vnam]]
if (plog){
#----- Eliminate non-positive values in case it is a log plot. ----------------#
thisvar[thisvar <= 0] = NA
}#end if
#---------------------------------------------------------------------------------#
#----- Loop over output formats. -------------------------------------------------#
for (o in 1:nout){
fichier = paste(outdir,"/",vnam,"-",suffix,".",outform[o],sep="")
if(outform[o] == "x11"){
X11(width=size$width,height=size$height,pointsize=ptsz)
}else if(outform[o] == "png"){
png(filename=fichier,width=size$width*depth,height=size$height*depth
,pointsize=ptsz,res=depth)
}else if(outform[o] == "eps"){
postscript(file=fichier,width=size$width,height=size$height
,pointsize=ptsz,paper=size$paper)
}else if(outform[o] == "pdf"){
pdf(file=fichier,onefile=FALSE
,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
}#end if
#------------------------------------------------------------------------------#
#      Find the limit, make some room for the legend, and in case the field is #
# a constant, nudge the limits so the plot command will not complain.          #
#------------------------------------------------------------------------------#
xlimit = pretty.xylim(u=as.numeric(datum$tomonth),fracexp=0.0,is.log=FALSE)
ylimit = pretty.xylim(u=thisvar[,sellu] ,fracexp=0.0,is.log=plog )
if (plog){
xylog = "y"
# Drought rectangles must extend beyond the data range; in log space the
# padding is multiplicative rather than additive.
ydrought = c( exp(ylimit[1] * sqrt(ylimit[1]/ylimit[2]))
, exp(ylimit[2] * sqrt(ylimit[2]/ylimit[1]))
)#end c
}else{
xylog = ""
ydrought = c(ylimit[1] - 0.5 * diff(ylimit), ylimit[2] + 0.5 * diff(ylimit))
}#end if
#------------------------------------------------------------------------------#
#----- Plot settings. ---------------------------------------------------------#
letitre = paste(description,lieu,sep=" - ")
ley = desc.unit(desc=description,unit=unit)
cols = lucols[sellu]
legs = lunames[sellu]
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
#      Split the plot into two windows: legend strip at the bottom, main       #
# panel on top.                                                                #
#------------------------------------------------------------------------------#
par(par.user)
layout(mat=rbind(2,1),heights=c(5,1))
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
#      First plot: legend.                                                     #
#------------------------------------------------------------------------------#
par(mar=c(0.1,4.6,0.1,2.1))
plot.new()
plot.window(xlim=c(0,1),ylim=c(0,1))
legend( x = "bottom"
, inset = 0.0
, legend = legs
, col = cols
, lwd = lwidth
, ncol = min(3,pretty.box(n.sellu)$ncol)
, title = expression(bold("Land use type"))
, xpd = TRUE
)#end legend
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
#      Main plot.                                                              #
#------------------------------------------------------------------------------#
par(mar=c(4.1,4.6,4.1,2.1))
plot.new()
plot.window(xlim=xlimit,ylim=ylimit,log=xylog)
axis(side=1,at=whenplot8$levels,labels=whenplot8$labels,padj=whenplot8$padj)
axis(side=2,las=1)
box()
title(main=letitre,xlab="Year",ylab=ley,cex.main=0.7,log=xylog)
# Shade drought periods (drawn first so lines appear on top of them).
if (drought.mark){
for (n in 1:ndrought){
rect(xleft = drought[[n]][1],ybottom = ydrought[1]
,xright = drought[[n]][2],ytop = ydrought[2]
,col = grid.colour,border=NA)
}#end for
}#end if
#----- Plot grid. -------------------------------------------------------------#
if (plotgrid){
abline(v=whenplot8$levels,h=axTicks(side=2),col=grid.colour,lty="solid")
}#end if
#----- Plot lines (only the land-use types selected in sellu). ----------------#
for (n in 1:(nlu+1)){
if (sellu[n]){
lines(datum$tomonth,thisvar[,n],type="l",col=lucols[n],lwd=lwidth)
}#end if
}#end for
#------------------------------------------------------------------------------#
#----- Close the device. ------------------------------------------------------#
if (outform[o] == "x11"){
locator(n=1)
dev.off()
}else{
dev.off()
}#end if
dummy=clean.tmp()
#------------------------------------------------------------------------------#
}#end for outform
#---------------------------------------------------------------------------------#
}#end if (plotit && any(sellu))
#------------------------------------------------------------------------------------#
}#end for (v in 1:ntslu)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Plot disturbance rate by disturbance transition. #
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
#     Time series of disturbance rates, one curve per land-use transition (ilu -> jlu). #
# Relies on script-level objects defined earlier in this file (datum, lu, seldist,     #
# distcols, distnames, outform, size, ptsz, depth, outpref, suffix, whenplot8,         #
# drought.mark/drought/ndrought, par.user, lwidth, background, grid.colour, ...).      #
#---------------------------------------------------------------------------------------#
if (tserdist && any(seldist)){
   cat(" + Disturbance rate time series for all disturbances...","\n")
   #----- One figure per requested output format. --------------------------------------#
   for (o in 1:nout){
      fichier = paste(outpref,"/disturb-",suffix,".",outform[o],sep="")
      if (outform[o] == "x11"){
         X11(width=size$width,height=size$height,pointsize=ptsz)
      }else if(outform[o] == "png"){
         png(filename=fichier,width=size$width*depth,height=size$height*depth
            ,pointsize=ptsz,res=depth)
      }else if(outform[o] == "eps"){
         postscript(file=fichier,width=size$width,height=size$height
                   ,pointsize=ptsz,paper=size$paper)
      }else if(outform[o] == "pdf"){
         pdf(file=fichier,onefile=FALSE
            ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
      }#end if
      #---------------------------------------------------------------------------------#
      #     Find the limit, make some room for the legend, and in case the field is a   #
      # constant, nudge the limits so the plot command will not complain.               #
      #---------------------------------------------------------------------------------#
      xlimit   = pretty.xylim(u=as.numeric(datum$tomonth),fracexp=0.0,is.log=FALSE)
      ylimit   = NULL
      mylucols = NULL
      mylulegs = NULL
      #----- Collect data range, colour and label of every selected transition. --------#
      # Counter n walks the nlu x nlu transition matrix in column-major (jlu outer)     #
      # order, matching the layout of distcols/distnames.                               #
      n        = 0
      for (jlu in 1:nlu){
         for (ilu in 1:nlu){
            n = n + 1
            if (seldist[ilu,jlu]){
               ylimit   = c(ylimit,lu$dist[,ilu,jlu])
               mylucols = c(mylucols,distcols [n])
               mylulegs = c(mylulegs,distnames[n])
            }#end if
         }#end for
      }#end for
      ylimit   = pretty.xylim(u=ylimit,fracexp=0.0,is.log=FALSE)
      #----- ydrought extends 50% beyond ylimit so rectangles always cover the panel. --#
      ydrought = c(ylimit[1] - 0.5 * diff(ylimit), ylimit[2] + 0.5 * diff(ylimit))
      #---------------------------------------------------------------------------------#

      #----- Plot settings. ------------------------------------------------------------#
      letitre = paste("Disturbance rates",lieu,sep=" - ")
      #---------------------------------------------------------------------------------#

      #---------------------------------------------------------------------------------#
      #      Split the plot into two windows (bottom strip for the legend, top panel    #
      # for the curves).                                                                #
      #---------------------------------------------------------------------------------#
      par(par.user)
      layout(mat=rbind(2,1),heights=c(5,1))
      #---------------------------------------------------------------------------------#

      #---------------------------------------------------------------------------------#
      #     First plot: legend.                                                         #
      #---------------------------------------------------------------------------------#
      par(mar=c(0.1,4.6,0.1,2.1))
      plot.new()
      plot.window(xlim=c(0,1),ylim=c(0,1))
      # At this point n == nlu*nlu (counter left over from the collection loop above).  #
      legend( x      = "bottom"
            , inset  = 0.0
            , bg     = background
            , legend = mylulegs
            , col    = mylucols
            , lwd    = lwidth
            , ncol   = min(3,pretty.box(n)$ncol)
            , title  = expression(bold("Transition"))
            , xpd    = TRUE
            )#end legend
      #---------------------------------------------------------------------------------#

      #---------------------------------------------------------------------------------#
      #     Main plot.                                                                  #
      #---------------------------------------------------------------------------------#
      par(mar=c(4.1,4.6,4.1,2.1))
      plot.new()
      plot.window(xlim=xlimit,ylim=ylimit,log=xylog)
      axis(side=1,at=whenplot8$levels,labels=whenplot8$labels,padj=whenplot8$padj)
      axis(side=2,las=1)
      box()
      title( main     = letitre
           , xlab     = "Year"
           , ylab     = desc.unit(desc="Disturbance rate",unit=untab$oneoyr)
           , cex.main = 0.7
           )#end title
      #----- Shade drought periods (rectangles span the whole vertical range). ---------#
      if (drought.mark){
         for (n in 1:ndrought){
            rect(xleft  = drought[[n]][1],ybottom = ydrought[1]
                ,xright = drought[[n]][2],ytop    = ydrought[2]
                ,col    = grid.colour,border=NA)
         }#end for
      }#end if
      #----- Plot grid. ----------------------------------------------------------------#
      if (plotgrid){
         abline(v=whenplot8$levels,h=axTicks(side=2),col=grid.colour,lty="solid")
      }#end if
      #----- Plot lines (counter n re-walks the transition matrix, same order). --------#
      n = 0
      for (jlu in sequence(nlu)){
         for (ilu in sequence(nlu)){
            n = n + 1
            if (seldist[ilu,jlu]){
               lines(datum$tomonth,lu$dist[,ilu,jlu],type="l"
                    ,col=distcols[n],lwd=lwidth)
            }#end if
         }#end for
      }#end for
      #---------------------------------------------------------------------------------#

      #----- Close the device (x11 waits for a click before closing). ------------------#
      if (outform[o] == "x11"){
         locator(n=1)
         dev.off()
      }else{
         dev.off()
      }#end if
      dummy=clean.tmp()
      #---------------------------------------------------------------------------------#
   }#end for outform
   #------------------------------------------------------------------------------------#
}#end if
#---------------------------------------------------------------------------------------#
#      Plot the time series diagrams showing months and years: one figure per "theme"   #
# (a named group of related variables plotted together), using the monthly means        #
# (emean).  Each theme[[hh]] is a list with vnam/desc/colour/lwd/type/plog/prefix/      #
# title/unit/legpos plus flags (emean) and fixed limits (emean.lim).                    #
#---------------------------------------------------------------------------------------#
cat(" * Plot some time series with groups of variables...","\n")
for (hh in 1:ntheme){
   #----- Retrieve variable information from the list. ---------------------------------#
   themenow    = theme[[hh]]
   vnames      = themenow$vnam
   description = themenow$desc
   lcolours    = themenow$colour
   llwd        = themenow$lwd
   # emean.line forces plain lines regardless of the theme's preferred point type.      #
   if (emean.line){
      ltype    = "l"
   }else{
      ltype    = themenow$type
   }#end if
   plog        = themenow$plog
   prefix      = themenow$prefix
   group       = themenow$title
   unit        = themenow$unit
   legpos      = themenow$legpos
   plotit      = themenow$emean
   ylimit.fix  = themenow$emean.lim
   if (plotit){
      #---------------------------------------------------------------------------------#
      #    Check whether the time series directory exists.  If not, create it.          #
      #---------------------------------------------------------------------------------#
      outdir = paste(outpref,"theme_emean",sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      cat("      + ",group," time series for several variables...","\n")

      #----- Define the number of layers (curves in this theme). -----------------------#
      nlayers = length(vnames)
      #---------------------------------------------------------------------------------#

      #---------------------------------------------------------------------------------#
      #     Find the limit, make some room for the legend, and in case the field is a   #
      # constant, nudge the limits so the plot command will not complain.  Fixed        #
      # limits from the theme are used only when all of them are finite.                #
      #---------------------------------------------------------------------------------#
      xlimit = pretty.xylim(u=as.numeric(datum$tomonth),fracexp=0.0,is.log=FALSE)
      if (any(! is.finite(ylimit.fix))){
         ylimit = NULL
         for (l in 1:nlayers) ylimit = c(ylimit,emean[[vnames[l]]])
         ylimit = pretty.xylim(u=ylimit,fracexp=0.0,is.log=plog)
      }else{
         ylimit = ylimit.fix
      }#end if
      if (plog){
         xylog    = "y"
         # NOTE(review): this log-scale ydrought formula takes exp() of values that    #
         # are already in data units, which looks suspect — confirm the intent before  #
         # relying on the rectangle extent for log-scaled themes.                      #
         ydrought = c( exp(ylimit[1] * sqrt(ylimit[1]/ylimit[2]))
                     , exp(ylimit[2] * sqrt(ylimit[2]/ylimit[1]))
                     )#end c
      }else{
         xylog    = ""
         ydrought = c(ylimit[1] - 0.5 * diff(ylimit), ylimit[2] + 0.5 * diff(ylimit))
      }#end if
      #---------------------------------------------------------------------------------#

      #----- Loop over formats. --------------------------------------------------------#
      for (o in 1:nout){
         #------ Open file. ------------------------------------------------------------#
         fichier = paste(outdir,"/",prefix,"-",suffix,".",outform[o],sep="")
         if(outform[o] == "x11"){
            X11(width=size$width,height=size$height,pointsize=ptsz)
         }else if(outform[o] == "png"){
            png(filename=fichier,width=size$width*depth,height=size$height*depth
               ,pointsize=ptsz,res=depth)
         }else if(outform[o] == "eps"){
            postscript(file=fichier,width=size$width,height=size$height
                      ,pointsize=ptsz,paper=size$paper)
         }else if(outform[o] == "pdf"){
            pdf(file=fichier,onefile=FALSE
               ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
         }#end if
         #------------------------------------------------------------------------------#

         #----- Plot settings. ---------------------------------------------------------#
         letitre = paste(" Time series: ",group,"\n",lieu,sep="")
         ley     = desc.unit(desc=description,unit=unit)
         #------------------------------------------------------------------------------#

         #------------------------------------------------------------------------------#
         #     Split the plot into two windows (legend strip + main panel).             #
         #------------------------------------------------------------------------------#
         par(par.user)
         layout(mat=rbind(2,1),heights=c(5,1))
         #------------------------------------------------------------------------------#

         #------------------------------------------------------------------------------#
         #     First plot: legend.                                                      #
         #------------------------------------------------------------------------------#
         par(mar=c(0.1,4.6,0.1,2.1))
         plot.new()
         plot.window(xlim=c(0,1),ylim=c(0,1))
         legend( x      = "bottom"
               , inset  = 0.0
               , legend = description
               , col    = lcolours
               , lwd    = llwd
               , ncol   = min(3,pretty.box(nlayers)$ncol)
               , xpd    = TRUE
               )#end legend
         #------------------------------------------------------------------------------#

         #------------------------------------------------------------------------------#
         #     Main plot.                                                               #
         #------------------------------------------------------------------------------#
         par(mar=c(4.1,4.6,4.1,2.1))
         plot.new()
         plot.window(xlim=xlimit,ylim=ylimit,log=xylog)
         axis(side=1,at=whenplot8$levels,labels=whenplot8$labels,padj=whenplot8$padj)
         axis(side=2,las=1)
         box()
         title(main=letitre,xlab="Year",ylab=ley,cex.main=0.7)
         #----- Shade drought periods. -------------------------------------------------#
         if (drought.mark){
            for (n in 1:ndrought){
               rect(xleft  = drought[[n]][1],ybottom = ydrought[1]
                   ,xright = drought[[n]][2],ytop    = ydrought[2]
                   ,col    = grid.colour,border=NA)
            }#end for
         }#end if
         #----- Plot grid. -------------------------------------------------------------#
         if (plotgrid){
            abline(v=whenplot8$levels,h=axTicks(side=2),col=grid.colour,lty="solid")
         }#end if
         #----- Plot lines, one layer (variable) at a time. ----------------------------#
         for (l in 1:nlayers){
            thisvar = emean[[vnames[l]]]
            points(x=datum$tomonth,y=thisvar,col=lcolours[l],lwd=llwd[l],type=ltype
                  ,pch=16,cex=0.8)
         }#end for
         #------------------------------------------------------------------------------#

         #----- Close the device (x11 waits for a click before closing). ---------------#
         if (outform[o] == "x11"){
            locator(n=1)
            dev.off()
         }else{
            dev.off()
         }#end if
         dummy=clean.tmp()
         #------------------------------------------------------------------------------#
      } #end for outform
      #---------------------------------------------------------------------------------#
   }#end if plotit
   #------------------------------------------------------------------------------------#
}#end for ntser
#---------------------------------------------------------------------------------------#
#      Plot the mean annual cycle (monthly means over all years) for each theme, one    #
# figure per theme, using the climatological monthly means (mmean).  Each theme[[hh]]   #
# provides variable names, colours, line widths/types, and the mmean flag/limits.       #
#---------------------------------------------------------------------------------------#
cat(" * Plot some monthly means of groups of variables ...","\n")
for (hh in 1:ntheme){
   #----- Retrieve variable information from the list. ---------------------------------#
   themenow    = theme[[hh]]
   vnames      = themenow$vnam
   description = themenow$desc
   lcolours    = themenow$colour
   llwd        = themenow$lwd
   ltype       = themenow$type
   plog        = themenow$plog
   prefix      = themenow$prefix
   group       = themenow$title
   unit        = themenow$unit
   legpos      = themenow$legpos
   plotit      = themenow$mmean
   ylimit.fix  = themenow$mmean.lim
   if (plotit){
      #---------------------------------------------------------------------------------#
      #    Check whether the time series directory exists.  If not, create it.          #
      #---------------------------------------------------------------------------------#
      outdir = paste(outpref,"theme_mmean",sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      cat("      + ",group," time series for several variables...","\n")

      #----- Define the number of layers (curves in this theme). -----------------------#
      nlayers = length(vnames)
      #---------------------------------------------------------------------------------#

      #---------------------------------------------------------------------------------#
      #     Find the limit, make some room for the legend, and in case the field is a   #
      # constant, nudge the limits so the plot command will not complain.               #
      # Fix: the x axis is month-of-year and is never log-scaled (xylog below only     #
      # ever carries "y"), so the x limits must use is.log=FALSE (was is.log=plog).    #
      #---------------------------------------------------------------------------------#
      xlimit = pretty.xylim(u=montmont,fracexp=0.0,is.log=FALSE)
      if (any (! is.finite(ylimit.fix))){
         ylimit = NULL
         for (l in 1:nlayers) ylimit = c(ylimit,mmean[[vnames[l]]])
         ylimit = pretty.xylim(u=ylimit,fracexp=0.0,is.log=plog)
      }else{
         ylimit = ylimit.fix
      }#end if
      if (plog){
         xylog    = "y"
         ydrought = c( exp(ylimit[1] * sqrt(ylimit[1]/ylimit[2]))
                     , exp(ylimit[2] * sqrt(ylimit[2]/ylimit[1]))
                     )#end c
      }else{
         xylog    = ""
         ydrought = c(ylimit[1] - 0.5 * diff(ylimit), ylimit[2] + 0.5 * diff(ylimit))
      }#end if
      #---------------------------------------------------------------------------------#

      #----- Loop over formats. --------------------------------------------------------#
      for (o in 1:nout){
         #------ Open file. ------------------------------------------------------------#
         fichier = paste(outdir,"/",prefix,"-",suffix,".",outform[o],sep="")
         if(outform[o] == "x11"){
            X11(width=size$width,height=size$height,pointsize=ptsz)
         }else if(outform[o] == "png"){
            png(filename=fichier,width=size$width*depth,height=size$height*depth
               ,pointsize=ptsz,res=depth)
         }else if(outform[o] == "eps"){
            postscript(file=fichier,width=size$width,height=size$height
                      ,pointsize=ptsz,paper=size$paper)
         }else if(outform[o] == "pdf"){
            pdf(file=fichier,onefile=FALSE
               ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
         }#end if
         #------------------------------------------------------------------------------#

         #----- Plot settings. ---------------------------------------------------------#
         letitre = paste(" Time series: ",group,"\n",lieu,sep="")
         ley     = desc.unit(desc=description,unit=unit)
         #------------------------------------------------------------------------------#

         #------------------------------------------------------------------------------#
         #     Split the plot into two windows (legend strip + main panel).             #
         #------------------------------------------------------------------------------#
         par(par.user)
         layout(mat=rbind(2,1),heights=c(5,1))
         #------------------------------------------------------------------------------#

         #------------------------------------------------------------------------------#
         #     First plot: legend.                                                      #
         #------------------------------------------------------------------------------#
         par(mar=c(0.1,4.6,0.1,2.1))
         plot.new()
         plot.window(xlim=c(0,1),ylim=c(0,1))
         legend( x      = "bottom"
               , inset  = 0.0
               , legend = description
               , col    = lcolours
               , lwd    = llwd
               , pch    = 16
               , ncol   = min(3,pretty.box(nlayers)$ncol)
               , cex    = 0.9*cex.ptsz
               , xpd    = TRUE
               )#end legend
         #------------------------------------------------------------------------------#

         #------------------------------------------------------------------------------#
         #     Main plot.  The x axis shows month initials (mplot levels).              #
         #------------------------------------------------------------------------------#
         par(mar=c(4.1,4.6,4.1,2.1))
         plot.new()
         plot.window(xlim=xlimit,ylim=ylimit,log=xylog)
         axis(side=1,at=mplot$levels,labels=substring(mplot$labels,1,1),padj=mplot$padj)
         axis(side=2,las=1)
         box()
         # Fix: the x axis is month-of-year, not simulation year, so label it "Month".  #
         title(main=letitre,xlab="Month",ylab=ley,cex.main=0.7)
         #----- Shade drought periods. -------------------------------------------------#
         if (drought.mark){
            for (n in 1:ndrought){
               rect(xleft  = drought[[n]][1],ybottom = ydrought[1]
                   ,xright = drought[[n]][2],ytop    = ydrought[2]
                   ,col    = grid.colour,border=NA)
            }#end for
         }#end if
         #----- Plot grid. -------------------------------------------------------------#
         if (plotgrid){
            abline(v=mplot$levels,h=axTicks(side=2),col=grid.colour,lty="solid")
         }#end if
         #----- Plot lines, one layer (variable) at a time. ----------------------------#
         for (l in 1:nlayers){
            thisvar = mmean[[vnames[l]]]
            points(x=montmont,y=thisvar,col=lcolours[l],lwd=llwd[l],type=ltype
                  ,pch=16,cex=0.8)
         }#end for
         #------------------------------------------------------------------------------#

         #----- Close the device (x11 waits for a click before closing). ---------------#
         if (outform[o] == "x11"){
            locator(n=1)
            dev.off()
         }else{
            dev.off()
         }#end if
         dummy=clean.tmp()
         #------------------------------------------------------------------------------#
      } #end for outform
      #---------------------------------------------------------------------------------#
   }#end if plotit
   #------------------------------------------------------------------------------------#
}#end for ntser
#---------------------------------------------------------------------------------------#
#      Plot the climatology of the mean diurnal cycle: for each theme with the qmean    #
# flag on, one figure per calendar month, with hour of day on the x axis (thisday)      #
# and one curve per variable in the theme (umean).                                      #
#---------------------------------------------------------------------------------------#
cat(" * Plot the mean diel of groups of variables...","\n")
for (hh in 1:ntheme){
   #----- Retrieve variable information from the list. ---------------------------------#
   themenow    = theme[[hh]]
   vnames      = themenow$vnam
   description = themenow$desc
   lcolours    = themenow$colour
   llwd        = themenow$lwd
   ltype       = themenow$type
   plog        = themenow$plog
   prefix      = themenow$prefix
   group       = themenow$title
   unit        = themenow$unit
   legpos      = themenow$legpos
   plotit      = themenow$qmean
   if (plog){
      xylog = "y"
   }else{
      xylog = ""
   }#end if
   if (plotit){
      #---------------------------------------------------------------------------------#
      #    Check whether the output directories exist.  If not, create them (one        #
      # sub-directory per theme prefix).                                                #
      #---------------------------------------------------------------------------------#
      outdir   = paste(outpref,"theme_qmean",sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      outtheme = paste(outdir,prefix,sep="/")
      if (! file.exists(outtheme)) dir.create(outtheme)
      cat("      + ",group," diurnal cycle for several variables...","\n")

      #----- Define the number of layers and shared axis limits for all 12 months. -----#
      nlayers  = length(vnames)
      xlimit   = range(thisday)
      ylimit   = NULL
      for (l in 1:nlayers) ylimit = c(ylimit,umean[[vnames[l]]])
      ylimit   = pretty.xylim(u=ylimit,fracexp=0.0,is.log=FALSE)
      #---------------------------------------------------------------------------------#

      #---------------------------------------------------------------------------------#
      #      Loop over all months.                                                      #
      #---------------------------------------------------------------------------------#
      for (pmon in 1:12){
         cmon    = sprintf("%2.2i",pmon)
         namemon = mlist[pmon]

         #----- Loop over formats. -----------------------------------------------------#
         for (o in 1:nout){
            #------ Open file. ---------------------------------------------------------#
            fichier = paste(outtheme,"/",prefix,"-",cmon,"-",suffix,".",outform[o]
                           ,sep="")
            if(outform[o] == "x11"){
               X11(width=size$width,height=size$height,pointsize=ptsz)
            }else if(outform[o] == "png"){
               png(filename=fichier,width=size$width*depth,height=size$height*depth
                  ,pointsize=ptsz,res=depth)
            }else if(outform[o] == "eps"){
               postscript(file=fichier,width=size$width,height=size$height
                         ,pointsize=ptsz,paper=size$paper)
            }else if(outform[o] == "pdf"){
               pdf(file=fichier,onefile=FALSE
                  ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
            }#end if
            #---------------------------------------------------------------------------#

            #----- Plot settings. ------------------------------------------------------#
            letitre = paste(group," - ",lieu,"\n"
                           ,"Mean diurnal cycle - ",namemon,sep="")
            ley     = desc.unit(desc=description,unit=unit)
            #---------------------------------------------------------------------------#

            #---------------------------------------------------------------------------#
            #     Split the plot into two windows (legend strip + main panel).          #
            #---------------------------------------------------------------------------#
            par(par.user)
            layout(mat=rbind(2,1),heights=c(5,1))
            #---------------------------------------------------------------------------#

            #---------------------------------------------------------------------------#
            #     First plot: legend.                                                   #
            #---------------------------------------------------------------------------#
            par(mar=c(0.1,4.6,0.1,2.1))
            plot.new()
            plot.window(xlim=c(0,1),ylim=c(0,1))
            legend( x      = "bottom"
                  , inset  = 0.0
                  , legend = description
                  , col    = lcolours
                  , lwd    = llwd
                  , ncol   = min(3,pretty.box(nlayers)$ncol)
                  , xpd    = TRUE
                  )#end legend
            #---------------------------------------------------------------------------#

            #---------------------------------------------------------------------------#
            #     Main plot.  The x axis is the hour of the day (uplot levels).         #
            #---------------------------------------------------------------------------#
            par(mar=c(4.1,4.6,4.1,2.1))
            plot.new()
            plot.window(xlim=xlimit,ylim=ylimit,log=xylog)
            axis(side=1,at=uplot$levels,labels=uplot$labels,padj=uplot$padj)
            axis(side=2,las=1)
            box()
            # Fix: the x axis is time of day, not simulation year, so label it "Time".  #
            title(main=letitre,xlab="Time",ylab=ley,cex.main=0.7)
            # NOTE(review): this drought-shading block looks vestigial here — ydrought  #
            # is never assigned in this section (it carries over from a previous plot)  #
            # and drought[[n]] holds time (year) coordinates, which fall outside the    #
            # hour-of-day x range.  Kept for behaviour compatibility; confirm whether   #
            # it can be removed.                                                        #
            if (drought.mark){
               for (n in 1:ndrought){
                  rect(xleft  = drought[[n]][1],ybottom = ydrought[1]
                      ,xright = drought[[n]][2],ytop    = ydrought[2]
                      ,col    = grid.colour,border=NA)
               }#end for
            }#end if
            #----- Plot grid. ----------------------------------------------------------#
            if (plotgrid){
               abline(v=uplot$levels,h=axTicks(side=2),col=grid.colour,lty="solid")
            }#end if
            #----- Plot lines.  The first column is repeated at the end (column        -#
            # ndcycle prepended) so the diel cycle wraps around cleanly. ---------------#
            for (l in 1:nlayers){
               thisvar = umean[[vnames[l]]]
               thisvar = cbind(thisvar[,ndcycle],thisvar)
               points(x=thisday,y=thisvar[pmon,],col=lcolours[l]
                     ,lwd=llwd[l],type=ltype,pch=16)
            }#end for
            #---------------------------------------------------------------------------#

            #----- Close the device (x11 waits for a click before closing). ------------#
            if (outform[o] == "x11"){
               locator(n=1)
               dev.off()
            }else{
               dev.off()
            }#end if
            dummy=clean.tmp()
            #---------------------------------------------------------------------------#
         } #end for outform
         #------------------------------------------------------------------------------#
      }#end for pmon
      #---------------------------------------------------------------------------------#
   }#end if plotit
   #------------------------------------------------------------------------------------#
}#end for ntser
#---------------------------------------------------------------------------------------#
#      Plot the climatology of the soil properties: for each soilplot entry with the    #
# mmean flag on, a filled-contour (month x soil depth) panel of the climatological      #
# monthly means.                                                                        #
#---------------------------------------------------------------------------------------#
for (v in 1:nsoilplot){
   #----- Retrieve variable information from the list. ---------------------------------#
   thissoil    = soilplot[[v]]
   vnam        = thissoil$vnam
   description = thissoil$desc
   unit        = thissoil$unit
   vcscheme    = thissoil$csch
   pnlog       = thissoil$pnlog
   plotit      = thissoil$mmean
   if (plotit){
      #---------------------------------------------------------------------------------#
      #     Check if the directory exists.  If not, create it.                          #
      #---------------------------------------------------------------------------------#
      outdir  =  paste(outpref,"soil_mmean",sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      cat("      + Climatology profile of ",description,"...","\n")

      #----- Find the number of rows and columns, and the axes. ------------------------#
      monaxis  = sort(unique(datum$month))
      soilaxis = slz
      nmon     = length(monaxis)
      nsoil    = nzg

      #----- Save the meaningful months and years. -------------------------------------#
      monat    = 1:12
      monlab   = substring(month.abb,1,1)

      #----- Convert the vector data into an array (month x soil layer). ---------------#
      vararr   = mmean[[vnam]]

      #----- Copy Decembers and Januaries to make the month edges buffered. ------------#
      # Each buffer row is also padded at both ends so its length matches the depth-    #
      # buffered matrix (nzg + 2 columns) built below.                                  #
      january  = vararr[1,]
      january  = c(january,january[nzg],january[nzg])
      december = vararr[12,]
      december = c(december[1],december[1],december)

      #----- Buffer the edges: repeat the first and last soil layers as extra columns,  #
      # then add December before January and January after December as extra rows, so  #
      # the contour plot has no gaps at the domain boundary. ---------------------------#
      varbuff  = cbind(vararr[,1],vararr,vararr[,nzg])
      varbuff  = rbind(december,varbuff,january)

      #---------------------------------------------------------------------------------#
      #      Expand the month and soil-depth axes to match the buffered array: one      #
      # extra month at each end, and one extrapolated layer above and below.  Depth is  #
      # mapped to -log(-z) so shallow layers get more visual room.                      #
      #---------------------------------------------------------------------------------#
      monaxis  = c(min(monaxis)-1,monaxis,max(monaxis)+1)
      soilaxis = -log(-1.0 * c( slz[1]*(slz[1]/slz[2])
                              , soilaxis
                              , slz[nzg]*(slz[nzg]/slz[nzg-1]) ))

      #----- Choose contour levels (log-spaced when pnlog is set). ---------------------#
      if (pnlog){
         vrange  = range(varbuff,na.rm=TRUE)
         vlevels = pretty.log(x=vrange,n=ncolsfc,forcelog=TRUE)
         vnlev   = length(vlevels)
      }else{
         vrange  = range(varbuff,na.rm=TRUE)
         vlevels = pretty(x=vrange,n=ncolsfc)
         vnlev   = length(vlevels)
      }#end if

      #----- Loop over formats. --------------------------------------------------------#
      for (o in 1:nout){
         fichier = paste(outdir,"/",vnam,"-",suffix,".",outform[o],sep="")
         if(outform[o] == "x11"){
            X11(width=size$width,height=size$height,pointsize=ptsz)
         }else if(outform[o] == "png"){
            png(filename=fichier,width=size$width*depth,height=size$height*depth
               ,pointsize=ptsz,res=depth)
         }else if(outform[o] == "eps"){
            postscript(file=fichier,width=size$width,height=size$height
                      ,pointsize=ptsz,paper=size$paper)
         }else if(outform[o] == "pdf"){
            pdf(file=fichier,onefile=FALSE
               ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
         }#end if

         letitre = paste(description," - ",lieu,sep="")
         ley     = desc.unit(desc="Soil depth",unit=untab$m)
         lacle   = desc.unit(desc=NULL,unit=unit)
         par(par.user)
         # sombreado draws the filled contour; zat/znice (defined earlier in the       #
         # script) supply the depth-axis tick positions and labels.                    #
         sombreado(x=monaxis,y=soilaxis,z=varbuff,levels=vlevels,nlevels=vnlev
                  ,colour.palette=get(vcscheme)
                  ,plot.title=title(main=letitre,xlab="Month",ylab=ley,cex.main=0.7)
                  ,key.title=title(main=lacle,cex.main=0.8)
                  ,key.log=pnlog
                  ,useRaster=TRUE
                  ,plot.axes={axis(side=1,at=monat,labels=monlab)
                              axis(side=2,at=zat,labels=znice)
                              if (fcgrid){
                                 abline(h=zat,v=monat,col=grid.colour,lty="dotted")
                              }#end if fcgrid
                             }#end plot.axes
                  )

         #----- Close the device (x11 waits for a click before closing). ---------------#
         if (outform[o] == "x11"){
            locator(n=1)
            dev.off()
         }else{
            dev.off()
         }#end if
         dummy = clean.tmp()
      } #end for outform
   }#end if plotit
}#end for (v in 1:nsoilplot)
#---------------------------------------------------------------------------------------#
#      Plot the full time series of the soil properties: for each soilplot entry with   #
# the emean flag on, a filled-contour (time x soil depth) panel of the monthly means.   #
#---------------------------------------------------------------------------------------#
for (sts in 1:nsoilplot){
   #----- Retrieve variable information from the list. ---------------------------------#
   thissoil    = soilplot[[sts]]
   vnam        = thissoil$vnam
   description = thissoil$desc
   unit        = thissoil$unit
   vcscheme    = thissoil$csch
   pnlog       = thissoil$pnlog
   plotit      = thissoil$emean
   if (plotit){
      #---------------------------------------------------------------------------------#
      #     Check if the directory exists.  If not, create it.                          #
      #---------------------------------------------------------------------------------#
      outdir  =  paste(outpref,"soil_emean",sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      cat("      + Time series profile of ",description,"...","\n")

      #----- Find the number of rows and columns, and the axes. ------------------------#
      timeaxis  = datum$tomonth
      soilaxis  = slz
      nmon      = length(timeaxis)
      nsoil     = nzg

      #----- Convert the vector data into an array (time x soil layer). ----------------#
      vararr    = emean[[vnam]]

      #----- Copy the first and last time steps to make the time edges buffered. -------#
      # Each buffer row is also padded at both ends so its length matches the depth-    #
      # buffered matrix (nzg + 2 columns) built below.                                  #
      first     = vararr[1,]
      first     = c(first,first[nzg],first[nzg])
      last      = vararr[ntimes,]
      last      = c(last[1],last[1],last)

      #----- Buffer the edges: repeat the first and last soil layers as extra columns,  #
      # then add the duplicated first/last time steps as extra rows. -------------------#
      varbuff   = cbind(vararr[,1],vararr,vararr[,nzg])
      varbuff   = rbind(first,varbuff,last)

      #---------------------------------------------------------------------------------#
      #      Expand the time and soil-depth axes to match the buffered array: one       #
      # extra time step at each end (spaced like the first interval), and one           #
      # extrapolated layer above and below (depth mapped to -log(-z)).                  #
      #---------------------------------------------------------------------------------#
      dwhen     = as.numeric(datum$tomonth[2]-datum$tomonth[1])
      whenaxis  = c(chron(as.numeric(datum$tomonth[1]-dwhen))
                   ,timeaxis
                   ,chron(as.numeric(datum$tomonth[ntimes]+dwhen)))
      soilaxis  = -log(-1.0 * c( slz[1]*(slz[1]/slz[2])
                               , soilaxis
                               , slz[nzg]*(slz[nzg]/slz[nzg-1]) ))

      #----- Choose contour levels (log-spaced when pnlog is set). ---------------------#
      if (pnlog){
         vrange  = range(varbuff,na.rm=TRUE)
         vlevels = pretty.log(x=vrange,n=ncolsfc,forcelog=TRUE)
         vnlev   = length(vlevels)
      }else{
         vrange  = range(varbuff,na.rm=TRUE)
         vlevels = pretty(x=vrange,n=ncolsfc)
         vnlev   = length(vlevels)
      }#end if

      #----- Loop over formats. --------------------------------------------------------#
      for (o in 1:nout){
         fichier = paste(outdir,"/",vnam,"-",suffix,".",outform[o],sep="")
         if(outform[o] == "x11"){
            X11(width=size$width,height=size$height,pointsize=ptsz)
         }else if(outform[o] == "png"){
            png(filename=fichier,width=size$width*depth,height=size$height*depth
               ,pointsize=ptsz,res=depth)
         }else if(outform[o] == "eps"){
            postscript(file=fichier,width=size$width,height=size$height
                      ,pointsize=ptsz,paper=size$paper)
         }else if(outform[o] == "pdf"){
            pdf(file=fichier,onefile=FALSE
               ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
         }#end if

         letitre = paste(description," - ",lieu,sep="")
         ley     = desc.unit(desc="Soil depth",unit=untab$m)
         lacle   = desc.unit(desc=NULL,unit=unit)
         par(par.user)
         # NOTE(review): the x axis here is the multi-year time axis (whenplot6        #
         # labels), so xlab="Month" looks like a copy-paste leftover from the          #
         # climatology plot above — "Year" is probably intended.  Confirm before       #
         # changing.                                                                   #
         sombreado(x=whenaxis,y=soilaxis,z=varbuff,levels=vlevels,nlevels=vnlev
                  ,colour.palette=get(vcscheme)
                  ,plot.title=title(main=letitre,xlab="Month",ylab=ley,cex.main=0.7)
                  ,key.title=title(main=lacle,cex.main=0.8)
                  ,key.log=pnlog
                  ,useRaster=TRUE
                  ,plot.axes={axis(side=1,at=whenplot6$levels
                                  ,labels=whenplot6$labels,padj=whenplot6$padj)
                              axis(side=2,at=zat,labels=znice)
                              if (fcgrid){
                                 abline(h=zat,v=whenplot6$levels,col=grid.colour
                                       ,lty="dotted")
                              }#end if fcgrid
                             }#end plot.axes
                  )#end sombreado

         #----- Close the device (x11 waits for a click before closing). ---------------#
         if (outform[o] == "x11"){
            locator(n=1)
            dev.off()
         }else{
            dev.off()
         }#end if
         dummy = clean.tmp()
      } #end for outform
   }#end if plotit
}#end for nhov
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Plot a filled contour plot showing months and years. #
#---------------------------------------------------------------------------------------#
#----- Loop over "squeeze" variables: plot monthly means as month x year contours. -------#
for (v in 1:nsqueeze){
   #----- Retrieve variable information from the list. ----------------------------------#
   thisfillc   = squeeze[[v]]
   vnam        = thisfillc$vnam
   description = thisfillc$desc
   unit        = thisfillc$unit
   vcscheme    = thisfillc$col.scheme
   plotit      = thisfillc$fco.mmean
   #--------------------------------------------------------------------------------------#
   #     Find the first and the last full years.  These will be the actual first and      #
   # last year only if the years are complete, otherwise the first and the last year      #
   # will be taken out.                                                                   #
   #--------------------------------------------------------------------------------------#
   if (monthbeg == 1){
      yearaa = yeara
   }else{
      yearaa = yeara + 1
   }#end if
   if (meszz == 12){
      yearzz = yearz
   }else{
      yearzz = yearz - 1
   }#end if
   sel      = datum$year >= yearaa & datum$year <= yearzz
   #----- A month x year contour needs at least two full years (24 monthly points). -----#
   twoyears = sum(sel) >= 24
   if (plotit && twoyears){
      #----------------------------------------------------------------------------------#
      #      Check if the directory exists.  If not, create it.                          #
      #----------------------------------------------------------------------------------#
      outdir = paste(outpref,"fillc_mmean",sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      cat(" + ",description," time series in filled contour...","\n")
      #----- Load this variable into "thisvar". -----------------------------------------#
      thisvar = emean[[vnam]]
      #----- Find the number of rows and columns, and the axes. -------------------------#
      monaxis = sort(unique(datum$month[sel]))
      yraxis  = sort(unique(datum$year[sel]))
      nmon    = length(monaxis)
      nyear   = length(yraxis)
      #----- Save the meaningful months and years. --------------------------------------#
      monat   = 1:12
      monlab  = c("J","F","M","A","M","J","J","A","S","O","N","D")
      yrat    = pretty(yraxis)
      #----- Convert the vector data into a (month x year) array. -----------------------#
      vararr  = array(thisvar[sel],c(nmon,nyear))
      #----- Copy Decembers and Januaries to make the edges buffered. -------------------#
      january  = vararr[1,]
      january  = c(january,january[nyear],january[nyear])
      december = vararr[12,]
      december = c(december[1],december[1],december)
      #----- Bind first and last year to the array, to make the edges buffered. ---------#
      varbuff = cbind(vararr[,1],vararr,vararr[,nyear])
      varbuff = rbind(december,varbuff,january)
      #----- Expand the month and year axes so they match the buffered array. -----------#
      monaxis = c(min(monaxis)-1,monaxis,max(monaxis)+1)
      yraxis  = c(min(yraxis)-1,yraxis,max(yraxis)+1)
      vrange  = range(varbuff,na.rm=TRUE)
      vlevels = pretty(x=vrange,n=ncolsfc)
      vnlev   = length(vlevels)
      #----- Loop over output formats, opening one device per format. -------------------#
      for (o in 1:nout){
         fichier = paste(outdir,"/",vnam,"-",suffix,".",outform[o],sep="")
         if(outform[o] == "x11"){
            X11(width=size$width,height=size$height,pointsize=ptsz)
         }else if(outform[o] == "png"){
            png(filename=fichier,width=size$width*depth,height=size$height*depth
               ,pointsize=ptsz,res=depth)
         }else if(outform[o] == "eps"){
            postscript(file=fichier,width=size$width,height=size$height
                      ,pointsize=ptsz,paper=size$paper)
         }else if(outform[o] == "pdf"){
            pdf(file=fichier,onefile=FALSE
               ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
         }#end if
         letitre = paste(description," - ",lieu,sep="")
         lacle   = desc.unit(desc=NULL,unit=unit)
         par(par.user)
         sombreado(x=monaxis,y=yraxis,z=varbuff,levels=vlevels,nlevels=vnlev
                  ,colour.palette=get(vcscheme)
                  ,plot.title=title(main=letitre,xlab="Month",ylab="Year",cex.main=0.7)
                  ,key.title=title(main=lacle,cex.main=0.8)
                  ,useRaster=TRUE
                  ,plot.axes={axis(side=1,at=monat,labels=monlab)
                              axis(side=2,at=yrat)
                              if (fcgrid){
                                 for (yl in yrat){
                                    abline(h=yl,col=grid.colour,lty="dotted")
                                 } #end for yl
                                 for (ml in monat){
                                    abline(v=ml,col=grid.colour,lty="dotted")
                                 } #end for ml
                              }#end if fcgrid
                             }#end plot.axes
                  )
         #----- Close the device (x11 waits for a mouse click before closing). ----------#
         if (outform[o] == "x11"){
            locator(n=1)
            dev.off()
         }else{
            dev.off()
         }#end if
         dummy = clean.tmp()
      } #end for outform
   }#end if plotit
}#end for (v in 1:nsqueeze)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Plot the filled contour diagrams showing time of day and time. #
#---------------------------------------------------------------------------------------#
#----- Loop over "squeeze" variables: mean diurnal cycle as time x hour contours. --------#
for (v in 1:nsqueeze){
   #----- Retrieve variable information from the list. ----------------------------------#
   thisfillc   = squeeze[[v]]
   vnam        = thisfillc$vnam
   description = thisfillc$desc
   unit        = thisfillc$unit
   vcscheme    = thisfillc$col.scheme
   plotit      = thisfillc$fco.qmean
   if (plotit){
      #----------------------------------------------------------------------------------#
      #      Check if the directory exists.  If not, create it.                          #
      #----------------------------------------------------------------------------------#
      outdir = paste(outpref,"fillc_qmean",sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      cat(" + ",description," time series of diurnal cycle...","\n")
      #----- Load this variable into "vararr" (dimensions: time x hour of day). ---------#
      vararr = qmean[[vnam]]
      #----- Copy first and last hours of day to make the edges buffered. ---------------#
      firsthr = vararr[,1]
      firsthr = c(firsthr,firsthr[ntimes],firsthr[ntimes])
      lasthr  = vararr[,ndcycle]
      lasthr  = c(lasthr[1],lasthr[1],lasthr)
      #----- Bind first and last time to the array, to make the edges buffered. ---------#
      varbuff = rbind(vararr[1,],vararr,vararr[ntimes,])
      varbuff = cbind(lasthr,varbuff,firsthr)
      #----- Expand the hour-of-day and time axes so they match the buffered array. -----#
      hraxis   = seq(from=0,to=ndcycle+1,by=1) * 24 / ndcycle
      dwhen    = datum$tomonth[2]-datum$tomonth[1]
      whenaxis = c(datum$tomonth[1]-dwhen,datum$tomonth,datum$tomonth[ntimes]+dwhen)
      huplot   = pretty.time(whenaxis,n=8)
      vrange   = range(varbuff,na.rm=TRUE)
      vlevels  = pretty(x=vrange,n=ncolsfc)
      vnlev    = length(vlevels)
      #----- Loop over output formats, opening one device per format. -------------------#
      for (o in 1:nout){
         fichier = paste(outdir,"/",vnam,"-",suffix,".",outform[o],sep="")
         if(outform[o] == "x11"){
            X11(width=size$width,height=size$height,pointsize=ptsz)
         }else if(outform[o] == "png"){
            png(filename=fichier,width=size$width*depth,height=size$height*depth
               ,pointsize=ptsz,res=depth)
         }else if(outform[o] == "eps"){
            postscript(file=fichier,width=size$width,height=size$height
                      ,pointsize=ptsz,paper=size$paper)
         }else if(outform[o] == "pdf"){
            pdf(file=fichier,onefile=FALSE
               ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
         }#end if
         letitre = paste("Mean diurnal cycle \n ",description," - ",lieu,sep="")
         ley     = desc.unit(desc="Time of day",unit=untab$gmt)
         lacle   = desc.unit(desc=NULL        ,unit=unit)
         par(par.user)
         sombreado(x=whenaxis,y=hraxis,z=varbuff,levels=vlevels,nlevels=vnlev
                  ,colour.palette=get(vcscheme)
                  ,plot.title=title(main=letitre,ylab=ley,xlab="Time",cex.main=0.7)
                  ,key.title=title(main=lacle,cex.main=0.8)
                  ,useRaster=TRUE
                  ,plot.axes={axis(side=1,at=huplot$level,labels=huplot$labels)
                              # NOTE(review): 'uplot' is not defined in this section
                              # (the x axis uses 'huplot'); it is presumably defined
                              # earlier in this file -- verify before refactoring.
                              axis(side=2,at=uplot$levels,labels=uplot$labels)
                              if (fcgrid){
                                 abline(v=huplot$levels,h=uplot$levels
                                       ,col=grid.colour,lty="dotted")
                              }#end if fcgrid
                             }#end plot.axes
                  )
         #----- Close the device (x11 waits for a mouse click before closing). ----------#
         if (outform[o] == "x11"){
            locator(n=1)
            dev.off()
         }else{
            dev.off()
         }#end if
         dummy = clean.tmp()
      } #end for outform
   }#end if plotit
}#end for (v in 1:nsqueeze)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Plot the monthly boxplots. #
#---------------------------------------------------------------------------------------#
#----- Loop over "squeeze" variables: monthly box plots of the monthly means. ------------#
for (v in 1:nsqueeze){
   #----- Retrieve variable information from the list. ----------------------------------#
   thisbplot   = squeeze[[v]]
   vnam        = thisbplot$vnam
   description = thisbplot$desc
   unit        = thisbplot$unit
   plotit      = thisbplot$box.plot
   if (plotit){
      #----------------------------------------------------------------------------------#
      #      Check if the directory exists.  If not, create it.                          #
      #----------------------------------------------------------------------------------#
      outdir = paste(outpref,"boxplot",sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      cat(" + ",description," box plot...","\n")
      #----- Load this variable into "thisvar". -----------------------------------------#
      thisvar = emean[[vnam]]
      for (o in 1:nout){
         fichier = paste(outdir,"/",vnam,"-",suffix,".",outform[o],sep="")
         if (outform[o] == "x11"){
            X11(width=size$width,height=size$height,pointsize=ptsz)
         }else if(outform[o] == "png"){
            png(filename=fichier,width=size$width*depth,height=size$height*depth
               ,pointsize=ptsz,res=depth)
         }else if(outform[o] == "eps"){
            postscript(file=fichier,width=size$width,height=size$height
                      ,pointsize=ptsz,paper=size$paper)
         }else if(outform[o] == "pdf"){
            pdf(file=fichier,onefile=FALSE
               ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
         }#end if
         ylimit  = pretty.xylim(u=thisvar,fracexp=0.0,is.log=FALSE)
         letitre = paste(description,lieu,sep=" - ")
         ley     = desc.unit(desc=description,unit=unit)
         par(par.user)
         # NOTE(review): plot() dispatches to the box-plot method when 'mmonth' is a
         # factor; 'mmonth' is presumably a month factor defined earlier -- confirm.
         plot(mmonth,thisvar,main=letitre,ylim=ylimit,cex.main=0.7
             ,xlab="Time",ylab=ley,las=1)
         #----- Close the device (x11 waits for a mouse click before closing). ----------#
         if (outform[o] == "x11"){
            locator(n=1)
            dev.off()
         }else{
            dev.off()
         }#end if
         dummy = clean.tmp()
      } #end for outform
   }#end if
}#end for (v in 1:nsqueeze)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Plot the PDF of patch-level properties as a function of time. #
#---------------------------------------------------------------------------------------#
#----- Time series of patch-level probability density functions. -------------------------#
cat (" * Time series of PDF of properties by patch...","\n")
for (v in 1:nplotpatch){
   #----- Retrieve variable information from the list. ----------------------------------#
   thispatch   = plotpatch[[v]]
   vnam        = thispatch$vnam
   description = thispatch$desc
   unit        = thispatch$unit
   vcscheme    = thispatch$col.scheme
   plog        = thispatch$plog
   plotit      = thispatch$emean
   this        = patchpdf[[vnam]]$edensity
   #----- Skip the plot unless the density estimate has finite data in every axis. ------#
   plotit      = ( plotit && any(is.finite(this$x),na.rm=TRUE)
                          && any(is.finite(this$y),na.rm=TRUE)
                          && any(is.finite(this$z),na.rm=TRUE) )
   #--------------------------------------------------------------------------------------#
   #     Find levels, and expand PDF scale in case it is a constant.                      #
   #--------------------------------------------------------------------------------------#
   if (plog){
      vlevs = sort(unique(pretty.log(this$z,n=ncolsfc,forcelog=TRUE)))
   }else{
      vlevs = sort(unique(pretty(this$z,n=ncolsfc)))
   }#end if
   if (length(vlevs) == 1) vlevs = pretty.xylim(u=vlevs,fracexp=0.0,is.log=plog)
   #--------------------------------------------------------------------------------------#
   if (plotit){
      #----------------------------------------------------------------------------------#
      #      Check if the directory exists.  If not, create it.                          #
      #----------------------------------------------------------------------------------#
      outdir = paste(outpref,"patch_emean",sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      cat(" + PDF plot of ",description,"...","\n")
      #----- Loop over output formats, opening one device per format. -------------------#
      for (o in 1:nout){
         fichier = paste(outdir,"/",vnam,"-",suffix,".",outform[o],sep="")
         if (outform[o] == "x11"){
            X11(width=size$width,height=size$height,pointsize=ptsz)
         }else if(outform[o] == "png"){
            png(filename=fichier,width=size$width*depth,height=size$height*depth
               ,pointsize=ptsz,res=depth)
         }else if(outform[o] == "eps"){
            postscript(file=fichier,width=size$width,height=size$height
                      ,pointsize=ptsz,paper=size$paper)
         }else if(outform[o] == "pdf"){
            pdf(file=fichier,onefile=FALSE
               ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
         }#end if
         #----- Title on two lines.  FIX: the original " \ " (backslash-space) is an ----#
         #----- invalid escape sequence in R strings; "\n" matches the sibling plots. ---#
         letitre = paste("Density function of ",description," \n ",lieu,sep="")
         lex     = "Time"
         ley     = desc.unit(desc=description,unit=unit)
         #----------------------------------------------------------------------------------#
         #     Plot the PDF distribution.                                                  #
         #----------------------------------------------------------------------------------#
         par(par.user)
         sombreado( x              = this$x
                  , y              = this$y
                  , z              = this$z
                  , levels         = vlevs
                  , colour.palette = get(vcscheme)
                  , plot.title     = title(main=letitre,xlab=lex,ylab=ley,cex.main=0.7)
                  , key.title      = title(main="Density",cex.main=0.8)
                  , key.log        = plog
                  , useRaster      = TRUE
                  , plot.axes      = { axis( side   = 1
                                           , at     = whenplot8$levels
                                           , labels = whenplot8$labels
                                           , padj   = whenplot8$padj
                                           )#end axis
                                       axis( side   = 2
                                           , at     = pretty(this$y)
                                           , labels = NULL
                                           )#end axis
                                       if (fcgrid){
                                          abline( v   = whenplot8$levels
                                                , h   = pretty(this$y)
                                                , col = grid.colour
                                                , lty = "dotted"
                                                )#end abline
                                       }#end if fcgrid
                                     }#end plot.axes
                  )#end sombreado
         #----------------------------------------------------------------------------------#
         #     Close the device (x11 waits for a mouse click before closing).             #
         #----------------------------------------------------------------------------------#
         if (outform[o] == "x11"){
            locator(n=1)
            dev.off()
         }else{
            dev.off()
         }#end if
         dummy = clean.tmp()
      } #end for outform
   }#end if plotit
}#end for (v in 1:nplotpatch)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Plot the PDF of patch-level properties as a function of time. #
#---------------------------------------------------------------------------------------#
#----- Monthly patch-level probability density functions. --------------------------------#
cat (" * Monthly PDF of properties by patch...","\n")
for (v in 1:nplotpatch){
   #----- Retrieve variable information from the list. ----------------------------------#
   thispatch   = plotpatch[[v]]
   vnam        = thispatch$vnam
   description = thispatch$desc
   unit        = thispatch$unit
   vcscheme    = thispatch$col.scheme
   plog        = thispatch$plog
   plotit      = thispatch$mmean
   this        = patchpdf[[vnam]]$mdensity
   #----- Skip the plot unless the density estimate has finite data in every axis. ------#
   plotit      = ( plotit && any(is.finite(this$x),na.rm=TRUE)
                          && any(is.finite(this$y),na.rm=TRUE)
                          && any(is.finite(this$z),na.rm=TRUE) )
   if (plotit){
      #----------------------------------------------------------------------------------#
      #      Check if the directory exists.  If not, create it.                          #
      #----------------------------------------------------------------------------------#
      outdir = paste(outpref,"patch_mmean",sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      cat(" + PDF plot of ",description,"...","\n")
      #----- Find the month tick marks. -------------------------------------------------#
      monat  = 1:12
      monlab = c("J","F","M","A","M","J","J","A","S","O","N","D")
      #----- Loop over output formats, opening one device per format. -------------------#
      for (o in 1:nout){
         fichier = paste(outdir,"/",vnam,"-",suffix,".",outform[o],sep="")
         if(outform[o] == "x11"){
            X11(width=size$width,height=size$height,pointsize=ptsz)
         }else if(outform[o] == "png"){
            png(filename=fichier,width=size$width*depth,height=size$height*depth
               ,pointsize=ptsz,res=depth)
         }else if(outform[o] == "eps"){
            postscript(file=fichier,width=size$width,height=size$height
                      ,pointsize=ptsz,paper=size$paper)
         }else if(outform[o] == "pdf"){
            pdf(file=fichier,onefile=FALSE
               ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
         }#end if
         #----- Title on two lines.  FIX: the original " \ " (backslash-space) is an ----#
         #----- invalid escape sequence in R strings; "\n" matches the sibling plots. ---#
         letitre = paste("Density function of ",description," \n ",lieu,sep="")
         lex     = "Months"
         ley     = desc.unit(desc=description,unit=unit)
         #----------------------------------------------------------------------------------#
         #     Plot the PDF distribution.                                                  #
         #----------------------------------------------------------------------------------#
         par(par.user)
         sombreado( x              = this$x
                  , y              = this$y
                  , z              = this$z
                  , nlevels        = ncolsfc
                  , colour.palette = get(vcscheme)
                  , plot.title     = title(main=letitre,xlab=lex,ylab=ley,cex.main=0.7)
                  , key.title      = title(main="Density",cex.main=0.8)
                  , key.log        = plog
                  , useRaster      = TRUE
                  , plot.axes      = { axis( side   = 1
                                           , at     = monat
                                           , labels = monlab
                                           )#end axis
                                       axis( side   = 2
                                           , at     = pretty(this$y)
                                           , labels = NULL
                                           )#end axis
                                       if (fcgrid){
                                          abline( v   = monat
                                                , h   = pretty(this$y)
                                                , col = grid.colour
                                                , lty = "dotted"
                                                )#end abline
                                       }#end if fcgrid
                                     }#end plot.axes
                  )#end sombreado
         #----------------------------------------------------------------------------------#
         #     Close the device (x11 waits for a mouse click before closing).             #
         #----------------------------------------------------------------------------------#
         if (outform[o] == "x11"){
            locator(n=1)
            dev.off()
         }else{
            dev.off()
         }#end if
         dummy = clean.tmp()
      } #end for outform
   }#end if plotit
}#end for (v in 1:nplotpatch)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Bar plot by DBH class. #
#---------------------------------------------------------------------------------------#
#----- Bar plot by DBH class: one figure per selected month, stacked or beside by PFT. ---#
cat(" + Bar plot by DBH classes...","\n")
#----- Indices of the months selected for plotting (sasmonth set earlier). ---------------#
monbplot    = which(nummonths(datum$tomonth) %in% sasmonth)
nmonbplot   = length(monbplot)
#----- Keep only PFTs with non-zero population, and drop the "all PFTs" slot (npft+1). ---#
pftuse      = which(apply(X=szpft$nplant,MARGIN=3,FUN=sum,na.rm=TRUE) > 0)
pftuse      = pftuse[pftuse != (npft+1)]
npftuse     = length(pftuse)
pftname.use = pft$name  [pftuse]
pftcol.use  = pft$colour[pftuse]
for (v in 1:ntspftdbh){
   #----- Load settings for this variable. ----------------------------------------------#
   thisbar     = tspftdbh[[v]]
   vnam        = thisbar$vnam
   description = thisbar$desc
   unit        = thisbar$unit
   stacked     = thisbar$stack
   plotit      = thisbar$bar.plot
   plog        = thisbar$plog
   #----- Log scale cannot be stacked (bars would start at zero). -----------------------#
   if (plog){
      xylog   = "y"
      stacked = FALSE
   }else{
      xylog   = ""
   }#end if
   #--------------------------------------------------------------------------------------#
   #     Check whether to plot this variable.                                             #
   #--------------------------------------------------------------------------------------#
   if (plotit){
      cat(" - ",description,"...","\n")
      #----------------------------------------------------------------------------------#
      #     Retrieve the variable, and keep only the part that is usable: the selected  #
      # months, the PFTs in use, and all DBH classes except the "all sizes" slot.       #
      #----------------------------------------------------------------------------------#
      thisvnam                  = szpft[[vnam]][monbplot,,]
      thisvnam                  = thisvnam [,,pftuse]
      thisvnam                  = thisvnam [,-(ndbh+1),]
      thisvnam[is.na(thisvnam)] = 0.
      thiswhen                  = datum$tomonth[monbplot]
      #----------------------------------------------------------------------------------#
      #      Find the limits for the plots.  We use the same axis so it is easier to    #
      # compare different times.                                                         #
      #----------------------------------------------------------------------------------#
      if (stacked){
         ylimit = c(0,max(apply(X=thisvnam,MARGIN=c(1,2),FUN=sum,na.rm=TRUE)))
      }else{
         ylimit = range(x=thisvnam,na.rm=TRUE)
      }#end if
      ylimit = pretty.xylim(u=ylimit,fracexp=0.0,is.log=plog)
      #----------------------------------------------------------------------------------#
      #     Check if the directory exists.  If not, create it.                          #
      #----------------------------------------------------------------------------------#
      barplotdir = paste(outpref,"barplot_dbh",sep="/")
      if (! file.exists(barplotdir)) dir.create(barplotdir)
      outdir = paste(barplotdir,vnam,sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      #----------------------------------------------------------------------------------#
      #     Loop over all possible months.                                               #
      #----------------------------------------------------------------------------------#
      for (m in 1:nmonbplot){
         #----- Find which month/year we are plotting. ----------------------------------#
         cmonth    = sprintf("%2.2i",(nummonths(thiswhen[m])))
         cyear     = sprintf("%4.4i",(numyears(thiswhen[m])))
         mm        = as.numeric(cmonth)
         yy        = as.numeric(cyear)
         whentitle = paste(mon2mmm(mm,cap1=TRUE),cyear,sep="-")
         #----- Loop over output formats. -----------------------------------------------#
         for (o in 1:nout){
            #------ Open the plot device. -----------------------------------------------#
            fichier = paste(outdir,"/",vnam,"-",cyear,"-",cmonth,"-",suffix
                           ,".",outform[o],sep="")
            if (outform[o] == "x11"){
               X11(width=size$width,height=size$height,pointsize=ptsz)
            }else if(outform[o] == "png"){
               png(filename=fichier,width=size$width*depth,height=size$height*depth
                  ,pointsize=ptsz,res=depth)
            }else if(outform[o] == "eps"){
               postscript(file=fichier,width=size$width,height=size$height
                         ,pointsize=ptsz,paper=size$paper)
            }else if(outform[o] == "pdf"){
               pdf(file=fichier,onefile=FALSE
                  ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
            }#end if
            #------ Set up the title and axis labels. -----------------------------------#
            letitre = paste(lieu,"\n",description," - Time : ",whentitle,sep="")
            lexlab  = "DBH Classes"
            leylab  = desc.unit(desc=description,unit=unit)
            #------ Split window: main plot on top (weight 5), legend below (weight 1). -#
            par(par.user)
            layout(mat=rbind(2,1),heights=c(5,1))
            #------ Legend. -------------------------------------------------------------#
            par(mar=c(0.1,4.6,0.1,2.1))
            plot.new()
            plot.window(xlim=c(0,1),ylim=c(0,1))
            # NOTE(review): 'n.selpft' is not defined in this section; presumably a
            # file-level variable with the number of selected PFTs -- verify.
            legend( x      = "bottom"
                  , inset  = 0.0
                  , legend = pftname.use
                  , fill   = pftcol.use
                  , ncol   = min(3,pretty.box(n.selpft)$ncol)
                  , title  = expression(bold("Plant functional type"))
                  , cex    = cex.ptsz
                  , bg     = background
                  , xpd    = TRUE
                  )#end legend
            #----- Plot all monthly means together. -------------------------------------#
            par(mar=c(4.1,4.6,4.1,2.1))
            barplot(height=t(thisvnam[m,,]),names.arg=dbhnames[1:ndbh],width=1.0
                   ,main=letitre,xlab=lexlab,ylab=leylab,ylim=ylimit,legend.text=FALSE
                   ,beside=(! stacked),col=pftcol.use,log=xylog
                   ,border=grey.fg,xpd=FALSE,cex.main=cex.main,las=1)
            #----- Vertical separators between DBH groups (beside-style bars only). -----#
            if (plotgrid & (! stacked)){
               xgrid=0.5+(1:ndbh)*(1+npftuse)
               abline(v=xgrid,col=grid.colour,lty="solid")
            }#end if
            box()
            #----------------------------------------------------------------------------#
            #     Close the device (x11 waits for a mouse click before closing).        #
            #----------------------------------------------------------------------------#
            if (outform[o] == "x11"){
               locator(n=1)
               dev.off()
            }else{
               dev.off()
            }#end if
            dummy = clean.tmp()
         } #end for outform
      }#end for m
   }#end if
}#end for v
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Plot the 3-D size and age structure of various variables. #
#---------------------------------------------------------------------------------------#
#----- 3-D size-and-age-structure (SAS) lollipop plots, one figure per time step. --------#
for (v in 1:ntspftdbh){
   #----- Retrieve variable information from the list. ----------------------------------#
   thissas     = tspftdbh[[v]]
   vnam        = thissas$vnam
   description = thissas$desc
   unit        = thissas$i.unit
   plotit      = thissas$sas
   plog        = thissas$plog
   #----- If this variable is to be plotted, then go through this if block. -------------#
   if (plotit){
      cat(" + Size and age structure plot: ",description,"...","\n")
      #----------------------------------------------------------------------------------#
      #     Check if the directory exists.  If not, create it.                          #
      #----------------------------------------------------------------------------------#
      sasdir = paste(outpref,"sas",sep="/")
      if (! file.exists(sasdir)) dir.create(sasdir)
      outdir = paste(sasdir,vnam,sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      #----- Load this cohort-level variable into "varco". ------------------------------#
      varco = cohort[[vnam]]
      #----------------------------------------------------------------------------------#
      #     Loop over all times.  Names of cohort$age encode the time stamp.            #
      #----------------------------------------------------------------------------------#
      for (ww in names(cohort$age)){
         #----- Extract month and year from the time-stamp name (e.g. "yYYYYmMM"). ------#
         cmonth   = substring(ww,7,8)
         thisyear = substring(ww,2,5)
         mm       = as.numeric(cmonth)
         yy       = as.numeric(thisyear)
         #----- Retrieve variable list, age, DBH, and PFT for this time. ----------------#
         ageww = cohort$age [[ww]]
         #----- Shift non-positive ages so the log x axis is defined. -------------------#
         if (any(ageww <= 0,na.rm=TRUE)){
            minww = min(ageww,na.rm=TRUE)
            ageww = ageww - minww + 0.01
         }#end if
         dbhww = cohort$dbh   [[ww]]
         pftww = cohort$pft   [[ww]]
         varww = varco        [[ww]]
         popww = cohort$nplant[[ww]] * cohort$area[[ww]]
         #-------------------------------------------------------------------------------#
         #     We only plot the SAS figures when the polygon is not an absolute desert. #
         #-------------------------------------------------------------------------------#
         if (any (! is.na(varww))){
            #----------------------------------------------------------------------------#
            #      Find the range.  If the user wants the range to be fixed, then use   #
            # the global range, otherwise, simply use the range for this time.          #
            #----------------------------------------------------------------------------#
            if (sasfixlimits){
               xlimit  = pretty.xylim(u=unlist(cohort$age),fracexp=0.0,is.log=TRUE )
               ylimit  = pretty.xylim(u=unlist(cohort$dbh),fracexp=0.0,is.log=FALSE)
               zlimit  = pretty.xylim(u=unlist(varco)     ,fracexp=0.0,is.log=plog )
               popmin  = min  (unlist(cohort$nplant * cohort$area), na.rm=TRUE)
               popmax  = max  (unlist(cohort$nplant * cohort$area), na.rm=TRUE)
            }else{
               xlimit  = pretty.xylim(u=ageww  ,fracexp=0.0,is.log=TRUE )
               ylimit  = pretty.xylim(u=dbhww  ,fracexp=0.0,is.log=FALSE)
               zlimit  = pretty.xylim(u=varww  ,fracexp=0.0,is.log=plog )
               popmin  = min  (popww ,na.rm=TRUE)
               popmax  = max  (popww ,na.rm=TRUE)
            }#end if
            #----- Symbol size scales with log of population density. ------------------#
            cexww = cexmin + (cexmax - cexmin) * log(popww/popmin) / log(popmax/popmin)
            #----- Define the floor location (in the same units as zlimit here). -------#
            if ((zlimit[1] > 0) != (zlimit[2] > 0)){
               floor3d = 0.
            }else if (zlimit[1] > 0){
               floor3d = zlimit[1]
            }else{
               floor3d = zlimit[2]
            }#end if
            #----- Define the grid information for the 3-D plot.  Note that x/y/z ------#
            #----- limits are redefined below in (possibly log) transformed scale. -----#
            xlabels = pretty.log(xlimit,n=5)
            ylabels = pretty(ylimit,n=5)
            zlabels = if(plog){pretty.log(zlimit,n=5)}else{pretty(zlimit,n=5)}
            xat     = log(xlabels)
            yat     = ylabels
            zat     = if(plog){log(zlabels)}else{zlabels}
            xlimit  = range(x=xat)
            ylimit  = range(x=yat)
            zlimit  = range(x=zat)
            xfloor  = seq(from=xlimit[1],to=xlimit[2],length.out=16)
            yfloor  = seq(from=ylimit[1],to=ylimit[2],length.out=16)
            zfloor  = matrix(floor3d,nrow=length(xfloor),ncol=length(yfloor))
            #----- Expand the lines to make the lollipops: each cohort becomes a -------#
            #----- triple (floor, value, NA) so lines() draws disconnected stems. ------#
            ncohnow = length(varww)
            ageww   = rep(ageww,each=3)
            dbhww   = rep(dbhww,each=3)
            pftww   = rep(pftww,each=3)
            varww   = as.vector(rbind(rep(floor3d,times=ncohnow)
                                     ,varco[[ww]]
                                     ,rep(NA,times=ncohnow)))
            xww     = log(ageww)
            yww     = dbhww
            zww     = if(plog){log(varww)}else{varww}
            pchww   = rep(c(NA,16,NA),times=ncohnow)
            cexww   = rep(cexww,each=3)
            colww   = pft$colour[pftww]
            pftin   = sort(unique(cohort$pft[[ww]]))
            colleg  = pft$colour[pftin]
            pftleg  = pft$name  [pftin]
            #----------------------------------------------------------------------------#
            #     Plot annotation.                                                      #
            #----------------------------------------------------------------------------#
            letitre = paste(description," - ",lieu,
                            "\n Time :",mlist[mm],"/",thisyear,sep=" ")
            lexlab  = desc.unit(desc="Gap age",unit=untab$yr)
            leylab  = desc.unit(desc="DBH",unit=untab$cm)
            lezlab  = desc.unit(desc=description,unit=unit)
            #----- Loop over output formats. --------------------------------------------#
            for (o in 1:nout){
               #----- Open file. --------------------------------------------------------#
               fichier = paste(outdir,"/",vnam,"-",thisyear,"-",cmonth,"-",suffix
                              ,".",outform[o],sep="")
               if (outform[o] == "x11"){
                  X11(width=size$width,height=size$height,pointsize=ptsz)
               }else if(outform[o] == "png"){
                  png(filename=fichier,width=size$width*depth,height=size$height*depth
                     ,pointsize=ptsz,res=depth)
               }else if(outform[o] == "eps"){
                  postscript(file=fichier,width=size$width,height=size$height
                            ,pointsize=ptsz,paper=size$paper)
               }else if(outform[o] == "pdf"){
                  pdf(file=fichier,onefile=FALSE
                     ,width=size$width,height=size$height,pointsize=ptsz
                     ,paper=size$paper)
               }#end if
               #----- Split the domain into 2: plot on top, legend at the bottom. -------#
               par(par.user)
               layout(mat=rbind(2,1),heights=c(5,1))
               #----- Plot legend. ------------------------------------------------------#
               par(mar=c(0.1,0.1,0.1,0.1))
               plot.new()
               plot.window(xlim=c(0,1),ylim=c(0,1))
               legend( x      = "center"
                     , inset  = 0.0
                     , legend = pftleg
                     , fill   = colleg
                     , ncol   = min(4,pretty.box(length(pftleg))$ncol)
                     , title  = expression(bold("Plant functional type"))
                     , cex    = cex.ptsz
                     , xpd    = TRUE
                     )#end legend
               #----- Plot the 3-D floor surface (perspective projection). --------------#
               par(mar=c(1.1,1.1,4.1,1.1))
               pout = perspx( x         = xfloor
                            , y         = yfloor
                            , z         = zfloor
                            , xlim      = xlimit
                            , ylim      = ylimit
                            , zlim      = zlimit
                            , theta     = theta
                            , phi       = phi
                            , col       = gcol
                            , expand    = expz
                            , ticktype  = "detailed"
                            , border    = NA
                            , shade     = shade
                            , ltheta    = ltheta
                            , main      = letitre
                            , cex.main  = 0.8*cex.ptsz
                            , axes      = FALSE
                            )#end perspx
               #----- Add axes (labels are in original units, positions in log). --------#
               paxis3d(edge="X--",pmat=pout,at=xat,cex=0.9*cex.ptsz,labels=xlabels)
               paxis3d(edge="Y--",pmat=pout,at=yat,cex=0.9*cex.ptsz,labels=ylabels)
               paxis3d(edge="Z-+",pmat=pout,at=zat,cex=0.9*cex.ptsz,labels=zlabels)
               mtext3d(edge="X--",pmat=pout,labels=lexlab,cex=cex.ptsz,srt=theta+90)
               mtext3d(edge="Y--",pmat=pout,labels=leylab,cex=cex.ptsz,srt=theta)
               mtext3d(edge="Z-+",pmat=pout,labels=lezlab,cex=cex.ptsz,srt=-75)
               #----- Add the cohorts (stems then heads of the lollipops). --------------#
               lines (trans3d(x=xww,y=yww,z=zww,pmat=pout),type="l",col=grey.fg,lwd=2)
               points(trans3d(x=xww,y=yww,z=zww,pmat=pout),type="p",pch=pchww
                     ,col=colww,cex=cexww)
               #----- Close the device (x11 waits for a mouse click before closing). ----#
               if (outform[o] == "x11"){
                  locator(n=1)
                  dev.off()
               }else{
                  dev.off()
               }#end if
               dummy = clean.tmp()
            }#end for outform
         }#end if is.na(varww)
      }#end for ww
   }#end if
}#end for v
#---------------------------------------------------------------------------------------#
}#end for places
#==========================================================================================#
#==========================================================================================#
| /ED/Template/odyssey/Template/plot_monthly.r | no_license | femeunier/ED2 | R | false | false | 171,109 | r | #==========================================================================================#
#==========================================================================================#
#==========================================================================================#
#     Leave these commands at the beginning.  They will refresh the session.               #
#------------------------------------------------------------------------------------------#
rm(list=ls())
graphics.off()
#==========================================================================================#
#==========================================================================================#



#==========================================================================================#
#==========================================================================================#
#      Here is the user defined variable section.                                          #
#                                                                                          #
# NOTE(review): quoted tokens ("thispath","thisoutroot","thispoly") and bare tokens        #
# (thismontha, thisyeara, mydroughtmark, ...) are presumably placeholders substituted by   #
# an external job-preparation script -- confirm before sourcing this template directly.    #
#------------------------------------------------------------------------------------------#

#----- Paths. -----------------------------------------------------------------------------#
here           = "thispath"    # Current directory.
there          = "thatpath"    # Directory where analyses/history are kept.
srcdir         = "thisrscpath" # Source  directory.
outroot        = "thisoutroot" # Directory for figures
#------------------------------------------------------------------------------------------#


#----- Time options. ----------------------------------------------------------------------#
monthbeg       = thismontha   # First month to use
yearbeg        = thisyeara    # First year to consider
yearend        = thisyearz    # Maximum year to consider
reload.data    = TRUE         # Should I reload partially loaded data?
sasmonth.short = c(2,5,8,11)  # Months for SAS plots (short runs)
sasmonth.long  = 5            # Months for SAS plots (long runs)
nyears.long    = 15           # Runs longer than this are considered long runs.
n.density      = 256          # Number of density points
#------------------------------------------------------------------------------------------#


#----- Name of the simulations. -----------------------------------------------------------#
myplaces       = c("thispoly")
#------------------------------------------------------------------------------------------#


#----- Plot options. ----------------------------------------------------------------------#
outform        = thisoutform  # Formats for output file.  Supported formats are:
                              #   - "X11" - for printing on screen
                              #   - "eps" - for postscript printing
                              #   - "png" - for PNG printing
                              #   - "pdf" - for PDF printing
depth          = 96           # PNG resolution, in pixels per inch
paper          = "letter"     # Paper size, to define the plot shape
ptsz           = 17           # Font size.
lwidth         = 2.5          # Line width
plotgrid       = TRUE         # Should I plot the grid in the background?
sasfixlimits   = FALSE        # Use a fixed scale for size and age-structure
                              #    plots? (FALSE will set a suitable scale for
                              #    each plot)
fcgrid         = TRUE         # Include a grid on the filled contour plots?
ncolsfc        = 80           # Target number of colours for filled contour.
hovgrid        = TRUE         # Include a grid on the Hovmoller plots?
legwhere       = "topleft"    # Where should I place the legend?
inset          = 0.01         # Inset between legend and edge of plot region.
scalleg        = 0.40         # Expand y limits by this relative amount to fit
                              #    the legend
cex.main       = 0.8          # Scale coefficient for the title
theta          = 315.         # Azimuth for perspective projection
phi            = 30.          # Vertical angle for perspective projection
ltheta         = -210.        # Azimuth angle for light
shade          = 0.125        # Shade intensity
expz           = 0.5          # Expansion factor for Z axis
cexmin         = 0.5          # Minimum "head" size of the lollipop
cexmax         = 3.0          # Maximum "head" size of the lollipop
ylnudge        = 0.05         # Nudging factor for ylimit
ptype          = "l"          # Type of plot
ptyped         = "p"          # Type of plot
ptypeb         = "o"          # Type of plot
drought.mark   = mydroughtmark    # Put a background to highlight droughts?
drought.yeara  = mydroughtyeara   # First year that has drought
drought.yearz  = mydroughtyearz   # Last year that has drought
months.drought = mymonthsdrought  # Months with drought
ibackground    = mybackground     # Background settings (check load_everything.r)
#------------------------------------------------------------------------------------------#


#------ Miscellaneous settings. -----------------------------------------------------------#
slz.min             = -5.0         # The deepest depth that trees access water.
idbh.type           = myidbhtype   # Type of DBH class
                                   #  1 -- Every 10 cm until 100cm; > 100cm
                                   #  2 -- 0-10; 10-20; 20-35; 35-50; 50-70; > 70 (cm)
                                   #  3 -- 0-10; 10-35; 35-55; > 55 (cm)
klight              = myklight     # Weighting factor for maximum carbon balance
corr.growth.storage = mycorrection # Correction factor to be applied to growth and
                                   #    storage respiration
#------------------------------------------------------------------------------------------#



#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
#     NO NEED TO CHANGE ANYTHING BEYOND THIS POINT UNLESS YOU ARE DEVELOPING THE CODE...   #
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#



#----- Loading some packages and scripts (ED2 post-processing utilities). -----------------#
source(file.path(srcdir,"load.everything.r"))
#------------------------------------------------------------------------------------------#


#----- Set how many formats we must output. -----------------------------------------------#
outform = tolower(outform)
nout    = length (outform)
#------------------------------------------------------------------------------------------#


#----- Avoid unnecessary and extremely annoying beeps. ------------------------------------#
options(locatorBell=FALSE)
#------------------------------------------------------------------------------------------#


#----- Load observations (LBA Model Intercomparison Project reference data). --------------#
obsrfile = paste(srcdir,"LBA_MIP.v8.RData",sep="/")
load(file=obsrfile)
#------------------------------------------------------------------------------------------#


#----- Define plot window size ------------------------------------------------------------#
size = plotsize(proje=FALSE,paper=paper)
#------------------------------------------------------------------------------------------#


#---- Create the main output directory in case there is none. -----------------------------#
if (! file.exists(outroot)) dir.create(outroot)
#------------------------------------------------------------------------------------------#
for (place in myplaces){

   #----- Retrieve default information about this place and set up some variables. --------#
   thispoi = locations(where=place,here=there,yearbeg=yearbeg,yearend=yearend
                      ,monthbeg=monthbeg)
   inpref  = thispoi$pathin
   outmain = paste(outroot,place,sep="/")
   outpref = paste(outmain,"monthly",sep="/")
   lieu    = thispoi$lieu      # Site name used in plot titles.
   iata    = thispoi$iata
   suffix  = thispoi$iata      # Appended to every output file name.
   yeara   = thispoi$yeara
   yearz   = thispoi$yearz
   meszz   = thispoi$monz      # Presumably the last month of the last year -- TODO confirm.
   #---------------------------------------------------------------------------------------#


   #----- Create the directories in case they don't exist. --------------------------------#
   if (! file.exists(outmain)) dir.create(outmain)
   if (! file.exists(outpref)) dir.create(outpref)
   #---------------------------------------------------------------------------------------#


   #----- Decide how frequently the cohort-level variables should be saved. ---------------#
   if (yearend - yearbeg + 1 <= nyears.long){
      sasmonth   = sasmonth.short
      emean.line = TRUE
   }else{
      sasmonth   = sasmonth.long
      emean.line = FALSE
   }#end if
   #---------------------------------------------------------------------------------------#


   #---------------------------------------------------------------------------------------#
   #     Find the total number of months that can be loaded this time.                     #
   #---------------------------------------------------------------------------------------#
   ntimes     = (yearz-yeara-1)*12+meszz+(12-monthbeg+1)
   #---------------------------------------------------------------------------------------#


   #----- Print a banner to entertain the user. -------------------------------------------#
   cat(" + Post-processing output from ",lieu,"...","\n")
   #---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Make the RData file name, then we check whether we must read the files again #
# or use the stored RData. #
#---------------------------------------------------------------------------------------#
path.data = paste(here,place,"rdata_month",sep="/")
if (! file.exists(path.data)) dir.create(path.data)
ed22.rdata = paste(path.data,paste(place,"RData",sep="."),sep="/")
if (reload.data && file.exists(ed22.rdata)){
#----- Load the modelled dataset. ---------------------------------------------------#
cat(" - Loading previous session...","\n")
load(ed22.rdata)
tresume = datum$ntimes + 1
datum = update.monthly( new.ntimes = ntimes
, old.datum = datum
, montha = monthbeg
, yeara = yeara
, inpref = inpref
, slz.min = slz.min
)#end update.monthly
}else{
cat(" - Starting new session...","\n")
tresume = 1
datum = create.monthly( ntimes = ntimes
, montha = monthbeg
, yeara = yeara
, inpref = inpref
, slz.min = slz.min
)#end create.monthly
}#end if
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Make a list with the time span of each drought so we can plot rectangles showing #
# the drought. #
#---------------------------------------------------------------------------------------#
drought = list()
year = drought.yeara
ndrought = length(months.drought)
n = 0
overyear = months.drought[1] > months.drought[ndrought]
for (year in seq(from=drought.yeara,to=drought.yearz-as.integer(overyear),by=1)){
n = n + 1
#----- Define the beginning and the end of the drought. -----------------------------#
month.start = months.drought[1]
month.end = 1 + (months.drought[ndrought] %% 12)
year.start = year
year.end = year + as.integer(month.end == 1) + 1
drought.whena = chron(dates=paste(month.start,1,year.start,sep="/"))
drought.whenz = chron(dates=paste(month.end ,1,year.end ,sep="/"))
drought[[n]] = c(drought.whena,drought.whenz)
}#end for
#----- ndrought becomes the number of blocks with drought. -----------------------------#
ndrought = length(drought)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Check whether we have anything to update. #
#---------------------------------------------------------------------------------------#
complete = tresume > ntimes
#---------------------------------------------------------------------------------------#
#----- Copy some dimensions to scalars. ------------------------------------------------#
nzg = datum$nzg
nzs = datum$nzs
ndcycle = datum$ndcycle
isoilflg = datum$isoilflg
slz = datum$slz
slxsand = datum$slxsand
slxclay = datum$slxclay
ntext = datum$ntext
soil.prop = datum$soil.prop
dslz = datum$dslz
soil.depth = datum$soil.depth
soil.dry = datum$soil.dry
soil.poro = datum$soil.poro
ka = datum$ka
kz = datum$kz
#---------------------------------------------------------------------------------------#
   #---------------------------------------------------------------------------------------#
   #     Loop over all times in case there is anything new to be read.                     #
   #---------------------------------------------------------------------------------------#
   if (! complete){

      #------------------------------------------------------------------------------------#
      #     This function will read the files (only months tresume..ntimes are loaded).    #
      #------------------------------------------------------------------------------------#
      datum = read.q.files(datum=datum,ntimes=ntimes,tresume=tresume,sasmonth=sasmonth)
      #------------------------------------------------------------------------------------#


      #------ Save the data to the R object so the next run can resume from here. ---------#
      cat(" + Saving data to ",basename(ed22.rdata),"...","\n")
      save(datum,file=ed22.rdata)
      #------------------------------------------------------------------------------------#
   }#end if (! complete)
   #---------------------------------------------------------------------------------------#


   #---------------------------------------------------------------------------------------#
   #     Define a suitable scale for those time series that uses datum$tomonth...          #
   #---------------------------------------------------------------------------------------#
   whenplot6 = pretty.time(datum$tomonth,n=6)
   whenplot8 = pretty.time(datum$tomonth,n=8)
   #---------------------------------------------------------------------------------------#


   #---------------------------------------------------------------------------------------#
   #     Define a suitable scale for diurnal cycle...                                      #
   #---------------------------------------------------------------------------------------#
   thisday = seq(from=0,to=ndcycle,by=1) * 24 / ndcycle
   uplot = list()
   uplot$levels = c(0,4,8,12,16,20,24)
   uplot$n      = 7
   uplot$scale  = "hours"
   uplot$padj   = rep(0,times=uplot$n)
   #---------------------------------------------------------------------------------------#


   #---------------------------------------------------------------------------------------#
   #     Define a suitable scale for soil profile layers (log-depth axis)...               #
   #---------------------------------------------------------------------------------------#
   znice  = -pretty.log(-slz,n=8)
   znice  = sort(c(znice,slz[1],slz[nzg]))
   sel    = znice >= slz[1] & znice <= slz[nzg]
   znice  = znice[sel]
   zat    = -log(-znice)
   nznice = length(znice)
   znice  = sprintf("%.2f",znice)
   #---------------------------------------------------------------------------------------#


   #---------------------------------------------------------------------------------------#
   #     Define a suitable scale for monthly means...                                      #
   #---------------------------------------------------------------------------------------#
   montmont  = seq(from=1,to=12,by=1)
   mplot = list()
   mplot$levels = montmont
   mplot$labels = capwords(mon2mmm(montmont))
   mplot$n      = 12
   mplot$scale  = "months"
   mplot$padj   = rep(0,times=mplot$n)
   #---------------------------------------------------------------------------------------#


   #----- Make some shorter versions of some variables. -----------------------------------#
   mfac   = datum$month
   emean  = datum$emean
   emsqu  = datum$emsqu
   qmean  = datum$qmean
   qmsqu  = datum$qmsqu
   szpft  = datum$szpft
   lu     = datum$lu
   patch  = datum$patch
   cohort = datum$cohort
   #---------------------------------------------------------------------------------------#
   #---------------------------------------------------------------------------------------#
   #     Find the mean and standard deviation.   For variables for which we did not track  #
   # the mean sum of squares, the standard deviation is just the standard deviation of the #
   # means, otherwise we convert the mean sum of squares:                                  #
   #              ____________________          _____________________________________      #
   #             / SUM_i[X_i - Xm]^2           /  / SUM_i[X_i^2]         \      1          #
   # sigma = \  /  ------------------   =  \  /  |  ------------  - Xm^2 | ---------       #
   #          \/        N - 1               \/    \      N               /  1 - 1/N        #
   #                                                                                       #
   # srnorm1 is the square root of 1 / (1 - 1/N).                                          #
   #     Find the standard deviation.                                                      #
   #---------------------------------------------------------------------------------------#
   cat (" - Finding the monthly means...","\n")
   srnorm1 = sqrt(1./(1. - 1. / datum$montable))
   srnorm1[!is.finite(srnorm1)] = 0.
   mmean = list()
   msdev = list()
   for (vname in names(emean)){
      if (vname %in% names(emsqu)){
         has.emsqu = any(is.finite(emsqu[[vname]]))
      }else{
         has.emsqu = FALSE
      }#end if

      #------------------------------------------------------------------------------------#
      #     Soil variables are multi-dimensional.  Use qapply.  Otherwise, check whether   #
      # the mean sum of squares is available or not.                                       #
      #------------------------------------------------------------------------------------#
      if (vname %in% c("soil.temp","soil.water","soil.mstpot","soil.extracted")){
         mmean[[vname]] = qapply(X=emean[[vname]], INDEX=mfac, DIM=1, FUN=mean, na.rm=TRUE)
         msdev[[vname]] = qapply(X=emean[[vname]], INDEX=mfac, DIM=1, FUN=sd  , na.rm=TRUE)
      }else if (has.emsqu){
         mmean[[vname]] = tapply(X=emean[[vname]], INDEX=mfac, FUN=mean, na.rm=TRUE)
         mmsqu          = tapply(X=emsqu[[vname]], INDEX=mfac, FUN=mean, na.rm=TRUE)
         msdev[[vname]] = sqrt  ( mmsqu - mmean[[vname]]^ 2 ) * srnorm1
      }else{
         mmean[[vname]] = tapply(X=emean[[vname]], INDEX=mfac, FUN=mean, na.rm=TRUE)
         msdev[[vname]] = tapply(X=emean[[vname]], INDEX=mfac, FUN=sd  , na.rm=TRUE)
      }#end if
      #------------------------------------------------------------------------------------#


      #----- Fix the bad data (sdev of a single sample is set to zero, not NA). -----------#
      bad.mmean = ! is.finite(mmean[[vname]])
      bad.msdev = ! is.finite(msdev[[vname]])
      mmean[[vname]][bad.mmean] = NA
      msdev[[vname]][bad.msdev] = 0.
      #------------------------------------------------------------------------------------#
   }#end for
   #---------------------------------------------------------------------------------------#


   #---------------------------------------------------------------------------------------#
   #     Here we find the Mean diurnal cycle for each month, then compute the standard     #
   # deviation, similar to the monthly mean.                                               #
   #---------------------------------------------------------------------------------------#
   cat (" - Aggregating the monthly mean of the diurnal cycle...","\n")
   umean = list()
   usdev = list()
   for (vname in names(qmean)){
      #------------------------------------------------------------------------------------#
      #     Soil variables are multi-dimensional.  Use qapply.  Otherwise, check whether   #
      # the mean sum of squares is available or not.                                       #
      #------------------------------------------------------------------------------------#
      if (vname %in% names(qmsqu)){
         umean[[vname]] = qapply(X=qmean[[vname]], INDEX=mfac, DIM=1,FUN=mean, na.rm=TRUE)
         umsqu          = qapply(X=qmsqu[[vname]], INDEX=mfac, DIM=1,FUN=mean, na.rm=TRUE)
         usdev[[vname]] = sqrt  ( umsqu - umean[[vname]]^ 2 ) * srnorm1
      }else{
         umean[[vname]] = qapply(X=qmean[[vname]], INDEX=mfac, DIM=1,FUN=mean, na.rm=TRUE)
         usdev[[vname]] = qapply(X=qmean[[vname]], INDEX=mfac, DIM=1,FUN=sd  , na.rm=TRUE)
      }#end if
      #------------------------------------------------------------------------------------#


      #----- Fix the bad data. ------------------------------------------------------------#
      bad.umean = ! is.finite(umean[[vname]])
      bad.usdev = ! is.finite(usdev[[vname]])
      umean[[vname]][bad.umean] = NA
      usdev[[vname]][bad.usdev] = 0.
      #------------------------------------------------------------------------------------#
   }#end for
   #---------------------------------------------------------------------------------------#


   #---------------------------------------------------------------------------------------#
   #     Estimate NPP and NEE standard deviation.  Each is a quadrature sum of the         #
   # component deviations, which implicitly assumes the component errors are independent.  #
   #---------------------------------------------------------------------------------------#
   usdev$nee  = sqrt(usdev$cflxca^2     + usdev$cflxst^2                   )
   usdev$reco = sqrt(usdev$plant.resp^2 + usdev$het.resp^2                 )
   usdev$evap = sqrt(usdev$wflxgc^2     + usdev$wflxlc^2 + usdev$wflxwc^2  )
   #---------------------------------------------------------------------------------------#


   #---------------------------------------------------------------------------------------#
   #     Remove all elements of the DBH/PFT class that do not have a single valid cohort   #
   # at any given time.                                                                    #
   #---------------------------------------------------------------------------------------#
   empty = is.na(szpft$nplant) | szpft$nplant == 0
   for (vname in names(szpft)) szpft[[vname]][empty] = NA
   #---------------------------------------------------------------------------------------#


   #---------------------------------------------------------------------------------------#
   #     Replace the mortality and recruitment exponential rates by the "interests" rates, #
   # i.e. convert instantaneous (exponential) rates into percent per year.                 #
   #---------------------------------------------------------------------------------------#
   szpft$mort        = 100. * (1.0 - exp(- szpft$mort       )      )
   szpft$dimort      = 100. * (1.0 - exp(- szpft$dimort     )      )
   szpft$ncbmort     = 100. * (1.0 - exp(- szpft$ncbmort    )      )
   szpft$recrpft     = 100. * (      exp(  szpft$recr       ) - 1.0)
   szpft$agb.mort    = 100. * (1.0 - exp(- szpft$agb.mort   )      )
   szpft$agb.dimort  = 100. * (1.0 - exp(- szpft$agb.dimort )      )
   szpft$agb.ncbmort = 100. * (1.0 - exp(- szpft$agb.ncbmort)      )
   szpft$agb.recrpft = 100. * (      exp(  szpft$agb.recr   ) - 1.0)
   szpft$bsa.mort    = 100. * (1.0 - exp(- szpft$bsa.mort   )      )
   szpft$bsa.dimort  = 100. * (1.0 - exp(- szpft$bsa.dimort )      )
   szpft$bsa.ncbmort = 100. * (1.0 - exp(- szpft$bsa.ncbmort)      )
   szpft$bsa.recrpft = 100. * (      exp(  szpft$bsa.recr   ) - 1.0)
   #---------------------------------------------------------------------------------------#
   #---------------------------------------------------------------------------------------#
   #     Find the patch density function for all patch-level data.                         #
   #---------------------------------------------------------------------------------------#
   cat (" - Finding the distribution function of patch properties...","\n")
   patchpdf = list()
   for (pp in 1:nplotpatch){
      this        = plotpatch[[pp]]
      vname       = this$vnam
      col.scheme  = get(this$col.scheme)(n=ncolsfc)   # col.scheme holds a palette name.
      emean.area  = patch$area
      emean.vname = patch[[vname]]
      mmean.area  = tapply(X=emean.area ,INDEX=mfac,FUN=unlist)
      mmean.vname = tapply(X=emean.vname,INDEX=mfac,FUN=unlist)

      #----- Find the range for which we find the density function. -----------------------#
      low.vname   = min(unlist(emean.vname),na.rm=TRUE)
      high.vname  = max(unlist(emean.vname),na.rm=TRUE)
      #------------------------------------------------------------------------------------#


      #----- Find the density function for each time. -------------------------------------#
      edfun.now = mapply( FUN      = density.safe
                        , x        = emean.vname
                        , weights  = emean.area
                        , MoreArgs = list(n=n.density,from=low.vname,to=high.vname)
                        )#end mapply
      mdfun.now = mapply( FUN      = density.safe
                        , x        = mmean.vname
                        , weights  = mmean.area
                        , MoreArgs = list(n=n.density,from=low.vname,to=high.vname)
                        )#end mapply
      #------------------------------------------------------------------------------------#


      #----- Save the density function (time series axis). --------------------------------#
      edfun   = list()
      edfun$x = chron(datum$when)
      edfun$y = seq(from=low.vname,to=high.vname,length.out=n.density)
      edfun$z = t(sapply(X=edfun.now["y",],FUN=cbind))
      #------------------------------------------------------------------------------------#


      #----- Save the density function (mean annual cycle axis). --------------------------#
      mdfun   = list()
      mdfun$x = montmont
      mdfun$y = seq(from=low.vname,to=high.vname,length.out=n.density)
      mdfun$z = t(sapply(X=mdfun.now["y",],FUN=cbind))
      #------------------------------------------------------------------------------------#


      #----- Remove tiny values (even with log scale, tiny values are very hard to see). --#
      bye          = is.finite(edfun$z) & edfun$z < 1.e-6 * max(unlist(edfun$z),na.rm=TRUE)
      edfun$z[bye] = NA
      #------------------------------------------------------------------------------------#


      #----- Remove tiny values (even with log scale, tiny values are very hard to see). --#
      bye          = is.finite(mdfun$z) & mdfun$z < 1.e-6 * max(unlist(mdfun$z),na.rm=TRUE)
      mdfun$z[bye] = NA
      #------------------------------------------------------------------------------------#

      patchpdf[[vname]] = list(edensity=edfun,mdensity=mdfun)
   }#end for
   #---------------------------------------------------------------------------------------#


   #----- Find which PFTs, land uses and transitions we need to consider ------------------#
   pftave  = apply( X      = szpft$agb[,ndbh+1,]
                  , MARGIN = 2
                  , FUN    = mean
                  , na.rm  = TRUE
                  )#end apply
   luave   = apply( X      = lu$agb
                  , MARGIN = 2
                  , FUN    = mean
                  , na.rm  = TRUE
                  )#end apply
   distave = apply(X=lu$dist,MARGIN=c(2,3),FUN=mean)
   selpft  = is.finite(pftave ) & pftave  > 0.        # PFTs with some biomass.
   sellu   = is.finite(luave  ) & luave   > 0.        # Land uses with some biomass.
   seldist = is.finite(distave) & distave > 0.        # Transitions that actually occur.
   n.selpft  = sum(selpft )
   n.sellu   = sum(sellu  )
   n.seldist = sum(seldist)
   #---------------------------------------------------------------------------------------#



   #=======================================================================================#
   #=======================================================================================#
   #=======================================================================================#
   #      Plotting section begins here...                                                  #
   #---------------------------------------------------------------------------------------#
   cat (" - Plotting figures...","\n")
   #---------------------------------------------------------------------------------------#


   #---------------------------------------------------------------------------------------#
   #      Time series by PFT.                                                              #
   #---------------------------------------------------------------------------------------#
   for (v in 1:ntspftdbh){
      thistspft   = tspftdbh[[v]]
      vnam        = thistspft$vnam
      description = thistspft$desc
      unit        = thistspft$e.unit
      plog        = thistspft$plog
      plotit      = thistspft$pft

      #----- Check whether the user wants to have this variable plotted. ------------------#
      if (plotit && any(selpft)){

         #---------------------------------------------------------------------------------#
         #     Check whether the time series directory exists.  If not, create it.         #
         #---------------------------------------------------------------------------------#
         outdir  = paste(outpref,"tspft",sep="/")
         if (! file.exists(outdir)) dir.create(outdir)
         cat(" + ",description," time series for all PFTs...","\n")

         #----- Load variable (ndbh+1 is the "all DBH classes" aggregate slot). -----------#
         thisvar = szpft[[vnam]][,ndbh+1,]
         if (plog){
            #----- Eliminate non-positive values in case it is a log plot. ----------------#
            thisvar[thisvar <= 0] = NA
         }#end if
         #---------------------------------------------------------------------------------#

         #----- Loop over output formats. -------------------------------------------------#
         for (o in 1:nout){
            #----- Open file. -------------------------------------------------------------#
            fichier = paste(outdir,"/",vnam,"-",suffix,".",outform[o],sep="")
            if(outform[o] == "x11"){
               X11(width=size$width,height=size$height,pointsize=ptsz)
            }else if(outform[o] == "png"){
               png(filename=fichier,width=size$width*depth,height=size$height*depth
                  ,pointsize=ptsz,res=depth)
            }else if(outform[o] == "eps"){
               postscript(file=fichier,width=size$width,height=size$height
                         ,pointsize=ptsz,paper=size$paper)
            }else if(outform[o] == "pdf"){
               pdf(file=fichier,onefile=FALSE
                  ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
            }#end if
            #------------------------------------------------------------------------------#


            #------------------------------------------------------------------------------#
            #     Find the limit, make some room for the legend, and in case the field is  #
            # a constant, nudge the limits so the plot command will not complain.          #
            #------------------------------------------------------------------------------#
            xlimit = pretty.xylim(u=as.numeric(datum$tomonth),fracexp=0.0,is.log=FALSE)
            ylimit = pretty.xylim(u=thisvar[,selpft]         ,fracexp=0.0,is.log=plog )
            if (plog){
               #----- ydrought overshoots the y range so drought rectangles fill panel. ---#
               xylog    = "y"
               ydrought = c( exp(ylimit[1] * sqrt(ylimit[1]/ylimit[2]))
                           , exp(ylimit[2] * sqrt(ylimit[2]/ylimit[1]))
                           )#end c
            }else{
               xylog    = ""
               ydrought = c(ylimit[1] - 0.5 * diff(ylimit), ylimit[2] + 0.5 * diff(ylimit))
            }#end if
            #------------------------------------------------------------------------------#


            #----- Plot settings. ---------------------------------------------------------#
            letitre = paste(description,lieu,sep=" - ")
            ley     = desc.unit(desc=description,unit=unit)
            cols    = pft$colour[selpft]
            legs    = pft$name  [selpft]
            #------------------------------------------------------------------------------#


            #------------------------------------------------------------------------------#
            #      Split the plot into two windows (legend strip below the main panel).    #
            #------------------------------------------------------------------------------#
            par(par.user)
            layout(mat=rbind(2,1),heights=c(5,1))
            #------------------------------------------------------------------------------#


            #------------------------------------------------------------------------------#
            #     First plot: legend.                                                      #
            #------------------------------------------------------------------------------#
            par(mar=c(0.1,4.6,0.1,2.1))
            plot.new()
            plot.window(xlim=c(0,1),ylim=c(0,1))
            legend( x      = "bottom"
                  , inset  = 0.0
                  , legend = legs
                  , col    = cols
                  , lwd    = lwidth
                  , ncol   = min(pretty.box(n.selpft)$ncol,3)
                  , title  = expression(bold("Plant Functional Type"))
                  )#end legend
            #------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# Main plot. #
#------------------------------------------------------------------------------#
par(mar=c(4.1,4.6,4.1,2.1))
plot.new()
plot.window(xlim=xlimit,ylim=ylimit,log=xylog)
axis(side=1,at=whenplot8$levels,labels=whenplot8$labels,padj=whenplot8$padj)
axis(side=2,las=1)
box()
title(main=letitre,xlab="Year",ylab=ley,cex.main=0.7)
if (drought.mark){
for (n in 1:ndrought){
rect(xleft = drought[[n]][1],ybottom = ydrought[1]
,xright = drought[[n]][2],ytop = ydrought[2]
,col = grid.colour,border=NA)
}#end for
}#end if
#----- Plot grid. -------------------------------------------------------------#
if (plotgrid){
abline(v=whenplot8$levels,h=axTicks(side=2),col=grid.colour,lty="solid")
}#end if
#----- Plot lines. ------------------------------------------------------------#
for (n in 1:(npft+1)){
if (selpft[n]){
lines(datum$tomonth,thisvar[,n],type="l",col=pft$colour[n],lwd=lwidth)
}#end if
}#end for
            #------------------------------------------------------------------------------#


            #----- Close the device (X11 waits for a mouse click before closing). ---------#
            if (outform[o] == "x11"){
               locator(n=1)
               dev.off()
            }else{
               dev.off()
            }#end if
            dummy=clean.tmp()
            #------------------------------------------------------------------------------#
         } #end for outform
      }#end if (tseragbpft)
   } #end for tseries
   #---------------------------------------------------------------------------------------#


   #---------------------------------------------------------------------------------------#
   #      Time series by DBH, by PFT.                                                      #
   #---------------------------------------------------------------------------------------#
   #----- Find the PFTs to plot (drop the "all PFTs" aggregate slot npft+1). --------------#
   pftuse  = which(apply(X=szpft$nplant,MARGIN=3,FUN=sum,na.rm=TRUE) > 0)
   pftuse  = pftuse[pftuse != (npft+1)]
for (v in 1:ntspftdbh){
thistspftdbh = tspftdbh[[v]]
vnam = thistspftdbh$vnam
description = thistspftdbh$desc
unit = thistspftdbh$e.unit
plog = thistspftdbh$plog
plotit = thistspftdbh$pftdbh
#----- Load variable ----------------------------------------------------------------#
thisvar = szpft[[vnam]]
if (plog){
xylog="y"
badlog = is.finite(thisvar) & thisvar <= 0
thisvar[badlog] = NA
}else{
xylog=""
}#end if
#----- Check whether the user wants to have this variable plotted. ------------------#
if (plotit && length(pftuse) > 0 && any(is.finite(thisvar))){
#---------------------------------------------------------------------------------#
# Check whether the time series directory exists. If not, create it. #
#---------------------------------------------------------------------------------#
outdir = paste(outpref,"tsdbh",sep="/")
if (! file.exists(outdir)) dir.create(outdir)
outvar = paste(outdir,vnam,sep="/")
if (! file.exists(outvar)) dir.create(outvar)
#---------------------------------------------------------------------------------#
cat(" + ",description," time series for DBH class...","\n")
#---------------------------------------------------------------------------------#
# Find the limit, make some room for the legend, and in case the field is a #
# constant, nudge the limits so the plot command will not complain. #
#---------------------------------------------------------------------------------#
xlimit = pretty.xylim(u=as.numeric(datum$tomonth),fracexp=0.0,is.log=FALSE)
ylimit = pretty.xylim(u=thisvar[,,pftuse] ,fracexp=0.0,is.log=plog )
if (plog){
xylog = "y"
ydrought = c( exp(ylimit[1] * sqrt(ylimit[1]/ylimit[2]))
, exp(ylimit[2] * sqrt(ylimit[2]/ylimit[1]))
)#end c
}else{
xylog = ""
ydrought = c(ylimit[1] - 0.5 * diff(ylimit), ylimit[2] + 0.5 * diff(ylimit))
}#end if
#---------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------#
# Loop over plant functional types. #
#---------------------------------------------------------------------------------#
for (p in pftuse){
pftlab = paste("pft-",sprintf("%2.2i",p),sep="")
cat(" - ",pft$name[p],"\n")
#----- Loop over output formats. ----------------------------------------------#
for (o in 1:nout){
#----- Open file. ----------------------------------------------------------#
fichier = paste(outvar,"/",vnam,"-",pftlab,"-",suffix,".",outform[o],sep="")
if(outform[o] == "x11"){
X11(width=size$width,height=size$height,pointsize=ptsz)
}else if(outform[o] == "png"){
png(filename=fichier,width=size$width*depth,height=size$height*depth
,pointsize=ptsz,res=depth)
}else if(outform[o] == "eps"){
postscript(file=fichier,width=size$width,height=size$height
,pointsize=ptsz,paper=size$paper)
}else if(outform[o] == "pdf"){
pdf(file=fichier,onefile=FALSE
,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
}#end if
#---------------------------------------------------------------------------#
#----- Plot annotation. ---------------------------------------------------#
letitre = paste(description,pft$name[p],lieu,sep=" - ")
ley = desc.unit(desc=description,unit=unit)
#---------------------------------------------------------------------------#
#---------------------------------------------------------------------------#
# Split the plot into two windows. #
#---------------------------------------------------------------------------#
par(par.user)
layout(mat=rbind(2,1),heights=c(5,1))
#---------------------------------------------------------------------------#
#---------------------------------------------------------------------------#
# First plot: legend. #
#---------------------------------------------------------------------------#
par(mar=c(0.1,4.6,0.1,2.1))
plot.new()
plot.window(xlim=c(0,1),ylim=c(0,1))
legend( x = "bottom"
, inset = 0.0
, bg = background
, legend = dbhnames
, col = dbhcols
, ncol = min(pretty.box(ndbh+1)$ncol,3)
, title = expression(bold("DBH class"))
, lwd = lwidth
)#end legend
#---------------------------------------------------------------------------#
#---------------------------------------------------------------------------#
# Main plot. #
#---------------------------------------------------------------------------#
par(mar=c(4.1,4.6,4.1,2.1))
plot.new()
plot.window(xlim=xlimit,ylim=ylimit,log=xylog)
axis(side=1,at=whenplot8$levels,labels=whenplot8$labels,padj=whenplot8$padj)
axis(side=2,las=1)
box()
title(main=letitre,xlab="Year",ylab=ley,cex.main=0.7)
if (drought.mark){
for (n in 1:ndrought){
rect(xleft = drought[[n]][1],ybottom = ydrought[1]
,xright = drought[[n]][2],ytop = ydrought[2]
,col = grid.colour,border=NA)
}#end for
}#end if
#----- Plot grid. ----------------------------------------------------------#
if (plotgrid){
abline(v=whenplot8$levels,h=axTicks(side=2),col=grid.colour,lty="solid")
}#end if
#----- Plot lines. ---------------------------------------------------------#
for (d in seq(from=1,to=ndbh+1,by=1)){
lines(datum$tomonth,thisvar[,d,p],type="l",col=dbhcols[d],lwd=lwidth)
}#end for
#---------------------------------------------------------------------------#
#----- Close the device. ---------------------------------------------------#
if (outform[o] == "x11"){
locator(n=1)
dev.off()
}else{
dev.off()
}#end if
dummy=clean.tmp()
#---------------------------------------------------------------------------#
}#end for outform
#------------------------------------------------------------------------------#
}#end for (p in pftuse)
#---------------------------------------------------------------------------------#
}#end if (tseragbpft)
#------------------------------------------------------------------------------------#
} #end for tseries
#---------------------------------------------------------------------------------------#
#      Plot the comparison between observations and model: monthly-mean time series     #
#  restricted to the period where the two data sets overlap.  Output goes to            #
#  <outpref>/compemean/.                                                                #
#---------------------------------------------------------------------------------------#
cat(" + Comparisons of time series (model vs. observations)...","\n")
for (cc in 1:ncompmodel){
   #----- Retrieve variable information from the list. ---------------------------------#
   compnow = compmodel[[cc]]
   vname = compnow$vnam
   description = compnow$desc
   unit = compnow$unit
   lcolours = compnow$colour
   llwd = compnow$lwd
   ltype = compnow$type
   plog = compnow$plog
   legpos = compnow$legpos
   plotit = compnow$emean
   #----- Check whether there are observations for this particular site. ---------------#
   # Sites without their own observation object borrow data from a nearby tower;
   # everything else maps to the default "obs.<iata>" name.
   if (iata == "mao" || iata == "bdf"){
      obsnow = "obs.m34"
   }else if(iata == "stm"){
      obsnow = "obs.s67"
   }else if(iata == "rao"){
      obsnow = "obs.pdg"
   }else if(iata == "jpr"){
      obsnow = "obs.fns"
   }else if(iata == "btr"){
      obsnow = "obs.s77"
   }else{
      obsnow = paste("obs.",iata,sep="")
   }#end if
   #------------------------------------------------------------------------------------#
   # Last check to see if we should plot it or not: the observation object must #
   # exist in this session. #
   #------------------------------------------------------------------------------------#
   plotit = plotit && obsnow %in% ls()
   if (plotit){
      thisobs = get(obsnow)
      obswhen = thisobs$tomonth
      # Keep only model months that fall inside the observed period.
      sel = datum$tomonth >= min(obswhen) & datum$tomonth <= max(obswhen)
      plotit = any(sel)
   }#end if
   #------------------------------------------------------------------------------------#
   #------------------------------------------------------------------------------------#
   # Enter here only if there is any overlap of time between observations and #
   # model. #
   #------------------------------------------------------------------------------------#
   if (plotit){
      #---------------------------------------------------------------------------------#
      # Copy the observations to a scratch variable. #
      #---------------------------------------------------------------------------------#
      mnvar = paste("emean",vname,sep=".")
      obsmean = thisobs[[mnvar]]
      #---------------------------------------------------------------------------------#
      # Check whether the time series directory exists. If not, create it. #
      #---------------------------------------------------------------------------------#
      outdir = paste(outpref,"compemean",sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      cat(" - ",description," comparison...","\n")
      #----- Restrict the model series to the overlapping window. ----------------------#
      thiswhen = datum$tomonth [sel]
      thismean = emean[[vname]][sel]
      #----- Find the plot range from both series combined. ----------------------------#
      xlimit = range(thiswhen)
      ylimit = pretty.xylim(u=c(thismean,obsmean),fracexp=0.0,is.log=FALSE)
      #----- Find the nice scale for time. ---------------------------------------------#
      whenplote = pretty.time(obswhen,n=8)
      #----- Plot annotation. ----------------------------------------------------------#
      letitre = paste(description," - ",lieu,"\n","Monthly mean",sep="")
      ley = desc.unit(desc=description,unit=unit)
      #----- Loop over formats. --------------------------------------------------------#
      for (o in 1:nout){
         fichier = paste(outdir,"/",vname,".",outform[o],sep="")
         if(outform[o] == "x11"){
            X11(width=size$width,height=size$height,pointsize=ptsz)
         }else if(outform[o] == "png"){
            png(filename=fichier,width=size$width*depth,height=size$height*depth
               ,pointsize=ptsz,res=depth)
         }else if(outform[o] == "eps"){
            postscript(file=fichier,width=size$width,height=size$height
                      ,pointsize=ptsz,paper=size$paper)
         }else if(outform[o] == "pdf"){
            pdf(file=fichier,onefile=FALSE
               ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
         }#end if
         #----- Split window into two (bottom strip holds the legend). -----------------#
         par(par.user)
         layout(mat=rbind(2,1),heights=c(5,1))
         #----- First plot: the legend. ------------------------------------------------#
         par(mar=c(0.1,4.6,0.1,2.1))
         plot.new()
         plot.window(xlim=c(0,1),ylim=c(0,1))
         legend(x="bottom",inset=0.0,legend=c("Model","Observation")
               ,col=lcolours,lwd=llwd,cex=cex.ptsz,pch=16,xpd=TRUE)
         #----- Second panel: the actual plot. -----------------------------------------#
         par(mar=c(4.1,4.6,4.1,2.1))
         plot.new()
         plot.window(xlim=xlimit,ylim=ylimit,log=plog)
         if (plotgrid){
            abline(v=whenplote$levels,h=axTicks(side=2),col=grid.colour,lty="solid")
         }#end if
         points(x=thiswhen,y=thismean,col=lcolours[1],lwd=llwd[1],type=ltype
               ,pch=16,cex=1.0)
         points(x=obswhen,y=obsmean ,col=lcolours[2],lwd=llwd[2],type=ltype
               ,pch=16,cex=1.0)
         box()
         axis(side=1,at=whenplote$levels,labels=whenplote$labels,padj=whenplote$padj)
         axis(side=2,las=1)
         title(main=letitre,xlab="Time",ylab=ley,cex.main=cex.main)
         #----- Close plot (x11 waits for a click first). ------------------------------#
         if (outform[o] == "x11"){
            locator(n=1)
            dev.off()
         }else{
            dev.off()
         }#end if
         dummy = clean.tmp()
         #------------------------------------------------------------------------------#
      }#end for outform
      #---------------------------------------------------------------------------------#
   }#end if plotit
   #------------------------------------------------------------------------------------#
}#end for ncompare
#---------------------------------------------------------------------------------------#
#      Plot the comparison between observations and model: mean annual cycle (the 12    #
#  monthly means), optionally with +/- 1 standard deviation shading.  Output goes to    #
#  <outpref>/compmmean/.                                                                #
#---------------------------------------------------------------------------------------#
cat(" + Comparisons of monthly means (model vs. observations)...","\n")
for (cc in 1:ncompmodel){
   #----- Retrieve variable information from the list. ---------------------------------#
   compnow = compmodel[[cc]]
   vname = compnow$vnam
   description = compnow$desc
   unit = compnow$unit
   plotsd = compnow$plotsd
   lcolours = compnow$colour
   errcolours = compnow$errcol
   angle = compnow$angle
   dens = compnow$dens
   llwd = compnow$lwd
   shwd = compnow$shwd
   ltype = compnow$type
   plog = compnow$plog
   legpos = compnow$legpos
   plotit = compnow$mmean
   #----- Check whether there are observations for this particular site. ---------------#
   # Sites without their own observation object borrow data from a nearby tower.
   if (iata == "mao" || iata == "bdf"){
      obsnow = "obs.m34"
   }else if(iata == "stm"){
      obsnow = "obs.s67"
   }else if(iata == "rao"){
      obsnow = "obs.pdg"
   }else if(iata == "jpr"){
      obsnow = "obs.fns"
   }else if(iata == "btr"){
      obsnow = "obs.s77"
   }else{
      obsnow = paste("obs.",iata,sep="")
   }#end if
   #----- Plot only if the observation object exists in this session. ------------------#
   plotit = plotit && obsnow %in% ls()
   if (plotit){
      #---------------------------------------------------------------------------------#
      # Copy the observations to a scratch variable. #
      #---------------------------------------------------------------------------------#
      thisobs = get(obsnow)
      mnvar = paste("mmean",vname,sep=".")
      sdvar = paste("msdev",vname,sep=".")
      obsmean = thisobs[[mnvar]]
      obssdev = thisobs[[sdvar]]
      #---------------------------------------------------------------------------------#
      # Some observations do not have enough measurements to make a full year. If #
      # this is the case, then we must split the observations into smaller intervals so #
      # the polygon works. In case no observation is available, make the vectors NULL #
      # so we will not plot observations at all. #
      #---------------------------------------------------------------------------------#
      if (all(is.na(obsmean+obssdev))){
         obs.x = NULL
         obs.ylow = NULL
         obs.yhigh = NULL
      }else{
         #------ Find the periods with continous data. ---------------------------------#
         ok = is.finite(obsmean+obssdev)
         obs.x = montmont[ok]
         obs.ylow = obsmean [ok] - obssdev[ok]
         obs.yhigh = obsmean [ok] + obssdev[ok]
         #------------------------------------------------------------------------------#
      }#end if
      #---------------------------------------------------------------------------------#
      # Check whether the time series directory exists. If not, create it. #
      #---------------------------------------------------------------------------------#
      outdir = paste(outpref,"compmmean",sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      cat(" - ",description," comparison...","\n")
      #---------------------------------------------------------------------------------#
      # Define the model series. Some variables have no standard deviation in #
      # the model, so make it 0 if this is the case. #
      #---------------------------------------------------------------------------------#
      thismean = mmean[[vname]]
      thissdev = msdev[[vname]]
      if (length(thissdev) == 0){
         thissdev = 0. * thismean
      }#end if
      mod.x = montmont
      mod.ylow = thismean - thissdev
      mod.yhigh = thismean + thissdev
      #----- Find the plot range (include the SD envelopes when shading). --------------#
      xlimit = range(montmont)
      if (plotsd){
         ylimit = c(mod.ylow,mod.yhigh,obs.ylow,obs.yhigh)
      }else{
         ylimit = c(thismean,obsmean)
      }#end if
      ylimit = pretty.xylim(u=ylimit,fracexp=0.0,is.log=FALSE)
      #------ Plot annotation. ---------------------------------------------------------#
      letitre = paste(description," - ",lieu,"\n","Monthly mean",sep="")
      ley = desc.unit(desc=description,unit=unit)
      #----- Loop over formats. --------------------------------------------------------#
      for (o in 1:nout){
         fichier = paste(outdir,"/",vname,".",outform[o],sep="")
         if(outform[o] == "x11"){
            X11(width=size$width,height=size$height,pointsize=ptsz)
         }else if(outform[o] == "png"){
            png(filename=fichier,width=size$width*depth,height=size$height*depth
               ,pointsize=ptsz,res=depth)
         }else if(outform[o] == "eps"){
            postscript(file=fichier,width=size$width,height=size$height
                      ,pointsize=ptsz,paper=size$paper)
         }else if(outform[o] == "pdf"){
            pdf(file=fichier,onefile=FALSE
               ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
         }#end if
         #----- Split window into two (bottom strip holds the legend). -----------------#
         par(par.user)
         layout(mat=rbind(2,1),heights=c(5,1))
         #----- First plot: the legend. ------------------------------------------------#
         par(mar=c(0.1,4.6,0.1,2.1))
         plot.new()
         plot.window(xlim=c(0,1),ylim=c(0,1))
         if (plotsd){
            legend(x="bottom",inset=0.0,legend=c("Model","Observation")
                  ,fill=errcolours,angle=angle,density=dens,lwd=llwd,col=lcolours
                  ,bg=background,title="Shaded areas = 1 SD",cex=cex.ptsz
                  ,xpd=TRUE,pch=16)
         }else{
            legend(x="bottom",inset=0.0,legend=c("Model","Observation")
                  ,col=lcolours,lwd=llwd,cex=cex.ptsz,xpd=TRUE,pch=16)
         }#end if
         #----- Second panel: the actual plot. -----------------------------------------#
         par(mar=c(4.1,4.6,4.1,2.1))
         plot.new()
         plot.window(xlim=xlimit,ylim=ylimit,log=plog)
         if (plotgrid){
            abline(v=mplot$levels,h=axTicks(side=2),col=grid.colour,lty="solid")
         }#end if
         #----- Shaded SD envelopes.  A single polygon call draws both model and -------#
         #----- observation envelopes, separated by an NA break. -----------------------#
         if (plotsd){
            if (is.null(obs.x)){
               mod.x.poly = c(mod.x,rev(mod.x))
               mod.y.poly = c(mod.ylow,rev(mod.yhigh))
               mod.keep = is.finite(mod.y.poly)
               err.x = mod.x.poly[mod.keep]
               err.y = mod.y.poly[mod.keep]
               polygon(x=err.x,y=err.y,col=errcolours[1],angle=angle[1],density=dens[1]
                      ,lty="solid",lwd=shwd[1])
            }else{
               mod.x.poly = c(mod.x,rev(mod.x))
               mod.y.poly = c(mod.ylow,rev(mod.yhigh))
               mod.keep = is.finite(mod.y.poly)
               obs.x.poly = c(obs.x,rev(obs.x))
               obs.y.poly = c(obs.ylow,rev(obs.yhigh))
               obs.keep = is.finite(obs.y.poly)
               err.x = c(mod.x.poly[mod.keep],NA,obs.x.poly[obs.keep])
               err.y = c(mod.y.poly[mod.keep],NA,obs.y.poly[obs.keep])
               polygon(x=err.x,y=err.y,col=errcolours,angle=angle,density=dens
                      ,lty="solid",lwd=shwd)
            }#end if
         }#end if
         points(x=montmont,y=thismean,col=lcolours[1],lwd=llwd[1],type=ltype
               ,pch=16,cex=1.0)
         points(x=montmont,y=obsmean ,col=lcolours[2],lwd=llwd[2],type=ltype
               ,pch=16,cex=1.0)
         box()
         title(main=letitre,xlab="Time",ylab=ley,cex.main=cex.main)
         axis(side=1,at=mplot$levels,labels=mplot$labels,padj=mplot$padj)
         axis(side=2,las=1)
         #----- Close plot (x11 waits for a click first). ------------------------------#
         if (outform[o] == "x11"){
            locator(n=1)
            dev.off()
         }else{
            dev.off()
         }#end if
         dummy = clean.tmp()
      } #end for outform
   }#end if plotit
}#end for ncompare
#---------------------------------------------------------------------------------------#
#      Plot the comparison between observations and model: mean diurnal cycle, one      #
#  figure per calendar month, optionally with +/- 1 standard deviation shading.         #
#  Output goes to <outpref>/compdcyc/<vname>/.                                          #
#---------------------------------------------------------------------------------------#
cat(" * Comparisons of mean diurnal cycle (model vs. observations)...","\n")
for (cc in 1:ncompmodel){
   #----- Retrieve variable information from the list. ---------------------------------#
   compnow = compmodel[[cc]]
   vname = compnow$vnam
   description = compnow$desc
   unit = compnow$unit
   plotsd = compnow$plotsd
   lcolours = compnow$colour
   errcolours = compnow$errcol
   angle = compnow$angle
   dens = compnow$dens
   llwd = compnow$lwd
   shwd = compnow$shwd
   ltype = compnow$type
   plog = compnow$plog
   legpos = compnow$legpos
   plotit = compnow$qmean
   #----- Check whether there are observations for this particular site. ---------------#
   # Sites without their own observation object borrow data from a nearby tower.
   if (iata == "mao" || iata == "bdf"){
      obsnow = "obs.m34"
   }else if(iata == "stm"){
      obsnow = "obs.s67"
   }else if(iata == "rao"){
      obsnow = "obs.pdg"
   }else if(iata == "jpr"){
      obsnow = "obs.fns"
   }else if(iata == "btr"){
      obsnow = "obs.s77"
   }else{
      obsnow = paste("obs.",iata,sep="")
   }#end if
   #----- Plot only if the observation object exists in this session. ------------------#
   plotit = plotit && obsnow %in% ls()
   if (plotit){
      #---------------------------------------------------------------------------------#
      # Copy the observations to a scratch variable. #
      #---------------------------------------------------------------------------------#
      thisobs = get(obsnow)
      mnvar = paste("qmean",vname,sep=".")
      sdvar = paste("qsdev",vname,sep=".")
      obsmean = thisobs[[mnvar]]
      obssdev = thisobs[[sdvar]]
      #----- Append 1st hour after the last so the cycle closes. -----------------------#
      # NOTE(review): observations append the FIRST hour after the last column, while
      # the model below prepends the LAST hour before the first column.  Both end up
      # with ndcycle+1 columns, but the alignment against thisday differs by one slot
      # -- confirm this is intentional.
      obsmean = cbind(obsmean,obsmean[,1])
      obssdev = cbind(obssdev,obssdev[,1])
      #---------------------------------------------------------------------------------#
      # Build the observation envelope (mean +/- 1 SD, per month x hour). #
      #---------------------------------------------------------------------------------#
      obs.ylow = obsmean - obssdev
      obs.yhigh = obsmean + obssdev
      #---------------------------------------------------------------------------------#
      # Check whether the time series directory exists. If not, create it. #
      #---------------------------------------------------------------------------------#
      outdir = paste(outpref,"compdcyc",sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      outtheme = paste(outdir,vname,sep="/")
      if (! file.exists(outtheme)) dir.create(outtheme)
      cat(" + ",description," comparison...","\n")
      #---------------------------------------------------------------------------------#
      # Define the model series. Some variables have no standard deviation in #
      # the model, so make it 0 if this is the case. We also append the last hour #
      # before the first one so 00 UTC appears in the left. #
      #---------------------------------------------------------------------------------#
      thismean = umean[[vname]]
      thismean = cbind(thismean[,ndcycle],thismean)
      if (length(usdev[[vname]]) == 0){
         thissdev = 0. * thismean
      }else{
         thissdev = usdev[[vname]]
         thissdev = cbind(thissdev[,ndcycle],thissdev)
      }#end if
      mod.ylow = thismean - thissdev
      mod.yhigh = thismean + thissdev
      #----- Find the plot range (include the SD envelopes when shading). --------------#
      xlimit = range(thisday)
      if (plotsd){
         ylimit = c(mod.ylow,mod.yhigh,obs.ylow,obs.yhigh)
      }else{
         ylimit = c(thismean,obsmean)
      }#end if
      ylimit = pretty.xylim(u=ylimit,fracexp=0.0,is.log=FALSE)
      #---------------------------------------------------------------------------------#
      # Loop over all months (one figure each). #
      #---------------------------------------------------------------------------------#
      for (pmon in 1:12){
         cmon = substring(100+pmon,2,3)
         namemon = mlist[pmon]
         cat(" > ",description," time series - ",namemon,"...","\n")
         #------ Plot annotation. ------------------------------------------------------#
         letitre = paste(description," - ",lieu,"\n"
                        ,"Mean diurnal cycle - ",namemon,sep="")
         ley = desc.unit(desc=description,unit=unit)
         #----- Loop over formats. -----------------------------------------------------#
         for (o in 1:nout){
            fichier = paste(outtheme,"/",vname,"-",cmon,".",outform[o]
                           ,sep="")
            if(outform[o] == "x11"){
               X11(width=size$width,height=size$height,pointsize=ptsz)
            }else if(outform[o] == "png"){
               png(filename=fichier,width=size$width*depth,height=size$height*depth
                  ,pointsize=ptsz,res=depth)
            }else if(outform[o] == "eps"){
               postscript(file=fichier,width=size$width,height=size$height
                         ,pointsize=ptsz,paper=size$paper)
            }else if(outform[o] == "pdf"){
               pdf(file=fichier,onefile=FALSE
                  ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
            }#end if
            #----- Split window into two (bottom strip holds the legend). --------------#
            par(par.user)
            layout(mat=rbind(2,1),heights=c(5,1))
            #----- First plot: the legend. ---------------------------------------------#
            par(mar=c(0.1,4.6,0.1,2.1))
            plot.new()
            plot.window(xlim=c(0,1),ylim=c(0,1))
            if (plotsd){
               legend(x="bottom",inset=0.0,legend=c("Model","Observation")
                     ,fill=errcolours,angle=angle,density=dens,lwd=llwd,col=lcolours
                     ,bg=background,title="Shaded areas = 1 SD",cex=cex.ptsz,xpd=TRUE
                     ,pch=16)
            }else{
               legend(x="bottom",inset=0.0,legend=c("Model","Observation")
                     ,col=lcolours,lwd=llwd,cex=cex.ptsz,xpd=TRUE,pch=16)
            }#end if
            #----- Second panel: the actual plot. --------------------------------------#
            par(mar=c(4.1,4.6,4.1,2.1))
            plot.new()
            plot.window(xlim=xlimit,ylim=ylimit,log=plog)
            if (plotgrid){
               abline(v=uplot$levels,h=axTicks(side=2),col=grid.colour,lty="solid")
            }#end if
            #----- Shaded SD envelopes; one polygon call with an NA break between ------#
            #----- the model and observation envelopes. --------------------------------#
            if (plotsd){
               mod.x.now = thisday
               mod.ylow.now = mod.ylow [pmon,]
               mod.yhigh.now = mod.yhigh[pmon,]
               #------ Find the periods with continous data. ---------------------------#
               ok = is.finite(obs.ylow[pmon,]) & is.finite(obs.yhigh[pmon,])
               if (any(ok)){
                  obs.x.now = thisday [ok]
                  obs.ylow.now = obs.ylow [pmon,ok]
                  obs.yhigh.now = obs.yhigh[pmon,ok]
               }else{
                  obs.x.now = NULL
                  obs.ylow.now = NULL
                  obs.yhigh.now = NULL
               }#end if
               #------------------------------------------------------------------------#
               if (is.null(obs.x.now)){
                  mod.x.poly = c(mod.x.now,rev(mod.x.now))
                  mod.y.poly = c(mod.ylow.now,rev(mod.yhigh.now))
                  mod.keep = is.finite(mod.y.poly)
                  err.x = mod.x.poly[mod.keep]
                  err.y = mod.y.poly[mod.keep]
                  polygon(x=err.x,y=err.y,col=errcolours[1],angle=angle[1]
                         ,density=dens[1],lty="solid",lwd=shwd[1])
               }else{
                  mod.x.poly = c(mod.x.now,rev(mod.x.now))
                  mod.y.poly = c(mod.ylow.now,rev(mod.yhigh.now))
                  mod.keep = is.finite(mod.y.poly)
                  obs.x.poly = c(obs.x.now,rev(obs.x.now))
                  obs.y.poly = c(obs.ylow.now,rev(obs.yhigh.now))
                  obs.keep = is.finite(obs.y.poly)
                  err.x = c(mod.x.poly[mod.keep],NA,obs.x.poly[obs.keep])
                  err.y = c(mod.y.poly[mod.keep],NA,obs.y.poly[obs.keep])
                  polygon(x=err.x,y=err.y,col=errcolours,angle=angle,density=dens
                         ,lty="solid",lwd=shwd)
               }#end if
            }#end if
            points(x=thisday,y=thismean[pmon,],col=lcolours[1]
                  ,lwd=llwd[1],type=ltype,pch=16,cex=1.0)
            points(x=thisday,y=obsmean[pmon,],col=lcolours[2]
                  ,lwd=llwd[2],type=ltype,pch=16,cex=1.0)
            box()
            title(main=letitre,xlab="Time",ylab=ley,cex.main=cex.main)
            axis(side=1,at=uplot$levels,labels=uplot$labels,padj=uplot$padj)
            axis(side=2,las=1)
            #----- Close plot (x11 waits for a click first). ---------------------------#
            if (outform[o] == "x11"){
               locator(n=1)
               dev.off()
            }else{
               dev.off()
            }#end if
            dummy = clean.tmp()
            #---------------------------------------------------------------------------#
         } #end for outform
      }#end for pmon
   }#end if plotit
}#end for ncompare
#---------------------------------------------------------------------------------------#
#      Time series by LU.                                                               #
#  For every variable flagged in tslu, plot the monthly time series split by land-use   #
#  type (one line per selected LU, including the all-LU slot nlu+1).  Output goes to    #
#  <outpref>/tslu/.                                                                     #
#---------------------------------------------------------------------------------------#
for (v in 1:ntslu){
   thistslu = tslu[[v]]
   vnam = thistslu$vnam
   description = thistslu$desc
   unit = thistslu$unit
   plog = thistslu$plog
   plotit = thistslu$plt
   #----- Check whether the user wants to have this variable plotted. ------------------#
   if (plotit && any(sellu)){
      #---------------------------------------------------------------------------------#
      # Check whether the time series directory exists. If not, create it. #
      #---------------------------------------------------------------------------------#
      outdir = paste(outpref,"tslu",sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      cat(" + ",description," time series for all LUs...","\n")
      #----- Load variable (matrix: month x LU type). ----------------------------------#
      thisvar = lu[[vnam]]
      if (plog){
         #----- Eliminate non-positive values in case it is a log plot. ----------------#
         thisvar[thisvar <= 0] = NA
      }#end if
      #---------------------------------------------------------------------------------#
      #----- Loop over output formats. -------------------------------------------------#
      for (o in 1:nout){
         fichier = paste(outdir,"/",vnam,"-",suffix,".",outform[o],sep="")
         if(outform[o] == "x11"){
            X11(width=size$width,height=size$height,pointsize=ptsz)
         }else if(outform[o] == "png"){
            png(filename=fichier,width=size$width*depth,height=size$height*depth
               ,pointsize=ptsz,res=depth)
         }else if(outform[o] == "eps"){
            postscript(file=fichier,width=size$width,height=size$height
                      ,pointsize=ptsz,paper=size$paper)
         }else if(outform[o] == "pdf"){
            pdf(file=fichier,onefile=FALSE
               ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
         }#end if
         #------------------------------------------------------------------------------#
         # Find the limit, make some room for the legend, and in case the field is #
         # a constant, nudge the limits so the plot command will not complain. #
         #------------------------------------------------------------------------------#
         xlimit = pretty.xylim(u=as.numeric(datum$tomonth),fracexp=0.0,is.log=FALSE)
         ylimit = pretty.xylim(u=thisvar[,sellu] ,fracexp=0.0,is.log=plog )
         if (plog){
            # ydrought: band extending past both y limits used to shade droughts.
            # NOTE(review): the exp(ylimit*sqrt(...)) form looks unusual -- confirm
            # the intended log-space expansion (same construct as the DBH section).
            xylog = "y"
            ydrought = c( exp(ylimit[1] * sqrt(ylimit[1]/ylimit[2]))
                        , exp(ylimit[2] * sqrt(ylimit[2]/ylimit[1]))
                        )#end c
         }else{
            xylog = ""
            ydrought = c(ylimit[1] - 0.5 * diff(ylimit), ylimit[2] + 0.5 * diff(ylimit))
         }#end if
         #------------------------------------------------------------------------------#
         #----- Plot settings. ---------------------------------------------------------#
         letitre = paste(description,lieu,sep=" - ")
         ley = desc.unit(desc=description,unit=unit)
         cols = lucols[sellu]
         legs = lunames[sellu]
         #------------------------------------------------------------------------------#
         #------------------------------------------------------------------------------#
         # Split the plot into two windows (bottom strip holds the legend). #
         #------------------------------------------------------------------------------#
         par(par.user)
         layout(mat=rbind(2,1),heights=c(5,1))
         #------------------------------------------------------------------------------#
         #------------------------------------------------------------------------------#
         # First plot: legend. #
         #------------------------------------------------------------------------------#
         par(mar=c(0.1,4.6,0.1,2.1))
         plot.new()
         plot.window(xlim=c(0,1),ylim=c(0,1))
         legend( x = "bottom"
               , inset = 0.0
               , legend = legs
               , col = cols
               , lwd = lwidth
               , ncol = min(3,pretty.box(n.sellu)$ncol)
               , title = expression(bold("Land use type"))
               , xpd = TRUE
               )#end legend
         #------------------------------------------------------------------------------#
         #------------------------------------------------------------------------------#
         # Main plot. #
         #------------------------------------------------------------------------------#
         par(mar=c(4.1,4.6,4.1,2.1))
         plot.new()
         plot.window(xlim=xlimit,ylim=ylimit,log=xylog)
         axis(side=1,at=whenplot8$levels,labels=whenplot8$labels,padj=whenplot8$padj)
         axis(side=2,las=1)
         box()
         title(main=letitre,xlab="Year",ylab=ley,cex.main=0.7,log=xylog)
         # Shade drought periods first so the data lines are drawn on top.
         if (drought.mark){
            for (n in 1:ndrought){
               rect(xleft = drought[[n]][1],ybottom = ydrought[1]
                   ,xright = drought[[n]][2],ytop = ydrought[2]
                   ,col = grid.colour,border=NA)
            }#end for
         }#end if
         #----- Plot grid. -------------------------------------------------------------#
         if (plotgrid){
            abline(v=whenplot8$levels,h=axTicks(side=2),col=grid.colour,lty="solid")
         }#end if
         #----- Plot lines (only the LU types flagged in sellu). -----------------------#
         for (n in 1:(nlu+1)){
            if (sellu[n]){
               lines(datum$tomonth,thisvar[,n],type="l",col=lucols[n],lwd=lwidth)
            }#end if
         }#end for
         #------------------------------------------------------------------------------#
         #----- Close the device (x11 waits for a click first). ------------------------#
         if (outform[o] == "x11"){
            locator(n=1)
            dev.off()
         }else{
            dev.off()
         }#end if
         dummy=clean.tmp()
         #------------------------------------------------------------------------------#
      }#end for outform
      #---------------------------------------------------------------------------------#
   }#end if (plotit)
   #------------------------------------------------------------------------------------#
}#end for (v in 1:ntslu)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
#      Plot disturbance rate by disturbance transition.                                 #
#  One figure per output format, with every selected nlu x nlu land-use transition      #
#  drawn as a separate line.                                                            #
#---------------------------------------------------------------------------------------#
if (tserdist && any(seldist)){
   cat("   + Disturbance rate time series for all disturbances...","\n")
   #----- Loop over output formats (x11/png/eps/pdf). -----------------------------------#
   for (o in 1:nout){
      fichier = paste(outpref,"/disturb-",suffix,".",outform[o],sep="")
      if (outform[o] == "x11"){
         X11(width=size$width,height=size$height,pointsize=ptsz)
      }else if(outform[o] == "png"){
         png(filename=fichier,width=size$width*depth,height=size$height*depth
            ,pointsize=ptsz,res=depth)
      }else if(outform[o] == "eps"){
         postscript(file=fichier,width=size$width,height=size$height
                   ,pointsize=ptsz,paper=size$paper)
      }else if(outform[o] == "pdf"){
         pdf(file=fichier,onefile=FALSE
            ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
      }#end if
      #---------------------------------------------------------------------------------#
      #     Find the limit, make some room for the legend, and in case the field is a   #
      # constant, nudge the limits so the plot command will not complain.               #
      #---------------------------------------------------------------------------------#
      xlimit = pretty.xylim(u=as.numeric(datum$tomonth),fracexp=0.0,is.log=FALSE)
      ylimit = NULL
      mylucols = NULL
      mylulegs = NULL
      n = 0
      #----- Collect data/colour/label only for selected transitions ("n" walks all). --#
      for (jlu in 1:nlu){
         for (ilu in 1:nlu){
            n = n + 1
            if (seldist[ilu,jlu]){
               ylimit = c(ylimit,lu$dist[,ilu,jlu])
               mylucols = c(mylucols,distcols [n])
               mylulegs = c(mylulegs,distnames[n])
            }#end if
         }#end for
      }#end for
      ylimit = pretty.xylim(u=ylimit,fracexp=0.0,is.log=FALSE)
      #----- Drought rectangles extend past the y limits so they fill the panel. -------#
      ydrought = c(ylimit[1] - 0.5 * diff(ylimit), ylimit[2] + 0.5 * diff(ylimit))
      #---------------------------------------------------------------------------------#
      #----- Plot settings. ------------------------------------------------------------#
      letitre = paste("Disturbance rates",lieu,sep=" - ")
      #---------------------------------------------------------------------------------#
      #---------------------------------------------------------------------------------#
      #      Split the plot into two windows (bottom strip holds the legend).           #
      #---------------------------------------------------------------------------------#
      par(par.user)
      layout(mat=rbind(2,1),heights=c(5,1))
      #---------------------------------------------------------------------------------#
      #---------------------------------------------------------------------------------#
      #     First plot: legend.                                                         #
      #---------------------------------------------------------------------------------#
      par(mar=c(0.1,4.6,0.1,2.1))
      plot.new()
      plot.window(xlim=c(0,1),ylim=c(0,1))
      # NOTE(review): here "n" is the total transition count (nlu*nlu) left over from
      # the loop above, not the number of selected legend entries -- confirm that
      # pretty.box(n) is the intended column heuristic.
      legend( x      = "bottom"
            , inset  = 0.0
            , bg     = background
            , legend = mylulegs
            , col    = mylucols
            , lwd    = lwidth
            , ncol   = min(3,pretty.box(n)$ncol)
            , title  = expression(bold("Transition"))
            , xpd    = TRUE
            )#end legend
      #---------------------------------------------------------------------------------#
      #---------------------------------------------------------------------------------#
      #     Main plot.                                                                  #
      #---------------------------------------------------------------------------------#
      par(mar=c(4.1,4.6,4.1,2.1))
      plot.new()
      plot.window(xlim=xlimit,ylim=ylimit,log=xylog)
      axis(side=1,at=whenplot8$levels,labels=whenplot8$labels,padj=whenplot8$padj)
      axis(side=2,las=1)
      box()
      title( main     = letitre
           , xlab     = "Year"
           , ylab     = desc.unit(desc="Disturbance rate",unit=untab$oneoyr)
           , cex.main = 0.7
           )#end title
      #----- Shade drought periods. ----------------------------------------------------#
      if (drought.mark){
         for (n in 1:ndrought){
            rect(xleft = drought[[n]][1],ybottom = ydrought[1]
                ,xright = drought[[n]][2],ytop = ydrought[2]
                ,col = grid.colour,border=NA)
         }#end for
      }#end if
      #----- Plot grid. ----------------------------------------------------------------#
      if (plotgrid){
         abline(v=whenplot8$levels,h=axTicks(side=2),col=grid.colour,lty="solid")
      }#end if
      #----- Plot lines (same transition order as the legend colours). -----------------#
      n = 0
      for (jlu in sequence(nlu)){
         for (ilu in sequence(nlu)){
            n = n + 1
            if (seldist[ilu,jlu]){
               lines(datum$tomonth,lu$dist[,ilu,jlu],type="l"
                    ,col=distcols[n],lwd=lwidth)
            }#end if
         }#end for
      }#end for
      #---------------------------------------------------------------------------------#
      #----- Close the device (x11 waits for a mouse click). ---------------------------#
      if (outform[o] == "x11"){
         locator(n=1)
         dev.off()
      }else{
         dev.off()
      }#end if
      dummy=clean.tmp()
      #---------------------------------------------------------------------------------#
   }#end for outform
   #------------------------------------------------------------------------------------#
}#end if
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
#      Plot the time series diagrams showing months and years.                          #
#  One figure per "theme" (a named group of related variables plotted together).        #
#---------------------------------------------------------------------------------------#
cat("    * Plot some time series with groups of variables...","\n")
for (hh in 1:ntheme){
   #----- Retrieve variable information from the list. ---------------------------------#
   themenow    = theme[[hh]]
   vnames      = themenow$vnam
   description = themenow$desc
   lcolours    = themenow$colour
   llwd        = themenow$lwd
   # emean.line is a global switch forcing plain lines regardless of the theme type.
   if (emean.line){
      ltype    = "l"
   }else{
      ltype    = themenow$type
   }#end if
   plog        = themenow$plog
   prefix      = themenow$prefix
   group       = themenow$title
   unit        = themenow$unit
   legpos      = themenow$legpos
   plotit      = themenow$emean
   ylimit.fix  = themenow$emean.lim
   if (plotit){
      #---------------------------------------------------------------------------------#
      #      Check whether the time series directory exists.  If not, create it.        #
      #---------------------------------------------------------------------------------#
      outdir = paste(outpref,"theme_emean",sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      cat("      + ",group," time series for several variables...","\n")
      #----- Define the number of layers. ----------------------------------------------#
      nlayers = length(vnames)
      #---------------------------------------------------------------------------------#
      #---------------------------------------------------------------------------------#
      #     Find the limit, make some room for the legend, and in case the field is a   #
      # constant, nudge the limits so the plot command will not complain.               #
      # A non-finite emean.lim means "determine the limits from the data".              #
      #---------------------------------------------------------------------------------#
      xlimit = pretty.xylim(u=as.numeric(datum$tomonth),fracexp=0.0,is.log=FALSE)
      if (any(! is.finite(ylimit.fix))){
         ylimit = NULL
         for (l in 1:nlayers) ylimit = c(ylimit,emean[[vnames[l]]])
         ylimit = pretty.xylim(u=ylimit,fracexp=0.0,is.log=plog)
      }else{
         ylimit = ylimit.fix
      }#end if
      #----- Drought band y range: computed in log space when the axis is log. ---------#
      if (plog){
         xylog    = "y"
         ydrought = c( exp(ylimit[1] * sqrt(ylimit[1]/ylimit[2]))
                     , exp(ylimit[2] * sqrt(ylimit[2]/ylimit[1]))
                     )#end c
      }else{
         xylog    = ""
         ydrought = c(ylimit[1] - 0.5 * diff(ylimit), ylimit[2] + 0.5 * diff(ylimit))
      }#end if
      #---------------------------------------------------------------------------------#
      #---------------------------------------------------------------------------------#
      #      Check if the directory exists.  If not, create it.                         #
      #---------------------------------------------------------------------------------#
      #----- Loop over formats. --------------------------------------------------------#
      for (o in 1:nout){
         #------ Open file. ------------------------------------------------------------#
         fichier = paste(outdir,"/",prefix,"-",suffix,".",outform[o],sep="")
         if(outform[o] == "x11"){
            X11(width=size$width,height=size$height,pointsize=ptsz)
         }else if(outform[o] == "png"){
            png(filename=fichier,width=size$width*depth,height=size$height*depth
               ,pointsize=ptsz,res=depth)
         }else if(outform[o] == "eps"){
            postscript(file=fichier,width=size$width,height=size$height
                      ,pointsize=ptsz,paper=size$paper)
         }else if(outform[o] == "pdf"){
            pdf(file=fichier,onefile=FALSE
               ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
         }#end if
         #------------------------------------------------------------------------------#
         #----- Plot settings. ---------------------------------------------------------#
         letitre = paste(" Time series: ",group,"\n",lieu,sep="")
         ley     = desc.unit(desc=description,unit=unit)
         #------------------------------------------------------------------------------#
         #------------------------------------------------------------------------------#
         #     Split the plot into two windows (bottom strip holds the legend).         #
         #------------------------------------------------------------------------------#
         par(par.user)
         layout(mat=rbind(2,1),heights=c(5,1))
         #------------------------------------------------------------------------------#
         #------------------------------------------------------------------------------#
         #     First plot: legend.                                                      #
         #------------------------------------------------------------------------------#
         par(mar=c(0.1,4.6,0.1,2.1))
         plot.new()
         plot.window(xlim=c(0,1),ylim=c(0,1))
         legend( x      = "bottom"
               , inset  = 0.0
               , legend = description
               , col    = lcolours
               , lwd    = llwd
               , ncol   = min(3,pretty.box(nlayers)$ncol)
               , xpd    = TRUE
               )#end legend
         #------------------------------------------------------------------------------#
         #------------------------------------------------------------------------------#
         #     Main plot.                                                               #
         #------------------------------------------------------------------------------#
         par(mar=c(4.1,4.6,4.1,2.1))
         plot.new()
         plot.window(xlim=xlimit,ylim=ylimit,log=xylog)
         axis(side=1,at=whenplot8$levels,labels=whenplot8$labels,padj=whenplot8$padj)
         axis(side=2,las=1)
         box()
         title(main=letitre,xlab="Year",ylab=ley,cex.main=0.7)
         #----- Shade drought periods. -------------------------------------------------#
         if (drought.mark){
            for (n in 1:ndrought){
               rect(xleft = drought[[n]][1],ybottom = ydrought[1]
                   ,xright = drought[[n]][2],ytop = ydrought[2]
                   ,col = grid.colour,border=NA)
            }#end for
         }#end if
         #----- Plot grid. -------------------------------------------------------------#
         if (plotgrid){
            abline(v=whenplot8$levels,h=axTicks(side=2),col=grid.colour,lty="solid")
         }#end if
         #----- Plot lines (one layer per variable in the theme). ----------------------#
         for (l in 1:nlayers){
            thisvar = emean[[vnames[l]]]
            points(x=datum$tomonth,y=thisvar,col=lcolours[l],lwd=llwd[l],type=ltype
                  ,pch=16,cex=0.8)
         }#end for
         #------------------------------------------------------------------------------#
         #----- Close the device (x11 waits for a mouse click). ------------------------#
         if (outform[o] == "x11"){
            locator(n=1)
            dev.off()
         }else{
            dev.off()
         }#end if
         dummy=clean.tmp()
         #------------------------------------------------------------------------------#
      } #end for outform
      #---------------------------------------------------------------------------------#
   }#end if plotit
   #------------------------------------------------------------------------------------#
}#end for ntser
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
#      Plot the monthly-mean climatology (Jan..Dec) for each theme group.               #
#---------------------------------------------------------------------------------------#
cat("    * Plot some monthly means of groups of variables ...","\n")
for (hh in 1:ntheme){
   #----- Retrieve variable information from the list. ---------------------------------#
   themenow    = theme[[hh]]
   vnames      = themenow$vnam
   description = themenow$desc
   lcolours    = themenow$colour
   llwd        = themenow$lwd
   ltype       = themenow$type
   plog        = themenow$plog
   prefix      = themenow$prefix
   group       = themenow$title
   unit        = themenow$unit
   legpos      = themenow$legpos
   plotit      = themenow$mmean
   ylimit.fix  = themenow$mmean.lim
   if (plotit){
      #---------------------------------------------------------------------------------#
      #      Check whether the time series directory exists.  If not, create it.        #
      #---------------------------------------------------------------------------------#
      outdir = paste(outpref,"theme_mmean",sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      cat("      + ",group," time series for several variables...","\n")
      #----- Define the number of layers. ----------------------------------------------#
      nlayers = length(vnames)
      #---------------------------------------------------------------------------------#
      #---------------------------------------------------------------------------------#
      #     Find the limit, make some room for the legend, and in case the field is a   #
      # constant, nudge the limits so the plot command will not complain.               #
      # montmont (global) holds the month axis values; non-finite mmean.lim means       #
      # "determine the y limits from the data".                                         #
      #---------------------------------------------------------------------------------#
      xlimit = pretty.xylim(u=montmont,fracexp=0.0,is.log=plog)
      if (any (! is.finite(ylimit.fix))){
         ylimit = NULL
         for (l in 1:nlayers) ylimit = c(ylimit,mmean[[vnames[l]]])
         ylimit = pretty.xylim(u=ylimit,fracexp=0.0,is.log=plog)
      }else{
         ylimit = ylimit.fix
      }#end if
      #----- Drought band y range: computed in log space when the axis is log. ---------#
      if (plog){
         xylog    = "y"
         ydrought = c( exp(ylimit[1] * sqrt(ylimit[1]/ylimit[2]))
                     , exp(ylimit[2] * sqrt(ylimit[2]/ylimit[1]))
                     )#end c
      }else{
         xylog    = ""
         ydrought = c(ylimit[1] - 0.5 * diff(ylimit), ylimit[2] + 0.5 * diff(ylimit))
      }#end if
      #---------------------------------------------------------------------------------#
      #---------------------------------------------------------------------------------#
      #      Check if the directory exists.  If not, create it.                         #
      #---------------------------------------------------------------------------------#
      #----- Loop over formats. --------------------------------------------------------#
      for (o in 1:nout){
         #------ Open file. ------------------------------------------------------------#
         fichier = paste(outdir,"/",prefix,"-",suffix,".",outform[o],sep="")
         if(outform[o] == "x11"){
            X11(width=size$width,height=size$height,pointsize=ptsz)
         }else if(outform[o] == "png"){
            png(filename=fichier,width=size$width*depth,height=size$height*depth
               ,pointsize=ptsz,res=depth)
         }else if(outform[o] == "eps"){
            postscript(file=fichier,width=size$width,height=size$height
                      ,pointsize=ptsz,paper=size$paper)
         }else if(outform[o] == "pdf"){
            pdf(file=fichier,onefile=FALSE
               ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
         }#end if
         #------------------------------------------------------------------------------#
         #----- Plot settings. ---------------------------------------------------------#
         letitre = paste(" Time series: ",group,"\n",lieu,sep="")
         ley     = desc.unit(desc=description,unit=unit)
         #------------------------------------------------------------------------------#
         #------------------------------------------------------------------------------#
         #     Split the plot into two windows (bottom strip holds the legend).         #
         #------------------------------------------------------------------------------#
         par(par.user)
         layout(mat=rbind(2,1),heights=c(5,1))
         #------------------------------------------------------------------------------#
         #------------------------------------------------------------------------------#
         #     First plot: legend.                                                      #
         #------------------------------------------------------------------------------#
         par(mar=c(0.1,4.6,0.1,2.1))
         plot.new()
         plot.window(xlim=c(0,1),ylim=c(0,1))
         legend( x      = "bottom"
               , inset  = 0.0
               , legend = description
               , col    = lcolours
               , lwd    = llwd
               , pch    = 16
               , ncol   = min(3,pretty.box(nlayers)$ncol)
               , cex    = 0.9*cex.ptsz
               , xpd    = TRUE
               )#end legend
         #------------------------------------------------------------------------------#
par(mar=c(4.1,4.6,4.1,2.1))
plot.new()
plot.window(xlim=xlimit,ylim=ylimit,log=xylog)
axis(side=1,at=mplot$levels,labels=substring(mplot$labels,1,1),padj=mplot$padj)
axis(side=2,las=1)
box()
title(main=letitre,xlab="Year",ylab=ley,cex.main=0.7)
if (drought.mark){
for (n in 1:ndrought){
rect(xleft = drought[[n]][1],ybottom = ydrought[1]
,xright = drought[[n]][2],ytop = ydrought[2]
,col = grid.colour,border=NA)
}#end for
}#end if
#----- Plot grid. -------------------------------------------------------------#
if (plotgrid){
abline(v=mplot$levels,h=axTicks(side=2),col=grid.colour,lty="solid")
}#end if
         #----- Plot lines (one layer per variable, against the month axis). -----------#
         for (l in 1:nlayers){
            thisvar = mmean[[vnames[l]]]
            points(x=montmont,y=thisvar,col=lcolours[l],lwd=llwd[l],type=ltype
                  ,pch=16,cex=0.8)
         }#end for
         #------------------------------------------------------------------------------#
         #----- Close the device (x11 waits for a mouse click). ------------------------#
         if (outform[o] == "x11"){
            locator(n=1)
            dev.off()
         }else{
            dev.off()
         }#end if
         dummy=clean.tmp()
         #------------------------------------------------------------------------------#
      } #end for outform
      #---------------------------------------------------------------------------------#
   }#end if plotit
   #------------------------------------------------------------------------------------#
}#end for ntser
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
#      Plot the climatology of the mean diurnal cycle.                                  #
#  One figure per theme and per calendar month; the x axis is the time of day.          #
#---------------------------------------------------------------------------------------#
cat("    * Plot the mean diel of groups of variables...","\n")
for (hh in 1:ntheme){
   #----- Retrieve variable information from the list. ---------------------------------#
   themenow    = theme[[hh]]
   vnames      = themenow$vnam
   description = themenow$desc
   lcolours    = themenow$colour
   llwd        = themenow$lwd
   ltype       = themenow$type
   plog        = themenow$plog
   prefix      = themenow$prefix
   group       = themenow$title
   unit        = themenow$unit
   legpos      = themenow$legpos
   plotit      = themenow$qmean
   if (plog){
      xylog = "y"
   }else{
      xylog = ""
   }#end if
   if (plotit){
      #---------------------------------------------------------------------------------#
      #      Check whether the time series directory exists.  If not, create it.        #
      #      (One sub-directory per theme prefix.)                                      #
      #---------------------------------------------------------------------------------#
      outdir = paste(outpref,"theme_qmean",sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      outtheme = paste(outdir,prefix,sep="/")
      if (! file.exists(outtheme)) dir.create(outtheme)
      cat("      + ",group," diurnal cycle for several variables...","\n")
      #----- Define the number of layers. ----------------------------------------------#
      nlayers = length(vnames)
      # thisday (global) holds the hour-of-day axis; y limits pool all variables and
      # all months so every monthly panel shares the same scale.
      xlimit = range(thisday)
      ylimit = NULL
      for (l in 1:nlayers) ylimit = c(ylimit,umean[[vnames[l]]])
      ylimit = pretty.xylim(u=ylimit,fracexp=0.0,is.log=FALSE)
      #---------------------------------------------------------------------------------#
      #---------------------------------------------------------------------------------#
      #      Loop over all months.                                                      #
      #---------------------------------------------------------------------------------#
      for (pmon in 1:12){
         cmon    = sprintf("%2.2i",pmon)
         namemon = mlist[pmon]
         #------------------------------------------------------------------------------#
         #      Check if the directory exists.  If not, create it.                      #
         #------------------------------------------------------------------------------#
         #----- Loop over formats. -----------------------------------------------------#
         for (o in 1:nout){
            #------ Open file. ---------------------------------------------------------#
            fichier = paste(outtheme,"/",prefix,"-",cmon,"-",suffix,".",outform[o]
                           ,sep="")
            if(outform[o] == "x11"){
               X11(width=size$width,height=size$height,pointsize=ptsz)
            }else if(outform[o] == "png"){
               png(filename=fichier,width=size$width*depth,height=size$height*depth
                  ,pointsize=ptsz,res=depth)
            }else if(outform[o] == "eps"){
               postscript(file=fichier,width=size$width,height=size$height
                         ,pointsize=ptsz,paper=size$paper)
            }else if(outform[o] == "pdf"){
               pdf(file=fichier,onefile=FALSE
                  ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
            }#end if
            #---------------------------------------------------------------------------#
            #----- Plot settings. ------------------------------------------------------#
            letitre = paste(group," - ",lieu,"\n"
                           ,"Mean diurnal cycle - ",namemon,sep="")
            ley     = desc.unit(desc=description,unit=unit)
            #---------------------------------------------------------------------------#
            #---------------------------------------------------------------------------#
            #     Split the plot into two windows (bottom strip holds the legend).      #
            #---------------------------------------------------------------------------#
            par(par.user)
            layout(mat=rbind(2,1),heights=c(5,1))
            #------------------------------------------------------------------------------#
            #---------------------------------------------------------------------------#
            #     First plot: legend.                                                   #
            #---------------------------------------------------------------------------#
            par(mar=c(0.1,4.6,0.1,2.1))
            plot.new()
            plot.window(xlim=c(0,1),ylim=c(0,1))
            legend( x      = "bottom"
                  , inset  = 0.0
                  , legend = description
                  , col    = lcolours
                  , lwd    = llwd
                  , ncol   = min(3,pretty.box(nlayers)$ncol)
                  , xpd    = TRUE
                  )#end legend
            #---------------------------------------------------------------------------#
par(mar=c(4.1,4.6,4.1,2.1))
plot.new()
plot.window(xlim=xlimit,ylim=ylimit,log=xylog)
axis(side=1,at=uplot$levels,labels=uplot$labels,padj=uplot$padj)
axis(side=2,las=1)
box()
title(main=letitre,xlab="Year",ylab=ley,cex.main=0.7)
if (drought.mark){
for (n in 1:ndrought){
rect(xleft = drought[[n]][1],ybottom = ydrought[1]
,xright = drought[[n]][2],ytop = ydrought[2]
,col = grid.colour,border=NA)
}#end for
}#end if
#----- Plot grid. ----------------------------------------------------------#
if (plotgrid){
abline(v=uplot$levels,h=axTicks(side=2),col=grid.colour,lty="solid")
}#end if
            #----- Plot lines. ---------------------------------------------------------#
            for (l in 1:nlayers){
               thisvar = umean[[vnames[l]]]
               # Prepend the last diel bin so the curve wraps around midnight
               # (assumes thisday carries one extra leading time -- TODO confirm).
               thisvar = cbind(thisvar[,ndcycle],thisvar)
               points(x=thisday,y=thisvar[pmon,],col=lcolours[l]
                     ,lwd=llwd[l],type=ltype,pch=16)
            }#end for
            #---------------------------------------------------------------------------#
            #----- Close the device (x11 waits for a mouse click). ---------------------#
            if (outform[o] == "x11"){
               locator(n=1)
               dev.off()
            }else{
               dev.off()
            }#end if
            dummy=clean.tmp()
            #---------------------------------------------------------------------------#
         } #end for outform
         #------------------------------------------------------------------------------#
      }#end for pmon
      #---------------------------------------------------------------------------------#
   }#end if plotit
   #------------------------------------------------------------------------------------#
}#end for ntser
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
#      Plot the monthly climatology of the soil properties (month x depth contours).    #
#---------------------------------------------------------------------------------------#
for (v in 1:nsoilplot){
   #----- Retrieve variable information from the list. ---------------------------------#
   thissoil    = soilplot[[v]]
   vnam        = thissoil$vnam
   description = thissoil$desc
   unit        = thissoil$unit
   vcscheme    = thissoil$csch
   pnlog       = thissoil$pnlog
   plotit      = thissoil$mmean
   if (plotit){
      #---------------------------------------------------------------------------------#
      #      Check if the directory exists.  If not, create it.                         #
      #---------------------------------------------------------------------------------#
      outdir = paste(outpref,"soil_mmean",sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      cat("      + Climatology profile of ",description,"...","\n")
      #----- Find the number of rows and columns, and the axes. ------------------------#
      # nzg/slz (globals): number of soil layers and layer depths; slz must be
      # negative (metres below surface) for the log transform below to work.
      monaxis  = sort(unique(datum$month))
      soilaxis = slz
      nmon     = length(monaxis)
      nsoil    = nzg
      #----- Save the meaningful months and years. -------------------------------------#
      monat    = 1:12
      monlab   = substring(month.abb,1,1)
      #----- Convert the vector data into an array. ------------------------------------#
      vararr   = mmean[[vnam]]
      #----- Copy Decembers and Januaries to make the edges buffered. ------------------#
      january  = vararr[1,]
      january  = c(january,january[nzg],january[nzg])
      december = vararr[12,]
      december = c(december[1],december[1],december)
      #----- Bind first and last year to the array, to make the edges buffered. --------#
      varbuff  = cbind(vararr[,1],vararr,vararr[,nzg])
      varbuff  = rbind(december,varbuff,january)
      #---------------------------------------------------------------------------------#
      #----- Expand the month axis by one buffer entry on each side. -------------------#
      monaxis  = c(min(monaxis)-1,monaxis,max(monaxis)+1)
      #----- Depth axis: add extrapolated layers at both ends, then map to -log(-z) so -#
      #----- shallow layers get more plotting room. ------------------------------------#
      soilaxis = -log(-1.0 * c( slz[1]*(slz[1]/slz[2])
                              , soilaxis
                              , slz[nzg]*(slz[nzg]/slz[nzg-1]) ))
      #----- Choose contour levels (logarithmic or linear, per variable). --------------#
      if (pnlog){
         vrange  = range(varbuff,na.rm=TRUE)
         vlevels = pretty.log(x=vrange,n=ncolsfc,forcelog=TRUE)
         vnlev   = length(vlevels)
      }else{
         vrange  = range(varbuff,na.rm=TRUE)
         vlevels = pretty(x=vrange,n=ncolsfc)
         vnlev   = length(vlevels)
      }#end if
      #----- Loop over formats. --------------------------------------------------------#
      for (o in 1:nout){
         fichier = paste(outdir,"/",vnam,"-",suffix,".",outform[o],sep="")
         if(outform[o] == "x11"){
            X11(width=size$width,height=size$height,pointsize=ptsz)
         }else if(outform[o] == "png"){
            png(filename=fichier,width=size$width*depth,height=size$height*depth
               ,pointsize=ptsz,res=depth)
         }else if(outform[o] == "eps"){
            postscript(file=fichier,width=size$width,height=size$height
                      ,pointsize=ptsz,paper=size$paper)
         }else if(outform[o] == "pdf"){
            pdf(file=fichier,onefile=FALSE
               ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
         }#end if
         letitre = paste(description," - ",lieu,sep="")
         ley     = desc.unit(desc="Soil depth",unit=untab$m)
         lacle   = desc.unit(desc=NULL,unit=unit)
         par(par.user)
         # sombreado is a project helper -- presumably a filled.contour-like plot with
         # a colour key; zat/znice (globals) hold the depth tick marks and labels.
         sombreado(x=monaxis,y=soilaxis,z=varbuff,levels=vlevels,nlevels=vnlev
                  ,colour.palette=get(vcscheme)
                  ,plot.title=title(main=letitre,xlab="Month",ylab=ley,cex.main=0.7)
                  ,key.title=title(main=lacle,cex.main=0.8)
                  ,key.log=pnlog
                  ,useRaster=TRUE
                  ,plot.axes={axis(side=1,at=monat,labels=monlab)
                              axis(side=2,at=zat,labels=znice)
                              if (fcgrid){
                                 abline(h=zat,v=monat,col=grid.colour,lty="dotted")
                              }#end if fcgrid
                             }#end plot.axes
                  )
         #----- Close the device (x11 waits for a mouse click). ------------------------#
         if (outform[o] == "x11"){
            locator(n=1)
            dev.off()
         }else{
            dev.off()
         }#end if
         dummy = clean.tmp()
      } #end for outform
   }#end if plotit
}#end for (v in 1:nsoilplot)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
#      Plot the full time series of the soil properties (time x depth contours).        #
#---------------------------------------------------------------------------------------#
for (sts in 1:nsoilplot){
   #----- Retrieve variable information from the list. ---------------------------------#
   thissoil    = soilplot[[sts]]
   vnam        = thissoil$vnam
   description = thissoil$desc
   unit        = thissoil$unit
   vcscheme    = thissoil$csch
   pnlog       = thissoil$pnlog
   plotit      = thissoil$emean
   if (plotit){
      #---------------------------------------------------------------------------------#
      #      Check if the directory exists.  If not, create it.                         #
      #---------------------------------------------------------------------------------#
      outdir = paste(outpref,"soil_emean",sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      cat("      + Time series profile of ",description,"...","\n")
      #----- Find the number of rows and columns, and the axes. ------------------------#
      timeaxis  = datum$tomonth
      soilaxis  = slz
      nmon      = length(timeaxis)
      nsoil     = nzg
      #----- Convert the vector data into an array. ------------------------------------#
      vararr    = emean[[vnam]]
      #----- Copy first and last time to make the edges buffered. ----------------------#
      first     = vararr[1,]
      first     = c(first,first[nzg],first[nzg])
      last      = vararr[ntimes,]
      last      = c(last[1],last[1],last)
      #----- Bind first and last year to the array, to make the edges buffered. --------#
      varbuff   = cbind(vararr[,1],vararr,vararr[,nzg])
      varbuff   = rbind(first,varbuff,last)
      #---------------------------------------------------------------------------------#
      #      Expand the month and year axes.  Make the first and last time equal time   #
      # steps.                                                                          #
      #---------------------------------------------------------------------------------#
      dwhen    = as.numeric(datum$tomonth[2]-datum$tomonth[1])
      whenaxis = c(chron(as.numeric(datum$tomonth[1]-dwhen))
                  ,timeaxis
                  ,chron(as.numeric(datum$tomonth[ntimes]+dwhen)))
      #----- Depth axis: add extrapolated layers at both ends, then map to -log(-z). ---#
      soilaxis = -log(-1.0 * c( slz[1]*(slz[1]/slz[2])
                              , soilaxis
                              , slz[nzg]*(slz[nzg]/slz[nzg-1]) ))
      #----- Choose contour levels (logarithmic or linear, per variable). --------------#
      if (pnlog){
         vrange  = range(varbuff,na.rm=TRUE)
         vlevels = pretty.log(x=vrange,n=ncolsfc,forcelog=TRUE)
         vnlev   = length(vlevels)
      }else{
         vrange  = range(varbuff,na.rm=TRUE)
         vlevels = pretty(x=vrange,n=ncolsfc)
         vnlev   = length(vlevels)
      }#end if
      #----- Loop over formats. --------------------------------------------------------#
      for (o in 1:nout){
         fichier = paste(outdir,"/",vnam,"-",suffix,".",outform[o],sep="")
         if(outform[o] == "x11"){
            X11(width=size$width,height=size$height,pointsize=ptsz)
         }else if(outform[o] == "png"){
            png(filename=fichier,width=size$width*depth,height=size$height*depth
               ,pointsize=ptsz,res=depth)
         }else if(outform[o] == "eps"){
            postscript(file=fichier,width=size$width,height=size$height
                      ,pointsize=ptsz,paper=size$paper)
         }else if(outform[o] == "pdf"){
            pdf(file=fichier,onefile=FALSE
               ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
         }#end if
         letitre = paste(description," - ",lieu,sep="")
         ley     = desc.unit(desc="Soil depth",unit=untab$m)
         lacle   = desc.unit(desc=NULL,unit=unit)
         par(par.user)
         sombreado(x=whenaxis,y=soilaxis,z=varbuff,levels=vlevels,nlevels=vnlev
                  ,colour.palette=get(vcscheme)
                  ,plot.title=title(main=letitre,xlab="Month",ylab=ley,cex.main=0.7)
                  ,key.title=title(main=lacle,cex.main=0.8)
                  ,key.log=pnlog
                  ,useRaster=TRUE
                  ,plot.axes={axis(side=1,at=whenplot6$levels
                                  ,labels=whenplot6$labels,padj=whenplot6$padj)
                              axis(side=2,at=zat,labels=znice)
                              if (fcgrid){
                                 abline(h=zat,v=whenplot6$levels,col=grid.colour
                                       ,lty="dotted")
                              }#end if fcgrid
                             }#end plot.axes
                  )#end sombreado
         #----- Close the device (x11 waits for a mouse click). ------------------------#
         if (outform[o] == "x11"){
            locator(n=1)
            dev.off()
         }else{
            dev.off()
         }#end if
         dummy = clean.tmp()
      } #end for outform
   }#end if plotit
}#end for nhov
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
#      Plot a filled contour plot showing months and years (month x year panels).       #
#---------------------------------------------------------------------------------------#
for (v in 1:nsqueeze){
   #----- Retrieve variable information from the list. ---------------------------------#
   thisfillc   = squeeze[[v]]
   vnam        = thisfillc$vnam
   description = thisfillc$desc
   unit        = thisfillc$unit
   vcscheme    = thisfillc$col.scheme
   plotit      = thisfillc$fco.mmean
   #------------------------------------------------------------------------------------#
   #     Find the first and the last full years.  These will be the actual first and    #
   # last year only if the years are complete, otherwise the first and the last year    #
   # will be taken out.                                                                 #
   #------------------------------------------------------------------------------------#
   if (monthbeg == 1){
      yearaa = yeara
   }else{
      yearaa = yeara + 1
   }# end if
   if (meszz == 12){
      yearzz = yearz
   }else{
      yearzz = yearz - 1
   }#end if
   sel      = datum$year >= yearaa & datum$year <= yearzz
   # Require at least two full years (24 monthly records) for a meaningful contour.
   twoyears = sum(sel) >= 24
   if (plotit && twoyears){
      #---------------------------------------------------------------------------------#
      #      Check if the directory exists.  If not, create it.                         #
      #---------------------------------------------------------------------------------#
      outdir = paste(outpref,"fillc_mmean",sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      cat("      + ",description," time series in filled contour...","\n")
      #----- Load this variable into "thisvar". ----------------------------------------#
      thisvar = emean[[vnam]]
      #----- Find the number of rows and columns, and the axes. ------------------------#
      monaxis = sort(unique(datum$month[sel]))
      yraxis  = sort(unique(datum$year[sel]))
      nmon    = length(monaxis)
      nyear   = length(yraxis)
      #----- Save the meaningful months and years. -------------------------------------#
      monat   = 1:12
      monlab  = c("J","F","M","A","M","J","J","A","S","O","N","D")
      yrat    = pretty(yraxis)
      #----- Convert the vector data into an array (month x year; full years only). ----#
      vararr  = array(thisvar[sel],c(nmon,nyear))
      #----- Copy Decembers and Januaries to make the edges buffered. ------------------#
      january  = vararr[1,]
      january  = c(january,january[nyear],january[nyear])
      december = vararr[12,]
      december = c(december[1],december[1],december)
      #----- Bind first and last year to the array, to make the edges buffered. --------#
      varbuff  = cbind(vararr[,1],vararr,vararr[,nyear])
      varbuff  = rbind(december,varbuff,january)
      #----- Expand the month and year axes. -------------------------------------------#
      monaxis = c(min(monaxis)-1,monaxis,max(monaxis)+1)
      yraxis  = c(min(yraxis)-1,yraxis,max(yraxis)+1)
      vrange  = range(varbuff,na.rm=TRUE)
      vlevels = pretty(x=vrange,n=ncolsfc)
      vnlev   = length(vlevels)
      #----- Loop over formats. --------------------------------------------------------#
      for (o in 1:nout){
         fichier = paste(outdir,"/",vnam,"-",suffix,".",outform[o],sep="")
         if(outform[o] == "x11"){
            X11(width=size$width,height=size$height,pointsize=ptsz)
         }else if(outform[o] == "png"){
            png(filename=fichier,width=size$width*depth,height=size$height*depth
               ,pointsize=ptsz,res=depth)
         }else if(outform[o] == "eps"){
            postscript(file=fichier,width=size$width,height=size$height
                      ,pointsize=ptsz,paper=size$paper)
         }else if(outform[o] == "pdf"){
            pdf(file=fichier,onefile=FALSE
               ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
         }#end if
         letitre = paste(description," - ",lieu,sep="")
         lacle   = desc.unit(desc=NULL,unit=unit)
         par(par.user)
         sombreado(x=monaxis,y=yraxis,z=varbuff,levels=vlevels,nlevels=vnlev
                  ,colour.palette=get(vcscheme)
                  ,plot.title=title(main=letitre,xlab="Month",ylab="Year",cex.main=0.7)
                  ,key.title=title(main=lacle,cex.main=0.8)
                  ,useRaster=TRUE
                  ,plot.axes={axis(side=1,at=monat,labels=monlab)
                              axis(side=2,at=yrat)
                              if (fcgrid){
                                 for (yl in yrat){
                                    abline(h=yl,col=grid.colour,lty="dotted")
                                 } #end for yl
                                 for (ml in monat){
                                    abline(v=ml,col=grid.colour,lty="dotted")
                                 } #end for ml
                              }#end if fcgrid
                             }#end plot.axes
                  )
         #----- Close the device (x11 waits for a mouse click). ------------------------#
         if (outform[o] == "x11"){
            locator(n=1)
            dev.off()
         }else{
            dev.off()
         }#end if
         dummy = clean.tmp()
      } #end for outform
   }#end if plotit
}#end for nhov
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
#      Plot the filled contour diagrams showing mean diurnal cycle (hour of day on the  #
# y axis) as a function of time (x axis), one figure per selected variable.             #
#---------------------------------------------------------------------------------------#
for (v in 1:nsqueeze){
   #----- Retrieve variable information from the list. ---------------------------------#
   thisfillc   = squeeze[[v]]
   vnam        = thisfillc$vnam
   description = thisfillc$desc
   unit        = thisfillc$unit
   vcscheme    = thisfillc$col.scheme
   plotit      = thisfillc$fco.qmean
   if (plotit){
      #---------------------------------------------------------------------------------#
      #     Check if the directory exists.  If not, create it.                          #
      #---------------------------------------------------------------------------------#
      outdir = paste(outpref,"fillc_qmean",sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      cat("      + ",description," time series of diurnal cycle...","\n")
      #----- Load this variable (ntimes x ndcycle array of diurnal means). -------------#
      vararr  = qmean[[vnam]]
      #----- Copy first and last hours to make the edges buffered (wrap-around). -------#
      firsthr = vararr[,1]
      firsthr = c(firsthr,firsthr[ntimes],firsthr[ntimes])
      lasthr  = vararr[,ndcycle]
      lasthr  = c(lasthr[1],lasthr[1],lasthr)
      #----- Bind first and last time to the array, to make the edges buffered. --------#
      varbuff = rbind(vararr[1,],vararr,vararr[ntimes,])
      varbuff = cbind(lasthr,varbuff,firsthr)
      #----- Expand the hour-of-day and the time axes to match the buffered array. -----#
      hraxis   = seq(from=0,to=ndcycle+1,by=1) * 24 / ndcycle
      dwhen    = datum$tomonth[2]-datum$tomonth[1]
      whenaxis = c(datum$tomonth[1]-dwhen,datum$tomonth,datum$tomonth[ntimes]+dwhen)
      huplot   = pretty.time(whenaxis,n=8)
      #----- Common colour levels, shared by every output format. ----------------------#
      vrange   = range(varbuff,na.rm=TRUE)
      vlevels  = pretty(x=vrange,n=ncolsfc)
      vnlev    = length(vlevels)
      #----- Loop over formats. --------------------------------------------------------#
      for (o in 1:nout){
         #----- Open the graphics device for this output format. -----------------------#
         fichier = paste(outdir,"/",vnam,"-",suffix,".",outform[o],sep="")
         if(outform[o] == "x11"){
            X11(width=size$width,height=size$height,pointsize=ptsz)
         }else if(outform[o] == "png"){
            png(filename=fichier,width=size$width*depth,height=size$height*depth
               ,pointsize=ptsz,res=depth)
         }else if(outform[o] == "eps"){
            postscript(file=fichier,width=size$width,height=size$height
                      ,pointsize=ptsz,paper=size$paper)
         }else if(outform[o] == "pdf"){
            pdf(file=fichier,onefile=FALSE
               ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
         }#end if
         letitre = paste("Mean diurnal cycle \n ",description," - ",lieu,sep="")
         ley     = desc.unit(desc="Time of day",unit=untab$gmt)
         lacle   = desc.unit(desc=NULL         ,unit=unit)
         par(par.user)
         # NOTE(review): the y axis below uses 'uplot' (hour tick levels/labels), which
         # is not defined in this section -- presumably set earlier in the script.
         # Also note that side-1 uses 'huplot$level' (singular) for the tick positions
         # while the grid uses 'huplot$levels'; confirm both against the rest of file.
         sombreado(x=whenaxis,y=hraxis,z=varbuff,levels=vlevels,nlevels=vnlev
                  ,colour.palette=get(vcscheme)
                  ,plot.title=title(main=letitre,ylab=ley,xlab="Time",cex.main=0.7)
                  ,key.title=title(main=lacle,cex.main=0.8)
                  ,useRaster=TRUE
                  ,plot.axes={axis(side=1,at=huplot$level,labels=huplot$labels)
                              axis(side=2,at=uplot$levels,labels=uplot$labels)
                              if (fcgrid){
                                 abline(v=huplot$levels,h=uplot$levels
                                       ,col=grid.colour,lty="dotted")
                              }#end if fcgrid
                             }#end plot.axes
         )
         #----- Close the device (on-screen windows wait for a mouse click first). -----#
         if (outform[o] == "x11"){
            locator(n=1)
            dev.off()
         }else{
            dev.off()
         }#end if
         dummy = clean.tmp()
      } #end for outform
   }#end if plotit
}#end for nhov
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
#      Box plots of the monthly distributions, one figure per selected variable.        #
#---------------------------------------------------------------------------------------#
for (v in 1:nsqueeze){
   #----- Grab the plot settings for this variable. ------------------------------------#
   info        = squeeze[[v]]
   vnam        = info$vnam
   description = info$desc
   unit        = info$unit
   plotit      = info$box.plot
   if (plotit){
      #----- Make sure the output directory exists, creating it when needed. -----------#
      outdir = paste0(outpref,"/boxplot")
      if (! file.exists(outdir)) dir.create(outdir)
      cat("      + ",description," box plot...","\n")
      #----- Grab the monthly time series for this variable. ---------------------------#
      thisvar = emean[[vnam]]
      #----- One figure per requested output format. -----------------------------------#
      for (o in 1:nout){
         #----- Open the graphics device for this output format. -----------------------#
         fichier = paste0(outdir,"/",vnam,"-",suffix,".",outform[o])
         switch( EXPR = outform[o]
               , x11  = X11(width=size$width,height=size$height,pointsize=ptsz)
               , png  = png(filename=fichier,width=size$width*depth
                           ,height=size$height*depth,pointsize=ptsz,res=depth)
               , eps  = postscript(file=fichier,width=size$width,height=size$height
                                  ,pointsize=ptsz,paper=size$paper)
               , pdf  = pdf(file=fichier,onefile=FALSE,width=size$width
                           ,height=size$height,pointsize=ptsz,paper=size$paper)
               )#end switch
         #----- Annotation and a fixed y range (same scale for every month). -----------#
         ylimit  = pretty.xylim(u=thisvar,fracexp=0.0,is.log=FALSE)
         letitre = paste(description,lieu,sep=" - ")
         ley     = desc.unit(desc=description,unit=unit)
         #----- Plot the monthly box plot. ---------------------------------------------#
         par(par.user)
         plot(mmonth,thisvar,main=letitre,ylim=ylimit,cex.main=0.7
             ,xlab="Time",ylab=ley,las=1)
         #----- Close the device; on-screen windows wait for a mouse click first. ------#
         if (outform[o] == "x11") locator(n=1)
         dev.off()
         dummy = clean.tmp()
      }#end for (o in 1:nout)
   }#end if (plotit)
}#end for (v in 1:nsqueeze)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
#      Plot the PDF of patch-level properties as a function of time.                    #
#---------------------------------------------------------------------------------------#
cat (" * Time series of PDF of properties by patch...","\n")
for (v in 1:nplotpatch){
   #----- Retrieve variable information from the list. ---------------------------------#
   thispatch   = plotpatch[[v]]
   vnam        = thispatch$vnam
   description = thispatch$desc
   unit        = thispatch$unit
   vcscheme    = thispatch$col.scheme
   plog        = thispatch$plog
   plotit      = thispatch$emean
   #----- Skip the plot unless the density estimate has finite data on every axis. -----#
   this        = patchpdf[[vnam]]$edensity
   plotit      = ( plotit && any(is.finite(this$x),na.rm=TRUE)
                          && any(is.finite(this$y),na.rm=TRUE)
                          && any(is.finite(this$z),na.rm=TRUE) )
   #------------------------------------------------------------------------------------#
   #      Find levels, and expand PDF scale in case it is a constant.                   #
   #------------------------------------------------------------------------------------#
   if (plog){
      vlevs = sort(unique(pretty.log(this$z,n=ncolsfc,forcelog=TRUE)))
   }else{
      vlevs = sort(unique(pretty(this$z,n=ncolsfc)))
   }#end if
   if (length(vlevs) == 1) vlevs = pretty.xylim(u=vlevs,fracexp=0.0,is.log=plog)
   #------------------------------------------------------------------------------------#
   if (plotit){
      #---------------------------------------------------------------------------------#
      #     Check if the directory exists.  If not, create it.                          #
      #---------------------------------------------------------------------------------#
      outdir = paste(outpref,"patch_emean",sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      cat("      + PDF plot of ",description,"...","\n")
      #----- Loop over formats. --------------------------------------------------------#
      for (o in 1:nout){
         #----- Open the graphics device for this output format. -----------------------#
         fichier = paste(outdir,"/",vnam,"-",suffix,".",outform[o],sep="")
         if (outform[o] == "x11"){
            X11(width=size$width,height=size$height,pointsize=ptsz)
         }else if(outform[o] == "png"){
            png(filename=fichier,width=size$width*depth,height=size$height*depth
               ,pointsize=ptsz,res=depth)
         }else if(outform[o] == "eps"){
            postscript(file=fichier,width=size$width,height=size$height
                      ,pointsize=ptsz,paper=size$paper)
         }else if(outform[o] == "pdf"){
            pdf(file=fichier,onefile=FALSE
               ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
         }#end if
         #----- Annotation.  The title originally used " \ ", which is an invalid ------#
         # escape sequence in an R string (parse error); restore the line break used    #
         # by the other contour titles in this script.                                  #
         letitre = paste("Density function of ",description," \n ",lieu,sep="")
         lex     = "Time"
         ley     = desc.unit(desc=description,unit=unit)
         #------------------------------------------------------------------------------#
         #      Plot the PDF distribution.                                              #
         #------------------------------------------------------------------------------#
         par(par.user)
         sombreado( x              = this$x
                  , y              = this$y
                  , z              = this$z
                  , levels         = vlevs
                  , colour.palette = get(vcscheme)
                  , plot.title     = title(main=letitre,xlab=lex,ylab=ley,cex.main=0.7)
                  , key.title      = title(main="Density",cex.main=0.8)
                  , key.log        = plog
                  , useRaster      = TRUE
                  , plot.axes      = { axis( side   = 1
                                           , at     = whenplot8$levels
                                           , labels = whenplot8$labels
                                           , padj   = whenplot8$padj
                                           )#end axis
                                       axis( side   = 2
                                           , at     = pretty(this$y)
                                           , labels = NULL
                                           )#end axis
                                       if (fcgrid){
                                          abline( v   = whenplot8$levels
                                                , h   = pretty(this$y)
                                                , col = grid.colour
                                                , lty = "dotted"
                                                )#end abline
                                       }#end if fcgrid
                                     }#end plot.axes
                  )#end sombreado
         #------------------------------------------------------------------------------#
         #      Close the device; on-screen windows wait for a mouse click first.       #
         #------------------------------------------------------------------------------#
         if (outform[o] == "x11"){
            locator(n=1)
            dev.off()
         }else{
            dev.off()
         }#end if
         #------------------------------------------------------------------------------#
         dummy = clean.tmp()
         #------------------------------------------------------------------------------#
      } #end for outform
      #---------------------------------------------------------------------------------#
   }#end if plotit
   #------------------------------------------------------------------------------------#
}#end for (v in 1:nplotpatch)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
#      Plot the monthly PDF of patch-level properties (month of year on the x axis).    #
#---------------------------------------------------------------------------------------#
cat (" * Monthly PDF of properties by patch...","\n")
for (v in 1:nplotpatch){
   #----- Retrieve variable information from the list. ---------------------------------#
   thispatch   = plotpatch[[v]]
   vnam        = thispatch$vnam
   description = thispatch$desc
   unit        = thispatch$unit
   vcscheme    = thispatch$col.scheme
   plog        = thispatch$plog
   plotit      = thispatch$mmean
   #----- Skip the plot unless the density estimate has finite data on every axis. -----#
   this        = patchpdf[[vnam]]$mdensity
   plotit      = ( plotit && any(is.finite(this$x),na.rm=TRUE)
                          && any(is.finite(this$y),na.rm=TRUE)
                          && any(is.finite(this$z),na.rm=TRUE) )
   if (plotit){
      #---------------------------------------------------------------------------------#
      #     Check if the directory exists.  If not, create it.                          #
      #---------------------------------------------------------------------------------#
      outdir = paste(outpref,"patch_mmean",sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      cat("      + PDF plot of ",description,"...","\n")
      #----- Find the month tick marks. ------------------------------------------------#
      monat  = 1:12
      monlab = c("J","F","M","A","M","J","J","A","S","O","N","D")
      #---------------------------------------------------------------------------------#
      #----- Loop over formats. --------------------------------------------------------#
      for (o in 1:nout){
         #----- Open the graphics device for this output format. -----------------------#
         fichier = paste(outdir,"/",vnam,"-",suffix,".",outform[o],sep="")
         if(outform[o] == "x11"){
            X11(width=size$width,height=size$height,pointsize=ptsz)
         }else if(outform[o] == "png"){
            png(filename=fichier,width=size$width*depth,height=size$height*depth
               ,pointsize=ptsz,res=depth)
         }else if(outform[o] == "eps"){
            postscript(file=fichier,width=size$width,height=size$height
                      ,pointsize=ptsz,paper=size$paper)
         }else if(outform[o] == "pdf"){
            pdf(file=fichier,onefile=FALSE
               ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
         }#end if
         #----- Annotation.  The title originally used " \ ", which is an invalid ------#
         # escape sequence in an R string (parse error); restore the line break used    #
         # by the other contour titles in this script.                                  #
         letitre = paste("Density function of ",description," \n ",lieu,sep="")
         lex     = "Months"
         ley     = desc.unit(desc=description,unit=unit)
         #------------------------------------------------------------------------------#
         #      Plot the PDF distribution.                                              #
         #------------------------------------------------------------------------------#
         par(par.user)
         sombreado( x              = this$x
                  , y              = this$y
                  , z              = this$z
                  , nlevels        = ncolsfc
                  , colour.palette = get(vcscheme)
                  , plot.title     = title(main=letitre,xlab=lex,ylab=ley,cex.main=0.7)
                  , key.title      = title(main="Density",cex.main=0.8)
                  , key.log        = plog
                  , useRaster      = TRUE
                  , plot.axes      = { axis( side   = 1
                                           , at     = monat
                                           , labels = monlab
                                           )#end axis
                                       axis( side   = 2
                                           , at     = pretty(this$y)
                                           , labels = NULL
                                           )#end axis
                                       if (fcgrid){
                                          abline( v   = monat
                                                , h   = pretty(this$y)
                                                , col = grid.colour
                                                , lty = "dotted"
                                                )#end abline
                                       }#end if fcgrid
                                     }#end plot.axes
                  )#end sombreado
         #------------------------------------------------------------------------------#
         #      Close the device; on-screen windows wait for a mouse click first.       #
         #------------------------------------------------------------------------------#
         if (outform[o] == "x11"){
            locator(n=1)
            dev.off()
         }else{
            dev.off()
         }#end if
         #------------------------------------------------------------------------------#
         dummy = clean.tmp()
         #------------------------------------------------------------------------------#
      } #end for outform
      #---------------------------------------------------------------------------------#
   }#end if plotit
   #------------------------------------------------------------------------------------#
}#end for (v in 1:nplotpatch)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
#      Bar plot by DBH class: one panel per month/year selected in 'sasmonth', with     #
# one bar (or one stacked segment) per PFT and DBH class.                               #
#---------------------------------------------------------------------------------------#
cat("    + Bar plot by DBH classes...","\n")
#----- Select the months to plot, and the PFTs that ever had a non-zero population. ----#
monbplot    = which(nummonths(datum$tomonth) %in% sasmonth)
nmonbplot   = length(monbplot)
pftuse      = which(apply(X=szpft$nplant,MARGIN=3,FUN=sum,na.rm=TRUE) > 0)
pftuse      = pftuse[pftuse != (npft+1)]
npftuse     = length(pftuse)
pftname.use = pft$name  [pftuse]
pftcol.use  = pft$colour[pftuse]
for (v in 1:ntspftdbh){
   #----- Load settings for this variable.----------------------------------------------#
   thisbar     = tspftdbh[[v]]
   vnam        = thisbar$vnam
   description = thisbar$desc
   unit        = thisbar$unit
   stacked     = thisbar$stack
   plotit      = thisbar$bar.plot
   plog        = thisbar$plog
   #----- Log scale and stacked bars are incompatible (stacks start at zero). ----------#
   if (plog){
      xylog   = "y"
      stacked = FALSE
   }else{
      xylog   = ""
   }#end if
   #------------------------------------------------------------------------------------#
   #------------------------------------------------------------------------------------#
   #      Check whether to plot this
   #------------------------------------------------------------------------------------#
   if (plotit){
      cat("      - ",description,"...","\n")
      #---------------------------------------------------------------------------------#
      #      Retrieve the variable, and keep only the part that is usable: the          #
      # selected months, the PFTs in use, and the per-class DBH bins (the last DBH      #
      # index is the all-classes total, hence dropped).  NAs become zero-height bars.   #
      #---------------------------------------------------------------------------------#
      thisvnam                  = szpft[[vnam]][monbplot,,]
      thisvnam                  = thisvnam [,,pftuse]
      thisvnam                  = thisvnam [,-(ndbh+1),]
      thisvnam[is.na(thisvnam)] = 0.
      thiswhen                  = datum$tomonth[monbplot]
      #---------------------------------------------------------------------------------#
      #---------------------------------------------------------------------------------#
      #      Find the limits for the plots.  We use the same axis so it is easier to    #
      # compare different times.                                                        #
      #---------------------------------------------------------------------------------#
      if (stacked){
         ylimit = c(0,max(apply(X=thisvnam,MARGIN=c(1,2),FUN=sum,na.rm=TRUE)))
      }else{
         ylimit = range(x=thisvnam,na.rm=TRUE)
      }#end if
      ylimit = pretty.xylim(u=ylimit,fracexp=0.0,is.log=plog)
      #---------------------------------------------------------------------------------#
      #---------------------------------------------------------------------------------#
      #      Check if the directory exists.  If not, create it.                         #
      #---------------------------------------------------------------------------------#
      barplotdir = paste(outpref,"barplot_dbh",sep="/")
      if (! file.exists(barplotdir)) dir.create(barplotdir)
      outdir = paste(barplotdir,vnam,sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      #---------------------------------------------------------------------------------#
      #---------------------------------------------------------------------------------#
      #      Loop over all possible months.                                             #
      #---------------------------------------------------------------------------------#
      for (m in 1:nmonbplot){
         #----- Find which month/year we are plotting. ---------------------------------#
         cmonth    = sprintf("%2.2i",(nummonths(thiswhen[m])))
         cyear     = sprintf("%4.4i",(numyears(thiswhen[m])))
         mm        = as.numeric(cmonth)
         yy        = as.numeric(cyear)
         whentitle = paste(mon2mmm(mm,cap1=TRUE),cyear,sep="-")
         #------------------------------------------------------------------------------#
         #----- Loop over output formats. ----------------------------------------------#
         for (o in 1:nout){
            #------ Open the plot. -----------------------------------------------------#
            fichier = paste(outdir,"/",vnam,"-",cyear,"-",cmonth,"-",suffix
                           ,".",outform[o],sep="")
            if (outform[o] == "x11"){
               X11(width=size$width,height=size$height,pointsize=ptsz)
            }else if(outform[o] == "png"){
               png(filename=fichier,width=size$width*depth,height=size$height*depth
                  ,pointsize=ptsz,res=depth)
            }else if(outform[o] == "eps"){
               postscript(file=fichier,width=size$width,height=size$height
                         ,pointsize=ptsz,paper=size$paper)
            }else if(outform[o] == "pdf"){
               pdf(file=fichier,onefile=FALSE
                  ,width=size$width,height=size$height,pointsize=ptsz,paper=size$paper)
            }#end if
            #---------------------------------------------------------------------------#
            #------ Set up the title and axis labels. ----------------------------------#
            letitre = paste(lieu,"\n",description," - Time : ",whentitle,sep="")
            lexlab  = "DBH Classes"
            leylab  = desc.unit(desc=description,unit=unit)
            #---------------------------------------------------------------------------#
            #------ Split window: bottom strip for the legend, top for the bars. -------#
            par(par.user)
            layout(mat=rbind(2,1),heights=c(5,1))
            #---------------------------------------------------------------------------#
            #------ Legend. ------------------------------------------------------------#
            # NOTE(review): 'n.selpft' is not defined in this section ('npftuse' is    #
            # the local PFT count) -- presumably set earlier in the script; confirm.   #
            par(mar=c(0.1,4.6,0.1,2.1))
            plot.new()
            plot.window(xlim=c(0,1),ylim=c(0,1))
            legend( x      = "bottom"
                  , inset  = 0.0
                  , legend = pftname.use
                  , fill   = pftcol.use
                  , ncol   = min(3,pretty.box(n.selpft)$ncol)
                  , title  = expression(bold("Plant functional type"))
                  , cex    = cex.ptsz
                  , bg     = background
                  , xpd    = TRUE
                  )#end legend
            #---------------------------------------------------------------------------#
            #----- Plot all monthly means together. ------------------------------------#
            par(mar=c(4.1,4.6,4.1,2.1))
            barplot(height=t(thisvnam[m,,]),names.arg=dbhnames[1:ndbh],width=1.0
                   ,main=letitre,xlab=lexlab,ylab=leylab,ylim=ylimit,legend.text=FALSE
                   ,beside=(! stacked),col=pftcol.use,log=xylog
                   ,border=grey.fg,xpd=FALSE,cex.main=cex.main,las=1)
            #----- Side-by-side bars get a vertical grid line between DBH classes. -----#
            if (plotgrid & (! stacked)){
               xgrid=0.5+(1:ndbh)*(1+npftuse)
               abline(v=xgrid,col=grid.colour,lty="solid")
            }#end if
            box()
            #---------------------------------------------------------------------------#
            #---------------------------------------------------------------------------#
            #      Close the device; on-screen windows wait for a mouse click first.    #
            #---------------------------------------------------------------------------#
            if (outform[o] == "x11"){
               locator(n=1)
               dev.off()
            }else{
               dev.off()
            }#end if
            dummy = clean.tmp()
            #---------------------------------------------------------------------------#
         } #end for outform
         #------------------------------------------------------------------------------#
      }#end for
      #---------------------------------------------------------------------------------#
   }#end if
   #------------------------------------------------------------------------------------#
}#end for
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
#      Plot the 3-D size and age structure of various variables: one "lollipop" per     #
# cohort, placed at (log gap age, DBH), with height equal to the variable value and     #
# marker size scaled by the cohort population.                                          #
#---------------------------------------------------------------------------------------#
for (v in 1:ntspftdbh){
   #----- Retrieve variable information from the list. ---------------------------------#
   thissas     = tspftdbh[[v]]
   vnam        = thissas$vnam
   description = thissas$desc
   unit        = thissas$i.unit
   plotit      = thissas$sas
   plog        = thissas$plog
   #----- If this variable is to be plotted, then go through this if block. ------------#
   if (plotit){
      cat("      + Size and age structure plot: ",description,"...","\n")
      #---------------------------------------------------------------------------------#
      #     Check if the directory exists.  If not, create it.                          #
      #---------------------------------------------------------------------------------#
      sasdir = paste(outpref,"sas",sep="/")
      if (! file.exists(sasdir)) dir.create(sasdir)
      outdir = paste(sasdir,vnam,sep="/")
      if (! file.exists(outdir)) dir.create(outdir)
      #---------------------------------------------------------------------------------#
      #----- Load this list into "varco" (one element per time, named like "yYYYYmMM"). #
      varco  = cohort[[vnam]]
      #---------------------------------------------------------------------------------#
      #---------------------------------------------------------------------------------#
      #      Loop over all times.                                                       #
      #---------------------------------------------------------------------------------#
      for (ww in names(cohort$age)){
         #----- Find which month/year we are plotting (parsed from the element name). --#
         cmonth   = substring(ww,7,8)
         thisyear = substring(ww,2,5)
         mm       = as.numeric(cmonth)
         yy       = as.numeric(thisyear)
         #----- Retrieve variable list, age, DBH, and PFT for this time. ---------------#
         # Ages are shifted to be strictly positive because the x axis is log(age).     #
         ageww   = cohort$age   [[ww]]
         if (any(ageww <= 0,na.rm=TRUE)){
            minww = min(ageww,na.rm=TRUE)
            ageww = ageww - minww + 0.01
         }#end if
         dbhww   = cohort$dbh   [[ww]]
         pftww   = cohort$pft   [[ww]]
         varww   = varco        [[ww]]
         popww   = cohort$nplant[[ww]] * cohort$area[[ww]]
         #------------------------------------------------------------------------------#
         #     We only plot the SAS figures when the polygon is not an absolute desert. #
         #------------------------------------------------------------------------------#
         if (any (! is.na(varww))){
            #---------------------------------------------------------------------------#
            #      Find the range.  If the user wants the range to be fixed, then use   #
            # the global range, otherwise, simply use the range for this year.          #
            #---------------------------------------------------------------------------#
            if (sasfixlimits){
               xlimit = pretty.xylim(u=unlist(cohort$age),fracexp=0.0,is.log=TRUE )
               ylimit = pretty.xylim(u=unlist(cohort$dbh),fracexp=0.0,is.log=FALSE)
               zlimit = pretty.xylim(u=unlist(varco)     ,fracexp=0.0,is.log=plog )
               popmin = min  (unlist(cohort$nplant * cohort$area), na.rm=TRUE)
               popmax = max  (unlist(cohort$nplant * cohort$area), na.rm=TRUE)
            }else{
               xlimit = pretty.xylim(u=ageww             ,fracexp=0.0,is.log=TRUE )
               ylimit = pretty.xylim(u=dbhww             ,fracexp=0.0,is.log=FALSE)
               zlimit = pretty.xylim(u=varww             ,fracexp=0.0,is.log=plog )
               popmin = min  (popww                      ,na.rm=TRUE)
               popmax = max  (popww                      ,na.rm=TRUE)
            }#end if
            #---------------------------------------------------------------------------#
            #----- Define the scale-dependent population size (log interpolation). -----#
            cexww = cexmin + (cexmax - cexmin) * log(popww/popmin) / log(popmax/popmin)
            #---------------------------------------------------------------------------#
            #----- Define the floor location (where the lollipop stems start). ---------#
            if ((zlimit[1] > 0) != (zlimit[2] > 0)){
               floor3d = 0.
            }else if (zlimit[1] > 0){
               floor3d = zlimit[1]
            }else{
               floor3d = zlimit[2]
            }#end if
            #---------------------------------------------------------------------------#
            #----- Define the grid information for the 3-D plot. -----------------------#
            xlabels = pretty.log(xlimit,n=5)
            ylabels = pretty(ylimit,n=5)
            zlabels = if(plog){pretty.log(zlimit,n=5)}else{pretty(zlimit,n=5)}
            xat     = log(xlabels)
            yat     = ylabels
            zat     = if(plog){log(zlabels)}else{zlabels}
            xlimit  = range(x=xat)
            ylimit  = range(x=yat)
            zlimit  = range(x=zat)
            xfloor  = seq(from=xlimit[1],to=xlimit[2],length.out=16)
            yfloor  = seq(from=ylimit[1],to=ylimit[2],length.out=16)
            zfloor  = matrix(floor3d,nrow=length(xfloor),ncol=length(yfloor))
            #---------------------------------------------------------------------------#
            #----- Expand the lines to make the lollipops. -----------------------------#
            # Each cohort becomes three points: (floor, value, NA); the NA breaks the   #
            # line between cohorts, and pchww draws the head only at the value point.   #
            ncohnow = length(varww)
            ageww   = rep(ageww,each=3)
            dbhww   = rep(dbhww,each=3)
            pftww   = rep(pftww,each=3)
            varww   = as.vector(rbind(rep(floor3d,times=ncohnow)
                                     ,varco[[ww]]
                                     ,rep(NA,times=ncohnow)))
            xww     = log(ageww)
            yww     = dbhww
            zww     = if(plog){log(varww)}else{varww}
            pchww   = rep(c(NA,16,NA),times=ncohnow)
            cexww   = rep(cexww,each=3)
            colww   = pft$colour[pftww]
            pftin   = sort(unique(cohort$pft[[ww]]))
            colleg  = pft$colour[pftin]
            pftleg  = pft$name  [pftin]
            #---------------------------------------------------------------------------#
            #---------------------------------------------------------------------------#
            #      Plot annotation.                                                     #
            # NOTE(review): 'mlist' (month names) and the view settings (theta, phi,    #
            # gcol, expz, shade, ltheta) are defined earlier in this script; perspx,    #
            # paxis3d and mtext3d are project helpers -- confirm against rest of file.  #
            #---------------------------------------------------------------------------#
            letitre = paste(description," - ",lieu,
                            "\n Time :",mlist[mm],"/",thisyear,sep=" ")
            lexlab  = desc.unit(desc="Gap age",unit=untab$yr)
            leylab  = desc.unit(desc="DBH",unit=untab$cm)
            lezlab  = desc.unit(desc=description,unit=unit)
            #---------------------------------------------------------------------------#
            #----- Loop over output formats. -------------------------------------------#
            for (o in 1:nout){
               #----- Open file. -------------------------------------------------------#
               fichier = paste(outdir,"/",vnam,"-",thisyear,"-",cmonth,"-",suffix
                              ,".",outform[o],sep="")
               if (outform[o] == "x11"){
                  X11(width=size$width,height=size$height,pointsize=ptsz)
               }else if(outform[o] == "png"){
                  png(filename=fichier,width=size$width*depth,height=size$height*depth
                     ,pointsize=ptsz,res=depth)
               }else if(outform[o] == "eps"){
                  postscript(file=fichier,width=size$width,height=size$height
                            ,pointsize=ptsz,paper=size$paper)
               }else if(outform[o] == "pdf"){
                  pdf(file=fichier,onefile=FALSE
                     ,width=size$width,height=size$height,pointsize=ptsz
                     ,paper=size$paper)
               }#end if
               #------------------------------------------------------------------------#
               #----- Split the domain into 2 (legend strip below the 3-D panel). ------#
               par(par.user)
               layout(mat=rbind(2,1),heights=c(5,1))
               #------------------------------------------------------------------------#
               #------------------------------------------------------------------------#
               #      Plot legend.                                                      #
               #------------------------------------------------------------------------#
               par(mar=c(0.1,0.1,0.1,0.1))
               plot.new()
               plot.window(xlim=c(0,1),ylim=c(0,1))
               legend( x      = "center"
                     , inset  = 0.0
                     , legend = pftleg
                     , fill   = colleg
                     , ncol   = min(4,pretty.box(length(pftleg))$ncol)
                     , title  = expression(bold("Plant functional type"))
                     , cex    = cex.ptsz
                     , xpd    = TRUE
                     )#end legend
               #------------------------------------------------------------------------#
               #------------------------------------------------------------------------#
               #      Plot the 3-D floor surface (perspective projection matrix).       #
               #------------------------------------------------------------------------#
               par(mar=c(1.1,1.1,4.1,1.1))
               pout = perspx( x        = xfloor
                            , y        = yfloor
                            , z        = zfloor
                            , xlim     = xlimit
                            , ylim     = ylimit
                            , zlim     = zlimit
                            , theta    = theta
                            , phi      = phi
                            , col      = gcol
                            , expand   = expz
                            , ticktype = "detailed"
                            , border   = NA
                            , shade    = shade
                            , ltheta   = ltheta
                            , main     = letitre
                            , cex.main = 0.8*cex.ptsz
                            , axes     = FALSE
                            )#end perspx
               #----- Add axes. --------------------------------------------------------#
               paxis3d(edge="X--",pmat=pout,at=xat,cex=0.9*cex.ptsz,labels=xlabels)
               paxis3d(edge="Y--",pmat=pout,at=yat,cex=0.9*cex.ptsz,labels=ylabels)
               paxis3d(edge="Z-+",pmat=pout,at=zat,cex=0.9*cex.ptsz,labels=zlabels)
               mtext3d(edge="X--",pmat=pout,labels=lexlab,cex=cex.ptsz,srt=theta+90)
               mtext3d(edge="Y--",pmat=pout,labels=leylab,cex=cex.ptsz,srt=theta)
               mtext3d(edge="Z-+",pmat=pout,labels=lezlab,cex=cex.ptsz,srt=-75)
               #------------------------------------------------------------------------#
               #----- Add the cohorts (stems first, then the marker heads). ------------#
               lines (trans3d(x=xww,y=yww,z=zww,pmat=pout),type="l",col=grey.fg,lwd=2)
               points(trans3d(x=xww,y=yww,z=zww,pmat=pout),type="p",pch=pchww
                     ,col=colww,cex=cexww)
               #------------------------------------------------------------------------#
               #----- Close the device; on-screen windows wait for a mouse click. ------#
               if (outform[o] == "x11"){
                  locator(n=1)
                  dev.off()
               }else{
                  dev.off()
               }#end if
               dummy = clean.tmp()
               #------------------------------------------------------------------------#
            }#end for outform
            #---------------------------------------------------------------------------#
         }#end if is.na(varww)
         #------------------------------------------------------------------------------#
      }#end for nameco
      #---------------------------------------------------------------------------------#
   }#end if
   #------------------------------------------------------------------------------------#
}#end for npsas
#---------------------------------------------------------------------------------------#
}#end for places
#==========================================================================================#
#==========================================================================================#
|
\name{disc.1r}
\alias{disc.1r}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Discretization using Holte's 1R method }
\description{
This function performs supervised discretization using Holte's 1R method.
}
\usage{
disc.1r(data, convar, binsize = 6)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{The name of the dataset to be discretized}
\item{convar}{A vector containing the continuous features}
\item{binsize}{ The number of instances per bin}
}
\value{
Returns a new data matrix with discretized values
}
\references{
Kantardzic M. (2003). Data Mining: Concepts, Models, Methods, and Algorithms.
John Wiley. New York. }
\author{Shiyun Wen and Edgar Acuna}
\seealso{\code{\link{disc.ew}},\code{\link{disc.ef}}, \code{\link{chiMerge}}, \code{\link{disc.mentr}}}
\examples{
#-----Discretization using Holte's 1R method
data(bupa)
disc.1r(bupa,1:6)}
\keyword{manip}
| /man/disc.1r.Rd | no_license | a704261687/dprep | R | false | false | 982 | rd | \name{disc.1r}
\alias{disc.1r}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Discretization using the Holte's 1R method }
\description{
This function performs supervised discretization using the Holte's 1R method
}
\usage{
disc.1r(data, convar, binsize = 6)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{The name of the dataset to be discretized}
\item{convar}{A vector containing the continuous features}
\item{binsize}{ The number of instances per bin}
}
\value{
Returns a new data matrix with discretized values
}
\references{
Kantardzic M. (2003). Data Mining: Concepts, Models, methods, and Algorithms.
John Wiley. New York. }
\author{Shiyun Wen and Edgar Acuna}
\seealso{\code{\link{disc.ew}},\code{\link{disc.ef}}, \code{\link{chiMerge}}, \code{\link{disc.mentr}}}
\examples{
#-----Discretization using the Holte's 1r method
data(bupa)
disc.1r(bupa,1:6)}
\keyword{manip}
|
# NCBI Datasets API
#
# NCBI service to query and download biological sequence data across all domains of life from NCBI databases.
#
# The version of the OpenAPI document: v1alpha
#
# Generated by: https://openapi-generator.tech
# NOTE(review): generated code -- regeneration will overwrite manual edits.
#' @docType class
#' @title V1alpha1Message
#' @description V1alpha1Message Class
#' @format An \code{R6Class} generator object
#' @field error \link{Datasetsv1alpha1Error} [optional]
#'
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
V1alpha1Message <- R6::R6Class(
  'V1alpha1Message',
  public = list(
    `error` = NULL,
    # Constructor.  'error' must be an R6 object (a Datasetsv1alpha1Error);
    # extra arguments are accepted (and ignored) for forward compatibility.
    initialize = function(`error`=NULL, ...){
      local.optional.var <- list(...)
      if (!is.null(`error`)) {
        stopifnot(R6::is.R6(`error`))
        self$`error` <- `error`
      }
    },
    # Convert this object to a plain named list (nested objects via their
    # own toJSON methods), suitable for jsonlite serialisation.
    toJSON = function() {
      V1alpha1MessageObject <- list()
      if (!is.null(self$`error`)) {
        V1alpha1MessageObject[['error']] <-
          self$`error`$toJSON()
      }
      V1alpha1MessageObject
    },
    # Populate this object from a JSON string.
    # NOTE(review): unlike fromJSONString below, this method does not end
    # with 'self', so its return value differs -- presumably only called
    # for its side effect; confirm against the generator's conventions.
    fromJSON = function(V1alpha1MessageJson) {
      V1alpha1MessageObject <- jsonlite::fromJSON(V1alpha1MessageJson)
      if (!is.null(V1alpha1MessageObject$`error`)) {
        errorObject <- Datasetsv1alpha1Error$new()
        errorObject$fromJSON(jsonlite::toJSON(V1alpha1MessageObject$error, auto_unbox = TRUE, digits = NA))
        self$`error` <- errorObject
      }
    },
    # Serialise this object to a JSON string (manual template assembly).
    toJSONString = function() {
      jsoncontent <- c(
        if (!is.null(self$`error`)) {
        sprintf(
        '"error":
          %s
        ',
        jsonlite::toJSON(self$`error`$toJSON(), auto_unbox=TRUE, digits = NA)
        )}
      )
      jsoncontent <- paste(jsoncontent, collapse = ",")
      paste('{', jsoncontent, '}', sep = "")
    },
    # Populate this object from a JSON string and return the instance
    # (invisibly chained via 'self').
    fromJSONString = function(V1alpha1MessageJson) {
      V1alpha1MessageObject <- jsonlite::fromJSON(V1alpha1MessageJson)
      self$`error` <- Datasetsv1alpha1Error$new()$fromJSON(jsonlite::toJSON(V1alpha1MessageObject$error, auto_unbox = TRUE, digits = NA))
      self
    }
  )
)
| /R/v1alpha1_message.R | no_license | zhang-ncbi/dataset_r_test | R | false | false | 2,032 | r | # NCBI Datasets API
#
# NCBI service to query and download biological sequence data across all domains of life from NCBI databases.
#
# The version of the OpenAPI document: v1alpha
#
# Generated by: https://openapi-generator.tech
#' @docType class
#' @title V1alpha1Message
#' @description V1alpha1Message Class
#' @format An \code{R6Class} generator object
#' @field error \link{Datasetsv1alpha1Error} [optional]
#'
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# OpenAPI-generated wrapper for the NCBI Datasets v1alpha "Message" schema:
# a container holding at most one `error` payload (Datasetsv1alpha1Error).
# NOTE(review): this file is generated by openapi-generator ("do not edit by
# hand"); comments below are review annotations only.
V1alpha1Message <- R6::R6Class(
  'V1alpha1Message',
  public = list(
    # Datasetsv1alpha1Error R6 instance, or NULL when no error is present.
    `error` = NULL,
    # Constructor: accepts an optional pre-built error object, which must be
    # an R6 instance. Extra arguments are collected but unused (generator
    # convention for optional fields).
    initialize = function(`error`=NULL, ...){
      local.optional.var <- list(...)
      if (!is.null(`error`)) {
        stopifnot(R6::is.R6(`error`))
        self$`error` <- `error`
      }
    },
    # Convert this object to a plain named list suitable for
    # jsonlite::toJSON(); an absent error yields an empty list.
    toJSON = function() {
      V1alpha1MessageObject <- list()
      if (!is.null(self$`error`)) {
        V1alpha1MessageObject[['error']] <-
          self$`error`$toJSON()
      }
      V1alpha1MessageObject
    },
    # Populate this object in place from a JSON string. The `error` field,
    # when present, is round-tripped through JSON text into a fresh
    # Datasetsv1alpha1Error instance.
    fromJSON = function(V1alpha1MessageJson) {
      V1alpha1MessageObject <- jsonlite::fromJSON(V1alpha1MessageJson)
      if (!is.null(V1alpha1MessageObject$`error`)) {
        errorObject <- Datasetsv1alpha1Error$new()
        errorObject$fromJSON(jsonlite::toJSON(V1alpha1MessageObject$error, auto_unbox = TRUE, digits = NA))
        self$`error` <- errorObject
      }
    },
    # Serialize directly to a JSON string (hand-assembled template; the
    # embedded literal's whitespace is part of the produced JSON text).
    toJSONString = function() {
      jsoncontent <- c(
        if (!is.null(self$`error`)) {
        sprintf(
        '"error":
          %s
        ',
        jsonlite::toJSON(self$`error`$toJSON(), auto_unbox=TRUE, digits = NA)
        )}
      )
      jsoncontent <- paste(jsoncontent, collapse = ",")
      paste('{', jsoncontent, '}', sep = "")
    },
    # Populate from a JSON string and return self (chainable).
    # NOTE(review): unlike fromJSON() above, this assigns the RETURN VALUE of
    # Datasetsv1alpha1Error$new()$fromJSON(...) to self$`error`, and has no
    # is.null guard for a missing `error` field — confirm that
    # Datasetsv1alpha1Error$fromJSON returns the object (not NULL) and that a
    # missing field is acceptable here.
    fromJSONString = function(V1alpha1MessageJson) {
      V1alpha1MessageObject <- jsonlite::fromJSON(V1alpha1MessageJson)
      self$`error` <- Datasetsv1alpha1Error$new()$fromJSON(jsonlite::toJSON(V1alpha1MessageObject$error, auto_unbox = TRUE, digits = NA))
      self
    }
  )
)
|
## ----loaddata------------------------------------------------------------
## Fix: use library() instead of require() for hard dependencies —
## require() returns FALSE on failure instead of stopping the script.
library(lattice)
library(data.table)
library(ggplot2)
#First I want to make sure that default directory is set right before I start with the work
setwd("C:\\RSWeek2")
# we define name of the file in unzipped and zipped format and check if unzipped format exists. If not, we unzip it'
# (fix: use idiomatic <- assignment at top level instead of =)
file <- "activity.csv"
zipfilename <- "repdata_data_activity.zip"
if (!file.exists(file)) {
  unzip(zipfilename)
}
## Read the activity data; interval becomes a factor so per-interval
## aggregation and factor-level conversion below behave as intended.
dataset <- read.csv('activity.csv', header = TRUE, sep = ",",
                    colClasses=c("numeric", "character", "numeric"))
dataset$date <- as.Date(dataset$date, format = "%Y-%m-%d")
dataset$interval <- as.factor(dataset$interval)
str(dataset)
# What is mean total number of steps taken per day?
steps_per_day <- aggregate(steps ~ date, dataset, sum)
colnames(steps_per_day) <- c("date","steps")
head(steps_per_day)
ggplot(steps_per_day, aes(x = steps)) +
  geom_histogram(fill = "orange", binwidth = 1000) +
  labs(title="Histogram of Steps Taken per Day",
       x = "Number of Steps per Day", y = "Number of times in a day(Count)") + theme_bw()
steps_mean <- mean(steps_per_day$steps, na.rm=TRUE)
steps_median <- median(steps_per_day$steps, na.rm=TRUE)
#What is the average daily activity pattern?
steps_per_interval <- aggregate(dataset$steps,
                                by = list(interval = dataset$interval),
                                FUN=mean, na.rm=TRUE)
#convert to integers
##this helps in plotting
steps_per_interval$interval <-
  as.integer(levels(steps_per_interval$interval)[steps_per_interval$interval])
colnames(steps_per_interval) <- c("interval", "steps")
ggplot(steps_per_interval, aes(x=interval, y=steps)) +
  geom_line(color="orange", size=1) +
  labs(title="Average Daily Activity Pattern", x="Interval", y="Number of steps") +
  theme_bw()
## 5-minute interval with the highest average step count
max_interval <- steps_per_interval[which.max(
  steps_per_interval$steps),]
#Imputing missing values
missing_vals <- sum(is.na(dataset$steps))
#filling na's
## Return data$steps with each missing value replaced by the value recorded
## for the matching interval in `pervalue` (a data.frame with columns
## `interval` and `steps`, e.g. per-interval means).
na_fill <- function(data, pervalue) {
  missing_idx <- which(is.na(data$steps))
  ## look up the replacement value for each missing row by its interval
  replacements <- unlist(lapply(missing_idx, function(i) {
    pervalue[pervalue$interval == data$interval[i], ]$steps
  }))
  result <- data$steps
  result[missing_idx] <- replacements
  result
}
## Build the imputed dataset: each missing step count is replaced by the
## mean number of steps observed for that 5-minute interval.
dataset_fill <- data.frame(
        steps = na_fill(dataset, steps_per_interval),
        date = dataset$date,
        interval = dataset$interval)
str(dataset_fill)
## Sanity check: no NAs should remain after imputation.
## Bug fix: this previously read `sum(is.na(rdata_fill$steps))`, but no
## object named 'rdata_fill' exists anywhere in this script (runtime
## error); the imputed data frame is named 'dataset_fill'.
sum(is.na(dataset_fill$steps))
# A histogram of the total number of steps taken each day
fill_steps_per_day <- aggregate(steps ~ date, dataset_fill, sum)
colnames(fill_steps_per_day) <- c("date","steps")
##plotting the histogram
ggplot(fill_steps_per_day, aes(x = steps)) +
  geom_histogram(fill = "blue", binwidth = 1000) +
  labs(title="Histogram of Steps Taken per Day",
       x = "Number of Steps per Day", y = "Number of times in a day(Count)") + theme_bw()
#Calculate and report the mean and median total number of steps taken per day.
# Are there differences in activity patterns between weekdays and weekends?
## Average steps per 5-minute interval for the given subset of the data.
## `data$interval` must be a factor; the result converts it to integer so
## it plots as a continuous axis. Returns a data.frame(interval, steps).
weekdays_steps <- function(data) {
  per_interval <- aggregate(data$steps,
                            by = list(interval = data$interval),
                            FUN = mean, na.rm = TRUE)
  ## factor -> integer via its character levels, for plotting
  per_interval$interval <-
    as.integer(levels(per_interval$interval)[per_interval$interval])
  colnames(per_interval) <- c("interval", "steps")
  per_interval
}
## Split `data` into weekend/weekday subsets, compute the mean steps per
## interval for each (via weekdays_steps), and stack them with a
## 'dayofweek' factor label ("weekend"/"weekday") for faceted plotting.
##
## Bug fix: the original compared weekdays(data$date) against the English
## names "Saturday"/"Sunday", which silently classifies EVERY day as a
## weekday in non-English locales. POSIXlt's wday component is
## locale-independent (0 = Sunday .. 6 = Saturday).
data_by_weekdays <- function(data) {
  is_weekend <- as.POSIXlt(data$date)$wday %in% c(0, 6)
  weekend_steps <- weekdays_steps(data[is_weekend, , drop = FALSE])
  weekday_steps <- weekdays_steps(data[!is_weekend, , drop = FALSE])
  weekend_steps$dayofweek <- rep("weekend", nrow(weekend_steps))
  weekday_steps$dayofweek <- rep("weekday", nrow(weekday_steps))
  combined <- rbind(weekend_steps, weekday_steps)
  combined$dayofweek <- as.factor(combined$dayofweek)
  combined
}
data_weekdays <- data_by_weekdays(dataset_fill)
ggplot(data_weekdays, aes(x=interval, y=steps)) +
geom_line(color="violet") +
facet_wrap(~ dayofweek, nrow=2, ncol=1) +
labs(x="Interval", y="Number of steps") +
theme_bw() | /RCode.r | no_license | Ramsey2016/https---github.com-Ramsey2016--Reproducible-Research_Ass1 | R | false | false | 4,361 | r | ## ----loaddata------------------------------------------------------------
## Fix: use library() instead of require() for hard dependencies —
## require() returns FALSE on failure instead of stopping the script.
library(lattice)
library(data.table)
library(ggplot2)
#First I want to make sure that default directory is set right before I start with the work
setwd("C:\\RSWeek2")
# we define name of the file in unzipped and zipped format and check if unzipped format exists. If not, we unzip it'
# (fix: use idiomatic <- assignment at top level instead of =)
file <- "activity.csv"
zipfilename <- "repdata_data_activity.zip"
if (!file.exists(file)) {
  unzip(zipfilename)
}
## Read the activity data; interval becomes a factor so per-interval
## aggregation and factor-level conversion below behave as intended.
dataset <- read.csv('activity.csv', header = TRUE, sep = ",",
                    colClasses=c("numeric", "character", "numeric"))
dataset$date <- as.Date(dataset$date, format = "%Y-%m-%d")
dataset$interval <- as.factor(dataset$interval)
str(dataset)
# What is mean total number of steps taken per day?
steps_per_day <- aggregate(steps ~ date, dataset, sum)
colnames(steps_per_day) <- c("date","steps")
head(steps_per_day)
ggplot(steps_per_day, aes(x = steps)) +
  geom_histogram(fill = "orange", binwidth = 1000) +
  labs(title="Histogram of Steps Taken per Day",
       x = "Number of Steps per Day", y = "Number of times in a day(Count)") + theme_bw()
steps_mean <- mean(steps_per_day$steps, na.rm=TRUE)
steps_median <- median(steps_per_day$steps, na.rm=TRUE)
#What is the average daily activity pattern?
steps_per_interval <- aggregate(dataset$steps,
                                by = list(interval = dataset$interval),
                                FUN=mean, na.rm=TRUE)
#convert to integers
##this helps in plotting
steps_per_interval$interval <-
  as.integer(levels(steps_per_interval$interval)[steps_per_interval$interval])
colnames(steps_per_interval) <- c("interval", "steps")
ggplot(steps_per_interval, aes(x=interval, y=steps)) +
  geom_line(color="orange", size=1) +
  labs(title="Average Daily Activity Pattern", x="Interval", y="Number of steps") +
  theme_bw()
## 5-minute interval with the highest average step count
max_interval <- steps_per_interval[which.max(
  steps_per_interval$steps),]
#Imputing missing values
missing_vals <- sum(is.na(dataset$steps))
#filling na's
## Return data$steps with each missing value replaced by the value recorded
## for the matching interval in `pervalue` (a data.frame with columns
## `interval` and `steps`, e.g. per-interval means).
na_fill <- function(data, pervalue) {
  missing_idx <- which(is.na(data$steps))
  ## look up the replacement value for each missing row by its interval
  replacements <- unlist(lapply(missing_idx, function(i) {
    pervalue[pervalue$interval == data$interval[i], ]$steps
  }))
  result <- data$steps
  result[missing_idx] <- replacements
  result
}
## Build the imputed dataset: each missing step count is replaced by the
## mean number of steps observed for that 5-minute interval.
dataset_fill <- data.frame(
        steps = na_fill(dataset, steps_per_interval),
        date = dataset$date,
        interval = dataset$interval)
str(dataset_fill)
## Sanity check: no NAs should remain after imputation.
## Bug fix: this previously read `sum(is.na(rdata_fill$steps))`, but no
## object named 'rdata_fill' exists anywhere in this script (runtime
## error); the imputed data frame is named 'dataset_fill'.
sum(is.na(dataset_fill$steps))
# A histogram of the total number of steps taken each day
fill_steps_per_day <- aggregate(steps ~ date, dataset_fill, sum)
colnames(fill_steps_per_day) <- c("date","steps")
##plotting the histogram
ggplot(fill_steps_per_day, aes(x = steps)) +
  geom_histogram(fill = "blue", binwidth = 1000) +
  labs(title="Histogram of Steps Taken per Day",
       x = "Number of Steps per Day", y = "Number of times in a day(Count)") + theme_bw()
#Calculate and report the mean and median total number of steps taken per day.
# Are there differences in activity patterns between weekdays and weekends?
## Average steps per 5-minute interval for the given subset of the data.
## `data$interval` must be a factor; the result converts it to integer so
## it plots as a continuous axis. Returns a data.frame(interval, steps).
weekdays_steps <- function(data) {
  per_interval <- aggregate(data$steps,
                            by = list(interval = data$interval),
                            FUN = mean, na.rm = TRUE)
  ## factor -> integer via its character levels, for plotting
  per_interval$interval <-
    as.integer(levels(per_interval$interval)[per_interval$interval])
  colnames(per_interval) <- c("interval", "steps")
  per_interval
}
## Split `data` into weekend/weekday subsets, compute the mean steps per
## interval for each (via weekdays_steps), and stack them with a
## 'dayofweek' factor label ("weekend"/"weekday") for faceted plotting.
##
## Bug fix: the original compared weekdays(data$date) against the English
## names "Saturday"/"Sunday", which silently classifies EVERY day as a
## weekday in non-English locales. POSIXlt's wday component is
## locale-independent (0 = Sunday .. 6 = Saturday).
data_by_weekdays <- function(data) {
  is_weekend <- as.POSIXlt(data$date)$wday %in% c(0, 6)
  weekend_steps <- weekdays_steps(data[is_weekend, , drop = FALSE])
  weekday_steps <- weekdays_steps(data[!is_weekend, , drop = FALSE])
  weekend_steps$dayofweek <- rep("weekend", nrow(weekend_steps))
  weekday_steps$dayofweek <- rep("weekday", nrow(weekday_steps))
  combined <- rbind(weekend_steps, weekday_steps)
  combined$dayofweek <- as.factor(combined$dayofweek)
  combined
}
data_weekdays <- data_by_weekdays(dataset_fill)
ggplot(data_weekdays, aes(x=interval, y=steps)) +
geom_line(color="violet") +
facet_wrap(~ dayofweek, nrow=2, ncol=1) +
labs(x="Interval", y="Number of steps") +
theme_bw() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reach.r
\name{.renumber_reaches}
\alias{.renumber_reaches}
\title{Renumber reaches, starting with one at the headwaters}
\usage{
.renumber_reaches(ws)
}
\arguments{
\item{ws}{A watershed}
}
\description{
Renumber reaches, starting with one at the headwaters
}
| /man/dot-renumber_reaches.Rd | permissive | frawalther/WatershedTools | R | false | true | 338 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reach.r
\name{.renumber_reaches}
\alias{.renumber_reaches}
\title{Renumber reaches, starting with one at the headwaters}
\usage{
.renumber_reaches(ws)
}
\arguments{
\item{ws}{A watershed}
}
\description{
Renumber reaches, starting with one at the headwaters
}
|
## ----------------------------------------------------------------- ##
## RTS.R ----------------------------------------------------------- ##
## Author(s): Peter Norwood, NCSU ---------------------------------- ##
## Purpose: run a batched bandit with restricted ------------------- ##
## thompson sampling ---------------------------------------------- ##
## ----------------------------------------------------------------- ##
setwd("-------------------------")
source("general_functions.R")
## RTS
## Purpose: run a batched bandit with restricted thompson sampling
## param train_set: data to start randomizing on
## param theta0: mean reward for arm 0
## param theta1: mean reward for arm 1
## param sigma: std deviation of reward distribution
## param clip: lower bound on randomization probability
## param batch_type: "random" or "fixed" to denote type of batches
## param batch_size: size of batch before updating model
## param batches: number of batches
## param batch_range: range of random batches
## param N_total: number of total subjects if using random batches
## return lst: list with trial df and p-value df
RTS <- function(train_set,theta0,theta1,sigma,clip,
                batch_type,
                batch_size=NULL,batches=NULL,
                batch_range=NULL,N_total=NULL){
  ## Run a batched two-arm bandit with restricted Thompson sampling.
  ##
  ## Args:
  ##   train_set:  data.frame (columns A, Y) used to fit the model for batch 1
  ##   theta0, theta1: mean reward for arm 0 / arm 1
  ##   sigma:      std deviation of the reward distribution
  ##   clip:       lower bound on the randomization probability
  ##   batch_type: "fixed" (`batches` batches of size batch_size) or
  ##               "random" (sizes drawn from batch_range until N_total subjects)
  ##   batch_size, batches:  used when batch_type == "fixed"
  ##   batch_range, N_total: used when batch_type == "random"
  ##
  ## Returns: list(trial = per-subject data.frame, pval_df = OLS/BOLS p-values)
  ##
  ## Relies on helpers sourced from general_functions.R: comb(),
  ## thompson_probs(), mean_reward(), ols_pvalue(), bols_pvalue().
  theta <- c(theta0,theta1)

  ## Simulate one batch of `n` subjects, fitting the working model on
  ## `history`; returns the batch as a data.frame. (Extracted so the "fixed"
  ## and "random" branches share one copy of the batch logic, which was
  ## previously duplicated verbatim. The RNG call order — thompson_probs,
  ## rbinom, rnorm — matches the original.)
  run_batch <- function(history, n, b) {
    fit <- lm(Y ~ -1 + as.factor(A), data = history)
    ## per-arm information gain (determinant of X'X with n hypothetical
    ## assignments appended) and estimated mean reward
    info <- matrix(NA, 2, 3)
    for (a in 0:1) {
      temp_a <- matrix(0, nrow = n, ncol = 2)
      temp_a[, a + 1] <- rep(1, n)
      temp_X <- rbind(model.matrix(fit), temp_a)
      info[a + 1, 1] <- a
      info[a + 1, 2] <- det(t(temp_X) %*% temp_X)
      info[a + 1, 3] <- coef(fit)[a + 1]
    }
    ## arms on the non-dominated (info, reward) frontier, shifted to 0/1
    conx <- comb(x = info[, 2], y = info[, 3]) - 1
    if (length(conx) == 2) {
      ## both arms non-dominated: Thompson probability for arm 1,
      ## clipped into [clip, 1 - clip]
      ts_probs <- thompson_probs(fit = fit, B = 500)
      rand_prob <- ts_probs$pi[2]
      if (rand_prob > 1 - clip) {
        rand_prob <- 1 - clip
      } else if (rand_prob < clip) {
        rand_prob <- clip
      }
    } else if (conx == 0) {
      rand_prob <- 0.1
    } else {
      rand_prob <- 0.9
    }
    A <- rbinom(n, 1, rand_prob)
    ## guarantee both arms appear in every batch
    if (mean(A) == 1) {
      A[1] <- 0
    } else if (mean(A) == 0) {
      A[1] <- 1
    }
    mu <- mean_reward(theta = theta, A = A)
    data.frame(A = A,
               pi = rep(rand_prob, n),
               mu = mu,
               Y = rnorm(n, mean = mu, sd = sigma),
               batch = b)
  }

  trial <- data.frame()
  if (batch_type == "fixed") {
    for (b in seq_len(batches)) {
      ## batch 1 is randomized using the training data only
      history <- if (b == 1) train_set else trial
      trial <- rbind(trial, run_batch(history, batch_size, b))
    }
  } else if (batch_type == "random") {
    b <- 1
    sub <- 0
    while (sub < N_total) {
      history <- if (b == 1) train_set else trial
      n_b <- sample(batch_range, 1)   # random batch size
      trial <- rbind(trial, run_batch(history, n_b, b))
      b <- b + 1
      sub <- sub + n_b
    }
  }
  ## assign subject ids
  trial$subject <- seq_len(nrow(trial))

  ## frequentist tests on the completed trial
  pval_df <- data.frame(method = c("ols", "bols"),
                        pval = c(ols_pvalue(trial = trial),
                                 bols_pvalue(trial = trial)))
  lst <- list(trial = trial,
              pval_df = pval_df)
  return(lst)
}
# test_RTS <- RTS(train_set=train_set,theta0=0,theta1=0,clip=0.1,
# sigma=1,batch_type="random",batch_range=3:20,N_total=1000)
#
# test_RTS <- RTS(train_set=train_set,theta0=0,theta1=0,clip=0.1,
# sigma=1,batch_type="fixed",batch_size=20,batches=50)
| /bandit_inference/RTS.R | no_license | peterpnorwood/NonDomSeqExp | R | false | false | 5,893 | r | ## ----------------------------------------------------------------- ##
## RTS.R ----------------------------------------------------------- ##
## Author(s): Peter Norwood, NCSU ---------------------------------- ##
## Purpose: run a batched bandit with restricted ------------------- ##
## thompson sampling ---------------------------------------------- ##
## ----------------------------------------------------------------- ##
setwd("-------------------------")
source("general_functions.R")
## RTS
## Purpose: run a batched bandit with restricted thompson sampling
## param train_set: data to start randomizing on
## param theta0: mean reward for arm 0
## param theta1: mean reward for arm 1
## param sigma: std deviation of reward distribution
## param clip: lower bound on randomization probability
## param batch_type: "random" or "fixed" to denote type of batches
## param batch_size: size of batch before updating model
## param batches: number of batches
## param batch_range: range of random batches
## param N_total: number of total subjects if using random batches
## return lst: list with trial df and p-value df
RTS <- function(train_set,theta0,theta1,sigma,clip,
                batch_type,
                batch_size=NULL,batches=NULL,
                batch_range=NULL,N_total=NULL){
  ## Run a batched two-arm bandit with restricted Thompson sampling.
  ##
  ## Args:
  ##   train_set:  data.frame (columns A, Y) used to fit the model for batch 1
  ##   theta0, theta1: mean reward for arm 0 / arm 1
  ##   sigma:      std deviation of the reward distribution
  ##   clip:       lower bound on the randomization probability
  ##   batch_type: "fixed" (`batches` batches of size batch_size) or
  ##               "random" (sizes drawn from batch_range until N_total subjects)
  ##   batch_size, batches:  used when batch_type == "fixed"
  ##   batch_range, N_total: used when batch_type == "random"
  ##
  ## Returns: list(trial = per-subject data.frame, pval_df = OLS/BOLS p-values)
  ##
  ## Relies on helpers sourced from general_functions.R: comb(),
  ## thompson_probs(), mean_reward(), ols_pvalue(), bols_pvalue().
  theta <- c(theta0,theta1)

  ## Simulate one batch of `n` subjects, fitting the working model on
  ## `history`; returns the batch as a data.frame. (Extracted so the "fixed"
  ## and "random" branches share one copy of the batch logic, which was
  ## previously duplicated verbatim. The RNG call order — thompson_probs,
  ## rbinom, rnorm — matches the original.)
  run_batch <- function(history, n, b) {
    fit <- lm(Y ~ -1 + as.factor(A), data = history)
    ## per-arm information gain (determinant of X'X with n hypothetical
    ## assignments appended) and estimated mean reward
    info <- matrix(NA, 2, 3)
    for (a in 0:1) {
      temp_a <- matrix(0, nrow = n, ncol = 2)
      temp_a[, a + 1] <- rep(1, n)
      temp_X <- rbind(model.matrix(fit), temp_a)
      info[a + 1, 1] <- a
      info[a + 1, 2] <- det(t(temp_X) %*% temp_X)
      info[a + 1, 3] <- coef(fit)[a + 1]
    }
    ## arms on the non-dominated (info, reward) frontier, shifted to 0/1
    conx <- comb(x = info[, 2], y = info[, 3]) - 1
    if (length(conx) == 2) {
      ## both arms non-dominated: Thompson probability for arm 1,
      ## clipped into [clip, 1 - clip]
      ts_probs <- thompson_probs(fit = fit, B = 500)
      rand_prob <- ts_probs$pi[2]
      if (rand_prob > 1 - clip) {
        rand_prob <- 1 - clip
      } else if (rand_prob < clip) {
        rand_prob <- clip
      }
    } else if (conx == 0) {
      rand_prob <- 0.1
    } else {
      rand_prob <- 0.9
    }
    A <- rbinom(n, 1, rand_prob)
    ## guarantee both arms appear in every batch
    if (mean(A) == 1) {
      A[1] <- 0
    } else if (mean(A) == 0) {
      A[1] <- 1
    }
    mu <- mean_reward(theta = theta, A = A)
    data.frame(A = A,
               pi = rep(rand_prob, n),
               mu = mu,
               Y = rnorm(n, mean = mu, sd = sigma),
               batch = b)
  }

  trial <- data.frame()
  if (batch_type == "fixed") {
    for (b in seq_len(batches)) {
      ## batch 1 is randomized using the training data only
      history <- if (b == 1) train_set else trial
      trial <- rbind(trial, run_batch(history, batch_size, b))
    }
  } else if (batch_type == "random") {
    b <- 1
    sub <- 0
    while (sub < N_total) {
      history <- if (b == 1) train_set else trial
      n_b <- sample(batch_range, 1)   # random batch size
      trial <- rbind(trial, run_batch(history, n_b, b))
      b <- b + 1
      sub <- sub + n_b
    }
  }
  ## assign subject ids
  trial$subject <- seq_len(nrow(trial))

  ## frequentist tests on the completed trial
  pval_df <- data.frame(method = c("ols", "bols"),
                        pval = c(ols_pvalue(trial = trial),
                                 bols_pvalue(trial = trial)))
  lst <- list(trial = trial,
              pval_df = pval_df)
  return(lst)
}
# test_RTS <- RTS(train_set=train_set,theta0=0,theta1=0,clip=0.1,
# sigma=1,batch_type="random",batch_range=3:20,N_total=1000)
#
# test_RTS <- RTS(train_set=train_set,theta0=0,theta1=0,clip=0.1,
# sigma=1,batch_type="fixed",batch_size=20,batches=50)
|
/Rproj/geoRglm/man/b50.Rd | no_license | yidongwork/kentucky_intern_2 | R | false | false | 1,878 | rd | ||
#' Blend multiple colors
#'
#' Blend multiple colors
#'
#' This function is intended to blend two or more colors,
#' by default using "paint mixing" style, similar to subtractive
#' color mixing. It accomplishes this goal by using a red-yellow-blue
#' color wheel (very similar to cyan-yellow-magenta), then determines
#' the average color hue with appropriate loss of color saturation.
#'
#' This function also utilized color transparency, applied internally
#' as relative color weights, during the color mixing process.
#'
#' This function blends multiple colors, including several useful
#' features:
#'
#' * color wheel red-yellow-blue, subtractive color mixing
#' * can blend more than two colors at once
#' * accounts for transparency of individual colors
#'
#' The basic design guide was to meet these expectations:
#'
#' * red + yellow = orange
#' * blue + yellow = green
#' * red + blue = purple
#' * blue + red + yellow = some brown/gray substance
#'
#' The input `x` can be a vector of colors, or a `list`. When
#' `x` is a `list` then the unique vectors are blended, returning
#' a vector with length `length(x)`.
#'
#' The default additive color mixing, with red-green-blue colors
#' used in electronic monitors, does not meet these criteria.
#' (In no logical paint mixing exercise would someone expect that
#' mixing red and green would make yellow; or that
#' blue and yellow would make grey.)
#'
#' In general the function performs well, with some exceptions
#' where the color hue angle is not well-normalized opposite
#' its complementary color, and therefore does not make the
#' expected "brownish/grey" output. Examples include
#' `blend_colors(c("yellow", "purple"))` which is closer
#' to blue + yellow = green, because purple is also composed
#' of blue with some red. Indeed, the R color hue for purple
#' is 283; the hue for blue is 266; the hue for red is 12 (372);
#' which means purple is substantially closer to blue than red.
#' A suitable workaround in this case is to use
#' `blend_colors(c("yellow", "deeppink4"))`.
#'
#' @family colorjam core
#'
#' @return `character` vector with blended color; when input `x`
#' is a `list` the returned vector will have length `length(x)`.
#'
#' @param x `character` vector of R colors in hex format, or `list`
#' of color vectors, where each vector will be independently
#' blended.
#' @param preset `character` value indicating the color wheel preset,
#'   passed to `colorjam::h2hwOptions()`.
#' @param h1,h2 `numeric` vectors of hue warp control points passed to
#'   `colorjam::h2hw()` and `colorjam::hw2h()`; when either is `NULL`
#'   they are derived from `preset` via `colorjam::h2hwOptions()`.
#' @param lens `numeric` value used to influence the color saturation
#' after averaging color wheel angles.
#' @param do_plot `logical` indicating whether to depict the color
#' blend operation using `jamba::showColors()`.
#' @param c_weight `numeric` value used to weight the average color
#' chroma (saturation) using the mean chroma values of the input
#' colors. When `c_weight=0` the chroma uses the radius returned
#' by the mean color wheel angle.
#' @param c_floor `numeric` value indicating the `C` chroma HCL value
#' below which a color is considered to be "grey" and unsaturated.
#' When this happens, the hue contribution is set to 0.001 relative
#' to other colors being blended. This correction is done because
#' every color is assigned one `H` hue value in HCL, even when
#' the `C` chroma (saturation) is zero, therefore these colors
#' effectively have no `H` hue.
#' @param ... additional arguments are ignored.
#'
#' @examples
#' blend_colors(c("red", "yellow"), do_plot=TRUE)
#'
#' blend_colors(c("blue", "gold"), do_plot=TRUE)
#'
#' blend_colors(c("blue", "red3"), do_plot=TRUE)
#'
#' blend_colors(c("dodgerblue", "springgreen3"), do_plot=TRUE)
#'
#' blend_colors(c("green", "dodgerblue"), do_plot=TRUE)
#'
#' blend_colors(c("red", "gold", "blue"), do_plot=TRUE)
#'
#' blend_colors(c("green4", "red"), do_plot=TRUE)
#'
#' blend_colors(c("deeppink4", "gold"), do_plot=TRUE)
#'
#' blend_colors(c("blue4", "darkorange1"), do_plot=TRUE)
#'
#' @export
blend_colors <- function
(x,
 preset=c("ryb", "none", "dichromat", "rgb", "ryb2"),
 h1=NULL,
 h2=NULL,
 do_plot=FALSE,
 lens=0,
 c_weight=0.2,
 c_floor=12,
 ...)
{
   ## Blend colors by averaging their hue angles on a warped (by default
   ## red-yellow-blue) color wheel, weighting each input by its alpha
   ## transparency; chroma and luminance are alpha-weighted means.
   ## Original design notes:
   ## 1. Convert colors to ryb
   ## 2. Implement color subtraction
   ## new_r <- 255 - sqrt( (255 - r_1)^2 + (255 - r_2)^2 )
   ## 3. convert to rgb
   #x <- jamba::nameVector(c("red", "yellow", "blue"));
   preset <- match.arg(preset);
   ## guard c_floor: empty input becomes 0, and only the first value is used
   if (length(c_floor) == 0) {
      c_floor <- 0;
   }
   c_floor <- head(c_floor, 1);
   ## handle list input: blend each unique color vector once, then map the
   ## results back so the output has length(x)
   if (is.list(x)) {
      x_unique <- unique(x);
      x_match <- match(x, x_unique);
      x_blends <- sapply(x_unique, function(x1){
         blend_colors(x=x1,
            preset=preset,
            lens=lens,
            h1=h1,
            h2=h2,
            do_plot=FALSE,
            c_weight=c_weight);
      });
      x_blend <- x_blends[x_match];
      names(x_blend) <- names(x);
      return(x_blend);
   }
   ## weights are defined by transparency
   x_w <- jamba::col2alpha(x);
   x_HCL <- jamba::col2hcl(x);
   ## hue weight: alpha, reduced to ~0.1% for grey colors (C <= c_floor)
   ## and to 20% for weakly saturated colors (C < 20) — HCL assigns every
   ## color a hue even when its chroma is zero, so grey hues are arbitrary
   x_w_use <- ifelse(x_HCL["C",] <= c_floor,
      x_w - x_w*0.999,
      ifelse(x_HCL["C",] < 20,
         x_w - x_w * 0.8,
         x_w));
   ## adjust hue using color wheel preset; explicit h1/h2 take precedence
   ## NOTE(review): setOptions is passed as the string "FALSE" — confirm
   ## colorjam::h2hwOptions() expects a character flag here rather than a
   ## logical.
   if (length(h1) == 0 || length(h2) == 0) {
      h1h2 <- colorjam::h2hwOptions(preset=preset,
         setOptions="FALSE");
   } else {
      h1h2 <- list(h1=h1, h2=h2);
   }
   ## convert RGB-wheel hues to warped-wheel hues before averaging
   h_rgb <- x_HCL["H",];
   h_ryb <- colorjam::h2hw(h=h_rgb,
      h1=h1h2$h1,
      h2=h1h2$h2);
   ## mean hue angle; if every weight collapsed to 0 (all inputs grey),
   ## restore small equal weights so mean_angle is defined
   if (all(x_w_use == 0)) {
      x_w_use <- rep(0.001, length(x_w_use));
   }
   h_ryb_mean_v <- mean_angle(h_ryb,
      lens=lens,
      w=x_w_use);
   h_ryb_mean <- h_ryb_mean_v["deg"];
   ## map the averaged warped hue back to the RGB hue wheel
   h_rgb_mean <- colorjam::hw2h(h=h_ryb_mean,
      h1=h1h2$h1,
      h2=h1h2$h2);
   ## blend chroma: shrink toward the mean-vector radius; c_weight pulls
   ## the radius back toward 1 (full saturation)
   mean_radius <- weighted.mean(c(1, h_ryb_mean_v["radius2"]),
      w=c(c_weight, 1));
   x_mean_C <- weighted.mean(x_HCL["C",], w=x_w) * mean_radius;
   x_mean_L <- weighted.mean(x_HCL["L",], w=x_w);
   new_HCL <- as.matrix(c(H=h_rgb_mean,
      C=unname(x_mean_C),
      L=x_mean_L,
      alpha=1));
   new_col <- jamba::hcl2col(new_HCL);
   ## optional visual summary: inputs vs blended result
   if (do_plot) {
      jamba::showColors(list(x=x,
         blended=rep(new_col, length(x))));
   }
   return(new_col);
}
#' Calculate the mean angle
#'
#' Calculate the mean angle
#'
#' This function takes a vector of angles in degrees (0 to 360 degrees)
#' and returns the mean angle based upon the average of unit vectors.
#'
#' The function also optionally accomodates weighted mean values,
#' if a vector of weights is supplied as `w`.
#'
#' Part of the intent of this function is to be used for color blending
#' methods, for example taking the average color hue from a vector of
#' colors. For this purpose, some colors may have varying color saturation
#' and transparency, which are mapped here as weight `w`. Colors which are
#' fully transparent should therefore have weight `w=0` so they do not
#' contribute to the resulting average color hue. Also during color blending
#' operations, the resulting color saturation is adjusted using the `lens`
#' argument, the default `lens=-5` has a tendency to increase intermediate
#' color saturation.
#'
#' @family colorjam hue warp
#'
#' @return `numeric` vector that contains
#' * `degree` the mean angle in degrees
#' * `radius` the actual radius based upon mean unit vectors
#' * `radius2` the adjusted radius using `jamba::warpAroundZero()`
#'
#' @param x `numeric` vector of angles in degrees
#' @param w `numeric` vector representing weights
#' @param do_plot `logical` indicating whether to create a visual summary plot
#' @param lens `numeric` value passed to `jamba::warpAroundZero()` to adjust
#' the radius
#' @param ... additional arguments are ignored
#'
#' @examples
#' mean_angle(c(120, 45), do_plot=TRUE);
#'
#' @export
mean_angle <- function
(x,
 w=NULL,
 do_plot=FALSE,
 lens=-5,
 ...)
{
   ## Mean of angles in degrees via the (optionally weighted) average of
   ## unit vectors. Returns c(deg, radius, radius2) where radius is the
   ## length of the mean vector and radius2 is that radius warped by `lens`.
   ##
   ## Encode each angle as a unit vector using a compass-style convention:
   ## x = sin(deg), y = cos(deg), i.e. 0 degrees points "up" and angles
   ## increase clockwise.
   xy <- data.frame(x=sin(jamba::deg2rad(x)),
      y=cos(jamba::deg2rad(x)));
   ## default to equal weights; recycle w to length(x)
   if (length(w) == 0) {
      w <- 1;
   }
   w <- rep(w,
      length.out=length(x));
   xy_mean <- matrixStats::colWeightedMeans(
      x=as.matrix(xy),
      w=w);
   ## distance of the weighted mean point from the origin: 1 for identical
   ## angles, approaching 0 for opposing angles. dist() returns a
   ## one-element "dist" object, used numerically below.
   xy_m <- matrix(ncol=2, byrow=TRUE,
      c(0, 0, xy_mean));
   x_radius <- dist(xy_m);
   ## warp the radius per jamba::warpAroundZero (negative lens inflates
   ## intermediate radii, which boosts blended color saturation)
   x_radius2 <- jamba::warpAroundZero(x_radius,
      xCeiling=1,
      lens=lens);
   ## recover the mean angle; note the deliberately swapped atan2 arguments
   ## (x=..y.., y=..x..), which invert the sin/cos compass encoding above
   x_deg <- jamba::rad2deg(atan2(x=xy_mean["y"], y=xy_mean["x"])) %% 360;
   ## optional diagnostic plot: unit circle, weighted input vectors (solid),
   ## unweighted inputs (dotted), raw mean (blue) and warped mean (orange)
   if (do_plot) {
      jamba::nullPlot(xlim=c(-1,1),
         ylim=c(-1,1),
         asp=1,
         doBoxes=FALSE);
      aseq <- seq(from=0, to=360, by=2);
      lines(x=sin(jamba::deg2rad(aseq)),
         y=cos(jamba::deg2rad(aseq)),
         type="l",
         lty="dotted");
      arrows(x0=0,
         y0=0,
         x1=xy$x * w,
         y1=xy$y * w,
         lwd=2,
         angle=30);
      arrows(x0=0,
         y0=0,
         x1=xy$x,
         y1=xy$y,
         lty="dotted",
         lwd=2,
         angle=90);
      arrows(x0=0,
         y0=0,
         x1=sin(jamba::deg2rad(x_deg)) * x_radius,
         y1=cos(jamba::deg2rad(x_deg)) * x_radius,
         lty="solid",
         lwd=2,
         angle=90,
         col="dodgerblue");
      arrows(x0=0,
         y0=0,
         x1=sin(jamba::deg2rad(x_deg)) * x_radius2,
         y1=cos(jamba::deg2rad(x_deg)) * x_radius2,
         lwd=4,
         angle=30,
         col="darkorange1");
   }
   c(deg=unname(x_deg),
      radius=x_radius,
      radius2=x_radius2);
}
| /R/colorjam-blendcolors.R | no_license | jmw86069/colorjam | R | false | false | 9,429 | r |
#' Blend multiple colors
#'
#' Blend multiple colors
#'
#' This function is intended to blend two or more colors,
#' by default using "paint mixing" style, similar to subtractive
#' color mixing. It accomplishes this goal by using a red-yellow-blue
#' color wheel (very similar to cyan-yellow-magenta), then determines
#' the average color hue with appropriate loss of color saturation.
#'
#' This function also utilized color transparency, applied internally
#' as relative color weights, during the color mixing process.
#'
#' This function blends multiple colors, including several useful
#' features:
#'
#' * color wheel red-yellow-blue, subtractive color mixing
#' * can blend more than two colors at once
#' * accounts for transparency of individual colors
#'
#' The basic design guide was to meet these expectations:
#'
#' * red + yellow = orange
#' * blue + yellow = green
#' * red + blue = purple
#' * blue + red + yellow = some brown/gray substance
#'
#' The input `x` can be a vector of colors, or a `list`. When
#' `x` is a `list` then the unique vectors are blended, returning
#' a vector with length `length(x)`.
#'
#' The default additive color mixing, with red-green-blue colors
#' used in electronic monitors, does not meet these criteria.
#' (In no logical paint mixing exercise would someone expect that
#' mixing red and green would make yellow; or that
#' blue and yellow would make grey.)
#'
#' In general the function performs well, with some exceptions
#' where the color hue angle is not well-normalized opposite
#' its complementary color, and therefore does not make the
#' expected "brownish/grey" output. Examples include
#' `blend_colors(c("yellow", "purple"))` which is closer
#' to blue + yellow = green, because purple is also composed
#' of blue with some red. Indeed, the R color hue for purple
#' is 283; the hue for blue is 266; the hue for red is 12 (372);
#' which means purple is substantially closer to blue than red.
#' A suitable workaround in this case is to use
#' `blend_colors(c("yellow", "deeppink4"))`.
#'
#' @family colorjam core
#'
#' @return `character` vector with blended color; when input `x`
#' is a `list` the returned vector will have length `length(x)`.
#'
#' @param x `character` vector of R colors in hex format, or `list`
#' of color vectors, where each vector will be independently
#' blended.
#' @param preset `character` value indicating the color wheel preset,
#'   passed to `colorjam::h2hwOptions()`.
#' @param h1,h2 `numeric` vectors of hue warp control points passed to
#'   `colorjam::h2hw()` and `colorjam::hw2h()`; when either is `NULL`
#'   they are derived from `preset` via `colorjam::h2hwOptions()`.
#' @param lens `numeric` value used to influence the color saturation
#' after averaging color wheel angles.
#' @param do_plot `logical` indicating whether to depict the color
#' blend operation using `jamba::showColors()`.
#' @param c_weight `numeric` value used to weight the average color
#' chroma (saturation) using the mean chroma values of the input
#' colors. When `c_weight=0` the chroma uses the radius returned
#' by the mean color wheel angle.
#' @param c_floor `numeric` value indicating the `C` chroma HCL value
#' below which a color is considered to be "grey" and unsaturated.
#' When this happens, the hue contribution is set to 0.001 relative
#' to other colors being blended. This correction is done because
#' every color is assigned one `H` hue value in HCL, even when
#' the `C` chroma (saturation) is zero, therefore these colors
#' effectively have no `H` hue.
#' @param ... additional arguments are ignored.
#'
#' @examples
#' blend_colors(c("red", "yellow"), do_plot=TRUE)
#'
#' blend_colors(c("blue", "gold"), do_plot=TRUE)
#'
#' blend_colors(c("blue", "red3"), do_plot=TRUE)
#'
#' blend_colors(c("dodgerblue", "springgreen3"), do_plot=TRUE)
#'
#' blend_colors(c("green", "dodgerblue"), do_plot=TRUE)
#'
#' blend_colors(c("red", "gold", "blue"), do_plot=TRUE)
#'
#' blend_colors(c("green4", "red"), do_plot=TRUE)
#'
#' blend_colors(c("deeppink4", "gold"), do_plot=TRUE)
#'
#' blend_colors(c("blue4", "darkorange1"), do_plot=TRUE)
#'
#' @export
blend_colors <- function
(x,
 preset=c("ryb", "none", "dichromat", "rgb", "ryb2"),
 h1=NULL,
 h2=NULL,
 do_plot=FALSE,
 lens=0,
 c_weight=0.2,
 c_floor=12,
 ...)
{
   ## Blend colors by averaging their hue angles on a warped (by default
   ## red-yellow-blue) color wheel, weighting each input by its alpha
   ## transparency; chroma and luminance are alpha-weighted means.
   ## Original design notes:
   ## 1. Convert colors to ryb
   ## 2. Implement color subtraction
   ## new_r <- 255 - sqrt( (255 - r_1)^2 + (255 - r_2)^2 )
   ## 3. convert to rgb
   #x <- jamba::nameVector(c("red", "yellow", "blue"));
   preset <- match.arg(preset);
   ## guard c_floor: empty input becomes 0, and only the first value is used
   if (length(c_floor) == 0) {
      c_floor <- 0;
   }
   c_floor <- head(c_floor, 1);
   ## handle list input: blend each unique color vector once, then map the
   ## results back so the output has length(x)
   if (is.list(x)) {
      x_unique <- unique(x);
      x_match <- match(x, x_unique);
      x_blends <- sapply(x_unique, function(x1){
         blend_colors(x=x1,
            preset=preset,
            lens=lens,
            h1=h1,
            h2=h2,
            do_plot=FALSE,
            c_weight=c_weight);
      });
      x_blend <- x_blends[x_match];
      names(x_blend) <- names(x);
      return(x_blend);
   }
   ## weights are defined by transparency
   x_w <- jamba::col2alpha(x);
   x_HCL <- jamba::col2hcl(x);
   ## hue weight: alpha, reduced to ~0.1% for grey colors (C <= c_floor)
   ## and to 20% for weakly saturated colors (C < 20) — HCL assigns every
   ## color a hue even when its chroma is zero, so grey hues are arbitrary
   x_w_use <- ifelse(x_HCL["C",] <= c_floor,
      x_w - x_w*0.999,
      ifelse(x_HCL["C",] < 20,
         x_w - x_w * 0.8,
         x_w));
   ## adjust hue using color wheel preset; explicit h1/h2 take precedence
   ## NOTE(review): setOptions is passed as the string "FALSE" — confirm
   ## colorjam::h2hwOptions() expects a character flag here rather than a
   ## logical.
   if (length(h1) == 0 || length(h2) == 0) {
      h1h2 <- colorjam::h2hwOptions(preset=preset,
         setOptions="FALSE");
   } else {
      h1h2 <- list(h1=h1, h2=h2);
   }
   ## convert RGB-wheel hues to warped-wheel hues before averaging
   h_rgb <- x_HCL["H",];
   h_ryb <- colorjam::h2hw(h=h_rgb,
      h1=h1h2$h1,
      h2=h1h2$h2);
   ## mean hue angle; if every weight collapsed to 0 (all inputs grey),
   ## restore small equal weights so mean_angle is defined
   if (all(x_w_use == 0)) {
      x_w_use <- rep(0.001, length(x_w_use));
   }
   h_ryb_mean_v <- mean_angle(h_ryb,
      lens=lens,
      w=x_w_use);
   h_ryb_mean <- h_ryb_mean_v["deg"];
   ## map the averaged warped hue back to the RGB hue wheel
   h_rgb_mean <- colorjam::hw2h(h=h_ryb_mean,
      h1=h1h2$h1,
      h2=h1h2$h2);
   ## blend chroma: shrink toward the mean-vector radius; c_weight pulls
   ## the radius back toward 1 (full saturation)
   mean_radius <- weighted.mean(c(1, h_ryb_mean_v["radius2"]),
      w=c(c_weight, 1));
   x_mean_C <- weighted.mean(x_HCL["C",], w=x_w) * mean_radius;
   x_mean_L <- weighted.mean(x_HCL["L",], w=x_w);
   new_HCL <- as.matrix(c(H=h_rgb_mean,
      C=unname(x_mean_C),
      L=x_mean_L,
      alpha=1));
   new_col <- jamba::hcl2col(new_HCL);
   ## optional visual summary: inputs vs blended result
   if (do_plot) {
      jamba::showColors(list(x=x,
         blended=rep(new_col, length(x))));
   }
   return(new_col);
}
#' Calculate the mean angle
#'
#' Calculate the mean angle
#'
#' This function takes a vector of angles in degrees (0 to 360 degrees)
#' and returns the mean angle based upon the average of unit vectors.
#'
#' The function also optionally accomodates weighted mean values,
#' if a vector of weights is supplied as `w`.
#'
#' Part of the intent of this function is to be used for color blending
#' methods, for example taking the average color hue from a vector of
#' colors. For this purpose, some colors may have varying color saturation
#' and transparency, which are mapped here as weight `w`. Colors which are
#' fully transparent should therefore have weight `w=0` so they do not
#' contribute to the resulting average color hue. Also during color blending
#' operations, the resulting color saturation is adjusted using the `lens`
#' argument, the default `lens=-5` has a tendency to increase intermediate
#' color saturation.
#'
#' @family colorjam hue warp
#'
#' @return `numeric` vector that contains
#' * `deg` the mean angle in degrees
#' * `radius` the actual radius based upon mean unit vectors
#' * `radius2` the adjusted radius using `jamba::warpAroundZero()`
#'
#' @param x `numeric` vector of angles in degrees
#' @param w `numeric` vector representing weights
#' @param do_plot `logical` indicating whether to create a visual summary plot
#' @param lens `numeric` value passed to `jamba::warpAroundZero()` to adjust
#' the radius
#' @param ... additional arguments are ignored
#'
#' @examples
#' mean_angle(c(120, 45), do_plot=TRUE);
#'
#' @export
mean_angle <- function
(x,
 w=NULL,
 do_plot=FALSE,
 lens=-5,
 ...)
{
   ## Represent each angle (degrees, compass-style: 0 = "north") as a
   ## unit vector; x-component is sin, y-component is cos.
   rad <- jamba::deg2rad(x);
   unit_xy <- data.frame(x=sin(rad),
      y=cos(rad));
   ## Default to equal weights, then recycle to the input length.
   if (length(w) == 0) {
      w <- 1;
   }
   wts <- rep(w,
      length.out=length(x));
   ## Weighted centroid of the unit vectors.
   ctr <- c(x=stats::weighted.mean(unit_xy$x, w=wts),
      y=stats::weighted.mean(unit_xy$y, w=wts));
   ## Raw radius: distance of the centroid from the origin, and the
   ## lens-adjusted radius used downstream for color saturation.
   ctr_radius <- sqrt(ctr[["x"]]^2 + ctr[["y"]]^2);
   ctr_radius2 <- jamba::warpAroundZero(ctr_radius,
      xCeiling=1,
      lens=lens);
   ## Mean angle in degrees, wrapped into [0, 360);
   ## atan2(sin_component, cos_component) recovers the compass angle.
   mean_deg <- jamba::rad2deg(atan2(ctr[["x"]], ctr[["y"]])) %% 360;
   if (do_plot) {
      ## Square empty canvas with a dotted unit circle for reference.
      jamba::nullPlot(xlim=c(-1,1),
         ylim=c(-1,1),
         asp=1,
         doBoxes=FALSE);
      circle_deg <- seq(from=0, to=360, by=2);
      lines(x=sin(jamba::deg2rad(circle_deg)),
         y=cos(jamba::deg2rad(circle_deg)),
         type="l",
         lty="dotted");
      ## Input vectors scaled by weight (solid, open arrowheads).
      arrows(x0=0,
         y0=0,
         x1=unit_xy$x * wts,
         y1=unit_xy$y * wts,
         lwd=2,
         angle=30);
      ## Unweighted input vectors (dotted).
      arrows(x0=0,
         y0=0,
         x1=unit_xy$x,
         y1=unit_xy$y,
         lty="dotted",
         lwd=2,
         angle=90);
      ## Mean vector at the raw centroid radius (blue).
      arrows(x0=0,
         y0=0,
         x1=sin(jamba::deg2rad(mean_deg)) * ctr_radius,
         y1=cos(jamba::deg2rad(mean_deg)) * ctr_radius,
         lty="solid",
         lwd=2,
         angle=90,
         col="dodgerblue");
      ## Mean vector at the lens-adjusted radius (orange).
      arrows(x0=0,
         y0=0,
         x1=sin(jamba::deg2rad(mean_deg)) * ctr_radius2,
         y1=cos(jamba::deg2rad(mean_deg)) * ctr_radius2,
         lwd=4,
         angle=30,
         col="darkorange1");
   }
   c(deg=mean_deg,
      radius=ctr_radius,
      radius2=ctr_radius2);
}
|
## Split the Grupo Bimbo inventory-demand data by week (`Semana`):
## weeks 3-8 become the training set, week 9 is held out for validation.
library(data.table)

# `header = TRUE` spelled out -- `T` is a reassignable binding, not a keyword.
DT <- fread('/home/ops/visualizations/residuals/data/grupo-bimbo-inventory-demand/train-clients-products-towns.csv', header = TRUE, sep = ',')

# %in% never yields NA, so rows with missing Semana are safely excluded.
train <- DT[Semana %in% 3:8]
validation <- DT[Semana %in% 9]

fwrite(train, 'train.csv')
fwrite(validation, 'validation.csv')
| /local-data/grupo-bimbo-inventory-demand/split.R | permissive | standardgalactic/residuals-vis-data | R | false | false | 304 | r | library(data.table)
# Read the full training file, then split by week (`Semana`):
# weeks 3-8 -> train.csv, week 9 -> validation.csv.
# `header = TRUE` spelled out -- `T` is reassignable and unsafe.
DT <- fread('/home/ops/visualizations/residuals/data/grupo-bimbo-inventory-demand/train-clients-products-towns.csv', header = TRUE, sep = ',')
train <- DT[Semana %in% 3:8]
validation <- DT[Semana %in% 9]
fwrite(train, 'train.csv')
fwrite(validation, 'validation.csv')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psiFuns.R
\name{rho}
\alias{rho}
\title{Rho functions}
\usage{
rho(u, family = "bisquare", cc, standardize = TRUE)
}
\arguments{
\item{u}{point or vector at which rho is to be evaluated}
\item{family}{family string specifying the name of the family of loss function to be used (current valid
options are "bisquare", "opt" and "mopt").}
\item{cc}{tuning parameters to be computed according to efficiency and / or breakdown
considerations. See \link{lmrobdet.control}, \link{bisquare}, \link{mopt}
and \link{opt}.}
\item{standardize}{logical value determining whether the rho function is to be
standardized so that its maximum value is 1. See \link{Mpsi}.}
}
\value{
The value(s) of \code{rho} at \code{u}
}
\description{
This function returns the value of the "rho" loss function used
to compute either an M-scale estimator or a robust regression
estimator. It currently can be used to compute the bisquare, optimal
and modified optimal loss functions.
}
\examples{
# Evaluate rho tuned for 85\% efficiency
rho(u=1.1, family='bisquare', cc=bisquare(.85))
# Evaluate rho tuned for 50\% breakdown
rho(u=1.1, family='opt', cc=lmrobdet.control(bb=.5, family='opt')$tuning.chi)
}
\author{
Matias Salibian-Barrera, \email{matias@stat.ubc.ca}
}
| /man/rho.Rd | no_license | msalibian/RobStatTM | R | false | true | 1,320 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psiFuns.R
\name{rho}
\alias{rho}
\title{Rho functions}
\usage{
rho(u, family = "bisquare", cc, standardize = TRUE)
}
\arguments{
\item{u}{point or vector at which rho is to be evaluated}
\item{family}{family string specifying the name of the family of loss function to be used (current valid
options are "bisquare", "opt" and "mopt").}
\item{cc}{tuning parameters to be computed according to efficiency and / or breakdown
considerations. See \link{lmrobdet.control}, \link{bisquare}, \link{mopt}
and \link{opt}.}
\item{standardize}{logical value determining whether the rho function is to be
standardized so that its maximum value is 1. See \link{Mpsi}.}
}
\value{
The value(s) of \code{rho} at \code{u}
}
\description{
This function returns the value of the "rho" loss function used
to compute either an M-scale estimator or a robust regression
estimator. It currently can be used to compute the bisquare, optimal
and modified optimal loss functions.
}
\examples{
# Evaluate rho tuned for 85\% efficiency
rho(u=1.1, family='bisquare', cc=bisquare(.85))
# Evaluate rho tuned for 50\% breakdown
rho(u=1.1, family='opt', cc=lmrobdet.control(bb=.5, family='opt')$tuning.chi)
}
\author{
Matias Salibian-Barrera, \email{matias@stat.ubc.ca}
}
|
### MNT 10x snRNA-seq workflow: step 04 - downstream comparisons
### **Region-specific analyses**
### - (3x) NAc samples from: Br5161 & Br5212 & Br5287
### * Comparison to UCLA's Drop-seq on mouse medial amyg (MeA)
#####################################################################
library(SingleCellExperiment)
library(EnsDb.Hsapiens.v86)
library(org.Hs.eg.db)
library(scater)
library(scran)
library(batchelor)
library(DropletUtils)
library(jaffelab)
library(limma)
library(lattice)
library(RColorBrewer)
library(pheatmap)
### Palette taken from `scater`
# Categorical color palettes (Tableau 10 "medium" and Tableau 20) used
# for cluster coloring below. `<-` replaces `=` for top-level assignment.
tableau10medium <- c("#729ECE", "#FF9E4A", "#67BF5C", "#ED665D",
                     "#AD8BC9", "#A8786E", "#ED97CA", "#A2A2A2",
                     "#CDCC5D", "#6DCCDA")
tableau20 <- c("#1F77B4", "#AEC7E8", "#FF7F0E", "#FFBB78", "#2CA02C",
               "#98DF8A", "#D62728", "#FF9896", "#9467BD", "#C5B0D5",
               "#8C564B", "#C49C94", "#E377C2", "#F7B6D2", "#7F7F7F",
               "#C7C7C7", "#BCBD22", "#DBDB8D", "#17BECF", "#9EDAE5")
# ===
### Pseudobulk>modeling approach ============================================
# * Skip this -> Now using sn-level stats for this comparison
# Purpose: match human Amyg and mouse MeA pseudobulk SCEs on shared
# JAX HomoloGene IDs (resolving duplicated homologs by keeping the
# higher-expressing gene), then save the matched pair for reuse.
# NOTE(review): all load()/save() paths assume the JHPCE filesystem.

## load modeling outputs
# 10x-pilot human Amyg
load("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/rdas/markers-stats_Amyg-n2_manualContrasts_MNTMar2020.rda", verbose=T)
# eb_contrasts.amy.broad, eb_list.amy.broad, sce.amy.PB

# UCLA mouse MeA Drop-seq
load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/markers-stats_mouse-MeA-Drop-seq_manualContrasts_MNTApr2020.rda", verbose=T)
# eb_list.amy.mm, corfit.amy.mm, sce.amy.mm.PB

# Add EntrezID for human (Ensembl -> Entrez; 1:many mappings possible)
hs.entrezIds <- mapIds(org.Hs.eg.db, keys=rowData(sce.amy.PB)$ID,
                       column="ENTREZID", keytype="ENSEMBL")
    # "'select()' returned 1:many mapping between keys and columns"
table(!is.na(hs.entrezIds))
    # 20,578 valid entries (remember this is already subsetted for those non-zero genes only)
# Add to rowData
rowData(sce.amy.PB) <- cbind(rowData(sce.amy.PB), hs.entrezIds)

## Bring in 'HomoloGene.ID' for human (already in rowData for mm SCE) ===
## JAX annotation info (downloaded live from informatics.jax.org)
hom = read.delim("http://www.informatics.jax.org/downloads/reports/HOM_AllOrganism.rpt",
                 as.is=TRUE)
hom_hs <- hom[hom$Common.Organism.Name == "human", ]
    # of 19,124 entries

table(rowData(sce.amy.PB)$hs.entrezIds %in% hom_hs$EntrezGene.ID)
    # 17,261
table(rowData(sce.amy.PB)$Symbol %in% hom_hs$Symbol)
    # 16,916 - not a bad difference

# So for mapping === == === ===
#   human.entrez > HomoloGene.ID < mm.Symbol
#        ^ filter SCE's on this

# Human (by Entrez)
rowData(sce.amy.PB)$HomoloGene.ID <- hom_hs$HomoloGene.ID[match(rowData(sce.amy.PB)$hs.entrezIds,
                                                                hom_hs$EntrezGene.ID)]

## Now set/match to shared homologous genes ===
length(intersect(rowData(sce.amy.PB)$HomoloGene.ID,
                 rowData(sce.amy.mm.PB)$HomoloGene.ID))  # 13,444
sharedHomologs <- intersect(rowData(sce.amy.PB)$HomoloGene.ID,
                            rowData(sce.amy.mm.PB)$HomoloGene.ID)
# # That first one is NA - get rid of it
# sharedHomologs <- sharedHomologs[-1]

# Human not in mm
length(setdiff(rowData(sce.amy.PB)$HomoloGene.ID,
               rowData(sce.amy.mm.PB)$HomoloGene.ID))  # 3657
# mm not in human
length(setdiff(rowData(sce.amy.mm.PB)$HomoloGene.ID,
               rowData(sce.amy.PB)$HomoloGene.ID))  # 928

# Subset for those (still contains duplicated homolog IDs at this point)
sce.mm.PBsub <- sce.amy.mm.PB[rowData(sce.amy.mm.PB)$HomoloGene.ID %in% sharedHomologs, ]  # 14247
sce.hsap.PBsub <- sce.amy.PB[rowData(sce.amy.PB)$HomoloGene.ID %in% sharedHomologs, ]  # 14178

## Many are duplicated...
rowData(sce.mm.PBsub)$Symbol[duplicated(rowData(sce.mm.PBsub)$HomoloGene.ID)]
    # shoot many genes are orthologs
rowData(sce.hsap.PBsub)$Symbol[duplicated(rowData(sce.hsap.PBsub)$HomoloGene.ID)]
    # same here, slightly less

### -> Take the higher-expressing of the duplicated - just mean across PB clusters:
## mm ===
# For each duplicated HomoloGene.ID, collect all member genes, then keep
# the single gene with the highest mean logcounts across pseudobulk columns.
# NOTE(review): `1:length()` would misbehave if there were zero duplicates;
# seq_along() is the safer idiom if this is ever re-run on other data.
duplicatedSet.mm <- which(duplicated(rowData(sce.mm.PBsub)$HomoloGene.ID))
genes2compare.mm <- list()
gene2keep.mm <- character()
for(g in 1:length(duplicatedSet.mm)){
  genes2compare.mm[[g]] <- rownames(sce.mm.PBsub)[rowData(sce.mm.PBsub)$HomoloGene.ID ==
                                                    rowData(sce.mm.PBsub)$HomoloGene.ID[duplicatedSet.mm[g]]]
  rowmeansmat <- rowMeans(assay(sce.mm.PBsub[genes2compare.mm[[g]], ], "logcounts"))
  gene2keep.mm[g] <- names(rowmeansmat[order(rowmeansmat, decreasing=TRUE)])[1]
}

# Now pull out those that not being compared, so can `c()`
table(rownames(sce.mm.PBsub) %in% unlist(genes2compare.mm)) # 133 - why isn't this ==
sum(lengths(genes2compare.mm)) # 328  ????
length(unique(unlist(genes2compare.mm))) # 133 - oh.  also `length(unique(gene2keep.mm)) == 52`
genesNoCompare.mm <- rownames(sce.mm.PBsub)[!(rownames(sce.mm.PBsub) %in% unlist(genes2compare.mm))]

# Finally combine and subset: unique genes + one representative per homolog
sce.mm.PBsub <- sce.mm.PBsub[c(genesNoCompare.mm, unique(gene2keep.mm)), ]
table(rowData(sce.mm.PBsub)$HomoloGene.ID %in% sharedHomologs) # 13444 TRUE
table(duplicated(rowData(sce.mm.PBsub)$HomoloGene.ID)) # 13444 FALSE dope.

## Human ===
# First change rownames to EnsemblID (keep the uniquified Symbol around)
rowData(sce.hsap.PBsub)$Symbol.unique <- rownames(sce.hsap.PBsub)
rownames(sce.hsap.PBsub) <- rowData(sce.hsap.PBsub)$ID

# Same duplicate-resolution procedure as for mouse, on Ensembl rownames.
duplicatedSet.hsap <- which(duplicated(rowData(sce.hsap.PBsub)$HomoloGene.ID))
genes2compare.hsap <- list()
gene2keep.hsap <- character()
for(g in 1:length(duplicatedSet.hsap)){
  genes2compare.hsap[[g]] <- rownames(sce.hsap.PBsub)[rowData(sce.hsap.PBsub)$HomoloGene.ID ==
                                                        rowData(sce.hsap.PBsub)$HomoloGene.ID[duplicatedSet.hsap[g]]]
  rowmeansmat <- rowMeans(assay(sce.hsap.PBsub[genes2compare.hsap[[g]], ], "logcounts"))
  gene2keep.hsap[g] <- names(rowmeansmat[order(rowmeansmat, decreasing=TRUE)])[1]
}

# Now pull out those that not being compared, so can `c()`
table(rownames(sce.hsap.PBsub) %in% unlist(genes2compare.hsap)) # 109 - why isn't this ==
sum(lengths(genes2compare.hsap)) # 136  ????
length(unique(unlist(genes2compare.hsap))) # 109 - oh.  also `length(unique(gene2keep.hsap)) == 52`
genesNoCompare.hsap <- rownames(sce.hsap.PBsub)[!(rownames(sce.hsap.PBsub) %in% unlist(genes2compare.hsap))]
    # of length 13392 (which + 52 == 13444)

# Finally combine and subset
sce.hsap.PBsub <- sce.hsap.PBsub[c(genesNoCompare.hsap, unique(gene2keep.hsap)), ]
table(rowData(sce.hsap.PBsub)$HomoloGene.ID %in% sharedHomologs) # 13444 TRUE
table(duplicated(rowData(sce.hsap.PBsub)$HomoloGene.ID)) # 13444 FALSE dope.

## Match order and save (mouse rows re-ordered to the human homolog order)
sce.mm.PBsub <- sce.mm.PBsub[match(rowData(sce.hsap.PBsub)$HomoloGene.ID,
                                   rowData(sce.mm.PBsub)$HomoloGene.ID), ]
table(rowData(sce.mm.PBsub)$HomoloGene.ID == rowData(sce.hsap.PBsub)$HomoloGene.ID)
    # all TRUE - good

pheatmap(cor(assay(sce.mm.PBsub, "logcounts"), assay(sce.hsap.PBsub, "logcounts")), fontsize=5)
    # (ah but this is at the sample:cluster level)

Readme <- "These two SCEs are subsetted and ordered for matching HomoloGene.ID in the rowData. This can be used to subset the nucleus-level SCEs in their respective Rdata files."
save(sce.mm.PBsub, sce.hsap.PBsub, Readme, file="/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/SCE_mm-MeA-PBd_w_matchingHsap-Amyg-PBd_HomoloGene.IDs_MNT.rda")
### FINALLY resume comparisons === === === === ===
# Purpose: correlate cluster-specific moderated t-statistics between
# mouse MeA and human Amyg (all shared homologs, plus top-100
# cluster-specific gene subsets), then draw levelplot heatmaps to PDF.

## mm stats (second contrast column of each eBayes fit)
pvals_mm <- sapply(eb_list.amy.mm, function(x) {
  x$p.value[, 2, drop = FALSE]
})
rownames(pvals_mm) = rownames(sce.amy.mm.PB)
ts_mm <- sapply(eb_list.amy.mm, function(x) {
  x$t[, 2, drop = FALSE]
})
rownames(ts_mm) = rownames(sce.amy.mm.PB)

## Human stats
pvals_hsap <- sapply(eb_list.amy.broad, function(x) {
  x$p.value[, 2, drop = FALSE]
})
rownames(pvals_hsap) = rowData(sce.amy.PB)$ID
ts_hsap <- sapply(eb_list.amy.broad, function(x) {
  x$t[, 2, drop = FALSE]
})
rownames(ts_hsap) = rowData(sce.amy.PB)$ID

### Subset and check matching 'HomoloGene.ID' === === === ===
# Re-key both species' matrices from gene IDs to HomoloGene.ID so the
# rows line up one-for-one.
pvals_mm <- pvals_mm[rownames(sce.mm.PBsub), ]
ts_mm <- ts_mm[rownames(sce.mm.PBsub), ]
pvals_hsap <- pvals_hsap[rowData(sce.hsap.PBsub)$ID, ]
ts_hsap <- ts_hsap[rowData(sce.hsap.PBsub)$ID, ]

rownames(ts_mm) <- rowData(sce.mm.PBsub)$HomoloGene.ID
rownames(pvals_mm) <- rowData(sce.mm.PBsub)$HomoloGene.ID
rownames(ts_hsap) <- rowData(sce.hsap.PBsub)$HomoloGene.ID
rownames(pvals_hsap) <- rowData(sce.hsap.PBsub)$HomoloGene.ID

table(rownames(ts_mm) == rownames(ts_hsap))
    ## all 14121 TRUE - good

## Now run correlation (columns = clusters; rows = shared homologs)
cor_t = cor(ts_mm, ts_hsap)
signif(cor_t, 2)

## On just hsap cluster-specific homologous genes ===
# Top 100 genes per human cluster by t-statistic
# (the `p` argument is accepted but unused inside the function).
hsap_specific_indices = mapply(function(t, p) {
  oo = order(t, decreasing = TRUE)[1:100]
},
as.data.frame(ts_hsap),
as.data.frame(pvals_hsap)
)
hsap_ind = unique(as.numeric(hsap_specific_indices))

cor_t_hsap = cor(ts_mm[hsap_ind, ],
                 ts_hsap[hsap_ind, ])
signif(cor_t_hsap, 3)

## On just mouse cluster-specific homologous genes ===
mm_specific_indices = mapply(function(t, p) {
  oo = order(t, decreasing = TRUE)[1:100]
},
as.data.frame(ts_mm),
as.data.frame(pvals_mm)
)
mm_ind = unique(as.numeric(mm_specific_indices))

cor_t_mm = cor(ts_mm[mm_ind, ],
               ts_hsap[mm_ind, ])
signif(cor_t_mm, 3)

### Heatmap
theSeq.all = seq(-.5, .5, by = 0.01)
my.col.all <- colorRampPalette(brewer.pal(7, "PRGn"))(length(theSeq.all))

ct = colData(sce.hsap.PBsub)
ct = ct[!duplicated(sce.hsap.PBsub$cellType.final), ]

# Tag dimnames with species so plot axes are unambiguous.
cor_t_hsap_toPlot = cor_t_hsap
rownames(cor_t_hsap_toPlot) = paste0(rownames(cor_t_hsap_toPlot),"_","M.mus")
colnames(cor_t_hsap_toPlot) = paste0(colnames(cor_t_hsap_toPlot),"_","H.sap")

cor_t_mm_toPlot = cor_t_mm
rownames(cor_t_mm_toPlot) = paste0(rownames(cor_t_mm_toPlot),"_","M.mus")
colnames(cor_t_mm_toPlot) = paste0(colnames(cor_t_mm_toPlot),"_","H.sap")

cor_t_all_toPlot = cor_t
rownames(cor_t_all_toPlot) = paste0(rownames(cor_t_all_toPlot),"_","M.mus")
colnames(cor_t_all_toPlot) = paste0(colnames(cor_t_all_toPlot),"_","H.sap")

## MNT added 14Apr2020: Reorder to diagonal & threshold at 0.4 for all-gene correlation === === ===
# Start from descending - easier to manually order
#cor_t_all_toPlot <- cor_t_all_toPlot[ ,rev(1:ncol(cor_t_all_toPlot))]
# This is useful:
apply(cor_t_all_toPlot, 2, which.max)
# If want to re-order human labels (but prefer re-ordering mm labels)
#cor_t_all_toPlot <- cor_t_all_toPlot[ ,rev(c(14,5,3,4, 7,10,12,6, 9,8,2,1, 11,13))]
# NOTE(review): this hand-tuned row order assumes exactly 16 mouse rows
# in the matrix -- verify against the loaded objects before re-running.
cor_t_all_toPlot <- cor_t_all_toPlot[c(14,11:13,4,3, 2,8,7,9, 6,15,5,16,1,10), ]
# Threshold (cap) at 0.4 so the color scale is not dominated by the max
range(cor_t_all_toPlot)
cor_t_all_toPlot <- ifelse(cor_t_all_toPlot >= 0.4, 0.4, cor_t_all_toPlot)

## Do for other gene subsets ===
# Human
#cor_t_hsap_toPlot <- cor_t_hsap_toPlot[ ,rev(1:ncol(cor_t_hsap_toPlot))]
#cor_t_hsap_toPlot <- cor_t_hsap_toPlot[ ,rev(c(14,5,3,4, 7,10,12,6, 9,8,2,1, 11,13))]
cor_t_hsap_toPlot <- cor_t_hsap_toPlot[c(14,11:13,4,3, 2,8,7,9, 6,15,5,16,1,10), ]
# Threshold at 0.4
range(cor_t_hsap_toPlot)
cor_t_hsap_toPlot <- ifelse(cor_t_hsap_toPlot >= 0.4, 0.4, cor_t_hsap_toPlot)

# mm
#cor_t_mm_toPlot <- cor_t_mm_toPlot[ ,rev(1:ncol(cor_t_mm_toPlot))]
#cor_t_mm_toPlot <- cor_t_mm_toPlot[ ,rev(c(14,5,3,4, 7,10,12,6, 9,8,2,1, 11,13))]
cor_t_mm_toPlot <- cor_t_mm_toPlot[c(14,11:13,4,3, 2,8,7,9, 6,15,5,16,1,10), ]
# Threshold at 0.4
range(cor_t_mm_toPlot)
cor_t_mm_toPlot <- ifelse(cor_t_mm_toPlot >= 0.4, 0.4, cor_t_mm_toPlot)

pdf("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/pdfs/exploration/overlap-mouse-MeA_with_LIBD-10x-Amyg_top100-or-all_Apr2020.pdf")
# Most human-specific
print(
  levelplot(
    cor_t_hsap_toPlot,
    aspect = "fill",
    at = theSeq.all,
    col.regions = my.col.all,
    ylab = "",
    xlab = "",
    scales = list(x = list(rot = 90, cex = 1), y = list(cex = 1)),
    main="Correlation of cluster-specific t's \n (top 100 genes/human (LIBD) clusters)"
  )
)
# Most mm-specific
print(
  levelplot(
    cor_t_mm_toPlot,
    aspect = "fill",
    at = theSeq.all,
    col.regions = my.col.all,
    ylab = "",
    xlab = "",
    scales = list(x = list(rot = 90, cex = 1), y = list(cex = 1)),
    main="Correlation of cluster-specific t's \n (top 100 genes/mouse MeA (UCLA) clusters)"
  )
)
# All
print(
  levelplot(
    cor_t_all_toPlot,
    aspect = "fill",
    at = theSeq.all,
    col.regions = my.col.all,
    ylab = "",
    xlab = "",
    scales = list(x = list(rot = 90, cex = 1), y = list(cex = 1)),
    main="Correlation of cluster-specific t's \n (all shared 13,444 homologs)",
    fontsize = 20
  )
)
dev.off()
### Comparison to UCLA mouse MeA with SN-LEVEL stats ==================================
# Added MNT 14May2020 - UPDATED 22May2020 to compare to 2019 dataset
# (previously only 2017 neuronal subclusters), now with neuronal subcluster info
# Purpose: build single-nucleus-level t-statistic matrices (t = std.logFC
# * sqrt(N nuclei)) for mouse MeA (Chen-Hu-Wu 2019) and human Amyg,
# re-key both to JAX HomoloGene.ID, and subset to shared homologs.

# Load mouse stats
# load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2017Neuron/markers-stats_mouseMeA-2017-neuSubs_findMarkers-SN-LEVEL_MNTMay2020.rda", verbose=T)
# # markers.mmMeAneu.t.1vAll
load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2019Cell/markers-stats_mouseMeA-2019-with-16neuSubs_findMarkers-SN-LEVEL_MNTMay2020.rda", verbose=T)
# markers.mmMeA.t.1vAll

# Load mouse SCE
# load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2017Neuron/SCE_mouse-MeA-2017_neuronalSubclusters_HVGs_MNT.rda", verbose=T)
# # sce.amy.mm17hvgs
load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2019Cell/SCE_mouse-MeA_downstream-processing_MNT.rda", verbose=T)
# sce.amy.mm, chosen.hvgs.amy.mm

## Calculate and add t-statistic (= std.logFC * sqrt(N)) for mouse clusters
# and fix row order to the first entry "Astrocyte"
fixTo <- rownames(markers.mmMeA.t.1vAll[[1]])

for(x in names(markers.mmMeA.t.1vAll)){
  markers.mmMeA.t.1vAll[[x]]$t.stat <- markers.mmMeA.t.1vAll[[x]]$std.logFC * sqrt(ncol(sce.amy.mm))
  markers.mmMeA.t.1vAll[[x]] <- markers.mmMeA.t.1vAll[[x]][fixTo, ]
}

# Pull out the t's (genes x clusters matrix)
ts.mmMeA <- sapply(markers.mmMeA.t.1vAll, function(x){x$t.stat})
rownames(ts.mmMeA) <- fixTo

## Human t stats subset/re-ordering ===
# Bring in human stats; create t's
load("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/rdas/markers-stats_Amyg-n2_findMarkers-SN-LEVEL_MNTMay2020.rda", verbose=T)
# markers.amy.t.1vAll, markers.amy.t.design, markers.amy.wilcox.block
rm(markers.amy.t.design, markers.amy.wilcox.block)

# Need to add t's with N nuclei used in constrasts
load("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/rdas/regionSpecific_Amyg-n2_cleaned-combined_SCE_MNTFeb2020.rda", verbose=T)
#sce.amy, chosen.hvgs.amy, pc.choice.amy, clusterRefTab.amy, ref.sampleInfo
rm(chosen.hvgs.amy, pc.choice.amy, clusterRefTab.amy,ref.sampleInfo)

# First drop "Ambig.lowNtrxts" (50 nuclei) so ncol(sce.amy) matches the N
# actually used in the contrasts
sce.amy <- sce.amy[ ,sce.amy$cellType.split != "Ambig.lowNtrxts"]
sce.amy$cellType.split <- droplevels(sce.amy$cellType.split)

## As above, calculate and add t-statistic (= std.logFC * sqrt(N)) from contrasts
# and fix row order to the first entry "Astro"
fixTo <- rownames(markers.amy.t.1vAll[["Astro"]])

for(s in names(markers.amy.t.1vAll)){
  markers.amy.t.1vAll[[s]]$t.stat <- markers.amy.t.1vAll[[s]]$std.logFC * sqrt(ncol(sce.amy))
  markers.amy.t.1vAll[[s]] <- markers.amy.t.1vAll[[s]][fixTo, ]
}

# Pull out the t's
ts.amy <- sapply(markers.amy.t.1vAll, function(x){x$t.stat})
rownames(ts.amy) <- fixTo

## Bring in HomoloGene.ID info to subset/match order
# load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2019Cell/SCE_mm-MeA-PBd_w_matchingHsap-Amyg-PBd_HomoloGene.IDs_MNT.rda",
#      verbose=T)
# # sce.mm.PBsub, sce.hsap.PBsub, Readme
#
# table(rowData(sce.mm.PBsub)$HomoloGene.ID == rowData(sce.hsap.PBsub)$HomoloGene.ID) # all TRUE - dope
# # (see above - these are the intersecting homologs)
#
# ## However!
# table(rownames(ts.mmMeA) %in% rownames(sce.mm.PBsub)) # not all - so will need to get union
# rm(sce.mm.PBsub, sce.hsap.PBsub, Readme)

## HomoloGene.ID for all human genes ====
hom = read.delim("http://www.informatics.jax.org/downloads/reports/HOM_AllOrganism.rpt",
                 as.is=TRUE)
hom_hs <- hom[hom$Common.Organism.Name == "human", ]
# of 19,124 entries

# First Add EntrezID for human
hs.entrezIds <- mapIds(org.Hs.eg.db, keys=rowData(sce.amy)$ID,
                       column="ENTREZID", keytype="ENSEMBL")
# "'select()' returned 1:many mapping between keys and columns"
table(!is.na(hs.entrezIds))
# 22,818 valid entries (remember this is already subsetted for those non-zero genes only)
# Add to rowData
rowData(sce.amy) <- cbind(rowData(sce.amy), hs.entrezIds)

# Now how many in JAX db?
table(rowData(sce.amy)$hs.entrezIds %in% hom_hs$EntrezGene.ID)
# 18,865
table(rowData(sce.amy)$Symbol %in% hom_hs$Symbol)
# 18,472 - not a bad difference

# So for mapping === === ===
#   human.entrez > HomoloGene.ID < mm.Symbol
#        ^ filter SCE's on this

# Human (by Entrez)
rowData(sce.amy)$HomoloGene.ID <- hom_hs$HomoloGene.ID[match(rowData(sce.amy)$hs.entrezIds,
                                                             hom_hs$EntrezGene.ID)]
# end chunk ====

# Intersection?
table(rowData(sce.amy.mm)$HomoloGene.ID %in% rowData(sce.amy)$HomoloGene.ID)
# FALSE  TRUE
#   665 13845

# First give [human] ts.amy rownames their respective EnsemblID
# (have to use the full sce bc rownames(sce.hsap.PBsub) is EnsemblID and we uniquified the $Symbol)
rownames(ts.amy) <- rowData(sce.amy)$ID[match(rownames(ts.amy), rownames(sce.amy))]
# Then to HomoloGene.ID
rownames(ts.amy) <- rowData(sce.amy)$HomoloGene.ID[match(rownames(ts.amy), rowData(sce.amy)$ID)]
# Btw half are NA
table(is.na(rownames(ts.amy)))
# FALSE  TRUE
# 17261 11203

# So subset for those with HomoloGene.ID
ts.amy <- ts.amy[!is.na(rownames(ts.amy)), ]

# Mouse - can just go to HomoloGene.ID (mouse rownames are symbols)
rownames(ts.mmMeA) <- rowData(sce.amy.mm)$HomoloGene.ID[match(rownames(ts.mmMeA), rownames(sce.amy.mm))]

# Intersecting?
table(rownames(ts.mmMeA) %in% rownames(ts.amy))
# FALSE  TRUE
#   985 13525     - so we'll be running correlation across these genes

# Subset and match order (human rows re-ordered to the mouse homolog order)
ts.mmMeA <- ts.mmMeA[rownames(ts.mmMeA) %in% rownames(ts.amy), ]
ts.amy <- ts.amy[rownames(ts.mmMeA), ]
cor_t_amy <- cor(ts.amy, ts.mmMeA)
rownames(cor_t_amy) = paste0(rownames(cor_t_amy),"_","H")
colnames(cor_t_amy) = paste0(colnames(cor_t_amy),"_","M")
range(cor_t_amy)
#[1] -0.2203968 0.5023080 (previously {-0.2557751, 0.2577207} on only 2017 neuronal subsets)
### Heatmap - typically use levelplot (e.g. below), but will want pheatmap bc can cluster cols/rows
theSeq.all = seq(-.6, .6, by = 0.01)
my.col.all <- colorRampPalette(brewer.pal(7, "BrBG"))(length(theSeq.all)-1)
# Re-order mouse labels - move EN/MG/MU to after neuronal subclusters
cor_t_amy <- cor_t_amy[ ,c(1, 5,13:20,6:12, 3,2,4, 21,23,22)]
# #pdf("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/pdfs/exploration/HongLab-UCLA_mmMeA/overlap-mouseMeA-2017-neuSubs_with_LIBD-10x-AMY_SN-LEVEL-stats_May2020.pdf")
# pdf("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/pdfs/exploration/HongLab-UCLA_mmMeA/overlap-mouseMeA-2019-fullSubclusters_with_LIBD-10x-AMY_SN-LEVEL-stats_May2020.pdf")
# pheatmap(cor_t_amy,
# color=my.col.all,
# cluster_cols=F, cluster_rows=F,
# breaks=theSeq.all,
# fontsize=11, fontsize_row=15, fontsize_col=12,
# #main="Correlation of cluster-specific t's for mouse MeA neuronal subclusters \n (Wu et al., Neuron 2017)")
# main="Correlation of cluster-specific t's to mouse MeA \n complete subclusters (Chen-Hu-Wu et al., Cell 2019)")
# # Version with mouse glial cell types 'missing' in LIBD data dropped:
# pheatmap(cor_t_amy_sub,
# color=my.col.all,
# cluster_cols=F, cluster_rows=F,
# breaks=theSeq.all,
# fontsize=11.5, fontsize_row=15, fontsize_col=13.5,
# main="Correlation of cluster-specific t's to mouse MeA \n complete subclusters (Chen-Hu-Wu et al., Cell 2019)")
#
# dev.off()
## Version with mouse glial cell types 'missing' in LIBD data dropped:
cor_t_amy_sub <- cor_t_amy[ ,-which(colnames(cor_t_amy) %in% c("EN_M", "MU_M", "OPC.OL_M"))]
## Actually just print as second page to the above - will probably get some suggested edits
## Iteration with top N spp:subcluster-specific genes: ========
# Added MNT 25May2020
# -> Basically just run through line 488, under ("Subset and match order")
# Save the ts matrices to reduce work next time
Readme <- "These t-statistic matrices are subsetted and matched for shared 'HomoloGene.ID', so `cor()` can simply be run or other gene subsets applied first."
save(ts.amy, ts.mmMeA, Readme, file="rdas/zTsMats_libd-AMY_and_ucla-mouseMeA-2019Cell_sharedGenes_25May2020.rda")
# # Have to remove the markers objects bc the rows have been fixed (actually don't need to lol)
# rm(markers.amy.t.1vAll, markers.mmMeA.t.1vAll)
#
# # Re-load them
# load("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/rdas/markers-stats_Amyg-n2_findMarkers-SN-LEVEL_MNTMay2020.rda", verbose=T)
# # markers.amy.t.1vAll, markers.amy.t.design, markers.amy.wilcox.block
# rm(markers.amy.t.design, markers.amy.wilcox.block)
#
# load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2019Cell/markers-stats_mouseMeA-2019-with-16neuSubs_findMarkers-SN-LEVEL_MNTMay2020.rda", verbose=T)
# # markers.mmMeA.t.1vAll
#
## On just hsap cluster-specific homologous genes ===
hsap_specific_indices = mapply(function(t) {
oo = order(t, decreasing = TRUE)[1:100]
},
as.data.frame(ts.amy)
)
hsap_ind = unique(as.numeric(hsap_specific_indices))
length(hsap_ind) # so of 1200 (100 x 12 cellType.split), 919 unique
cor_t_hsap = cor(ts.amy[hsap_ind, ],
ts.mmMeA[hsap_ind, ])
range(cor_t_hsap)
#[1] -0.2738376 0.6612352
## On just mouse cluster-specific homologous genes ===
mouse_specific_indices = mapply(function(t) {
oo = order(t, decreasing = TRUE)[1:100]
},
as.data.frame(ts.mmMeA)
)
mouse_ind = unique(as.numeric(mouse_specific_indices))
length(mouse_ind) # so of 2300 (100 x 23 subCluster), 1543 unique
cor_t_mouse = cor(ts.amy[mouse_ind, ],
ts.mmMeA[mouse_ind, ])
range(cor_t_mouse)
# [1] -0.2731605 0.6113445
## UPDATED heatmap:
theSeq.all = seq(-.65, .65, by = 0.01)
my.col.all <- colorRampPalette(brewer.pal(7, "BrBG"))(length(theSeq.all)-1)
# Re-order mouse labels - move EN/MG/MU to after neuronal subclusters
cor_t_hsap <- cor_t_hsap[ ,c(1, 5,13:20,6:12, 3,2,4, 21,23,22)]
# Then treshold this one to 0.65 max (max is 0.6612)
cor_t_hsap <- ifelse(cor_t_hsap >= 0.65, 0.65, cor_t_hsap)
# (and)
cor_t_amy_sub <- cor_t_amy[ ,-which(colnames(cor_t_amy) %in% c("EN_M", "MU_M", "OPC.OL_M"))]
cor_t_mouse <- cor_t_mouse[ ,c(1, 5,13:20,6:12, 3,2,4, 21,23,22)]
# Render the four correlation heatmaps (all genes, glia-dropped, human-driven,
# mouse-driven) into one multi-page PDF; all writes go to the open device until dev.off()
#pdf("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/pdfs/exploration/HongLab-UCLA_mmMeA/overlap-mouseMeA-2019-fullSubclusters_with_LIBD-10x-AMY_SN-LEVEL-stats_May2020.pdf")
# or
pdf("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/pdfs/exploration/HongLab-UCLA_mmMeA/overlap-mouseMeA-2019-fullSubclusters_with_LIBD-10x-AMY_SN-LEVEL-stats_numbersPrinted_May2020.pdf")
pheatmap(cor_t_amy,
         color=my.col.all,
         cluster_cols=F, cluster_rows=F,
         breaks=theSeq.all,
         fontsize=11, fontsize_row=15, fontsize_col=12,
         display_numbers=T, number_format="%.2f", fontsize_number=6.5,
         main="Correlation of cluster-specific t's to mouse MeA \n complete subclusters (Chen-Hu-Wu et al., Cell 2019)")
# Version with mouse glial cell types 'missing' in LIBD data dropped:
pheatmap(cor_t_amy_sub,
         color=my.col.all,
         cluster_cols=F, cluster_rows=F,
         breaks=theSeq.all,
         fontsize=11.5, fontsize_row=15, fontsize_col=13.5,
         display_numbers=T, number_format="%.2f", fontsize_number=6.5,
         main="Correlation of cluster-specific t's to mouse MeA \n complete subclusters (Chen-Hu-Wu et al., Cell 2019)")
# On human-specific genes (slightly thresholded)
pheatmap(cor_t_hsap,
         color=my.col.all,
         cluster_cols=F, cluster_rows=F,
         breaks=theSeq.all,
         fontsize=11.5, fontsize_row=15, fontsize_col=13.5,
         display_numbers=T, number_format="%.2f", fontsize_number=6.5,
         main="Correlation of top-100 cluster-specific t's to \n (Chen-Hu-Wu et al., Cell 2019) subclusters")
# On mm-MeA-specific genes
pheatmap(cor_t_mouse,
         color=my.col.all,
         cluster_cols=F, cluster_rows=F,
         breaks=theSeq.all,
         fontsize=11.5, fontsize_row=15, fontsize_col=13.5,
         display_numbers=T, number_format="%.2f", fontsize_number=6.5,
         main="Correlation of LIBD-AMY subclusters to \n (Chen-Hu-Wu et al., Cell 2019) top-100 subcluster t's")
dev.off()
## Intersecting some of the top markers =====================
load("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/rdas/markers-stats_Amyg-n2_findMarkers-SN-LEVEL_MNTMay2020.rda", verbose=T)
    # markers.amy.t.1vAll, markers.amy.t.design, markers.amy.wilcox.block
rm(markers.amy.t.design, markers.amy.wilcox.block)
# Take top 100
# Marker list per human cluster: gene names passing FDR < 1e-6
# (log.FDR < log10(1e-6) == -6)
markerList.t.hsap <- lapply(markers.amy.t.1vAll, function(x){
  rownames(x)[x$log.FDR < log10(1e-6)]
  }
)
genes.top100.hsap <- lapply(markerList.t.hsap, function(x){head(x, n=100)})
load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2019Cell/markers-stats_mouseMeA-2019-with-16neuSubs_findMarkers-SN-LEVEL_MNTMay2020.rda", verbose=T)
    # markers.mmMeA.t.1vAll
# Just `toupper()` it
# Mouse symbols upper-cased so they can be intersected with human symbols
markerList.t.mm <- lapply(markers.mmMeA.t.1vAll, function(x){
  rownames(x)[x$log.FDR < log10(1e-6)]
  }
)
genes.top100.mm <- lapply(markerList.t.mm, function(x){toupper(head(x, n=100))})
# Collapse the list of length-100 vectors into a 100 x n.subcluster matrix
genes.top100.mm <- sapply(genes.top100.mm, cbind)
## sapply
# Count pairwise overlaps: rows = mouse subclusters, cols = human clusters
sapply(genes.top100.hsap, function(x){
  apply(genes.top100.mm,2,function(y){length(intersect(x,y))})
})
# Astro Excit.1 Excit.2 Excit.3 Inhib.1 Inhib.2 Inhib.3 Inhib.4 Inhib.5 Micro Oligo OPC
# AS 20 0 1 0 0 1 0 0 1 0 0 1
# EN 2 1 1 0 1 2 1 0 0 0 0 2
# MG 0 0 0 1 0 0 0 0 0 19 0 0
# MU 2 1 0 1 0 0 1 1 0 0 0 0
# N.1 1 4 2 0 14 8 3 4 9 0 1 1
# N.10 0 6 1 4 7 7 2 0 6 0 0 0
# N.11 1 10 5 2 8 3 4 6 8 0 0 4
# N.12 2 7 4 3 7 5 2 3 5 0 2 2
# N.13 1 2 1 3 1 1 0 0 5 1 0 1
# N.14 0 7 2 4 9 6 0 4 7 1 1 2
# N.15 0 7 1 6 0 1 1 0 1 0 0 1
# N.16 1 3 4 1 7 3 3 6 4 0 0 4
# N.2 2 6 2 1 9 5 2 3 6 0 0 3
# N.3 2 3 1 4 0 3 0 0 2 0 0 0
# N.4 2 5 3 1 10 7 3 10 6 1 1 3
# N.5 0 4 3 2 4 4 1 2 5 0 0 2
# N.6 1 2 3 0 13 10 6 8 9 0 3 2
# N.7 0 4 10 1 1 3 1 2 2 0 0 1
# N.8 1 7 4 4 6 6 2 3 19 1 1 3
# N.9 0 3 1 1 10 5 2 5 4 0 0 1
# OL 0 0 2 0 0 0 0 0 0 0 19 0
# OPC 0 0 0 0 0 1 0 0 0 0 0 26
# OPC.OL 0 0 0 1 0 0 0 0 1 0 5 7
## Amonst top 40 ===
# Stricter version of the overlap above, using only each cluster's top-40 markers
genes.top40.hsap <- lapply(markerList.t.hsap, function(x){head(x, n=40)})
genes.top40.mm <- lapply(markerList.t.mm, function(x){toupper(head(x, n=40))})
# Collapse to a 40 x n.subcluster character matrix
genes.top40.mm <- sapply(genes.top40.mm, cbind)
sapply(genes.top40.hsap, function(x){
  apply(genes.top40.mm,2,function(y){length(intersect(x,y))})
})
# Astro Excit.1 Excit.2 Excit.3 Inhib.1 Inhib.2 Inhib.3 Inhib.4 Inhib.5 Micro Oligo OPC
# AS 7 0 0 0 0 0 0 0 0 0 0 0
# EN 1 0 0 0 0 0 0 0 0 0 0 0
# MG 0 0 0 0 0 0 0 0 0 4 0 0
# MU 0 0 0 0 0 0 0 0 0 0 0 0
# N.1 0 0 1 0 1 0 0 0 0 0 0 0
# N.10 0 0 0 0 1 2 0 0 2 0 0 0
# N.11 0 4 0 0 2 0 0 2 0 0 0 1
# N.12 1 2 2 0 0 1 0 0 2 0 0 1
# N.13 0 0 0 1 0 0 0 0 1 0 0 0
# N.14 0 2 0 1 0 1 0 0 1 1 0 0
# N.15 0 3 0 0 0 0 0 0 1 0 0 0
# N.16 0 1 1 0 0 1 0 0 1 0 0 2
# N.2 0 1 1 0 1 0 0 0 1 0 0 0
# N.3 0 1 0 0 0 2 0 0 0 0 0 0
# N.4 0 1 1 0 3 3 0 1 0 0 0 1
# N.5 0 0 0 0 1 0 0 0 0 0 0 1
# N.6 0 1 0 0 2 2 0 0 0 0 0 0
# N.7 0 0 2 0 0 1 0 1 0 0 0 0
# N.8 0 1 0 0 1 1 1 2 1 0 0 0
# N.9 0 0 0 0 0 1 0 1 1 0 0 0
# OL 0 0 1 0 0 0 0 0 0 0 7 0
# OPC 0 0 0 0 0 0 0 0 0 0 0 10
# OPC.OL 0 0 0 0 0 0 0 0 0 0 1 1
# Inhib.5 : N.8 genes ==
# Which of the human Inhib.5 top-40 markers appear in mouse N.8's top-100?
intersect(genes.top40.hsap[["Inhib.5"]], genes.top100.mm[ ,"N.8"])
# [1] "NPFFR2" "SV2C" "OTOF" "GRM8" "OLFM3" "FOXP2"
# round(ts.mmMeA["49202", ],3) # (Tll1 - looking because a highlighted gene in text)
# # AS EN MG MU N.1 N.10 N.11 N.12 N.13 N.14 N.15 N.16
# # -5.939 -5.932 -6.699 1.698 8.835 2.691 107.521 -5.323 20.345 86.122 -5.484 -5.423
# # N.2 N.3 N.4 N.5 N.6 N.7 N.8 N.9 OL OPC OPC.OL
# # 13.117 -5.297 33.339 16.283 -6.203 -5.520 108.310 22.783 -5.886 -4.273 -5.318
#
plotExpression(sce.amy.mm, exprs_values="logcounts", x="subCluster", colour_by="subCluster", features="Tll1")
# # ahh nothing but a few outliers
# Keep only the neuronal "N.*" subclusters.
# NOTE: pattern anchored/escaped ("^N\\.") - the previous "N." treated "." as a
# regex wildcard ("N" + any character), which only worked by luck of the label set.
sce.amy.mm.sub <- sce.amy.mm[ ,grep("^N\\.", sce.amy.mm$subCluster)]
sce.amy.mm.sub$subCluster <- droplevels(sce.amy.mm.sub$subCluster)
plotExpression(sce.amy.mm.sub, exprs_values="logcounts", x="subCluster", colour_by="subCluster",
               features=c("Npffr2","Sv2c","Otof","Grm8","Olfm3","Foxp2"))
# Actually nothing suuuper convincing - mostly outlier. These just happen to have _more_ lol
# N.8 top genes include Pcdh8 & Lamp5
plotExpression(sce.amy.mm.sub, exprs_values="logcounts", x="subCluster", colour_by="subCluster",
               features=c("Pcdh8","Lamp5"))
# N.12 reported marker genes (reported in supplementals "mmc2.xlsx" with paper)
plotExpression(sce.amy.mm.sub, exprs_values="logcounts", x="subCluster", colour_by="subCluster",
               features=c("Eomes","Dsp","Nhlh2","Samd3","Trpc3","Cdhr1","Lhx1"))
# Oh six of these were of the top 10 from my own test and plotted lol. Well good.
# (and btw) ===
# Nuclei counts per cluster per donor (recorded output below)
table(sce.amy$cellType.split, sce.amy$donor)
# Br5161 Br5212
# Ambig.lowNtrxts 34 16
# Astro 489 363
# Excit.1 141 193
# Excit.2 0 40
# Excit.3 0 55
# Inhib.1 16 155
# Inhib.2 33 76
# Inhib.3 11 24
# Inhib.4 24 0
# Inhib.5 85 13
# Micro 425 339
# Oligo 1697 1776
# OPC 335 292
# Glucocorticoid receptors? (in relation to TLL1, as per https://doi.org/10.1016/j.molbrainres.2005.09.016)
# stat_summary() args updated to fun/fun.min/fun.max (fun.y/fun.ymin/fun.ymax
# were deprecated in ggplot2 3.3.0); the crossbar still marks the median
plotExpression(sce.amy, exprs_values="logcounts", x="cellType.split", colour_by="cellType.split",
               features=c("NR3C1","NR3C2")) + stat_summary(fun = median, fun.min = median, fun.max = median,
                                                           geom = "crossbar", width = 0.3,
                                                           colour=rep(tableau20[1:12], 2)) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1), plot.title = element_text(size = 25))
# No particular high/specific expression in Inhib.5
### FINAL GRANT VERSION ===
# Remove EN, MU, OPC.OL, N.12 & N.15
load("rdas/zTsMats_libd-AMY_and_ucla-mouseMeA-2019Cell_sharedGenes_25May2020.rda", verbose=T)
    # ts.amy, ts.mmMeA, Readme
# Correlate human vs mouse t's across the shared homologs, then tag labels by species
cor_t_amy <- cor(ts.amy, ts.mmMeA)
rownames(cor_t_amy) <- paste0(rownames(cor_t_amy),"_","H")
colnames(cor_t_amy) <- paste0(colnames(cor_t_amy),"_","M")
# Re-order mouse labels - move EN/MG/MU to after neuronal subclusters
cor_t_amy <- cor_t_amy[ ,c(1, 5,13:20,6:12, 3,2,4, 21,23,22)]
# Remove those selected
cor_t_amy_sub <- cor_t_amy[ ,-which(colnames(cor_t_amy) %in% c("EN_M", "MU_M", "OPC.OL_M",
                                                               "N.12_M", "N.15_M"))]
range(cor_t_amy_sub)
    #[1] -0.2203968  0.5023080   --> Threshold to 0.5
# pmin() caps elementwise while preserving dims/dimnames (replaces ifelse(x >= 0.5, 0.5, x))
cor_t_amy_sub <- pmin(cor_t_amy_sub, 0.5)
### Heatmap - typically use levelplot (e.g. below), but will want pheatmap bc can cluster cols/rows
theSeq.all <- seq(-.5, .5, by = 0.01)
my.col.all <- colorRampPalette(brewer.pal(7, "BrBG"))(length(theSeq.all)-1)
# Single-page final heatmap for the grant; device stays open until dev.off()
pdf("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/pdfs/exploration/HongLab-UCLA_mmMeA/overlap-mouseMeA-2019-fullSubclusters_with_LIBD-10x-AMY_SN-LEVEL-stats_FINAL_May2020.pdf",width=8)
pheatmap(cor_t_amy_sub,
         color=my.col.all,
         cluster_cols=F, cluster_rows=F,
         angle_col=90,
         breaks=theSeq.all,
         fontsize=11.5, fontsize_row=17, fontsize_col=15,
         legend_breaks=c(seq(-0.5,0.5,by=0.25)),
         main="Correlation of cluster-specific t's to mouse MeA \n subclusters (Chen-Hu-Wu et al., Cell 2019)")
dev.off()
## For supplement: Print top markers for 'Inhib.5' & corresponding in MeA 'N.8' === ===
# (load AMY SCE - already done in session)
# Prep mouse MeA
load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2019Cell/SCE_mouse-MeA_downstream-processing_MNT.rda", verbose=T)
    # sce.amy.mm, chosen.hvgs.amy.mm
# Keep only neuronal "N.*" subclusters - regex anchored/escaped ("^N\\.");
# the previous "N." let "." match any character (regex wildcard)
sce.amy.mm.sub <- sce.amy.mm[ ,grep("^N\\.", sce.amy.mm$subCluster)]
sce.amy.mm.sub$subCluster <- droplevels(sce.amy.mm.sub$subCluster)
genes2print <- c("Npffr2", "Tll1", "Grm8", "Foxp2")
pdf("pdfs/pubFigures/suppFig_AMY-vs-MeA_topInhib.5markers_MNTSep2020.pdf", height=2.5, width=5)
# Human AMY
# (stat_summary args updated to fun/fun.min/fun.max; fun.y/* deprecated in ggplot2 3.3.0)
print(
  plotExpression(sce.amy, exprs_values = "logcounts", features=toupper(genes2print),
                 x="cellType.split", colour_by="cellType.split", point_alpha=0.5, point_size=1.0, ncol=4,
                 add_legend=F) + stat_summary(fun = median, fun.min = median, fun.max = median,
                                              geom = "crossbar", width = 0.3,
                                              colour=rep(tableau20[1:12], length(genes2print))) +
    xlab("") +
    theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 9.5),
          axis.title.y = element_text(angle = 90, size = 10),
          panel.grid.major=element_line(colour="grey95", size=0.8),
          panel.grid.minor=element_line(colour="grey95", size=0.4))
)
# mouse MeA
print(
  plotExpression(sce.amy.mm.sub, exprs_values = "logcounts", features=genes2print,
                 x="subCluster", colour_by="subCluster", point_alpha=0.5, point_size=1.0, ncol=4,
                 add_legend=F) + stat_summary(fun = median, fun.min = median, fun.max = median,
                                              geom = "crossbar", width = 0.3,
                                              colour=rep(tableau20[1:16], length(genes2print))) +
    xlab("") +
    theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 9.5),
          axis.title.y = element_text(angle = 90, size = 10),
          panel.grid.major=element_line(colour="grey95", size=0.8),
          panel.grid.minor=element_line(colour="grey95", size=0.4))
)
dev.off()
## Heatmap version ===
# Take more overlapping, from above exploration
genes2print <- c("Npffr2", "Tll1", "Grm8", "Foxp2", "Sv2c", "Olfm3")
pdf("pdfs/pubFigures/suppFig_AMY-vs-MeA_topInhib.5markers_heatmap_MNTSep2020.pdf", width=5, height=5)
# Human AMY: mean logcounts per cluster for the chosen genes (human symbols are upper-case)
dat <- assay(sce.amy, "logcounts")
cell.idx <- splitit(sce.amy$cellType.split)
current_dat <- do.call(cbind, lapply(cell.idx, function(ii) rowMeans(dat[toupper(genes2print), ii])))
pheatmap(current_dat, cluster_rows = FALSE, cluster_cols = FALSE, breaks = seq(0.02, 4.0, length.out = 101),
         color = colorRampPalette(RColorBrewer::brewer.pal(n = 7, name = "BuGn"))(100),
         fontsize_row = 18, fontsize_col=16)
# Mouse MeA neuronal subclusters: same summary on a narrower break range
dat <- assay(sce.amy.mm.sub, "logcounts")
cell.idx <- splitit(sce.amy.mm.sub$subCluster)
current_dat <- do.call(cbind, lapply(cell.idx, function(ii) rowMeans(dat[genes2print, ii])))
pheatmap(current_dat, cluster_rows = FALSE, cluster_cols = FALSE, breaks = seq(0.02, 1, length.out = 101),
         color = colorRampPalette(RColorBrewer::brewer.pal(n = 7, name = "BuGn"))(100),
         fontsize_row = 16, fontsize_col=16)
dev.off()
| /10x-pilot_region-specific_Amyg_step04_correl-mm-MeA.R | no_license | BertoLabMUSC/10xPilot_snRNAseq-human | R | false | false | 39,708 | r | ### MNT 10x snRNA-seq workflow: step 04 - downstream comparisons
### **Region-specific analyses**
###   - (3x) NAc samples from: Br5161 & Br5212 & Br5287
###   * Comparison to UCLA's Drop-seq on mouse medial amyg (MeA)
### NOTE(review): header mentions NAc samples but the script analyzes AMY
###   (Br5161/Br5212) - looks like copied boilerplate; confirm
#####################################################################
library(SingleCellExperiment)
library(EnsDb.Hsapiens.v86)
library(org.Hs.eg.db)
library(scater)
library(scran)
library(batchelor)
library(DropletUtils)
library(jaffelab)
library(limma)
library(lattice)
library(RColorBrewer)
library(pheatmap)
### Palette taken from `scater`
# Fixed categorical palettes used to color clusters throughout the plots below
tableau10medium = c("#729ECE", "#FF9E4A", "#67BF5C", "#ED665D",
                    "#AD8BC9", "#A8786E", "#ED97CA", "#A2A2A2",
                    "#CDCC5D", "#6DCCDA")
tableau20 = c("#1F77B4", "#AEC7E8", "#FF7F0E", "#FFBB78", "#2CA02C",
              "#98DF8A", "#D62728", "#FF9896", "#9467BD", "#C5B0D5",
              "#8C564B", "#C49C94", "#E377C2", "#F7B6D2", "#7F7F7F",
              "#C7C7C7", "#BCBD22", "#DBDB8D", "#17BECF", "#9EDAE5")
# ===
### Pseudobulk>modeling approach ============================================
#  * Skip this -> Now using sn-level stats for this comparison
## load modeling outputs
# 10x-pilot human Amyg
load("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/rdas/markers-stats_Amyg-n2_manualContrasts_MNTMar2020.rda", verbose=T)
    # eb_contrasts.amy.broad, eb_list.amy.broad, sce.amy.PB
# UCLA mouse MeA Drop-seq
load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/markers-stats_mouse-MeA-Drop-seq_manualContrasts_MNTApr2020.rda", verbose=T)
    # eb_list.amy.mm, corfit.amy.mm, sce.amy.mm.PB
# Add EntrezID for human
# (Ensembl -> Entrez via org.Hs.eg.db; needed to join onto the JAX homolog table)
hs.entrezIds <- mapIds(org.Hs.eg.db, keys=rowData(sce.amy.PB)$ID,
                       column="ENTREZID", keytype="ENSEMBL")
    # "'select()' returned 1:many mapping between keys and columns"
table(!is.na(hs.entrezIds))
    # 20,578 valid entries (remember this is already subsetted for those non-zero genes only)
# Add to rowData
rowData(sce.amy.PB) <- cbind(rowData(sce.amy.PB), hs.entrezIds)
## Bring in 'HomoloGene.ID' for human (already in rowData for mm SCE) ===
## JAX annotation info
# Downloaded fresh from MGI on each run (network dependency)
hom = read.delim("http://www.informatics.jax.org/downloads/reports/HOM_AllOrganism.rpt",
                 as.is=TRUE)
hom_hs <- hom[hom$Common.Organism.Name == "human", ]
    # of 19,124 entries
table(rowData(sce.amy.PB)$hs.entrezIds %in% hom_hs$EntrezGene.ID)
    # 17,261
table(rowData(sce.amy.PB)$Symbol %in% hom_hs$Symbol)
    # 16,916 - not a bad difference
# So for mapping  === == === ===
#  human.entrez > HomoloGene.ID < mm.Symbol
#        ^ filter SCE's on this
# Human (by Entrez)
rowData(sce.amy.PB)$HomoloGene.ID <- hom_hs$HomoloGene.ID[match(rowData(sce.amy.PB)$hs.entrezIds,
                                                                hom_hs$EntrezGene.ID)]
## Now set/match to shared homologous genes ===
# Genes with a HomoloGene.ID present in BOTH species' pseudobulk SCEs
length(intersect(rowData(sce.amy.PB)$HomoloGene.ID,
                 rowData(sce.amy.mm.PB)$HomoloGene.ID))  # 13,444
sharedHomologs <- intersect(rowData(sce.amy.PB)$HomoloGene.ID,
                            rowData(sce.amy.mm.PB)$HomoloGene.ID)
# # That first one is NA - get rid of it
# sharedHomologs <- sharedHomologs[-1]
# Human not in mm
length(setdiff(rowData(sce.amy.PB)$HomoloGene.ID,
               rowData(sce.amy.mm.PB)$HomoloGene.ID))    # 3657
# mm not in human
length(setdiff(rowData(sce.amy.mm.PB)$HomoloGene.ID,
               rowData(sce.amy.PB)$HomoloGene.ID))    # 928
# Subset for those
sce.mm.PBsub <- sce.amy.mm.PB[rowData(sce.amy.mm.PB)$HomoloGene.ID %in% sharedHomologs, ]  # 14247
sce.hsap.PBsub <- sce.amy.PB[rowData(sce.amy.PB)$HomoloGene.ID %in% sharedHomologs, ]  # 14178
## Many are duplicated...
rowData(sce.mm.PBsub)$Symbol[duplicated(rowData(sce.mm.PBsub)$HomoloGene.ID)]
    # shoot many genes are orthologs
rowData(sce.hsap.PBsub)$Symbol[duplicated(rowData(sce.hsap.PBsub)$HomoloGene.ID)]
    # same here, slightly less

### -> Take the higher-expressing of the duplicated - just mean across PB clusters:
## mm ===
# Hoist the homolog IDs once instead of re-reading rowData() on every iteration
homIDs.mm <- rowData(sce.mm.PBsub)$HomoloGene.ID
duplicatedSet.mm <- which(duplicated(homIDs.mm))
# Preallocate outputs (growing vectors inside the loop copies them each pass);
# seq_along() is also safe if duplicatedSet.mm were empty (1:length() would give c(1, 0))
genes2compare.mm <- vector("list", length(duplicatedSet.mm))
gene2keep.mm <- character(length(duplicatedSet.mm))
for(g in seq_along(duplicatedSet.mm)){
  # All genes sharing this duplicated HomoloGene.ID
  genes2compare.mm[[g]] <- rownames(sce.mm.PBsub)[homIDs.mm == homIDs.mm[duplicatedSet.mm[g]]]
  # Keep the gene with the highest mean logcounts across pseudobulked clusters;
  # which.max() returns the first max, matching the old order(...)[1] on ties
  rowmeansmat <- rowMeans(assay(sce.mm.PBsub[genes2compare.mm[[g]], ], "logcounts"))
  gene2keep.mm[g] <- names(which.max(rowmeansmat))
}
# Now pull out those that not being compared, so can `c()`
table(rownames(sce.mm.PBsub) %in% unlist(genes2compare.mm)) # 133 - why isn't this ==
sum(lengths(genes2compare.mm)) # 328  ????
length(unique(unlist(genes2compare.mm))) # 133 - oh.  also `length(unique(gene2keep.mm)) == 52`
genesNoCompare.mm <- rownames(sce.mm.PBsub)[!(rownames(sce.mm.PBsub) %in% unlist(genes2compare.mm))]
# Finally combine and subset
sce.mm.PBsub <- sce.mm.PBsub[c(genesNoCompare.mm, unique(gene2keep.mm)), ]
table(rowData(sce.mm.PBsub)$HomoloGene.ID %in% sharedHomologs)  # 13444 TRUE
table(duplicated(rowData(sce.mm.PBsub)$HomoloGene.ID)) # 13444 FALSE  dope.
## Human ===
# First change rownames to EnsemblID (keep the uniquified symbols in a new column)
rowData(sce.hsap.PBsub)$Symbol.unique <- rownames(sce.hsap.PBsub)
rownames(sce.hsap.PBsub) <- rowData(sce.hsap.PBsub)$ID
# Same duplicate resolution as for mouse: per duplicated HomoloGene.ID, keep the
# gene with the highest mean logcounts across pseudobulked clusters.
# Hoist IDs once; preallocate; seq_along() for empty-set safety.
homIDs.hsap <- rowData(sce.hsap.PBsub)$HomoloGene.ID
duplicatedSet.hsap <- which(duplicated(homIDs.hsap))
genes2compare.hsap <- vector("list", length(duplicatedSet.hsap))
gene2keep.hsap <- character(length(duplicatedSet.hsap))
for(g in seq_along(duplicatedSet.hsap)){
  genes2compare.hsap[[g]] <- rownames(sce.hsap.PBsub)[homIDs.hsap == homIDs.hsap[duplicatedSet.hsap[g]]]
  rowmeansmat <- rowMeans(assay(sce.hsap.PBsub[genes2compare.hsap[[g]], ], "logcounts"))
  # which.max() == old order(..., decreasing=TRUE)[1] (first max on ties)
  gene2keep.hsap[g] <- names(which.max(rowmeansmat))
}
# Now pull out those that not being compared, so can `c()`
table(rownames(sce.hsap.PBsub) %in% unlist(genes2compare.hsap)) # 109 - why isn't this ==
sum(lengths(genes2compare.hsap)) # 136  ????
length(unique(unlist(genes2compare.hsap))) # 109 - oh.  also `length(unique(gene2keep.hsap)) == 52`
genesNoCompare.hsap <- rownames(sce.hsap.PBsub)[!(rownames(sce.hsap.PBsub) %in% unlist(genes2compare.hsap))]
# of length 13392 (which + 52 == 13444)
# Finally combine and subset
sce.hsap.PBsub <- sce.hsap.PBsub[c(genesNoCompare.hsap, unique(gene2keep.hsap)), ]
table(rowData(sce.hsap.PBsub)$HomoloGene.ID %in% sharedHomologs)  # 13444 TRUE
table(duplicated(rowData(sce.hsap.PBsub)$HomoloGene.ID)) # 13444 FALSE  dope.
## Match order and save
# Align the mouse SCE rows to the human SCE's HomoloGene.ID order, sanity-check,
# take a quick sample:cluster-level correlation look, then persist both objects
sce.mm.PBsub <- sce.mm.PBsub[match(rowData(sce.hsap.PBsub)$HomoloGene.ID,
                                   rowData(sce.mm.PBsub)$HomoloGene.ID), ]
table(rowData(sce.mm.PBsub)$HomoloGene.ID == rowData(sce.hsap.PBsub)$HomoloGene.ID)
    # all TRUE - good
pheatmap(cor(assay(sce.mm.PBsub, "logcounts"), assay(sce.hsap.PBsub, "logcounts")), fontsize=5)
    # (ah but this is at the sample:cluster level)
Readme <- "These two SCEs are subsetted and ordered for matching HomoloGene.ID in the rowData.  This can be used to subset the nucleus-level SCEs in their respective Rdata files."
save(sce.mm.PBsub, sce.hsap.PBsub, Readme, file="/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/SCE_mm-MeA-PBd_w_matchingHsap-Amyg-PBd_HomoloGene.IDs_MNT.rda")
### FINALLY resume comparisons === === === === ===
## mm stats
# Extract per-cluster p-values and t-statistics (column 2 of each eBayes fit)
# into gene x cluster matrices
pvals_mm <- sapply(eb_list.amy.mm, function(x) {
  x$p.value[, 2, drop = FALSE]
})
rownames(pvals_mm) = rownames(sce.amy.mm.PB)
ts_mm <- sapply(eb_list.amy.mm, function(x) {
  x$t[, 2, drop = FALSE]
})
rownames(ts_mm) = rownames(sce.amy.mm.PB)
## Human stats
pvals_hsap <- sapply(eb_list.amy.broad, function(x) {
  x$p.value[, 2, drop = FALSE]
})
rownames(pvals_hsap) = rowData(sce.amy.PB)$ID
ts_hsap <- sapply(eb_list.amy.broad, function(x) {
  x$t[, 2, drop = FALSE]
})
rownames(ts_hsap) = rowData(sce.amy.PB)$ID
### Subset and check matching 'HomoloGene.ID' === === === ===
# Subset both species' matrices to the deduplicated shared homologs (the PBsub
# SCEs), then swap rownames to HomoloGene.ID so the two sides line up
pvals_mm <- pvals_mm[rownames(sce.mm.PBsub), ]
ts_mm <- ts_mm[rownames(sce.mm.PBsub), ]
pvals_hsap <- pvals_hsap[rowData(sce.hsap.PBsub)$ID, ]
ts_hsap <- ts_hsap[rowData(sce.hsap.PBsub)$ID, ]
rownames(ts_mm) <- rowData(sce.mm.PBsub)$HomoloGene.ID
rownames(pvals_mm) <- rowData(sce.mm.PBsub)$HomoloGene.ID
rownames(ts_hsap) <- rowData(sce.hsap.PBsub)$HomoloGene.ID
rownames(pvals_hsap) <- rowData(sce.hsap.PBsub)$HomoloGene.ID
table(rownames(ts_mm) == rownames(ts_hsap))
## all 14121 TRUE - good
## Now run correlation
cor_t = cor(ts_mm, ts_hsap)
signif(cor_t, 2)
## On just hsap cluster-specific homologous genes ===
# Top-100 rows per human cluster, ranked by t-statistic.
# NOTE: the p-value matrix used to be passed into mapply() alongside the t's but
# was never used by the selection function, so it is dropped here (and the dead
# `oo =` assignment removed); result is the same 100 x ncol index matrix.
hsap_specific_indices <- apply(ts_hsap, 2, function(t) {
  order(t, decreasing = TRUE)[1:100]
})
hsap_ind <- unique(as.numeric(hsap_specific_indices))
cor_t_hsap <- cor(ts_mm[hsap_ind, ],
                  ts_hsap[hsap_ind, ])
signif(cor_t_hsap, 3)

## On just mouse cluster-specific homologous genes ===
# Same selection, driven by the mouse cluster t's
mm_specific_indices <- apply(ts_mm, 2, function(t) {
  order(t, decreasing = TRUE)[1:100]
})
mm_ind <- unique(as.numeric(mm_specific_indices))
cor_t_mm <- cor(ts_mm[mm_ind, ],
                ts_hsap[mm_ind, ])
signif(cor_t_mm, 3)
### Heatmap
# Diverging color scale over [-0.5, 0.5] for the levelplots below
theSeq.all <- seq(-.5, .5, by = 0.01)
my.col.all <- colorRampPalette(brewer.pal(7, "PRGn"))(length(theSeq.all))
# One row of colData per human cluster (kept for reference/annotation)
ct <- colData(sce.hsap.PBsub)
ct <- ct[!duplicated(sce.hsap.PBsub$cellType.final), ]
# Copies of the correlation matrices with species-tagged dimnames for plotting
cor_t_hsap_toPlot <- cor_t_hsap
rownames(cor_t_hsap_toPlot) <- paste0(rownames(cor_t_hsap_toPlot),"_","M.mus")
colnames(cor_t_hsap_toPlot) <- paste0(colnames(cor_t_hsap_toPlot),"_","H.sap")
cor_t_mm_toPlot <- cor_t_mm
rownames(cor_t_mm_toPlot) <- paste0(rownames(cor_t_mm_toPlot),"_","M.mus")
colnames(cor_t_mm_toPlot) <- paste0(colnames(cor_t_mm_toPlot),"_","H.sap")
cor_t_all_toPlot <- cor_t
rownames(cor_t_all_toPlot) <- paste0(rownames(cor_t_all_toPlot),"_","M.mus")
colnames(cor_t_all_toPlot) <- paste0(colnames(cor_t_all_toPlot),"_","H.sap")
## MNT added 14Apr2020: Reorder to diagonal & threshold at 0.4 for all-gene correlation === === ===
# Start from descending - easier to manually order
#cor_t_all_toPlot <- cor_t_all_toPlot[ ,rev(1:ncol(cor_t_all_toPlot))]
# This is useful:
apply(cor_t_all_toPlot, 2, which.max)
# If want to re-order human labels (but prefer re-ordering mm labels)
#cor_t_all_toPlot <- cor_t_all_toPlot[ ,rev(c(14,5,3,4, 7,10,12,6, 9,8,2,1, 11,13))]
cor_t_all_toPlot <- cor_t_all_toPlot[c(14,11:13,4,3, 2,8,7,9, 6,15,5,16,1,10), ]
# Threshold at 0.4 - pmin() caps elementwise and keeps dims/dimnames
# (replaces the previous ifelse(x >= 0.4, 0.4, x))
range(cor_t_all_toPlot)
cor_t_all_toPlot <- pmin(cor_t_all_toPlot, 0.4)
## Do for other gene subsets ===
# Human
#cor_t_hsap_toPlot <- cor_t_hsap_toPlot[ ,rev(1:ncol(cor_t_hsap_toPlot))]
#cor_t_hsap_toPlot <- cor_t_hsap_toPlot[ ,rev(c(14,5,3,4, 7,10,12,6, 9,8,2,1, 11,13))]
cor_t_hsap_toPlot <- cor_t_hsap_toPlot[c(14,11:13,4,3, 2,8,7,9, 6,15,5,16,1,10), ]
# Threshold at 0.4
range(cor_t_hsap_toPlot)
cor_t_hsap_toPlot <- pmin(cor_t_hsap_toPlot, 0.4)
# mm
#cor_t_mm_toPlot <- cor_t_mm_toPlot[ ,rev(1:ncol(cor_t_mm_toPlot))]
#cor_t_mm_toPlot <- cor_t_mm_toPlot[ ,rev(c(14,5,3,4, 7,10,12,6, 9,8,2,1, 11,13))]
cor_t_mm_toPlot <- cor_t_mm_toPlot[c(14,11:13,4,3, 2,8,7,9, 6,15,5,16,1,10), ]
# Threshold at 0.4
range(cor_t_mm_toPlot)
cor_t_mm_toPlot <- pmin(cor_t_mm_toPlot, 0.4)
# Three-page PDF of levelplots: human-driven, mouse-driven, and all shared homologs
pdf("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/pdfs/exploration/overlap-mouse-MeA_with_LIBD-10x-Amyg_top100-or-all_Apr2020.pdf")
# Most human-specific
print(
  levelplot(
    cor_t_hsap_toPlot,
    aspect = "fill",
    at = theSeq.all,
    col.regions = my.col.all,
    ylab = "",
    xlab = "",
    scales = list(x = list(rot = 90, cex = 1), y = list(cex = 1)),
    main="Correlation of cluster-specific t's \n (top 100 genes/human (LIBD) clusters)"
  )
)
# Most mm-specific
print(
  levelplot(
    cor_t_mm_toPlot,
    aspect = "fill",
    at = theSeq.all,
    col.regions = my.col.all,
    ylab = "",
    xlab = "",
    scales = list(x = list(rot = 90, cex = 1), y = list(cex = 1)),
    main="Correlation of cluster-specific t's \n (top 100 genes/mouse MeA (UCLA) clusters)"
  )
)
# All
print(
  levelplot(
    cor_t_all_toPlot,
    aspect = "fill",
    at = theSeq.all,
    col.regions = my.col.all,
    ylab = "",
    xlab = "",
    scales = list(x = list(rot = 90, cex = 1), y = list(cex = 1)),
    main="Correlation of cluster-specific t's \n (all shared 13,444 homologs)",
    fontsize = 20
  )
)
dev.off()
### Comparison to UCLA mouse MeA with SN-LEVEL stats ==================================
  # Added MNT 14May2020 - UPDATED 22May2020 to compare to 2019 dataset
  #      (previously only 2017 neuronal subclusters), now with neuronal subcluster info
# Load mouse stats
# load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2017Neuron/markers-stats_mouseMeA-2017-neuSubs_findMarkers-SN-LEVEL_MNTMay2020.rda", verbose=T)
#     # markers.mmMeAneu.t.1vAll
load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2019Cell/markers-stats_mouseMeA-2019-with-16neuSubs_findMarkers-SN-LEVEL_MNTMay2020.rda", verbose=T)
    # markers.mmMeA.t.1vAll
# Load mouse SCE
# load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2017Neuron/SCE_mouse-MeA-2017_neuronalSubclusters_HVGs_MNT.rda", verbose=T)
#     # sce.amy.mm17hvgs
load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2019Cell/SCE_mouse-MeA_downstream-processing_MNT.rda", verbose=T)
    # sce.amy.mm, chosen.hvgs.amy.mm
## Calculate and add t-statistic (= std.logFC * sqrt(N)) for mouse clusters
 #     and fix row order to the first entry "Astrocyte"
fixTo <- rownames(markers.mmMeA.t.1vAll[[1]])
for(x in names(markers.mmMeA.t.1vAll)){
  markers.mmMeA.t.1vAll[[x]]$t.stat <- markers.mmMeA.t.1vAll[[x]]$std.logFC * sqrt(ncol(sce.amy.mm))
  markers.mmMeA.t.1vAll[[x]] <- markers.mmMeA.t.1vAll[[x]][fixTo, ]
}
# Pull out the t's
# (gene x subcluster matrix; rows follow fixTo's gene order)
ts.mmMeA <- sapply(markers.mmMeA.t.1vAll, function(x){x$t.stat})
rownames(ts.mmMeA) <- fixTo
## Human t stats subset/re-ordering ===
# Bring in human stats; create t's
load("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/rdas/markers-stats_Amyg-n2_findMarkers-SN-LEVEL_MNTMay2020.rda", verbose=T)
    # markers.amy.t.1vAll, markers.amy.t.design, markers.amy.wilcox.block
rm(markers.amy.t.design, markers.amy.wilcox.block)
# Need to add t's with N nuclei used in constrasts
load("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/rdas/regionSpecific_Amyg-n2_cleaned-combined_SCE_MNTFeb2020.rda", verbose=T)
    #sce.amy, chosen.hvgs.amy, pc.choice.amy, clusterRefTab.amy, ref.sampleInfo
rm(chosen.hvgs.amy, pc.choice.amy, clusterRefTab.amy,ref.sampleInfo)
# First drop "Ambig.lowNtrxts" (50 nuclei)
sce.amy <- sce.amy[ ,sce.amy$cellType.split != "Ambig.lowNtrxts"]
sce.amy$cellType.split <- droplevels(sce.amy$cellType.split)
## As above, calculate and add t-statistic (= std.logFC * sqrt(N)) from contrasts
 #     and fix row order to the first entry "Astro"
fixTo <- rownames(markers.amy.t.1vAll[["Astro"]])
for(s in names(markers.amy.t.1vAll)){
  markers.amy.t.1vAll[[s]]$t.stat <- markers.amy.t.1vAll[[s]]$std.logFC * sqrt(ncol(sce.amy))
  markers.amy.t.1vAll[[s]] <- markers.amy.t.1vAll[[s]][fixTo, ]
}
# Pull out the t's
ts.amy <- sapply(markers.amy.t.1vAll, function(x){x$t.stat})
rownames(ts.amy) <- fixTo
## Bring in HomoloGene.ID info to subset/match order
# load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2019Cell/SCE_mm-MeA-PBd_w_matchingHsap-Amyg-PBd_HomoloGene.IDs_MNT.rda",
#      verbose=T)
#     # sce.mm.PBsub, sce.hsap.PBsub, Readme
#
# table(rowData(sce.mm.PBsub)$HomoloGene.ID == rowData(sce.hsap.PBsub)$HomoloGene.ID) # all TRUE - dope
#     # (see above - these are the intersecting homologs)
#
# ## However!
# table(rownames(ts.mmMeA) %in% rownames(sce.mm.PBsub)) # not all - so will need to get union
# rm(sce.mm.PBsub, sce.hsap.PBsub, Readme)
## HomoloGene.ID for all human genes ====
# Same JAX-table mapping as in the pseudobulk section, but applied to the
# nucleus-level sce.amy (network dependency: fetched from MGI)
hom = read.delim("http://www.informatics.jax.org/downloads/reports/HOM_AllOrganism.rpt",
                 as.is=TRUE)
hom_hs <- hom[hom$Common.Organism.Name == "human", ]
    # of 19,124 entries
# First Add EntrezID for human
hs.entrezIds <- mapIds(org.Hs.eg.db, keys=rowData(sce.amy)$ID,
                       column="ENTREZID", keytype="ENSEMBL")
    # "'select()' returned 1:many mapping between keys and columns"
table(!is.na(hs.entrezIds))
    # 22,818 valid entries (remember this is already subsetted for those non-zero genes only)
# Add to rowData
rowData(sce.amy) <- cbind(rowData(sce.amy), hs.entrezIds)
# Now how many in JAX db?
table(rowData(sce.amy)$hs.entrezIds %in% hom_hs$EntrezGene.ID)
    # 18,865
table(rowData(sce.amy)$Symbol %in% hom_hs$Symbol)
    # 18,472 - not a bad difference
# So for mapping  === === ===
#  human.entrez > HomoloGene.ID < mm.Symbol
#        ^ filter SCE's on this
# Human (by Entrez)
rowData(sce.amy)$HomoloGene.ID <- hom_hs$HomoloGene.ID[match(rowData(sce.amy)$hs.entrezIds,
                                                             hom_hs$EntrezGene.ID)]
# end chunk ====
# Intersection?
table(rowData(sce.amy.mm)$HomoloGene.ID %in% rowData(sce.amy)$HomoloGene.ID)
# FALSE  TRUE
#   665 13845
# First give [human] ts.amy rownames their respective EnsemblID
# (have to use the full sce bc rownames(sce.hsap.PBsub) is EnsemblID and we uniquified the $Symbol)
rownames(ts.amy) <- rowData(sce.amy)$ID[match(rownames(ts.amy), rownames(sce.amy))]
# Then to HomoloGene.ID
rownames(ts.amy) <- rowData(sce.amy)$HomoloGene.ID[match(rownames(ts.amy), rowData(sce.amy)$ID)]
# Btw half are NA
table(is.na(rownames(ts.amy)))
# FALSE  TRUE
# 17261 11203
# So subset for those with HomoloGene.ID
ts.amy <- ts.amy[!is.na(rownames(ts.amy)), ]
# Mouse - can just go to HomoloGene.ID
rownames(ts.mmMeA) <- rowData(sce.amy.mm)$HomoloGene.ID[match(rownames(ts.mmMeA), rownames(sce.amy.mm))]
# Intersecting?
table(rownames(ts.mmMeA) %in% rownames(ts.amy))
# FALSE  TRUE
#   985 13525    - so we'll be running correlation across these genes
# Subset and match order
ts.mmMeA <- ts.mmMeA[rownames(ts.mmMeA) %in% rownames(ts.amy), ]
# NOTE(review): indexing by (possibly non-unique) HomoloGene.ID rownames keeps the
# first match per name - confirm duplicate IDs were resolved upstream for ts.amy
ts.amy <- ts.amy[rownames(ts.mmMeA), ]
# Correlate the species-matched t matrices and tag labels by species
cor_t_amy <- cor(ts.amy, ts.mmMeA)
rownames(cor_t_amy) = paste0(rownames(cor_t_amy),"_","H")
colnames(cor_t_amy) = paste0(colnames(cor_t_amy),"_","M")
range(cor_t_amy)
    #[1] -0.2203968  0.5023080   (previously {-0.2557751, 0.2577207} on only 2017 neuronal subsets)
### Heatmap - typically use levelplot (e.g. below), but will want pheatmap bc can cluster cols/rows
theSeq.all = seq(-.6, .6, by = 0.01)
my.col.all <- colorRampPalette(brewer.pal(7, "BrBG"))(length(theSeq.all)-1)
# Re-order mouse labels - move EN/MG/MU to after neuronal subclusters
cor_t_amy <- cor_t_amy[ ,c(1, 5,13:20,6:12, 3,2,4, 21,23,22)]
# #pdf("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/pdfs/exploration/HongLab-UCLA_mmMeA/overlap-mouseMeA-2017-neuSubs_with_LIBD-10x-AMY_SN-LEVEL-stats_May2020.pdf")
# pdf("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/pdfs/exploration/HongLab-UCLA_mmMeA/overlap-mouseMeA-2019-fullSubclusters_with_LIBD-10x-AMY_SN-LEVEL-stats_May2020.pdf")
# pheatmap(cor_t_amy,
#          color=my.col.all,
#          cluster_cols=F, cluster_rows=F,
#          breaks=theSeq.all,
#          fontsize=11, fontsize_row=15, fontsize_col=12,
#          #main="Correlation of cluster-specific t's for mouse MeA neuronal subclusters \n (Wu et al., Neuron 2017)")
#          main="Correlation of cluster-specific t's to mouse MeA \n complete subclusters (Chen-Hu-Wu et al., Cell 2019)")
# # Version with mouse glial cell types 'missing' in LIBD data dropped:
# pheatmap(cor_t_amy_sub,
#          color=my.col.all,
#          cluster_cols=F, cluster_rows=F,
#          breaks=theSeq.all,
#          fontsize=11.5, fontsize_row=15, fontsize_col=13.5,
#          main="Correlation of cluster-specific t's to mouse MeA \n complete subclusters (Chen-Hu-Wu et al., Cell 2019)")
#
# dev.off()
## Version with mouse glial cell types 'missing' in LIBD data dropped:
cor_t_amy_sub <- cor_t_amy[ ,-which(colnames(cor_t_amy) %in% c("EN_M", "MU_M", "OPC.OL_M"))]
## Actually just print as second page to the above - will probably get some suggested edits
## Iteration with top N spp:subcluster-specific genes: ========
 # Added MNT 25May2020
 # -> Basically just run through line 488, under ("Subset and match order")
# Save the ts matrices to reduce work next time
Readme <- "These t-statistic matrices are subsetted and matched for shared 'HomoloGene.ID', so `cor()` can simply be run or other gene subsets applied first."
save(ts.amy, ts.mmMeA, Readme, file="rdas/zTsMats_libd-AMY_and_ucla-mouseMeA-2019Cell_sharedGenes_25May2020.rda")
# # Have to remove the markers objects bc the rows have been fixed (actually don't need to lol)
# rm(markers.amy.t.1vAll, markers.mmMeA.t.1vAll)
#
# # Re-load them
# load("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/rdas/markers-stats_Amyg-n2_findMarkers-SN-LEVEL_MNTMay2020.rda", verbose=T)
#     # markers.amy.t.1vAll, markers.amy.t.design, markers.amy.wilcox.block
# rm(markers.amy.t.design, markers.amy.wilcox.block)
#
# load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2019Cell/markers-stats_mouseMeA-2019-with-16neuSubs_findMarkers-SN-LEVEL_MNTMay2020.rda", verbose=T)
#     # markers.mmMeA.t.1vAll
#
## On just hsap cluster-specific homologous genes ===
# For each human cluster (column), indices of its top-100 t-statistics;
# mapply over the data.frame's columns returns a 100 x ncol index matrix
# (the `oo =` assignment is just the function's implicit return value)
hsap_specific_indices = mapply(function(t) {
  oo = order(t, decreasing = TRUE)[1:100]
},
as.data.frame(ts.amy)
)
hsap_ind = unique(as.numeric(hsap_specific_indices))
length(hsap_ind) # so of 1200 (100 x 12 cellType.split), 919 unique
# Correlate human vs mouse t's over this human-driven gene subset
cor_t_hsap = cor(ts.amy[hsap_ind, ],
                 ts.mmMeA[hsap_ind, ])
range(cor_t_hsap)
    #[1] -0.2738376  0.6612352

## On just mouse cluster-specific homologous genes ===
# Same selection, driven by the mouse MeA subcluster t's
mouse_specific_indices = mapply(function(t) {
  oo = order(t, decreasing = TRUE)[1:100]
},
as.data.frame(ts.mmMeA)
)
mouse_ind = unique(as.numeric(mouse_specific_indices))
length(mouse_ind) # so of 2300 (100 x 23 subCluster), 1543 unique
cor_t_mouse = cor(ts.amy[mouse_ind, ],
                  ts.mmMeA[mouse_ind, ])
range(cor_t_mouse)
    # [1] -0.2731605  0.6113445
## UPDATED heatmap:
# Color scale: breaks span [-0.65, 0.65]; pheatmap wants one fewer color than breaks
theSeq.all <- seq(-.65, .65, by = 0.01)
my.col.all <- colorRampPalette(brewer.pal(7, "BrBG"))(length(theSeq.all)-1)
# Re-order mouse labels - move EN/MG/MU to after neuronal subclusters
cor_t_hsap <- cor_t_hsap[ ,c(1, 5,13:20,6:12, 3,2,4, 21,23,22)]
# Then threshold this one to 0.65 max (max is 0.6612).
# pmin() caps values elementwise while keeping matrix dims/dimnames
# (clearer than the previous ifelse(x >= 0.65, 0.65, x))
cor_t_hsap <- pmin(cor_t_hsap, 0.65)
# (and) drop the mouse cell classes with no LIBD counterpart
cor_t_amy_sub <- cor_t_amy[ ,-which(colnames(cor_t_amy) %in% c("EN_M", "MU_M", "OPC.OL_M"))]
cor_t_mouse <- cor_t_mouse[ ,c(1, 5,13:20,6:12, 3,2,4, 21,23,22)]
#pdf("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/pdfs/exploration/HongLab-UCLA_mmMeA/overlap-mouseMeA-2019-fullSubclusters_with_LIBD-10x-AMY_SN-LEVEL-stats_May2020.pdf")
# or
pdf("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/pdfs/exploration/HongLab-UCLA_mmMeA/overlap-mouseMeA-2019-fullSubclusters_with_LIBD-10x-AMY_SN-LEVEL-stats_numbersPrinted_May2020.pdf")
pheatmap(cor_t_amy,
color=my.col.all,
cluster_cols=F, cluster_rows=F,
breaks=theSeq.all,
fontsize=11, fontsize_row=15, fontsize_col=12,
display_numbers=T, number_format="%.2f", fontsize_number=6.5,
main="Correlation of cluster-specific t's to mouse MeA \n complete subclusters (Chen-Hu-Wu et al., Cell 2019)")
# Version with mouse glial cell types 'missing' in LIBD data dropped:
pheatmap(cor_t_amy_sub,
color=my.col.all,
cluster_cols=F, cluster_rows=F,
breaks=theSeq.all,
fontsize=11.5, fontsize_row=15, fontsize_col=13.5,
display_numbers=T, number_format="%.2f", fontsize_number=6.5,
main="Correlation of cluster-specific t's to mouse MeA \n complete subclusters (Chen-Hu-Wu et al., Cell 2019)")
# On human-specific genes (slightly thresholded)
pheatmap(cor_t_hsap,
color=my.col.all,
cluster_cols=F, cluster_rows=F,
breaks=theSeq.all,
fontsize=11.5, fontsize_row=15, fontsize_col=13.5,
display_numbers=T, number_format="%.2f", fontsize_number=6.5,
main="Correlation of top-100 cluster-specific t's to \n (Chen-Hu-Wu et al., Cell 2019) subclusters")
# On mm-MeA-specific genes
pheatmap(cor_t_mouse,
color=my.col.all,
cluster_cols=F, cluster_rows=F,
breaks=theSeq.all,
fontsize=11.5, fontsize_row=15, fontsize_col=13.5,
display_numbers=T, number_format="%.2f", fontsize_number=6.5,
main="Correlation of LIBD-AMY subclusters to \n (Chen-Hu-Wu et al., Cell 2019) top-100 subcluster t's")
dev.off()
## Intersecting some of the top markers =====================
load("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/rdas/markers-stats_Amyg-n2_findMarkers-SN-LEVEL_MNTMay2020.rda", verbose=T)
# markers.amy.t.1vAll, markers.amy.t.design, markers.amy.wilcox.block
rm(markers.amy.t.design, markers.amy.wilcox.block)
# Take top 100
markerList.t.hsap <- lapply(markers.amy.t.1vAll, function(x){
rownames(x)[x$log.FDR < log10(1e-6)]
}
)
genes.top100.hsap <- lapply(markerList.t.hsap, function(x){head(x, n=100)})
load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2019Cell/markers-stats_mouseMeA-2019-with-16neuSubs_findMarkers-SN-LEVEL_MNTMay2020.rda", verbose=T)
# markers.mmMeA.t.1vAll
# Just `toupper()` it
markerList.t.mm <- lapply(markers.mmMeA.t.1vAll, function(x){
rownames(x)[x$log.FDR < log10(1e-6)]
}
)
genes.top100.mm <- lapply(markerList.t.mm, function(x){toupper(head(x, n=100))})
genes.top100.mm <- sapply(genes.top100.mm, cbind)
## sapply
sapply(genes.top100.hsap, function(x){
apply(genes.top100.mm,2,function(y){length(intersect(x,y))})
})
# Astro Excit.1 Excit.2 Excit.3 Inhib.1 Inhib.2 Inhib.3 Inhib.4 Inhib.5 Micro Oligo OPC
# AS 20 0 1 0 0 1 0 0 1 0 0 1
# EN 2 1 1 0 1 2 1 0 0 0 0 2
# MG 0 0 0 1 0 0 0 0 0 19 0 0
# MU 2 1 0 1 0 0 1 1 0 0 0 0
# N.1 1 4 2 0 14 8 3 4 9 0 1 1
# N.10 0 6 1 4 7 7 2 0 6 0 0 0
# N.11 1 10 5 2 8 3 4 6 8 0 0 4
# N.12 2 7 4 3 7 5 2 3 5 0 2 2
# N.13 1 2 1 3 1 1 0 0 5 1 0 1
# N.14 0 7 2 4 9 6 0 4 7 1 1 2
# N.15 0 7 1 6 0 1 1 0 1 0 0 1
# N.16 1 3 4 1 7 3 3 6 4 0 0 4
# N.2 2 6 2 1 9 5 2 3 6 0 0 3
# N.3 2 3 1 4 0 3 0 0 2 0 0 0
# N.4 2 5 3 1 10 7 3 10 6 1 1 3
# N.5 0 4 3 2 4 4 1 2 5 0 0 2
# N.6 1 2 3 0 13 10 6 8 9 0 3 2
# N.7 0 4 10 1 1 3 1 2 2 0 0 1
# N.8 1 7 4 4 6 6 2 3 19 1 1 3
# N.9 0 3 1 1 10 5 2 5 4 0 0 1
# OL 0 0 2 0 0 0 0 0 0 0 19 0
# OPC 0 0 0 0 0 1 0 0 0 0 0 26
# OPC.OL 0 0 0 1 0 0 0 0 1 0 5 7
## Amongst top 40 ===
genes.top40.hsap <- lapply(markerList.t.hsap, function(x){head(x, n=40)})
genes.top40.mm <- lapply(markerList.t.mm, function(x){toupper(head(x, n=40))})
genes.top40.mm <- sapply(genes.top40.mm, cbind)
sapply(genes.top40.hsap, function(x){
apply(genes.top40.mm,2,function(y){length(intersect(x,y))})
})
# Astro Excit.1 Excit.2 Excit.3 Inhib.1 Inhib.2 Inhib.3 Inhib.4 Inhib.5 Micro Oligo OPC
# AS 7 0 0 0 0 0 0 0 0 0 0 0
# EN 1 0 0 0 0 0 0 0 0 0 0 0
# MG 0 0 0 0 0 0 0 0 0 4 0 0
# MU 0 0 0 0 0 0 0 0 0 0 0 0
# N.1 0 0 1 0 1 0 0 0 0 0 0 0
# N.10 0 0 0 0 1 2 0 0 2 0 0 0
# N.11 0 4 0 0 2 0 0 2 0 0 0 1
# N.12 1 2 2 0 0 1 0 0 2 0 0 1
# N.13 0 0 0 1 0 0 0 0 1 0 0 0
# N.14 0 2 0 1 0 1 0 0 1 1 0 0
# N.15 0 3 0 0 0 0 0 0 1 0 0 0
# N.16 0 1 1 0 0 1 0 0 1 0 0 2
# N.2 0 1 1 0 1 0 0 0 1 0 0 0
# N.3 0 1 0 0 0 2 0 0 0 0 0 0
# N.4 0 1 1 0 3 3 0 1 0 0 0 1
# N.5 0 0 0 0 1 0 0 0 0 0 0 1
# N.6 0 1 0 0 2 2 0 0 0 0 0 0
# N.7 0 0 2 0 0 1 0 1 0 0 0 0
# N.8 0 1 0 0 1 1 1 2 1 0 0 0
# N.9 0 0 0 0 0 1 0 1 1 0 0 0
# OL 0 0 1 0 0 0 0 0 0 0 7 0
# OPC 0 0 0 0 0 0 0 0 0 0 0 10
# OPC.OL 0 0 0 0 0 0 0 0 0 0 1 1
# Inhib.5 : N.8 genes ==
intersect(genes.top40.hsap[["Inhib.5"]], genes.top100.mm[ ,"N.8"])
# [1] "NPFFR2" "SV2C" "OTOF" "GRM8" "OLFM3" "FOXP2"
# round(ts.mmMeA["49202", ],3) # (Tll1 - looking because a highlighted gene in text)
# # AS EN MG MU N.1 N.10 N.11 N.12 N.13 N.14 N.15 N.16
# # -5.939 -5.932 -6.699 1.698 8.835 2.691 107.521 -5.323 20.345 86.122 -5.484 -5.423
# # N.2 N.3 N.4 N.5 N.6 N.7 N.8 N.9 OL OPC OPC.OL
# # 13.117 -5.297 33.339 16.283 -6.203 -5.520 108.310 22.783 -5.886 -4.273 -5.318
#
plotExpression(sce.amy.mm, exprs_values="logcounts", x="subCluster", colour_by="subCluster", features="Tll1")
# # ahh nothing but a few outliers
sce.amy.mm.sub <- sce.amy.mm[ ,grep("N.", sce.amy.mm$subCluster)]
sce.amy.mm.sub$subCluster <- droplevels(sce.amy.mm.sub$subCluster)
plotExpression(sce.amy.mm.sub, exprs_values="logcounts", x="subCluster", colour_by="subCluster",
features=c("Npffr2","Sv2c","Otof","Grm8","Olfm3","Foxp2"))
# Actually nothing suuuper convincing - mostly outliers. These just happen to have _more_ lol
# N.8 top genes include Pcdh8 & Lamp5
plotExpression(sce.amy.mm.sub, exprs_values="logcounts", x="subCluster", colour_by="subCluster",
features=c("Pcdh8","Lamp5"))
# N.12 reported marker genes (reported in supplementals "mmc2.xlsx" with paper)
plotExpression(sce.amy.mm.sub, exprs_values="logcounts", x="subCluster", colour_by="subCluster",
features=c("Eomes","Dsp","Nhlh2","Samd3","Trpc3","Cdhr1","Lhx1"))
# Oh six of these were of the top 10 from my own test and plotted lol. Well good.
# (and btw) ===
table(sce.amy$cellType.split, sce.amy$donor)
# Br5161 Br5212
# Ambig.lowNtrxts 34 16
# Astro 489 363
# Excit.1 141 193
# Excit.2 0 40
# Excit.3 0 55
# Inhib.1 16 155
# Inhib.2 33 76
# Inhib.3 11 24
# Inhib.4 24 0
# Inhib.5 85 13
# Micro 425 339
# Oligo 1697 1776
# OPC 335 292
# Glucocorticoid receptors? (in relation to TLL1, as per https://doi.org/10.1016/j.molbrainres.2005.09.016)
plotExpression(sce.amy, exprs_values="logcounts", x="cellType.split", colour_by="cellType.split",
features=c("NR3C1","NR3C2")) + stat_summary(fun.y = median, fun.ymin = median, fun.ymax = median,
geom = "crossbar", width = 0.3,
colour=rep(tableau20[1:12], 2)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), plot.title = element_text(size = 25))
# No particular high/specific expression in Inhib.5
### FINAL GRANT VERSION ===
# Remove EN, MU, OPC.OL, N.12 & N.15
load("rdas/zTsMats_libd-AMY_and_ucla-mouseMeA-2019Cell_sharedGenes_25May2020.rda", verbose=T)
# ts.amy, ts.mmMeA, Readme
cor_t_amy <- cor(ts.amy, ts.mmMeA)
rownames(cor_t_amy) = paste0(rownames(cor_t_amy),"_","H")
colnames(cor_t_amy) = paste0(colnames(cor_t_amy),"_","M")
# Re-order mouse labels - move EN/MG/MU to after neuronal subclusters
cor_t_amy <- cor_t_amy[ ,c(1, 5,13:20,6:12, 3,2,4, 21,23,22)]
# Remove those selected
cor_t_amy_sub <- cor_t_amy[ ,-which(colnames(cor_t_amy) %in% c("EN_M", "MU_M", "OPC.OL_M",
"N.12_M", "N.15_M"))]
range(cor_t_amy_sub)
#[1] -0.2203968 0.5023080 --> Threshold to 0.5
cor_t_amy_sub <- ifelse(cor_t_amy_sub >= 0.5, 0.5, cor_t_amy_sub)
### Heatmap - typically use levelplot (e.g. below), but will want pheatmap bc can cluster cols/rows
theSeq.all = seq(-.5, .5, by = 0.01)
my.col.all <- colorRampPalette(brewer.pal(7, "BrBG"))(length(theSeq.all)-1)
pdf("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/pdfs/exploration/HongLab-UCLA_mmMeA/overlap-mouseMeA-2019-fullSubclusters_with_LIBD-10x-AMY_SN-LEVEL-stats_FINAL_May2020.pdf",width=8)
pheatmap(cor_t_amy_sub,
color=my.col.all,
cluster_cols=F, cluster_rows=F,
angle_col=90,
breaks=theSeq.all,
fontsize=11.5, fontsize_row=17, fontsize_col=15,
legend_breaks=c(seq(-0.5,0.5,by=0.25)),
main="Correlation of cluster-specific t's to mouse MeA \n subclusters (Chen-Hu-Wu et al., Cell 2019)")
dev.off()
## For supplement: Print top markers for 'Inhib.5' & corresponding in MeA 'N.8' === ===
# (load AMY SCE - already done in session)
# Prep mouse MeA
load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2019Cell/SCE_mouse-MeA_downstream-processing_MNT.rda", verbose=T)
# sce.amy.mm, chosen.hvgs.amy.mm
sce.amy.mm.sub <- sce.amy.mm[ ,grep("N.", sce.amy.mm$subCluster)]
sce.amy.mm.sub$subCluster <- droplevels(sce.amy.mm.sub$subCluster)
genes2print <- c("Npffr2", "Tll1", "Grm8", "Foxp2")
pdf("pdfs/pubFigures/suppFig_AMY-vs-MeA_topInhib.5markers_MNTSep2020.pdf", height=2.5, width=5)
# Human AMY
print(
plotExpression(sce.amy, exprs_values = "logcounts", features=toupper(genes2print),
x="cellType.split", colour_by="cellType.split", point_alpha=0.5, point_size=1.0, ncol=4,
add_legend=F) + stat_summary(fun.y = median, fun.ymin = median, fun.ymax = median,
geom = "crossbar", width = 0.3,
colour=rep(tableau20[1:12], length(genes2print))) +
xlab("") +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 9.5),
axis.title.y = element_text(angle = 90, size = 10),
panel.grid.major=element_line(colour="grey95", size=0.8),
panel.grid.minor=element_line(colour="grey95", size=0.4))
)
# mouse MeA
print(
plotExpression(sce.amy.mm.sub, exprs_values = "logcounts", features=genes2print,
x="subCluster", colour_by="subCluster", point_alpha=0.5, point_size=1.0, ncol=4,
add_legend=F) + stat_summary(fun.y = median, fun.ymin = median, fun.ymax = median,
geom = "crossbar", width = 0.3,
colour=rep(tableau20[1:16], length(genes2print))) +
xlab("") +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 9.5),
axis.title.y = element_text(angle = 90, size = 10),
panel.grid.major=element_line(colour="grey95", size=0.8),
panel.grid.minor=element_line(colour="grey95", size=0.4))
)
dev.off()
## Heatmap version ===
# Take more overlapping, from above exploration
genes2print <- c("Npffr2", "Tll1", "Grm8", "Foxp2", "Sv2c", "Olfm3")
pdf("pdfs/pubFigures/suppFig_AMY-vs-MeA_topInhib.5markers_heatmap_MNTSep2020.pdf", width=5, height=5)
dat <- assay(sce.amy, "logcounts")
cell.idx <- splitit(sce.amy$cellType.split)
current_dat <- do.call(cbind, lapply(cell.idx, function(ii) rowMeans(dat[toupper(genes2print), ii])))
pheatmap(current_dat, cluster_rows = FALSE, cluster_cols = FALSE, breaks = seq(0.02, 4.0, length.out = 101),
color = colorRampPalette(RColorBrewer::brewer.pal(n = 7, name = "BuGn"))(100),
fontsize_row = 18, fontsize_col=16)
dat <- assay(sce.amy.mm.sub, "logcounts")
cell.idx <- splitit(sce.amy.mm.sub$subCluster)
current_dat <- do.call(cbind, lapply(cell.idx, function(ii) rowMeans(dat[genes2print, ii])))
pheatmap(current_dat, cluster_rows = FALSE, cluster_cols = FALSE, breaks = seq(0.02, 1, length.out = 101),
color = colorRampPalette(RColorBrewer::brewer.pal(n = 7, name = "BuGn"))(100),
fontsize_row = 16, fontsize_col=16)
dev.off()
|
# Standalone driver that renders the ubiquity report Rmd outside the Shiny GUI,
# using the state the GUI saved to disk (useful for debugging the report).
rm(list=ls())
library("shiny")
library("rhandsontable")
library("deSolve")
library("ggplot2")
# ubiquity system functions plus the auto-generated model components
source('library/r_general/ubiquity.r')
source('transient/auto_rcomponents.r')
Rmdfile = "my_report.Rmd"
# Restore the saved GUI objects: `som` (simulation output) and `cfg` (system state)
load("transient/rgui/default/gui_som.RData")
load("transient/rgui/default/gui_state.RData")
params = list()
params$cfg = cfg
params$som = som
# This is needed for debugging
# NOTE(review): `cfg` is modified *after* being copied into `params`, so the
# rendered report sees the pre-modification value -- confirm this is intended.
cfg$options$misc$operating_environment = 'script'
rmarkdown::render(Rmdfile,
params = params,
output_format = "html_document")
| /Code/osimertinib_dacomitinib/library/templates/r_test_rmd.r | no_license | Michorlab/NSCLC_OsimDacoOptimization | R | false | false | 545 | r | rm(list=ls())
library("shiny")
library("rhandsontable")
library("deSolve")
library("ggplot2")
source('library/r_general/ubiquity.r')
source('transient/auto_rcomponents.r')
Rmdfile = "my_report.Rmd"
load("transient/rgui/default/gui_som.RData")
load("transient/rgui/default/gui_state.RData")
params = list()
params$cfg = cfg
params$som = som
# This is needed for debugging
cfg$options$misc$operating_environment = 'script'
rmarkdown::render(Rmdfile,
params = params,
output_format = "html_document")
|
# tar_file_read() should expand to two targets: `data_file` (the tracked file
# produced by write_file()) and `data` (the command that reads it back in).
targets::tar_test("tar_file_read() manifest", {
targets::tar_script({
write_file <- function() {
write.csv(mtcars, "mtcars.csv")
"mtcars.csv"
}
get_nrows <- function() {
2L
}
tar_file_read(
data,
write_file(),
utils::read.csv(file = !!.x, nrows = get_nrows())
)
})
# Full manifest contains exactly the two generated targets.
out <- targets::tar_manifest(callr_function = NULL)
expect_equal(nrow(out), 2L)
# The file target's command wraps the user-supplied write_file() call.
out <- targets::tar_manifest(data_file, callr_function = NULL)
expect_true(grepl("write_file", out$command))
# The read target's command starts with the reader, has `data_file`
# substituted for !!.x, and keeps get_nrows() to be evaluated at build time.
out <- targets::tar_manifest(data, callr_function = NULL)
expect_true(grepl("^utils::read\\.csv", out$command))
expect_true(grepl("data_file", out$command))
expect_true(grepl("get_nrows", out$command))
})
# The dependency graph should wire write_file -> data_file -> data, with
# get_nrows also feeding the read step.
targets::tar_test("tar_file_read() graph", {
targets::tar_script({
write_file <- function() {
write.csv(mtcars, "mtcars.csv")
"mtcars.csv"
}
get_nrows <- function() {
2L
}
tar_file_read(
data,
write_file(),
utils::read.csv(file = !!.x, nrows = get_nrows())
)
})
# Inspect the edge list of the pipeline's dependency graph.
out <- targets::tar_network(callr_function = NULL)$edges
expect_equal(out$to[out$from == "data_file"], "data")
expect_equal(out$to[out$from == "get_nrows"], "data")
expect_equal(out$to[out$from == "write_file"], "data_file")
})
# End-to-end: running the pipeline should produce a `data` target whose row
# count honors the nrows = get_nrows() argument passed to read.csv().
targets::tar_test("tar_file_read() pipeline", {
  targets::tar_script({
    write_file <- function() {
      write.csv(mtcars, "mtcars.csv")
      "mtcars.csv"
    }
    get_nrows <- function() {
      2L
    }
    tar_file_read(
      data,
      write_file(),
      utils::read.csv(file = !!.x, nrows = get_nrows())
    )
  })
  # Run the pipeline for its side effects. The original code appended a stray
  # `$edges` (copied from the tar_network() call in the graph test); tar_make()
  # returns NULL invisibly, so the subscript was dead code and is dropped.
  targets::tar_make(callr_function = NULL)
  expect_equal(nrow(targets::tar_read(data)), 2L)
})
| /tests/testthat/test-tar_file_read.R | permissive | ropensci/tarchetypes | R | false | false | 1,734 | r | targets::tar_test("tar_file_read() manifest", {
targets::tar_script({
write_file <- function() {
write.csv(mtcars, "mtcars.csv")
"mtcars.csv"
}
get_nrows <- function() {
2L
}
tar_file_read(
data,
write_file(),
utils::read.csv(file = !!.x, nrows = get_nrows())
)
})
out <- targets::tar_manifest(callr_function = NULL)
expect_equal(nrow(out), 2L)
out <- targets::tar_manifest(data_file, callr_function = NULL)
expect_true(grepl("write_file", out$command))
out <- targets::tar_manifest(data, callr_function = NULL)
expect_true(grepl("^utils::read\\.csv", out$command))
expect_true(grepl("data_file", out$command))
expect_true(grepl("get_nrows", out$command))
})
targets::tar_test("tar_file_read() graph", {
targets::tar_script({
write_file <- function() {
write.csv(mtcars, "mtcars.csv")
"mtcars.csv"
}
get_nrows <- function() {
2L
}
tar_file_read(
data,
write_file(),
utils::read.csv(file = !!.x, nrows = get_nrows())
)
})
out <- targets::tar_network(callr_function = NULL)$edges
expect_equal(out$to[out$from == "data_file"], "data")
expect_equal(out$to[out$from == "get_nrows"], "data")
expect_equal(out$to[out$from == "write_file"], "data_file")
})
# End-to-end: running the pipeline should produce a `data` target whose row
# count honors the nrows = get_nrows() argument passed to read.csv().
targets::tar_test("tar_file_read() pipeline", {
  targets::tar_script({
    write_file <- function() {
      write.csv(mtcars, "mtcars.csv")
      "mtcars.csv"
    }
    get_nrows <- function() {
      2L
    }
    tar_file_read(
      data,
      write_file(),
      utils::read.csv(file = !!.x, nrows = get_nrows())
    )
  })
  # Run the pipeline for its side effects. The original code appended a stray
  # `$edges` (copied from the tar_network() call in the graph test); tar_make()
  # returns NULL invisibly, so the subscript was dead code and is dropped.
  targets::tar_make(callr_function = NULL)
  expect_equal(nrow(targets::tar_read(data)), 2L)
})
|
# Parameter set and summary results for one simulation configuration
# (feed.fn = 'antilogit', st = 0.4314286).
# Island/landscape setup: grid dimensions plus energy (E) and productivity
# (prod) distributions; incComp* (incoming competition?) disabled here -- TODO
# confirm meaning against the simulation driver.
island.pars <- list(isl.r=5, isl.c=5, E.mean=20, E.sd=0, prod.mean=20, prod.sd=0, incComp=NA, incComp.time=NA)
# Founding population: size, mean body weight, and weight CV.
pop.pars <- list(N.init=40, w.mean=1, w.CV=0.04)
# Sub-model settings: movement, predation (disabled: pred.fn='none'),
# feeding, and reproduction.
move.pars <- list(m=0.5, move.fn='random')
pred.pars <- list(p=0.5, prP=0.2, predOpt=10, predEnd=300, pred.fn='none')
feed.pars <- list(st=0.4314286, feed.fn='antilogit')
repro.pars <- list(f=1, babybump=1, repro.fn='log')
# Number of replicate simulations run with this parameter set.
nsims <- 50
# Summary outputs: mean weight (MeanW), weight variance (VarW), summed weight
# (SumW), and abundance (N) at initialization (.init), post-feeding (.pF), and
# post-reproduction (.pR) -- presumably averaged over the 50 replicates;
# confirm against the code that wrote this file.
MeanW.init <- 0.9935847
MeanW.pF <- 2.535865
VarW.pF <- NA
MeanW.pR <- 2.585075
SumW.init <- 39.74339
SumW.pF <- 500.3323
SumW.pR <- 1151.401
N.init <- 40
N.pF <- 199.12
N.pR <- 448.26
| /SimOutput/antilogit/st_ParSet_6/parameters.R | no_license | Sz-Tim/IslandRule | R | false | false | 580 | r | island.pars <- list(isl.r=5, isl.c=5, E.mean=20, E.sd=0, prod.mean=20, prod.sd=0, incComp=NA, incComp.time=NA)
pop.pars <- list(N.init=40, w.mean=1, w.CV=0.04)
move.pars <- list(m=0.5, move.fn='random')
pred.pars <- list(p=0.5, prP=0.2, predOpt=10, predEnd=300, pred.fn='none')
feed.pars <- list(st=0.4314286, feed.fn='antilogit')
repro.pars <- list(f=1, babybump=1, repro.fn='log')
nsims <- 50
MeanW.init <- 0.9935847
MeanW.pF <- 2.535865
VarW.pF <- NA
MeanW.pR <- 2.585075
SumW.init <- 39.74339
SumW.pF <- 500.3323
SumW.pR <- 1151.401
N.init <- 40
N.pF <- 199.12
N.pR <- 448.26
|
#' Half-vectorize a square matrix
#'
#' Stacks the unique elements of a square matrix -- the lower triangle
#' including the diagonal, taken column by column -- into a one-column matrix
#' whose row names combine the source dimnames as "row...col".
#'
#' @param myMatrix A square matrix.
#'
#' @return A \code{p*(p+1)/2} x 1 matrix of the unique (lower-triangular)
#'   elements, with informative row names.
#'
#' @examples vech(matrix(1:9, nrow = 3, ncol = 3))
#'
#' @export
# ------------------------------------------------------------------------------
vech <- function(myMatrix) {
  # -------------------------
  # Input checks. Note: the previous test `class(myMatrix) != "matrix"` breaks
  # on R >= 4.0, where a matrix has class c("matrix", "array"), yielding a
  # length-2 condition that if() rejects; is.matrix() is the robust test.
  # -------------------------
  if (!is.matrix(myMatrix)) {
    stop("input needs to be a square matrix")
  }
  if (ncol(myMatrix) != nrow(myMatrix)) {
    stop("input needs to be a square matrix")
  }
  # Fall back to generic V1, V2, ... labels when dimnames are absent
  if (is.null(rownames(myMatrix))) {
    rownames(myMatrix) <- paste0("V", seq_len(nrow(myMatrix)))
  }
  if (is.null(colnames(myMatrix))) {
    colnames(myMatrix) <- paste0("V", seq_len(ncol(myMatrix)))
  }
  # number of rows and of unique elements
  p <- nrow(myMatrix)
  pstar <- p * (p + 1) / 2
  # Preallocate the output (stored as a one-column matrix)
  Va <- matrix(nrow = pstar, ncol = 1)
  rownames(Va) <- seq_len(pstar)
  # Walk the lower triangle column-major, recording value and "row...col" name
  ii <- 0L
  for (cc in seq_len(p)) {
    for (rr in cc:p) {
      ii <- ii + 1L
      Va[ii] <- myMatrix[rr, cc]
      rownames(Va)[ii] <- paste0(rownames(myMatrix)[rr], "...", colnames(myMatrix)[cc])
    }
  }
  Va
}
| /R/vech.R | no_license | mlgiordano1/MIIVmsem | R | false | false | 1,260 | r | #' Takes a square matrix and returns a vector of unique elements
#'
#' @param myMatrix A square matrix
#'
#' @return vector of unique elements
#'
#' @examples vech(df, 2018, 08)
#'
#' @export
# ------------------------------------------------------------------------------
vech <- function(myMatrix) {
# -------------------------
# Set up some checks - ex., square vector etc
# -------------------------
if (class(myMatrix)!="matrix") {
stop("input needs to be a square matrix")
}
if (ncol(myMatrix)!=nrow(myMatrix)) {
stop("input needs to be a square matrix")
}
if (is.null(rownames(myMatrix))) {
rownames(myMatrix) <- paste0("V", 1:nrow(myMatrix))
}
if (is.null(colnames(myMatrix))) {
colnames(myMatrix) <- paste0("V", 1:ncol(myMatrix))
}
# generic counter
ii = 0
# save number of rows in A
p = nrow(myMatrix)
# number of unique elements
pstar = p*(p+1)/2
# empty vector
Va = matrix(nrow = pstar, ncol = 1)
rownames(Va) <- 1:pstar
# looping rows and columns of matrix to fill vector
for (cc in 1:p) {
for (rr in cc:p) {
ii = ii+1
Va[ii] <- myMatrix[rr, cc]
rownames(Va)[ii] <- paste0(rownames(myMatrix)[rr], "...", colnames(myMatrix)[[cc]])
}
}
return(Va)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.rds_operations.R
\name{modify_db_cluster_snapshot_attribute}
\alias{modify_db_cluster_snapshot_attribute}
\title{Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot}
\usage{
modify_db_cluster_snapshot_attribute(DBClusterSnapshotIdentifier,
AttributeName, ValuesToAdd = NULL, ValuesToRemove = NULL)
}
\arguments{
\item{DBClusterSnapshotIdentifier}{[required] The identifier for the DB cluster snapshot to modify the attributes for.}
\item{AttributeName}{[required] The name of the DB cluster snapshot attribute to modify.
To manage authorization for other AWS accounts to copy or restore a manual DB cluster snapshot, set this value to \code{restore}.}
\item{ValuesToAdd}{A list of DB cluster snapshot attributes to add to the attribute specified by \code{AttributeName}.
To authorize other AWS accounts to copy or restore a manual DB cluster snapshot, set this list to include one or more AWS account IDs, or \code{all} to make the manual DB cluster snapshot restorable by any AWS account. Do not add the \code{all} value for any manual DB cluster snapshots that contain private information that you don't want available to all AWS accounts.}
\item{ValuesToRemove}{A list of DB cluster snapshot attributes to remove from the attribute specified by \code{AttributeName}.
To remove authorization for other AWS accounts to copy or restore a manual DB cluster snapshot, set this list to include one or more AWS account identifiers, or \code{all} to remove authorization for any AWS account to copy or restore the DB cluster snapshot. If you specify \code{all}, an AWS account whose account ID is explicitly added to the \code{restore} attribute can still copy or restore a manual DB cluster snapshot.}
}
\description{
Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot.
}
\details{
To share a manual DB cluster snapshot with other AWS accounts, specify \code{restore} as the \code{AttributeName} and use the \code{ValuesToAdd} parameter to add a list of IDs of the AWS accounts that are authorized to restore the manual DB cluster snapshot. Use the value \code{all} to make the manual DB cluster snapshot public, which means that it can be copied or restored by all AWS accounts. Do not add the \code{all} value for any manual DB cluster snapshots that contain private information that you don't want available to all AWS accounts. If a manual DB cluster snapshot is encrypted, it can be shared, but only by specifying a list of authorized AWS account IDs for the \code{ValuesToAdd} parameter. You can't use \code{all} as a value for that parameter in this case.
To view which AWS accounts have access to copy or restore a manual DB cluster snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API action.
}
\section{Accepted Parameters}{
\preformatted{modify_db_cluster_snapshot_attribute(
DBClusterSnapshotIdentifier = "string",
AttributeName = "string",
ValuesToAdd = list(
"string"
),
ValuesToRemove = list(
"string"
)
)
}
}
| /service/paws.rds/man/modify_db_cluster_snapshot_attribute.Rd | permissive | CR-Mercado/paws | R | false | true | 3,201 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.rds_operations.R
\name{modify_db_cluster_snapshot_attribute}
\alias{modify_db_cluster_snapshot_attribute}
\title{Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot}
\usage{
modify_db_cluster_snapshot_attribute(DBClusterSnapshotIdentifier,
AttributeName, ValuesToAdd = NULL, ValuesToRemove = NULL)
}
\arguments{
\item{DBClusterSnapshotIdentifier}{[required] The identifier for the DB cluster snapshot to modify the attributes for.}
\item{AttributeName}{[required] The name of the DB cluster snapshot attribute to modify.
To manage authorization for other AWS accounts to copy or restore a manual DB cluster snapshot, set this value to \code{restore}.}
\item{ValuesToAdd}{A list of DB cluster snapshot attributes to add to the attribute specified by \code{AttributeName}.
To authorize other AWS accounts to copy or restore a manual DB cluster snapshot, set this list to include one or more AWS account IDs, or \code{all} to make the manual DB cluster snapshot restorable by any AWS account. Do not add the \code{all} value for any manual DB cluster snapshots that contain private information that you don't want available to all AWS accounts.}
\item{ValuesToRemove}{A list of DB cluster snapshot attributes to remove from the attribute specified by \code{AttributeName}.
To remove authorization for other AWS accounts to copy or restore a manual DB cluster snapshot, set this list to include one or more AWS account identifiers, or \code{all} to remove authorization for any AWS account to copy or restore the DB cluster snapshot. If you specify \code{all}, an AWS account whose account ID is explicitly added to the \code{restore} attribute can still copy or restore a manual DB cluster snapshot.}
}
\description{
Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot.
}
\details{
To share a manual DB cluster snapshot with other AWS accounts, specify \code{restore} as the \code{AttributeName} and use the \code{ValuesToAdd} parameter to add a list of IDs of the AWS accounts that are authorized to restore the manual DB cluster snapshot. Use the value \code{all} to make the manual DB cluster snapshot public, which means that it can be copied or restored by all AWS accounts. Do not add the \code{all} value for any manual DB cluster snapshots that contain private information that you don't want available to all AWS accounts. If a manual DB cluster snapshot is encrypted, it can be shared, but only by specifying a list of authorized AWS account IDs for the \code{ValuesToAdd} parameter. You can't use \code{all} as a value for that parameter in this case.
To view which AWS accounts have access to copy or restore a manual DB cluster snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API action.
}
\section{Accepted Parameters}{
\preformatted{modify_db_cluster_snapshot_attribute(
DBClusterSnapshotIdentifier = "string",
AttributeName = "string",
ValuesToAdd = list(
"string"
),
ValuesToRemove = list(
"string"
)
)
}
}
|
# ------------- Initialize variables we are interested in for modeling -------------
# Fit a weighted probability random forest (ranger) predicting inpatient
# hospitalization (IPDIS15) from plan, behavior, and control variables.
#
# Args:
#   train        data.frame of training records; must contain the target
#                IPDIS15, the weight column `w`, and its backup `w_copy`.
#   planVars     character vector of plan-design predictor names.
#   behaviorVars character vector of behavior predictor names.
#   controlVars  character vector of control/demographic predictor names.
#   nonHospWt    case weight assigned to rows with w_copy < 1 (presumably the
#                non-hospitalized cases -- TODO confirm).
#   hospWt       case weight assigned to rows with w_copy > 1.
#
# Returns: a fitted `ranger` object (probability forest).
#
# NOTE(review): `ordered` references a global `behaviors`, not the
# `behaviorVars` argument -- confirm this is intentional.
buildHospModel <- function(train, planVars, behaviorVars, controlVars, nonHospWt, hospWt) {
target <- 'IPDIS15'
weights <- 'w'
vars <- c(target, planVars, behaviorVars, controlVars, weights)
predVars <- c(planVars, behaviorVars, controlVars)
# Variables to coerce to ordered factors vs plain factors.
ordered <- c('PLANMETL','ADGENH42', 'age.cat', behaviors)
factors <- c('IPDIS15', 'HOSPINSX', 'HSAACCT','COBRA', 'PREGNT53')
#Coerce to fewer factors
train$ANNDEDCT <- as.numeric(train$ANNDEDCT)
# Coerce plan/behavior columns to numeric and clamp negatives (MEPS-style
# negative codes mark missing/inapplicable -- TODO confirm) to zero.
for(variable in c(planVars, behaviorVars)){
train[,variable] <- as.numeric(train[,variable])
train[train[,variable] < 0, variable] <- 0
}
for(factor in factors){
train[,factor] <- as.factor(train[, factor])
}
for(factor in ordered){
train[,factor] <- as.ordered(train[, factor])
}
# Override case weights from the caller-supplied values; w_copy preserves
# the original weights that decide which group a row belongs to.
train$w[train$w_copy < 1] <- nonHospWt
train$w[train$w_copy > 1] <- hospWt
#ds <- downSample(train, train[,target], list = FALSE)
# Model formula: IPDIS15 ~ all predictors.
f <- formula(paste(target, paste(predVars, collapse = '+' ), sep = '~'))
fit <- ranger(formula = f,
data = train,
case.weights = train$w,
num.trees = 2500,
importance = 'impurity',
min.node.size = 150,
probability = TRUE,
classification = TRUE,
sample.fraction = .7)
return(fit)
}
| /r-shiny/template/data/shiny_ranger.R | no_license | Anderson-Lab/capstone-spring-2018-team-2 | R | false | false | 1,426 | r |
# ------------- Initialize variables we are interested in for modeling -------------
buildHospModel <- function(train, planVars, behaviorVars, controlVars, nonHospWt, hospWt) {
target <- 'IPDIS15'
weights <- 'w'
vars <- c(target, planVars, behaviorVars, controlVars, weights)
predVars <- c(planVars, behaviorVars, controlVars)
ordered <- c('PLANMETL','ADGENH42', 'age.cat', behaviors)
factors <- c('IPDIS15', 'HOSPINSX', 'HSAACCT','COBRA', 'PREGNT53')
#Coerce to fewer factors
train$ANNDEDCT <- as.numeric(train$ANNDEDCT)
for(variable in c(planVars, behaviorVars)){
train[,variable] <- as.numeric(train[,variable])
train[train[,variable] < 0, variable] <- 0
}
for(factor in factors){
train[,factor] <- as.factor(train[, factor])
}
for(factor in ordered){
train[,factor] <- as.ordered(train[, factor])
}
train$w[train$w_copy < 1] <- nonHospWt
train$w[train$w_copy > 1] <- hospWt
#ds <- downSample(train, train[,target], list = FALSE)
f <- formula(paste(target, paste(predVars, collapse = '+' ), sep = '~'))
fit <- ranger(formula = f,
data = train,
case.weights = train$w,
num.trees = 2500,
importance = 'impurity',
min.node.size = 150,
probability = TRUE,
classification = TRUE,
sample.fraction = .7)
return(fit)
}
|
# Read a table from input.xlsx, inspect it, and keep only the rows whose
# salary is strictly above the minimum.
library("xlsx")
data = read.xlsx("input.xlsx", sheetIndex = 1)
print(data)
# NOTE(review): this summary() result is discarded when the script is
# source()d -- wrap in print() if the output is wanted.
summary(data)
print(min(data$salary))
# Drop every row tied for the minimum salary.
subdata = subset(data,salary > min(data$salary))
print(subdata)
#write.xlsx(subdata,"output.xlsx")
write.xlsx(subdata,"output.xlsx", row.names=FALSE) | /working with external file/process_xlsx.R | no_license | Rahulkala/R-workspace | R | false | false | 264 | r | library("xlsx")
data = read.xlsx("input.xlsx", sheetIndex = 1)
print(data)
summary(data)
print(min(data$salary))
subdata = subset(data,salary > min(data$salary))
print(subdata)
#write.xlsx(subdata,"output.xlsx")
write.xlsx(subdata,"output.xlsx", row.names=FALSE) |
### =========================================================================
### SummarizedExperiment0 objects
### -------------------------------------------------------------------------
###
### TODO: Once the "old" SummarizedExperiment class in GenomicRanges is gone
### (in BioC 2.4) the name will be available again, so it may be used to
### rename either the SummarizedExperiment0 or the RangedSummarizedExperiment
### class.
###
setClass("SummarizedExperiment0",
contains="Vector",
representation(
colData="DataFrame", # columns and their annotations
assays="Assays", # Data -- e.g., list of matricies
NAMES="characterORNULL",
elementMetadata="DataFrame"
),
prototype(
assays=Assays()
)
)
### Combine the new parallel slots with those of the parent class. Make sure
### to put the new parallel slots *first*.
setMethod("parallelSlotNames", "SummarizedExperiment0",
function(x) c("NAMES", callNextMethod())
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Validity.
###
.valid.SummarizedExperiment0.assays_current <- function(x)
{
if (!is(slot(x, "assays"), "Assays"))
return("'assays' is out-of-date; use updateObject()")
NULL
}
.valid.SummarizedExperiment0.assays_nrow <- function(x)
{
if (length(x@assays) == 0L)
return(NULL)
if (nrow(x@assays) != length(x))
return("'assays' nrow differs from 'mcols' nrow")
NULL
}
.valid.SummarizedExperiment0.assays_ncol <- function(x)
{
if (length(x@assays) == 0L)
return(NULL)
if (ncol(x@assays) != nrow(colData(x)))
return("'assays' ncol differs from 'colData' nrow")
NULL
}
.valid.SummarizedExperiment0.assays_dim <- function(x)
{
c(.valid.SummarizedExperiment0.assays_nrow(x),
.valid.SummarizedExperiment0.assays_ncol(x))
}
.valid.SummarizedExperiment0 <- function(x)
{
c(.valid.SummarizedExperiment0.assays_current(x),
.valid.SummarizedExperiment0.assays_dim(x))
}
setValidity2("SummarizedExperiment0", .valid.SummarizedExperiment0)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Low-level constructor (not exported).
###
new_SummarizedExperiment0 <- function(assays, names, rowData, colData,
metadata)
{
if (!is(assays, "Assays"))
assays <- Assays(assays)
if (is.null(rowData)) {
if (is.null(names))
nrow <- nrow(assays)
else
nrow <- length(names)
rowData <- new("DataFrame", nrows=nrow)
}
new("SummarizedExperiment0", NAMES=names,
elementMetadata=rowData,
colData=colData,
assays=assays,
metadata=as.list(metadata))
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Getters and setters.
###
setMethod("length", "SummarizedExperiment0",
function(x) nrow(x@elementMetadata)
)
setMethod("names", "SummarizedExperiment0", function(x) x@NAMES)
setReplaceMethod("names", "SummarizedExperiment0",
function(x, value)
{
NAMES <- S4Vectors:::normalize_names_replacement_value(value, x)
BiocGenerics:::replaceSlots(x, NAMES=NAMES, check=FALSE)
}
)
### We define an exptData() getter and setter for backward compatibility with
### "classic" SummarizedExperiment objects.
setMethod("exptData", "SummarizedExperiment0",
function(x, ...)
{
.Deprecated("metadata")
SimpleList(metadata(x, ...))
})
setReplaceMethod("exptData", "SummarizedExperiment0",
function(x, ..., value)
{
.Deprecated("metadata<-")
`metadata<-`(x, ..., value=value)
})
setMethod(colData, "SummarizedExperiment0", function(x, ...) x@colData)
setReplaceMethod("colData", c("SummarizedExperiment0", "DataFrame"),
function(x, ..., value)
{
if (nrow(value) != ncol(x))
stop("nrow of supplied 'colData' must equal ncol of object")
BiocGenerics:::replaceSlots(x, colData=value, check=FALSE)
})
setMethod(assays, "SummarizedExperiment0",
function(x, ..., withDimnames=TRUE)
{
assays <- as(x@assays, "SimpleList")
if (withDimnames)
endoapply(assays, "dimnames<-", dimnames(x))
else
assays
})
.SummarizedExperiment.assays.replace <-
function(x, ..., withDimnames=TRUE, value)
{
## withDimnames arg allows names(assays(se, withDimnames=FALSE)) <- value
ok <- vapply(value, function(elt, xdimnames) {
e <- dimnames(elt)
(is.null(e[[1]]) || identical(e[[1]], xdimnames[[1]])) &&
(is.null(e[[2]]) || identical(e[[2]], xdimnames[[2]]))
}, logical(1), xdimnames=dimnames(x))
if (!all(ok))
stop("current and replacement dimnames() differ")
x <- BiocGenerics:::replaceSlots(x, assays=Assays(value), check=FALSE)
## validObject(x) should be called below because it would then fully
## re-validate objects that derive from SummarizedExperiment0 (e.g.
## DESeqDataSet objects) after the user sets the assays slot with
## assays(x) <- value. For example the assays slot of a DESeqDataSet
## object must contain a matrix named 'counts' and calling validObject(x)
## would check that but .valid.SummarizedExperiment0(x) doesn't.
## The FourC() constructor function defined in the FourCSeq package
## actually takes advantage of the incomplete validation below to
## purposedly return invalid FourC objects!
msg <- .valid.SummarizedExperiment0(x)
if (!is.null(msg))
stop(msg)
x
}
setReplaceMethod("assays", c("SummarizedExperiment0", "SimpleList"),
.SummarizedExperiment.assays.replace)
setReplaceMethod("assays", c("SummarizedExperiment0", "list"),
.SummarizedExperiment.assays.replace)
## convenience for common use case
setMethod(assay, c("SummarizedExperiment0", "missing"),
function(x, i, ...)
{
assays <- assays(x, ...)
if (0L == length(assays))
stop("'assay(<", class(x), ">, i=\"missing\", ...) ",
"length(assays(<", class(x), ">)) is 0'")
assays[[1]]
})
setMethod(assay, c("SummarizedExperiment0", "numeric"),
function(x, i, ...)
{
tryCatch({
assays(x, ...)[[i]]
}, error=function(err) {
stop("'assay(<", class(x), ">, i=\"numeric\", ...)' ",
"invalid subscript 'i'\n", conditionMessage(err))
})
})
setMethod(assay, c("SummarizedExperiment0", "character"),
function(x, i, ...)
{
msg <- paste0("'assay(<", class(x), ">, i=\"character\", ...)' ",
"invalid subscript 'i'")
res <- tryCatch({
assays(x, ...)[[i]]
}, error=function(err) {
stop(msg, "\n", conditionMessage(err))
})
if (is.null(res))
stop(msg, "\n'i' not in names(assays(<", class(x), ">))")
res
})
setReplaceMethod("assay", c("SummarizedExperiment0", "missing", "matrix"),
function(x, i, ..., value)
{
if (0L == length(assays(x)))
stop("'assay(<", class(x), ">) <- value' ", "length(assays(<",
class(x), ">)) is 0")
assays(x)[[1]] <- value
x
})
setReplaceMethod("assay",
c("SummarizedExperiment0", "numeric", "matrix"),
function(x, i = 1, ..., value)
{
assays(x, ...)[[i]] <- value
x
})
setReplaceMethod("assay",
c("SummarizedExperiment0", "character", "matrix"),
function(x, i, ..., value)
{
assays(x, ...)[[i]] <- value
x
})
setMethod("assayNames", "SummarizedExperiment0",
function(x, ...)
{
names(assays(x, withDimnames=FALSE))
})
setReplaceMethod("assayNames", c("SummarizedExperiment0", "character"),
function(x, ..., value)
{
names(assays(x, withDimnames=FALSE)) <- value
x
})
## cannonical location for dim, dimnames; dimnames should be checked
## for consistency (if non-null) and stripped from assays on
## construction, or added from assays if row/col names are NULL in
## <SummarizedExperiment0> but not assays. dimnames need to be added on
## to assays when assays() invoked
setMethod(dim, "SummarizedExperiment0",
function(x)
{
c(length(x), nrow(colData(x)))
})
setMethod(dimnames, "SummarizedExperiment0",
function(x)
{
list(names(x), rownames(colData(x)))
})
setReplaceMethod("dimnames", c("SummarizedExperiment0", "list"),
function(x, value)
{
NAMES <- S4Vectors:::normalize_names_replacement_value(value[[1]], x)
colData <- colData(x)
rownames(colData) <- value[[2]]
BiocGenerics:::replaceSlots(x, NAMES=NAMES, colData=colData, check=FALSE)
})
setReplaceMethod("dimnames", c("SummarizedExperiment0", "NULL"),
function(x, value)
{
dimnames(x) <- list(NULL, NULL)
x
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Subsetting.
###
.SummarizedExperiment.charbound <-
function(idx, txt, fmt)
{
orig <- idx
idx <- match(idx, txt)
if (any(bad <- is.na(idx))) {
msg <- paste(S4Vectors:::selectSome(orig[bad]), collapse=" ")
stop(sprintf(fmt, msg))
}
idx
}
setMethod("[", c("SummarizedExperiment0", "ANY", "ANY"),
function(x, i, j, ..., drop=TRUE)
{
if (1L != length(drop) || (!missing(drop) && drop))
warning("'drop' ignored '[,", class(x), ",ANY,ANY-method'")
if (missing(i) && missing(j))
return(x)
if (!missing(i)) {
if (is.character(i)) {
fmt <- paste0("<", class(x), ">[i,] index out of bounds: %s")
i <- .SummarizedExperiment.charbound(i, rownames(x), fmt)
}
ii <- as.vector(i)
ans_elementMetadata <- x@elementMetadata[i, , drop=FALSE]
if (is(x, "RangedSummarizedExperiment")) {
ans_rowRanges <- x@rowRanges[i]
} else {
ans_NAMES <- x@NAMES[ii]
}
}
if (!missing(j)) {
if (is.character(j)) {
fmt <- paste0("<", class(x), ">[,j] index out of bounds: %s")
j <- .SummarizedExperiment.charbound(j, colnames(x), fmt)
}
ans_colData <- x@colData[j, , drop=FALSE]
jj <- as.vector(j)
}
if (missing(i)) {
ans_assays <- x@assays[ , jj]
ans <- BiocGenerics:::replaceSlots(x, ...,
colData=ans_colData,
assays=ans_assays,
check=FALSE)
} else if (missing(j)) {
ans_assays <- x@assays[ii, ]
if (is(x, "RangedSummarizedExperiment")) {
ans <- BiocGenerics:::replaceSlots(x, ...,
elementMetadata=ans_elementMetadata,
rowRanges=ans_rowRanges,
assays=ans_assays,
check=FALSE)
} else {
ans <- BiocGenerics:::replaceSlots(x, ...,
elementMetadata=ans_elementMetadata,
NAMES=ans_NAMES,
assays=ans_assays,
check=FALSE)
}
} else {
ans_assays <- x@assays[ii, jj]
if (is(x, "RangedSummarizedExperiment")) {
ans <- BiocGenerics:::replaceSlots(x, ...,
elementMetadata=ans_elementMetadata,
rowRanges=ans_rowRanges,
colData=ans_colData,
assays=ans_assays,
check=FALSE)
} else {
ans <- BiocGenerics:::replaceSlots(x, ...,
elementMetadata=ans_elementMetadata,
NAMES=ans_NAMES,
colData=ans_colData,
assays=ans_assays,
check=FALSE)
}
}
ans
})
setReplaceMethod("[",
c("SummarizedExperiment0", "ANY", "ANY", "SummarizedExperiment0"),
function(x, i, j, ..., value)
{
if (missing(i) && missing(j))
return(value)
ans_metadata <- c(metadata(x), metadata(value))
if (!missing(i)) {
if (is.character(i)) {
fmt <- paste0("<", class(x), ">[i,] index out of bounds: %s")
i <- .SummarizedExperiment.charbound(i, rownames(x), fmt)
}
ii <- as.vector(i)
ans_elementMetadata <- local({
emd <- x@elementMetadata
emd[i,] <- value@elementMetadata
emd
})
if (is(x, "RangedSummarizedExperiment")) {
ans_rowRanges <- local({
r <- x@rowRanges
r[i] <- value@rowRanges
names(r)[ii] <- names(value@rowRanges)
r
})
} else {
ans_NAMES <- local({
nms <- x@NAMES
nms[ii] <- value@NAMES
nms
})
}
}
if (!missing(j)) {
if (is.character(j)) {
fmt <- paste0("<", class(x), ">[,j] index out of bounds: %s")
j <- .SummarizedExperiment.charbound(j, colnames(x), fmt)
}
jj <- as.vector(j)
ans_colData <- local({
c <- x@colData
c[j,] <- value@colData
rownames(c)[jj] <- rownames(value@colData)
c
})
}
if (missing(i)) {
ans_assays <- local({
a <- x@assays
a[ , jj] <- value@assays
a
})
ans <- BiocGenerics:::replaceSlots(x, ...,
metadata=ans_metadata,
colData=ans_colData,
assays=ans_assays,
check=FALSE)
msg <- .valid.SummarizedExperiment0.assays_ncol(ans)
} else if (missing(j)) {
ans_assays <- local({
a <- x@assays
a[ii, ] <- value@assays
a
})
if (is(x, "RangedSummarizedExperiment")) {
ans <- BiocGenerics:::replaceSlots(x, ...,
metadata=ans_metadata,
elementMetadata=ans_elementMetadata,
rowRanges=ans_rowRanges,
assays=ans_assays,
check=FALSE)
} else {
ans <- BiocGenerics:::replaceSlots(x, ...,
metadata=ans_metadata,
elementMetadata=ans_elementMetadata,
NAMES=ans_NAMES,
assays=ans_assays,
check=FALSE)
}
msg <- .valid.SummarizedExperiment0.assays_nrow(ans)
} else {
ans_assays <- local({
a <- x@assays
a[ii, jj] <- value@assays
a
})
if (is(x, "RangedSummarizedExperiment")) {
ans <- BiocGenerics:::replaceSlots(x, ...,
metadata=ans_metadata,
elementMetadata=ans_elementMetadata,
rowRanges=ans_rowRanges,
colData=ans_colData,
assays=ans_assays,
check=FALSE)
} else {
ans <- BiocGenerics:::replaceSlots(x, ...,
metadata=ans_metadata,
elementMetadata=ans_elementMetadata,
NAMES=ans_NAMES,
colData=ans_colData,
assays=ans_assays,
check=FALSE)
}
msg <- .valid.SummarizedExperiment0.assays_dim(ans)
}
if (!is.null(msg))
stop(msg)
ans
})
setMethod("extractROWS", "SummarizedExperiment0",
function(x, i)
{
i <- normalizeSingleBracketSubscript(i, x)
x[i, ]
}
)
setMethod("replaceROWS", "SummarizedExperiment0",
function(x, i, value)
{
i <- normalizeSingleBracketSubscript(i, x)
x[i, ] <- value
x
}
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Quick colData access.
###
setMethod("[[", c("SummarizedExperiment0", "ANY", "missing"),
function(x, i, j, ...)
{
colData(x)[[i, ...]]
})
setReplaceMethod("[[", c("SummarizedExperiment0", "ANY", "missing"),
function(x, i, j, ..., value)
{
colData(x)[[i, ...]] <- value
x
})
.DollarNames.SummarizedExperiment0 <- function(x, pattern)
grep(pattern, names(colData(x)), value=TRUE)
setMethod("$", "SummarizedExperiment0",
function(x, name)
{
colData(x)[[name]]
})
setReplaceMethod("$", "SummarizedExperiment0",
function(x, name, value)
{
colData(x)[[name]] <- value
x
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Display.
###
setMethod("show", "SummarizedExperiment0",
function(object)
{
selectSome <- S4Vectors:::selectSome
scat <- function(fmt, vals=character(), exdent=2, ...)
{
vals <- ifelse(nzchar(vals), vals, "''")
lbls <- paste(S4Vectors:::selectSome(vals), collapse=" ")
txt <- sprintf(fmt, length(vals), lbls)
cat(strwrap(txt, exdent=exdent, ...), sep="\n")
}
cat("class:", class(object), "\n")
cat("dim:", dim(object), "\n")
## metadata()
expt <- names(metadata(object))
if (is.null(expt))
expt <- character(length(metadata(object)))
scat("metadata(%d): %s\n", expt)
## assays()
nms <- assayNames(object)
if (is.null(nms))
nms <- character(length(assays(object, withDimnames=FALSE)))
scat("assays(%d): %s\n", nms)
## rownames()
dimnames <- dimnames(object)
dlen <- sapply(dimnames, length)
if (dlen[[1]]) scat("rownames(%d): %s\n", dimnames[[1]])
else scat("rownames: NULL\n")
## mcols()
mcolnames <- names(mcols(object))
fmt <- "metadata column names(%d): %s\n"
if (is(object, "RangedSummarizedExperiment"))
fmt <- paste("rowRanges", fmt)
scat(fmt, mcolnames)
## colnames()
if (dlen[[2]]) scat("colnames(%d): %s\n", dimnames[[2]])
else cat("colnames: NULL\n")
## colData()
scat("colData names(%d): %s\n", names(colData(object)))
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Combine.
###
### Appropriate for objects with different ranges and same samples.
setMethod("rbind", "SummarizedExperiment0",
function(..., deparse.level=1)
{
args <- unname(list(...))
.rbind.SummarizedExperiment(args)
})
.rbind.SummarizedExperiment <- function(args)
{
if (!.compare(lapply(args, colnames)))
stop("'...' objects must have the same colnames")
if (!.compare(lapply(args, ncol)))
stop("'...' objects must have the same number of samples")
if (is(args[[1L]], "RangedSummarizedExperiment")) {
rowRanges <- do.call(c, lapply(args, rowRanges))
} else {
## Code below taken from combine_GAlignments_objects() from the
## GenomicAlignments package.
## Combine "NAMES" slots.
NAMES_slots <- lapply(args, function(x) x@NAMES)
## TODO: Use elementIsNull() here when it becomes available.
has_no_names <- sapply(NAMES_slots, is.null, USE.NAMES=FALSE)
if (all(has_no_names)) {
NAMES <- NULL
} else {
noname_idx <- which(has_no_names)
if (length(noname_idx) != 0L)
NAMES_slots[noname_idx] <-
lapply(elementLengths(args[noname_idx]), character)
NAMES <- unlist(NAMES_slots, use.names=FALSE)
}
}
colData <- .cbind.DataFrame(args, colData, "colData")
assays <- do.call(rbind, lapply(args, slot, "assays"))
elementMetadata <- do.call(rbind, lapply(args, slot, "elementMetadata"))
metadata <- do.call(c, lapply(args, metadata))
if (is(args[[1L]], "RangedSummarizedExperiment")) {
BiocGenerics:::replaceSlots(args[[1L]],
rowRanges=rowRanges, colData=colData, assays=assays,
elementMetadata=elementMetadata, metadata=metadata)
} else {
BiocGenerics:::replaceSlots(args[[1L]],
NAMES=NAMES, colData=colData, assays=assays,
elementMetadata=elementMetadata, metadata=metadata)
}
}
### Appropriate for objects with same ranges and different samples.
setMethod("cbind", "SummarizedExperiment0",
function(..., deparse.level=1)
{
args <- unname(list(...))
.cbind.SummarizedExperiment(args)
})
.cbind.SummarizedExperiment <- function(args)
{
if (is(args[[1L]], "RangedSummarizedExperiment")) {
if (!.compare(lapply(args, rowRanges), TRUE))
stop("'...' object ranges (rows) are not compatible")
rowRanges <- rowRanges(args[[1L]])
mcols(rowRanges) <- .cbind.DataFrame(args, mcols, "mcols")
} else {
elementMetadata <- .cbind.DataFrame(args, mcols, "mcols")
}
colData <- do.call(rbind, lapply(args, colData))
assays <- do.call(cbind, lapply(args, slot, "assays"))
metadata <- do.call(c, lapply(args, metadata))
if (is(args[[1L]], "RangedSummarizedExperiment")) {
BiocGenerics:::replaceSlots(args[[1L]],
rowRanges=rowRanges,
colData=colData, assays=assays, metadata=metadata)
} else {
BiocGenerics:::replaceSlots(args[[1L]],
elementMetadata=elementMetadata,
colData=colData, assays=assays, metadata=metadata)
}
}
.compare <- function(x, GenomicRanges=FALSE)
{
x1 <- x[[1]]
if (GenomicRanges) {
if (is(x1, "GRangesList")) {
x <- lapply(x, unlist)
x1 <- x[[1]]
}
for (i in seq_along(x)[-1]) {
if (length(x1) != length(x[[i]]))
return(FALSE)
ok <- x1 == x[[i]]
if (!all(ok))
return(FALSE)
}
return(TRUE)
} else {
all(sapply(x[-1],
function(xelt) all(identical(xelt, x[[1]]))))
}
}
.cbind.DataFrame <- function(args, accessor, accessorName)
{
lst <- lapply(args, accessor)
if (!.compare(lst)) {
nms <- lapply(lst, names)
nmsv <- unlist(nms, use.names=FALSE)
names(nmsv) <- rep(seq_along(nms), elementLengths(nms))
dups <- duplicated(nmsv)
## no duplicates
if (!any(dups))
return(do.call(cbind, lst))
## confirm duplicates are the same
lapply(nmsv[duplicated(nmsv)], function(d) {
if (!.compare(lapply(lst, "[", d)))
stop("column(s) '", unname(d),
"' in ", sQuote(accessorName),
" are duplicated and the data do not match")})
## remove duplicates
do.call(cbind, lst)[,!dups]
} else {
lst[[1]]
}
}
| /R/SummarizedExperiment0-class.R | no_license | amustaf5/SummarizedExperiment | R | false | false | 22,498 | r | ### =========================================================================
### SummarizedExperiment0 objects
### -------------------------------------------------------------------------
###
### TODO: Once the "old" SummarizedExperiment class in GenomicRanges is gone
### (in BioC 2.4) the name will be available again, so it may be used to
### rename either the SummarizedExperiment0 or the RangedSummarizedExperiment
### class.
###
setClass("SummarizedExperiment0",
contains="Vector",
representation(
colData="DataFrame", # columns and their annotations
assays="Assays", # Data -- e.g., list of matricies
NAMES="characterORNULL",
elementMetadata="DataFrame"
),
prototype(
assays=Assays()
)
)
### Combine the new parallel slots with those of the parent class. Make sure
### to put the new parallel slots *first*.
setMethod("parallelSlotNames", "SummarizedExperiment0",
function(x) c("NAMES", callNextMethod())
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Validity.
###
.valid.SummarizedExperiment0.assays_current <- function(x)
{
if (!is(slot(x, "assays"), "Assays"))
return("'assays' is out-of-date; use updateObject()")
NULL
}
.valid.SummarizedExperiment0.assays_nrow <- function(x)
{
if (length(x@assays) == 0L)
return(NULL)
if (nrow(x@assays) != length(x))
return("'assays' nrow differs from 'mcols' nrow")
NULL
}
.valid.SummarizedExperiment0.assays_ncol <- function(x)
{
if (length(x@assays) == 0L)
return(NULL)
if (ncol(x@assays) != nrow(colData(x)))
return("'assays' ncol differs from 'colData' nrow")
NULL
}
.valid.SummarizedExperiment0.assays_dim <- function(x)
{
c(.valid.SummarizedExperiment0.assays_nrow(x),
.valid.SummarizedExperiment0.assays_ncol(x))
}
.valid.SummarizedExperiment0 <- function(x)
{
c(.valid.SummarizedExperiment0.assays_current(x),
.valid.SummarizedExperiment0.assays_dim(x))
}
setValidity2("SummarizedExperiment0", .valid.SummarizedExperiment0)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Low-level constructor (not exported).
###
new_SummarizedExperiment0 <- function(assays, names, rowData, colData,
metadata)
{
if (!is(assays, "Assays"))
assays <- Assays(assays)
if (is.null(rowData)) {
if (is.null(names))
nrow <- nrow(assays)
else
nrow <- length(names)
rowData <- new("DataFrame", nrows=nrow)
}
new("SummarizedExperiment0", NAMES=names,
elementMetadata=rowData,
colData=colData,
assays=assays,
metadata=as.list(metadata))
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Getters and setters.
###
setMethod("length", "SummarizedExperiment0",
function(x) nrow(x@elementMetadata)
)
setMethod("names", "SummarizedExperiment0", function(x) x@NAMES)
setReplaceMethod("names", "SummarizedExperiment0",
function(x, value)
{
NAMES <- S4Vectors:::normalize_names_replacement_value(value, x)
BiocGenerics:::replaceSlots(x, NAMES=NAMES, check=FALSE)
}
)
### We define an exptData() getter and setter for backward compatibility with
### "classic" SummarizedExperiment objects.
setMethod("exptData", "SummarizedExperiment0",
function(x, ...)
{
.Deprecated("metadata")
SimpleList(metadata(x, ...))
})
setReplaceMethod("exptData", "SummarizedExperiment0",
function(x, ..., value)
{
.Deprecated("metadata<-")
`metadata<-`(x, ..., value=value)
})
setMethod(colData, "SummarizedExperiment0", function(x, ...) x@colData)
setReplaceMethod("colData", c("SummarizedExperiment0", "DataFrame"),
function(x, ..., value)
{
if (nrow(value) != ncol(x))
stop("nrow of supplied 'colData' must equal ncol of object")
BiocGenerics:::replaceSlots(x, colData=value, check=FALSE)
})
setMethod(assays, "SummarizedExperiment0",
function(x, ..., withDimnames=TRUE)
{
assays <- as(x@assays, "SimpleList")
if (withDimnames)
endoapply(assays, "dimnames<-", dimnames(x))
else
assays
})
.SummarizedExperiment.assays.replace <-
function(x, ..., withDimnames=TRUE, value)
{
## withDimnames arg allows names(assays(se, withDimnames=FALSE)) <- value
ok <- vapply(value, function(elt, xdimnames) {
e <- dimnames(elt)
(is.null(e[[1]]) || identical(e[[1]], xdimnames[[1]])) &&
(is.null(e[[2]]) || identical(e[[2]], xdimnames[[2]]))
}, logical(1), xdimnames=dimnames(x))
if (!all(ok))
stop("current and replacement dimnames() differ")
x <- BiocGenerics:::replaceSlots(x, assays=Assays(value), check=FALSE)
## validObject(x) should be called below because it would then fully
## re-validate objects that derive from SummarizedExperiment0 (e.g.
## DESeqDataSet objects) after the user sets the assays slot with
## assays(x) <- value. For example the assays slot of a DESeqDataSet
## object must contain a matrix named 'counts' and calling validObject(x)
## would check that but .valid.SummarizedExperiment0(x) doesn't.
## The FourC() constructor function defined in the FourCSeq package
## actually takes advantage of the incomplete validation below to
## purposedly return invalid FourC objects!
msg <- .valid.SummarizedExperiment0(x)
if (!is.null(msg))
stop(msg)
x
}
setReplaceMethod("assays", c("SummarizedExperiment0", "SimpleList"),
.SummarizedExperiment.assays.replace)
setReplaceMethod("assays", c("SummarizedExperiment0", "list"),
.SummarizedExperiment.assays.replace)
## convenience for common use case
setMethod(assay, c("SummarizedExperiment0", "missing"),
function(x, i, ...)
{
assays <- assays(x, ...)
if (0L == length(assays))
stop("'assay(<", class(x), ">, i=\"missing\", ...) ",
"length(assays(<", class(x), ">)) is 0'")
assays[[1]]
})
setMethod(assay, c("SummarizedExperiment0", "numeric"),
function(x, i, ...)
{
tryCatch({
assays(x, ...)[[i]]
}, error=function(err) {
stop("'assay(<", class(x), ">, i=\"numeric\", ...)' ",
"invalid subscript 'i'\n", conditionMessage(err))
})
})
setMethod(assay, c("SummarizedExperiment0", "character"),
function(x, i, ...)
{
msg <- paste0("'assay(<", class(x), ">, i=\"character\", ...)' ",
"invalid subscript 'i'")
res <- tryCatch({
assays(x, ...)[[i]]
}, error=function(err) {
stop(msg, "\n", conditionMessage(err))
})
if (is.null(res))
stop(msg, "\n'i' not in names(assays(<", class(x), ">))")
res
})
setReplaceMethod("assay", c("SummarizedExperiment0", "missing", "matrix"),
function(x, i, ..., value)
{
if (0L == length(assays(x)))
stop("'assay(<", class(x), ">) <- value' ", "length(assays(<",
class(x), ">)) is 0")
assays(x)[[1]] <- value
x
})
setReplaceMethod("assay",
c("SummarizedExperiment0", "numeric", "matrix"),
function(x, i = 1, ..., value)
{
assays(x, ...)[[i]] <- value
x
})
setReplaceMethod("assay",
c("SummarizedExperiment0", "character", "matrix"),
function(x, i, ..., value)
{
assays(x, ...)[[i]] <- value
x
})
setMethod("assayNames", "SummarizedExperiment0",
function(x, ...)
{
names(assays(x, withDimnames=FALSE))
})
setReplaceMethod("assayNames", c("SummarizedExperiment0", "character"),
function(x, ..., value)
{
names(assays(x, withDimnames=FALSE)) <- value
x
})
## cannonical location for dim, dimnames; dimnames should be checked
## for consistency (if non-null) and stripped from assays on
## construction, or added from assays if row/col names are NULL in
## <SummarizedExperiment0> but not assays. dimnames need to be added on
## to assays when assays() invoked
setMethod(dim, "SummarizedExperiment0",
function(x)
{
c(length(x), nrow(colData(x)))
})
setMethod(dimnames, "SummarizedExperiment0",
function(x)
{
list(names(x), rownames(colData(x)))
})
setReplaceMethod("dimnames", c("SummarizedExperiment0", "list"),
function(x, value)
{
NAMES <- S4Vectors:::normalize_names_replacement_value(value[[1]], x)
colData <- colData(x)
rownames(colData) <- value[[2]]
BiocGenerics:::replaceSlots(x, NAMES=NAMES, colData=colData, check=FALSE)
})
setReplaceMethod("dimnames", c("SummarizedExperiment0", "NULL"),
function(x, value)
{
dimnames(x) <- list(NULL, NULL)
x
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Subsetting.
###
.SummarizedExperiment.charbound <-
function(idx, txt, fmt)
{
orig <- idx
idx <- match(idx, txt)
if (any(bad <- is.na(idx))) {
msg <- paste(S4Vectors:::selectSome(orig[bad]), collapse=" ")
stop(sprintf(fmt, msg))
}
idx
}
setMethod("[", c("SummarizedExperiment0", "ANY", "ANY"),
function(x, i, j, ..., drop=TRUE)
{
if (1L != length(drop) || (!missing(drop) && drop))
warning("'drop' ignored '[,", class(x), ",ANY,ANY-method'")
if (missing(i) && missing(j))
return(x)
if (!missing(i)) {
if (is.character(i)) {
fmt <- paste0("<", class(x), ">[i,] index out of bounds: %s")
i <- .SummarizedExperiment.charbound(i, rownames(x), fmt)
}
ii <- as.vector(i)
ans_elementMetadata <- x@elementMetadata[i, , drop=FALSE]
if (is(x, "RangedSummarizedExperiment")) {
ans_rowRanges <- x@rowRanges[i]
} else {
ans_NAMES <- x@NAMES[ii]
}
}
if (!missing(j)) {
if (is.character(j)) {
fmt <- paste0("<", class(x), ">[,j] index out of bounds: %s")
j <- .SummarizedExperiment.charbound(j, colnames(x), fmt)
}
ans_colData <- x@colData[j, , drop=FALSE]
jj <- as.vector(j)
}
if (missing(i)) {
ans_assays <- x@assays[ , jj]
ans <- BiocGenerics:::replaceSlots(x, ...,
colData=ans_colData,
assays=ans_assays,
check=FALSE)
} else if (missing(j)) {
ans_assays <- x@assays[ii, ]
if (is(x, "RangedSummarizedExperiment")) {
ans <- BiocGenerics:::replaceSlots(x, ...,
elementMetadata=ans_elementMetadata,
rowRanges=ans_rowRanges,
assays=ans_assays,
check=FALSE)
} else {
ans <- BiocGenerics:::replaceSlots(x, ...,
elementMetadata=ans_elementMetadata,
NAMES=ans_NAMES,
assays=ans_assays,
check=FALSE)
}
} else {
ans_assays <- x@assays[ii, jj]
if (is(x, "RangedSummarizedExperiment")) {
ans <- BiocGenerics:::replaceSlots(x, ...,
elementMetadata=ans_elementMetadata,
rowRanges=ans_rowRanges,
colData=ans_colData,
assays=ans_assays,
check=FALSE)
} else {
ans <- BiocGenerics:::replaceSlots(x, ...,
elementMetadata=ans_elementMetadata,
NAMES=ans_NAMES,
colData=ans_colData,
assays=ans_assays,
check=FALSE)
}
}
ans
})
setReplaceMethod("[",
c("SummarizedExperiment0", "ANY", "ANY", "SummarizedExperiment0"),
function(x, i, j, ..., value)
{
if (missing(i) && missing(j))
return(value)
ans_metadata <- c(metadata(x), metadata(value))
if (!missing(i)) {
if (is.character(i)) {
fmt <- paste0("<", class(x), ">[i,] index out of bounds: %s")
i <- .SummarizedExperiment.charbound(i, rownames(x), fmt)
}
ii <- as.vector(i)
ans_elementMetadata <- local({
emd <- x@elementMetadata
emd[i,] <- value@elementMetadata
emd
})
if (is(x, "RangedSummarizedExperiment")) {
ans_rowRanges <- local({
r <- x@rowRanges
r[i] <- value@rowRanges
names(r)[ii] <- names(value@rowRanges)
r
})
} else {
ans_NAMES <- local({
nms <- x@NAMES
nms[ii] <- value@NAMES
nms
})
}
}
if (!missing(j)) {
if (is.character(j)) {
fmt <- paste0("<", class(x), ">[,j] index out of bounds: %s")
j <- .SummarizedExperiment.charbound(j, colnames(x), fmt)
}
jj <- as.vector(j)
ans_colData <- local({
c <- x@colData
c[j,] <- value@colData
rownames(c)[jj] <- rownames(value@colData)
c
})
}
if (missing(i)) {
ans_assays <- local({
a <- x@assays
a[ , jj] <- value@assays
a
})
ans <- BiocGenerics:::replaceSlots(x, ...,
metadata=ans_metadata,
colData=ans_colData,
assays=ans_assays,
check=FALSE)
msg <- .valid.SummarizedExperiment0.assays_ncol(ans)
} else if (missing(j)) {
ans_assays <- local({
a <- x@assays
a[ii, ] <- value@assays
a
})
if (is(x, "RangedSummarizedExperiment")) {
ans <- BiocGenerics:::replaceSlots(x, ...,
metadata=ans_metadata,
elementMetadata=ans_elementMetadata,
rowRanges=ans_rowRanges,
assays=ans_assays,
check=FALSE)
} else {
ans <- BiocGenerics:::replaceSlots(x, ...,
metadata=ans_metadata,
elementMetadata=ans_elementMetadata,
NAMES=ans_NAMES,
assays=ans_assays,
check=FALSE)
}
msg <- .valid.SummarizedExperiment0.assays_nrow(ans)
} else {
ans_assays <- local({
a <- x@assays
a[ii, jj] <- value@assays
a
})
if (is(x, "RangedSummarizedExperiment")) {
ans <- BiocGenerics:::replaceSlots(x, ...,
metadata=ans_metadata,
elementMetadata=ans_elementMetadata,
rowRanges=ans_rowRanges,
colData=ans_colData,
assays=ans_assays,
check=FALSE)
} else {
ans <- BiocGenerics:::replaceSlots(x, ...,
metadata=ans_metadata,
elementMetadata=ans_elementMetadata,
NAMES=ans_NAMES,
colData=ans_colData,
assays=ans_assays,
check=FALSE)
}
msg <- .valid.SummarizedExperiment0.assays_dim(ans)
}
if (!is.null(msg))
stop(msg)
ans
})
setMethod("extractROWS", "SummarizedExperiment0",
function(x, i)
{
i <- normalizeSingleBracketSubscript(i, x)
x[i, ]
}
)
setMethod("replaceROWS", "SummarizedExperiment0",
function(x, i, value)
{
i <- normalizeSingleBracketSubscript(i, x)
x[i, ] <- value
x
}
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Quick colData access.
###
setMethod("[[", c("SummarizedExperiment0", "ANY", "missing"),
function(x, i, j, ...)
{
colData(x)[[i, ...]]
})
setReplaceMethod("[[", c("SummarizedExperiment0", "ANY", "missing"),
function(x, i, j, ..., value)
{
colData(x)[[i, ...]] <- value
x
})
.DollarNames.SummarizedExperiment0 <- function(x, pattern)
grep(pattern, names(colData(x)), value=TRUE)
setMethod("$", "SummarizedExperiment0",
function(x, name)
{
colData(x)[[name]]
})
setReplaceMethod("$", "SummarizedExperiment0",
function(x, name, value)
{
colData(x)[[name]] <- value
x
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Display.
###
setMethod("show", "SummarizedExperiment0",
    function(object)
{
    ## Compact one-screen summary: class, dimensions, and abbreviated
    ## listings of the metadata, assay, dimname, mcols and colData names.
    selectSome <- S4Vectors:::selectSome
    ## scat(): print length(vals) and an abbreviated listing of 'vals',
    ## wrapped to the terminal width. Empty strings are shown as ''.
    ## (The local 'selectSome' alias is now actually used here; it was
    ## previously assigned but the code kept calling the ::: form.)
    scat <- function(fmt, vals=character(), exdent=2, ...)
    {
        vals <- ifelse(nzchar(vals), vals, "''")
        lbls <- paste(selectSome(vals), collapse=" ")
        txt <- sprintf(fmt, length(vals), lbls)
        cat(strwrap(txt, exdent=exdent, ...), sep="\n")
    }
    cat("class:", class(object), "\n")
    cat("dim:", dim(object), "\n")
    ## metadata(): unnamed entries are listed as ''
    expt <- names(metadata(object))
    if (is.null(expt))
        expt <- character(length(metadata(object)))
    scat("metadata(%d): %s\n", expt)
    ## assays(): unnamed assays are listed as ''
    nms <- assayNames(object)
    if (is.null(nms))
        nms <- character(length(assays(object, withDimnames=FALSE)))
    scat("assays(%d): %s\n", nms)
    ## rownames() -- 'dn' avoids shadowing base::dimnames
    dn <- dimnames(object)
    dlen <- vapply(dn, length, integer(1))
    if (dlen[[1]]) scat("rownames(%d): %s\n", dn[[1]])
    else cat("rownames: NULL\n")
    ## mcols()
    mcolnames <- names(mcols(object))
    fmt <- "metadata column names(%d): %s\n"
    if (is(object, "RangedSummarizedExperiment"))
        fmt <- paste("rowRanges", fmt)
    scat(fmt, mcolnames)
    ## colnames() -- NULL case printed with cat(), consistent with the
    ## rownames NULL case above (same visible output as before)
    if (dlen[[2]]) scat("colnames(%d): %s\n", dn[[2]])
    else cat("colnames: NULL\n")
    ## colData()
    scat("colData names(%d): %s\n", names(colData(object)))
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Combine.
###
### Appropriate for objects with different ranges and same samples.
## rbind: combine objects that share the same samples (columns) and have
## different rows. Argument names are stripped so the slots are combined
## positionally.
setMethod("rbind", "SummarizedExperiment0",
    function(..., deparse.level=1)
{
    objects <- unname(list(...))
    .rbind.SummarizedExperiment(objects)
})
## Row-bind a list of SummarizedExperiment0 objects that share the same
## samples (columns): rows are concatenated; colData must agree.
.rbind.SummarizedExperiment <- function(args)
{
    if (!.compare(lapply(args, colnames)))
        stop("'...' objects must have the same colnames")
    if (!.compare(lapply(args, ncol)))
        stop("'...' objects must have the same number of samples")
    if (is(args[[1L]], "RangedSummarizedExperiment")) {
        rowRanges <- do.call(c, lapply(args, rowRanges))
    } else {
        ## Code below taken from combine_GAlignments_objects() from the
        ## GenomicAlignments package.
        ## Combine "NAMES" slots.
        NAMES_slots <- lapply(args, function(x) x@NAMES)
        ## TODO: Use elementIsNull() here when it becomes available.
        ## vapply() (not sapply()) so the result is guaranteed logical
        ## even for degenerate input.
        has_no_names <- vapply(NAMES_slots, is.null, logical(1),
                               USE.NAMES=FALSE)
        if (all(has_no_names)) {
            NAMES <- NULL
        } else {
            ## Objects without names contribute empty strings so the
            ## combined NAMES vector keeps one entry per row.
            noname_idx <- which(has_no_names)
            if (length(noname_idx) != 0L)
                NAMES_slots[noname_idx] <-
                    lapply(elementLengths(args[noname_idx]), character)
            NAMES <- unlist(NAMES_slots, use.names=FALSE)
        }
    }
    colData <- .cbind.DataFrame(args, colData, "colData")
    assays <- do.call(rbind, lapply(args, slot, "assays"))
    elementMetadata <- do.call(rbind, lapply(args, slot, "elementMetadata"))
    metadata <- do.call(c, lapply(args, metadata))
    if (is(args[[1L]], "RangedSummarizedExperiment")) {
        BiocGenerics:::replaceSlots(args[[1L]],
            rowRanges=rowRanges, colData=colData, assays=assays,
            elementMetadata=elementMetadata, metadata=metadata)
    } else {
        BiocGenerics:::replaceSlots(args[[1L]],
            NAMES=NAMES, colData=colData, assays=assays,
            elementMetadata=elementMetadata, metadata=metadata)
    }
}
### Appropriate for objects with same ranges and different samples.
## cbind: combine objects that share the same rows (features) and have
## different samples. Argument names are stripped so the slots are
## combined positionally.
setMethod("cbind", "SummarizedExperiment0",
    function(..., deparse.level=1)
{
    objects <- unname(list(...))
    .cbind.SummarizedExperiment(objects)
})
## Column-bind a list of SummarizedExperiment0 objects that share the
## same rows (features): samples are concatenated; for ranged objects the
## row ranges must be compatible.
.cbind.SummarizedExperiment <- function(args)
{
    first <- args[[1L]]
    is_rse <- is(first, "RangedSummarizedExperiment")
    if (is_rse) {
        if (!.compare(lapply(args, rowRanges), TRUE))
            stop("'...' object ranges (rows) are not compatible")
        rowRanges <- rowRanges(first)
        mcols(rowRanges) <- .cbind.DataFrame(args, mcols, "mcols")
    } else {
        elementMetadata <- .cbind.DataFrame(args, mcols, "mcols")
    }
    colData <- do.call(rbind, lapply(args, colData))
    assays <- do.call(cbind, lapply(args, slot, "assays"))
    metadata <- do.call(c, lapply(args, metadata))
    if (is_rse) {
        BiocGenerics:::replaceSlots(first,
            rowRanges=rowRanges,
            colData=colData, assays=assays, metadata=metadata)
    } else {
        BiocGenerics:::replaceSlots(first,
            elementMetadata=elementMetadata,
            colData=colData, assays=assays, metadata=metadata)
    }
}
## Return TRUE if all elements of the list 'x' are the same.
##
## With GenomicRanges=TRUE the elements are compared element-wise with
## "==" (after unlisting GRangesList elements); otherwise they must be
## identical(). A single-element 'x' always compares TRUE.
.compare <- function(x, GenomicRanges=FALSE)
{
    x1 <- x[[1]]
    if (GenomicRanges) {
        if (is(x1, "GRangesList")) {
            x <- lapply(x, unlist)
            x1 <- x[[1]]
        }
        for (i in seq_along(x)[-1]) {
            if (length(x1) != length(x[[i]]))
                return(FALSE)
            if (!all(x1 == x[[i]]))
                return(FALSE)
        }
        TRUE
    } else {
        ## vapply() (not sapply()) guarantees a logical result even when
        ## 'x' has a single element; the former 'all(identical(...))'
        ## wrapper was redundant since identical() returns a scalar.
        all(vapply(x[-1], function(xelt) identical(xelt, x1), logical(1)))
    }
}
## Cbind the DataFrames extracted from 'args' via 'accessor' (e.g. colData
## or mcols). Columns that appear in more than one object are tolerated as
## long as the duplicated columns carry identical data; otherwise an error
## is raised. 'accessorName' is only used in the error message.
## NOTE(review): elementLengths() is deprecated in newer S4Vectors
## (elementNROWS()/lengths()) -- confirm the targeted package version.
.cbind.DataFrame <- function(args, accessor, accessorName)
{
    lst <- lapply(args, accessor)
    if (!.compare(lst)) {
        ## The DataFrames differ: map every column name to the object it
        ## came from, then check for names occurring more than once.
        nms <- lapply(lst, names)
        nmsv <- unlist(nms, use.names=FALSE)
        names(nmsv) <- rep(seq_along(nms), elementLengths(nms))
        dups <- duplicated(nmsv)
        ## no duplicates
        if (!any(dups))
            return(do.call(cbind, lst))
        ## confirm duplicates are the same
        lapply(nmsv[duplicated(nmsv)], function(d) {
            if (!.compare(lapply(lst, "[", d)))
                stop("column(s) '", unname(d),
                     "' in ", sQuote(accessorName),
                     " are duplicated and the data do not match")})
        ## remove duplicates
        do.call(cbind, lst)[,!dups]
    } else {
        ## All DataFrames are identical: any one of them will do.
        lst[[1]]
    }
}
|
\docType{package}
\encoding{UTF-8}
\name{ExpressionView-package}
\alias{ExpressionView-package}
\title{The ExpressionView package}
\description{A package designed to interactively explore biclusters
identified in gene expression data.
}
\section{Introduction}{
Clustering genes according to their expression profiles is an important
task in analyzing microarray data. ExpressionView allows the user to
explore the biclusters together with the underlying data in an
interactive environment. The applet requires a browser with Adobe Flash
player 10 installed.
The ExpressionView package can treat gene expression data in the form of
a Bioconductor \code{\link[Biobase]{ExpressionSet}}. It recognizes
biclusters identified by the Iterative Signature Algorithm (ISA)
implemented in the \code{\link[eisa]{ISA}} package as well as the
methods available in the \code{\link[biclust]{biclust}} package.
}
\section{Workflow}{
The usual workflow consists of three steps:
\describe{
\item{Order}{
To arrange the possibly overlapping biclusters in a visually
appealing layout, the gene expression data has to be reordered in such
a way that individual biclusters form contiguous rectangles. The
optimal order is found by the \code{\link{OrderEV}} function.}
\item{Export}{In a second step, the biclusters and all the relevant
biological information are combined and exported to an ExpressionView
XML file. This is done by the \code{\link{ExportEV}} function.}
\item{Visualize}{
The Flash applet is started by the \code{\link{LaunchEV}}
command. Video tutorials describing the various features of the
applet can be found on the ExpressionView website
(\url{http://www.unil.ch/cbg/ExpressionView}).
}
}
A tutorial can be found in the accompanying vignette of the package.
}
\section{Biclustering with non-gene expression data}{
Both the \code{\link[eisa]{ISA}} and the biclustering methods
implemented in the \code{\link[biclust]{biclust}} package can treat any
two-dimensional data, i.e., not necessarily originating from gene
expression profiling. While the ExpressionView package is optimized for
gene expression matrices, it is also possible to explore data stemming
from other sources. For more information, see the description of the
\code{\link{ExportEV}} function.
}
\author{Andreas \enc{Lüscher}{Luscher}
\email{andreas.luescher@a3.epfl.ch}}
\seealso{\code{\link{OrderEV}}, \code{\link{ExportEV}} and
\code{\link{LaunchEV}}}
\keyword{cluster} | /RExpressionView/man/ExpressionView-package.Rd | no_license | gaborcsardi/ISA | R | false | false | 2,558 | rd | \docType{package}
\encoding{UTF-8}
\name{ExpressionView-package}
\alias{ExpressionView-package}
\title{The ExpressionView package}
\description{A package designed to interactively explore biclusters
identified in gene expression data.
}
\section{Introduction}{
Clustering genes according to their expression profiles is an important
task in analyzing microarray data. ExpressionView allows the user to
explore the biclusters together with the underlying data in an
interactive environment. The applet requires a browser with Adobe Flash
player 10 installed.
The ExpressionView package can treat gene expression data in the form of
a Bioconductor \code{\link[Biobase]{ExpressionSet}}. It recognizes
biclusters identified by the Iterative Signature Algorithm (ISA)
implemented in the \code{\link[eisa]{ISA}} package as well as the
methods available in the \code{\link[biclust]{biclust}} package.
}
\section{Workflow}{
The usual workflow consists of three steps:
\describe{
\item{Order}{
To arrange the possibly overlapping biclusters in a visually
appealing layout, the gene expression data has to be reordered in such
a way that individual biclusters form contiguous rectangles. The
optimal order is found by the \code{\link{OrderEV}} function.}
\item{Export}{In a second step, the biclusters and all the relevant
biological information are combined and exported to an ExpressionView
XML file. This is done by the \code{\link{ExportEV}} function.}
\item{Visualize}{
The Flash applet is started by the \code{\link{LaunchEV}}
command. Video tutorials describing the various features of the
applet can be found on the ExpressionView website
(\url{http://www.unil.ch/cbg/ExpressionView}).
}
}
A tutorial can be found in the accompanying vignette of the package.
}
\section{Biclustering with non-gene expression data}{
Both the \code{\link[eisa]{ISA}} and the biclustering methods
implemented in the \code{\link[biclust]{biclust}} package can treat any
two-dimensional data, i.e., not necessarily originating from gene
expression profiling. While the ExpressionView package is optimized for
gene expression matrices, it is also possible to explore data stemming
from other sources. For more information, see the description of the
\code{\link{ExportEV}} function.
}
\author{Andreas \enc{Lüscher}{Luscher}
\email{andreas.luescher@a3.epfl.ch}}
\seealso{\code{\link{OrderEV}}, \code{\link{ExportEV}} and
\code{\link{LaunchEV}}}
\keyword{cluster} |
# Fit a cross-validated elastic net (alpha = 0.03) on the pleura training
# set and append the fitted model summary to pleura_013.txt.
library(glmnet)
# Read the training set; column 1 is the response, columns 4+ are the
# predictors. 'header = TRUE' spelled out (was 'head=T': partial argument
# matching plus the reassignable T shortcut).
mydata <- read.table("../../../../TrainingSet/FullSet/Lasso/pleura.csv",
                     header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
# Fix the CV fold assignment for reproducibility.
set.seed(123)
# 'fit' avoids shadowing stats::glm().
fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.03,
                 family = "gaussian", standardize = TRUE)
# Append the regularization path summary to the output file.
sink('./pleura_013.txt', append = TRUE)
print(fit$glmnet.fit)
sink()
| /Model/EN/Lasso/pleura/pleura_013.R | no_license | esbgkannan/QSMART | R | false | false | 344 | r | library(glmnet)
# Read the training set; column 1 is the response, columns 4+ are the
# predictors. 'header = TRUE' spelled out (was 'head=T': partial argument
# matching plus the reassignable T shortcut).
mydata <- read.table("../../../../TrainingSet/FullSet/Lasso/pleura.csv",
                     header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
# Fix the CV fold assignment for reproducibility.
set.seed(123)
# 'fit' avoids shadowing stats::glm().
fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.03,
                 family = "gaussian", standardize = TRUE)
# Append the regularization path summary to the output file.
sink('./pleura_013.txt', append = TRUE)
print(fit$glmnet.fit)
sink()
|
\name{Cuckoo}
\alias{Cuckoo}
\docType{data}
\title{Measurements of Cuckoo Eggs}
\description{
Lengths of cuckoo eggs laid in other birds' nests
}
\format{
A data frame with 120 observations on the following 2 variables.
\tabular{rl}{
\code{Bird} \tab {Type of bird nest: \code{mdw_pipit} (meadow pipit), \code{tree_pipit},}\cr
\tab {\code{hedge_sparrow}, \code{robin}, \code{wagtail}, or \code{wren}}\cr
\code{Length} \tab {Cuckoo egg length (in mm)}\cr
}
}
\details{
Cuckoos are known to lay their eggs in the nests of other (host) birds.
The eggs are then adopted and hatched by the host birds.
The data give the lengths of cuckoo eggs found in nests of various other bird species.
}
\source{
Downloaded from DASL at http://lib.stat.cmu.edu/DASL/Datafiles/cuckoodat.html
}
\references{
"The Egg of Cuculus Canorus. An Enquiry into the Dimensions of the Cuckoo's Egg and the Relation of the
Variations to the Size of the Eggs of the Foster-Parent, with Notes on Coloration", by Oswald H. Latter, Biometrika,
Vol. 1, No. 2 (Jan., 1902), pp. 164-176.
}
\keyword{datasets}
| /man/Cuckoo.Rd | no_license | cran/Stat2Data | R | false | false | 1,120 | rd | \name{Cuckoo}
\alias{Cuckoo}
\docType{data}
\title{Measurements of Cuckoo Eggs}
\description{
Lengths of cuckoo eggs laid in other birds' nests
}
\format{
A data frame with 120 observations on the following 2 variables.
\tabular{rl}{
\code{Bird} \tab {Type of bird nest: \code{mdw_pipit} (meadow pipit), \code{tree_pipit},}\cr
\tab {\code{hedge_sparrow}, \code{robin}, \code{wagtail}, or \code{wren}}\cr
\code{Length} \tab {Cuckoo egg length (in mm)}\cr
}
}
\details{
Cuckoos are known to lay their eggs in the nests of other (host) birds.
The eggs are then adopted and hatched by the host birds.
The data give the lengths of cuckoo eggs found in nests of various other bird species.
}
\source{
Downloaded from DASL at http://lib.stat.cmu.edu/DASL/Datafiles/cuckoodat.html
}
\references{
"The Egg of Cuculus Canorus. An Enquiry into the Dimensions of the Cuckoo's Egg and the Relation of the
Variations to the Size of the Eggs of the Foster-Parent, with Notes on Coloration", by Oswald H. Latter, Biometrika,
Vol. 1, No. 2 (Jan., 1902), pp. 164-176.
}
\keyword{datasets}
|
# File name to submit by e-mail:
# firstname_lastname_inferencia_professor.R
# e.g.: juan_perez_estadistica_mayer.R
# Remember to run the library() line below before the rest of the work.
library(readxl)
# 1. Use read_excel() to load the data stored in the Excel file datos_ie.xlsx.
misdatos <- read_excel("datos_ie.xlsx")
# 2. For the variable "velocidad_viento" (wind speed), one of the columns
# of the data just loaded, compute the following with quantile():
Vviento <- misdatos$velocidad_viento
# Full quartile summary, for reference.
quantile(Vviento)
# 2.1 the 0.025 quantile
quantile(Vviento, 0.025)
# 2.2 the median (the 0.5 quantile) -- fixed: the original answered this
# question with mean(), which is not the median.
median(Vviento)
# 2.3 the 0.975 quantile
quantile(Vviento, 0.975)
# Note: quantile() computes quantiles of a vector; e.g. for the 0.37
# quantile of x write quantile(x, 0.37).
# 3. For the variable "temperatura" compute the following:
temperatura <- misdatos$temperatura
# 3.1 the mean
mean(temperatura)
# 3.2 the standard deviation
sd(temperatura)
# 3.3 the number of observations (n)
n <- length(temperatura)
n
# 3.4 the standard deviation divided by sqrt(n) (the standard error) --
# computed from the data instead of the hard-coded 17.88357/sqrt(2612),
# so the answer stays correct if the data change.
sd(temperatura) / sqrt(n)
# 4. Plot histograms of these four variables: temperatura, humedad,
# punto_rocio and velocidad_viento.
hist(temperatura)
hist(misdatos$humedad)
hist(misdatos$punto_rocio)
hist(Vviento)
# 5. Use pnorm() to find the probability that a standard normal variable
# lies between -1 and 1.2, i.e. P(-1 <= Z <= 1.2).
# pnorm(q) gives P(Z <= q), so the interval probability is a difference --
# fixed: pnorm(-1, 1.2) computed P(Z <= -1) for a normal with mean 1.2.
pnorm(1.2) - pnorm(-1)
| /2019_2/sol1/sol1_inferencia_R_mayer/javiera_hernandez_inferencia_mayer.R | no_license | ricardomayerb/ico8306 | R | false | false | 1,802 | r | # nombre del archivo a mandar por correo:
# nombre_apellido_inferencia_profesor.R
# ejemplo:
# juan_perez_estadistica_mayer.R
# No olvide ejecutar las líneas 6 y 11 antes de empezar el resto del trabajo
library(readxl)
# 1. Use la función read_excel para cargar los datos que se encuentran en el archivo excel datos_ie.xlsx
misdatos <- read_excel("datos_ie.xlsx")
# 2. Para la variable "velocidad_viento", que es una de las columnas de los datos
# que acaba de cargar, encuentre lo siguiente usando la función quantile:
# 2.1 el cuantil 0.025 (2 pto
Vviento <-misdatos$velocidad_viento
quantile(Vviento)
quantile(Vviento, 0.025)
# 2.2 la mediana (2 pto)
mean(Vviento)
# 2.3 el cuantil 0.975 (2 pto)
quantile(Vviento, 0.975)
# Nota la función quantile es la que calcula cuantiles de un vector. Por ejemplo si el vector se llama x y
# queremos calcular el cuantil 37 escribiremos en R quantile(x, 0.37)
# 3. Para la variable "temperatura" compute lo siguiente}
temperatura <- misdatos$temperatura
# 3.1 la media (2 pto)
mean(temperatura)
# 3.2 la desviación estandar (2 pto)}
sd(temperatura)
# 3.3 la cantidad de observaciones (n) (2 pto)
length(misdatos$temperatura)
# 3.4 la desviación estandar, dividida por la raíz de n (2 pto)
17.88357/sqrt(2612)
# 4. Grafique los histogramas de estas cuatro variables: temperatura, humedad,
# punto_rocio y velocidad_viento. Puede usar cualquier función y paquete de R (4 pts)
hist(temperatura)
hist(misdatos$humedad)
hist(misdatos$punto_rocio)
hist(Vviento)
# 5. Use el comando pnorm, para encontrar la probabilidad de que una variable aleatoria normal
# estándard esté entre -1 y 1.2, es decir, encontrar P(-1 <= z <= 1.2)
# Nota, el comando pnorm asume por defecto que nuestra normal es estándar. (2 pto)
pnorm(-1,1.2)
|
context("Create tmpdir")
## Behavioural checks for create_appdir(): it returns a path (character)
## for valid inputs, errors for an unknown package name, and warns -- but
## still returns a path -- when app_folder is NULL.
## NOTE(review): expect_is() is deprecated in testthat 3e; consider
## expect_type()/expect_s3_class() when upgrading.
test_that("The temporary directory can be created", {
  expect_is(create_appdir(), "character")
  expect_error(suppressWarnings(create_appdir(package = "SKDEr")))
  expect_is(create_appdir(app_data = mtcars), "character")
  expect_warning(create_appdir(app_folder = NULL))
  expect_is(suppressWarnings(create_appdir(app_folder = NULL)), "character")
})
| /tests/testthat/test_create_appdir.R | permissive | SKDE-Felles/SKDEr | R | false | false | 379 | r | context("Create tmpdir")
## Behavioural checks for create_appdir(): it returns a path (character)
## for valid inputs, errors for an unknown package name, and warns -- but
## still returns a path -- when app_folder is NULL.
## NOTE(review): expect_is() is deprecated in testthat 3e; consider
## expect_type()/expect_s3_class() when upgrading.
test_that("The temporary directory can be created", {
  expect_is(create_appdir(), "character")
  expect_error(suppressWarnings(create_appdir(package = "SKDEr")))
  expect_is(create_appdir(app_data = mtcars), "character")
  expect_warning(create_appdir(app_folder = NULL))
  expect_is(suppressWarnings(create_appdir(app_folder = NULL)), "character")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process.folder.R
\name{process.folder}
\alias{process.folder}
\title{Process a folder containing accelerometer and GPS data}
\usage{
process.folder(folder_location)
}
\arguments{
\item{folder_location}{the location of the folders; the rest of the inputs have to be edited in the input_options.csv in that folder}
}
\description{
writes a folder worth of .csv files of processed data
}
\details{
Currently only tested with Qstarz GPS device files. If you need other types contact the author
, they can be included with ease
}
| /man/process.folder.Rd | no_license | pydemull/modeid | R | false | true | 603 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process.folder.R
\name{process.folder}
\alias{process.folder}
\title{Process a folder containing accelerometer and GPS data}
\usage{
process.folder(folder_location)
}
\arguments{
\item{folder_location}{the location of the folders; the rest of the inputs have to be edited in the input_options.csv in that folder}
}
\description{
writes a folder worth of .csv files of processed data
}
\details{
Currently only tested with Qstarz GPS device files. If you need other types contact the author
, they can be included with ease
}
|
#' @title Regression Kernlab Support Vector Machine
#' @author mboecker
#' @name mlr_learners_regr.ksvm
#'
#' @template class_learner
#' @templateVar id regr.ksvm
#' @templateVar caller ksvm
#'
#' @export
#' @template seealso_learner
#' @template example
LearnerRegrKSVM = R6Class("LearnerRegrKSVM",
  inherit = LearnerRegr,
  public = list(
    #' @description
    #' Creates a new instance of this [R6][R6::R6Class] class.
    initialize = function() {
      # Hyperparameter set mirroring the arguments of kernlab::ksvm().
      ps = ps(
        scaled = p_lgl(default = TRUE, tags = "train"),
        type = p_fct(default = "eps-svr",
          levels = c("eps-svr", "nu-svr", "eps-bsvr"), tags = "train"),
        kernel = p_fct(default = "rbfdot",
          levels = c(
            "rbfdot", "polydot", "vanilladot",
            "laplacedot", "besseldot", "anovadot"),
          tags = "train"),
        C = p_dbl(default = 1, tags = "train"),
        nu = p_dbl(default = 0.2, lower = 0, tags = "train"),
        epsilon = p_dbl(default = 0.1, tags = "train"),
        cache = p_int(default = 40, lower = 1L, tags = "train"),
        tol = p_dbl(default = 0.001, lower = 0, tags = "train"),
        shrinking = p_lgl(default = TRUE, tags = "train"),
        sigma = p_dbl(default = NO_DEF, lower = 0, tags = "train"),
        degree = p_int(default = NO_DEF, lower = 1L,
          tags = "train"),
        scale = p_dbl(default = NO_DEF, lower = 0, tags = "train"),
        order = p_int(default = NO_DEF, tags = "train"),
        offset = p_dbl(default = NO_DEF, tags = "train")
      )
      # Parameter dependencies: C/nu/epsilon only apply to the matching
      # SVM types; sigma/degree/scale/order/offset only apply to the
      # kernels that use them.
      ps$add_dep("C", "type", CondAnyOf$new(c("eps-svr", "eps-bsvr", "nu-svr")))
      ps$add_dep("nu", "type", CondAnyOf$new(c("nu-svr")))
      ps$add_dep(
        "epsilon", "type",
        CondAnyOf$new(c("eps-svr", "nu-svr", "eps-bsvr")))
      ps$add_dep(
        "sigma", "kernel",
        CondAnyOf$new(c("rbfdot", "laplacedot", "besseldot", "anovadot")))
      ps$add_dep(
        "degree", "kernel",
        CondAnyOf$new(c("polydot", "besseldot", "anovadot")))
      ps$add_dep("scale", "kernel", CondAnyOf$new(c("polydot")))
      ps$add_dep("order", "kernel", CondAnyOf$new(c("besseldot")))
      ps$add_dep("offset", "kernel", CondAnyOf$new(c("polydot")))
      # Register the learner's capabilities with the mlr3 framework.
      super$initialize(
        id = "regr.ksvm",
        packages = "kernlab",
        feature_types = c(
          "logical", "integer", "numeric",
          "character", "factor", "ordered"),
        predict_types = "response",
        param_set = ps,
        properties = "weights",
        man = "mlr3extralearners::mlr_learners_regr.ksvm"
      )
    }),
  private = list(
    # Fit kernlab::ksvm() on the task's data via its formula interface.
    .train = function(task) {
      pars = self$param_set$get_values(tags = "train")
      # Kernel-specific parameters must be passed to ksvm() bundled in a
      # separate 'kpar' list, not as top-level arguments.
      kpar = intersect(
        c("sigma", "degree", "scale", "order", "offset"),
        names(pars))
      if ("weights" %in% task$properties) {
        # NOTE(review): 'class.weights' is a classification argument of
        # kernlab::ksvm(); confirm it has any effect for regression types.
        pars$class.weights = task$weights$weight
      }
      if (length(kpar) > 0) {
        pars$kpar = pars[kpar]
        pars[kpar] = NULL
      }
      f = task$formula()
      data = task$data()
      invoke(kernlab::ksvm, x = f, data = data, .args = pars)
    },
    # Predict responses for the feature columns of a new task.
    .predict = function(task) {
      newdata = task$data(cols = task$feature_names)
      p = invoke(kernlab::predict, self$model,
        newdata = newdata,
        type = "response")
      list(response = p)
    }
  )
)
.extralrns_dict$add("regr.ksvm", LearnerRegrKSVM)
| /R/learner_kernlab_regr_ksvm.R | no_license | A-Pai/mlr3extralearners | R | false | false | 3,361 | r | #' @title Regression Kernlab Support Vector Machine
#' @author mboecker
#' @name mlr_learners_regr.ksvm
#'
#' @template class_learner
#' @templateVar id regr.ksvm
#' @templateVar caller ksvm
#'
#' @export
#' @template seealso_learner
#' @template example
LearnerRegrKSVM = R6Class("LearnerRegrKSVM",
  inherit = LearnerRegr,
  public = list(
    #' @description
    #' Creates a new instance of this [R6][R6::R6Class] class.
    initialize = function() {
      # Hyperparameter set mirroring the arguments of kernlab::ksvm().
      ps = ps(
        scaled = p_lgl(default = TRUE, tags = "train"),
        type = p_fct(default = "eps-svr",
          levels = c("eps-svr", "nu-svr", "eps-bsvr"), tags = "train"),
        kernel = p_fct(default = "rbfdot",
          levels = c(
            "rbfdot", "polydot", "vanilladot",
            "laplacedot", "besseldot", "anovadot"),
          tags = "train"),
        C = p_dbl(default = 1, tags = "train"),
        nu = p_dbl(default = 0.2, lower = 0, tags = "train"),
        epsilon = p_dbl(default = 0.1, tags = "train"),
        cache = p_int(default = 40, lower = 1L, tags = "train"),
        tol = p_dbl(default = 0.001, lower = 0, tags = "train"),
        shrinking = p_lgl(default = TRUE, tags = "train"),
        sigma = p_dbl(default = NO_DEF, lower = 0, tags = "train"),
        degree = p_int(default = NO_DEF, lower = 1L,
          tags = "train"),
        scale = p_dbl(default = NO_DEF, lower = 0, tags = "train"),
        order = p_int(default = NO_DEF, tags = "train"),
        offset = p_dbl(default = NO_DEF, tags = "train")
      )
      # Parameter dependencies: C/nu/epsilon only apply to the matching
      # SVM types; sigma/degree/scale/order/offset only apply to the
      # kernels that use them.
      ps$add_dep("C", "type", CondAnyOf$new(c("eps-svr", "eps-bsvr", "nu-svr")))
      ps$add_dep("nu", "type", CondAnyOf$new(c("nu-svr")))
      ps$add_dep(
        "epsilon", "type",
        CondAnyOf$new(c("eps-svr", "nu-svr", "eps-bsvr")))
      ps$add_dep(
        "sigma", "kernel",
        CondAnyOf$new(c("rbfdot", "laplacedot", "besseldot", "anovadot")))
      ps$add_dep(
        "degree", "kernel",
        CondAnyOf$new(c("polydot", "besseldot", "anovadot")))
      ps$add_dep("scale", "kernel", CondAnyOf$new(c("polydot")))
      ps$add_dep("order", "kernel", CondAnyOf$new(c("besseldot")))
      ps$add_dep("offset", "kernel", CondAnyOf$new(c("polydot")))
      # Register the learner's capabilities with the mlr3 framework.
      super$initialize(
        id = "regr.ksvm",
        packages = "kernlab",
        feature_types = c(
          "logical", "integer", "numeric",
          "character", "factor", "ordered"),
        predict_types = "response",
        param_set = ps,
        properties = "weights",
        man = "mlr3extralearners::mlr_learners_regr.ksvm"
      )
    }),
  private = list(
    # Fit kernlab::ksvm() on the task's data via its formula interface.
    .train = function(task) {
      pars = self$param_set$get_values(tags = "train")
      # Kernel-specific parameters must be passed to ksvm() bundled in a
      # separate 'kpar' list, not as top-level arguments.
      kpar = intersect(
        c("sigma", "degree", "scale", "order", "offset"),
        names(pars))
      if ("weights" %in% task$properties) {
        # NOTE(review): 'class.weights' is a classification argument of
        # kernlab::ksvm(); confirm it has any effect for regression types.
        pars$class.weights = task$weights$weight
      }
      if (length(kpar) > 0) {
        pars$kpar = pars[kpar]
        pars[kpar] = NULL
      }
      f = task$formula()
      data = task$data()
      invoke(kernlab::ksvm, x = f, data = data, .args = pars)
    },
    # Predict responses for the feature columns of a new task.
    .predict = function(task) {
      newdata = task$data(cols = task$feature_names)
      p = invoke(kernlab::predict, self$model,
        newdata = newdata,
        type = "response")
      list(response = p)
    }
  )
)
.extralrns_dict$add("regr.ksvm", LearnerRegrKSVM)
|
# Plant size data: end-of-season leaf length per individual.
# NOTE(review): this script assumes dplyr, tidyr, ggplot2, cowplot and the
# SMDiff/SMDiffAdapt tables are already loaded/defined by an earlier
# script -- confirm before running stand-alone.
size <- read.table("size.csv", header = TRUE, sep = ";")
head(size)
# Keep only Ranunculus (sp == "RAN"), drop design columns not used here,
# and spell out the treatment codes.
size <- size %>%
  filter(sp == "RAN") %>%
  select(-Torig, -Porig, -Tdest, -Pdest, -DOY, -blk) %>%
  mutate(trt = plyr::mapvalues(trt, c("c", "wa", "we", "ww"), c("Control", "Warmer", "LaterSM", "WarmLate")))
# Quick look: end-of-season leaf size per origin site and treatment.
# NOTE(review): the title "First flowering" looks copied from a phenology
# plot; "Leaf size" seems intended -- confirm.
size %>%
  ggplot() +
  ggtitle("First flowering") +
  geom_boxplot(aes(x= orig, y = size_end, fill=trt)) +
  scale_fill_manual(values=c("white", "red", "blue", "purple"))
# Per-treatment/site/origin summary of end-of-season leaf size:
# sample size N, mean, and standard error.
SizeMeanSE <- size %>%
  filter(orig != "VES" | trt != "Control") %>% # remove Control at Veskre
  group_by(trt, site, orig) %>%
  summarise(N = sum(!is.na(size_end)), mean = mean(size_end, na.rm = TRUE), se = sd(size_end, na.rm = TRUE)/sqrt(N))
# only for checking results (fixed: the original referenced 'MeanSE', an
# object this script never creates, which would error at runtime)
SizeMeanSE %>% filter(trt %in% c("Control", "Warmer")) %>%
  ggplot(aes(x = trt, y = mean, color = orig, group = orig)) +
  geom_point() +
  geom_line()
SizeMeanSE %>%
  ggplot(aes(x = site, y = mean, color = trt)) +
  geom_point() +
  facet_grid(~ orig)
# Standard error of the treatment-minus-control difference per origin:
# spread se by treatment, combine the control and treatment se via error
# propagation sqrt(se_c^2 + se_t^2), then gather back to long format.
# NOTE(review): spread()/gather() are superseded by pivot_wider()/
# pivot_longer() in current tidyr -- consider updating.
# Calculate difference between Control and Treatment for SE
SizeSEData <- SizeMeanSE %>%
  ungroup() %>%
  select(-mean, -N, -site) %>% # remove site, because it causes problems
  spread(key = trt, value = se) %>% # spread Treatments
  mutate(Warmer = sqrt(Control^2 + Warmer^2), LaterSM = sqrt(Control^2 + LaterSM^2), WarmLate = sqrt(Control^2 + WarmLate^2)) %>%
  select(-Control) %>%
  gather(key = Treatment, value = SE, -orig) %>% # gather Treatments
  filter(!is.na(SE)) %>% # remove e.g. Warmer in RAM, no such treatment
  select(SE, orig, Treatment)
# Treatment-minus-control mean effect per origin, joined with the SEs
# above, readable site labels, and the snowmelt-timing differences
# (SMDiff, defined elsewhere -- TODO confirm it is in scope).
# Calculate difference between Control and Treatment for Mean
SizeMeanData <- SizeMeanSE %>%
  ungroup() %>%
  select(-se, -N, -site) %>% # remove site, because it causes problems
  spread(key = trt, value = mean) %>% # spread Treatments
  mutate(Warmer = Warmer - Control, LaterSM = LaterSM - Control, WarmLate = WarmLate - Control) %>% # Difference Treatment - Control
  #mutate(Warmer.prop = Warmer * 100 / Control, LaterSM.prop = LaterSM * 100 / Control, WarmLate.prop = WarmLate * 100 / Control) %>% # Difference Treatment - Control
  select(-Control) %>%
  gather(key = Treatment, value = Effect, -orig) %>% # gather Treatments
  filter(!is.na(Effect)) %>% # remove e.g. Warmer in RAM, no such treatment
  left_join(SizeSEData, by = c("orig" = "orig", "Treatment" = "Treatment")) %>%
  left_join(SizeMeanSE, by = c("orig" = "orig", "Treatment" = "trt")) %>%
  select(-mean, -se) %>%
  mutate(orig = plyr::mapvalues(orig, c("GUD", "RAM", "SKJ"), c("Alpine-early", "Subalpine-early", "Alpine-late"))) %>%
  mutate(pheno.stage = "Leaf length") %>%
  mutate(pheno.unit = "cm") %>%
  rename(mean = Effect, se = SE) %>%
  mutate(newname = NA) %>%
  left_join(SMDiff, by = c("orig", "Treatment", "site")) %>%
  select(orig, pheno.stage, pheno.unit, Treatment, mean, se, newname, N, site, sm, smOrig, smDiff)
# Plasticity panel: treatment effect on leaf size (+/- SE) against the
# snowmelt-timing difference between origin and destination site.
LeafSizePlot <- SizeMeanData %>%
  mutate(Treatment = factor(Treatment, levels = c("Warmer", "LaterSM", "WarmLate"))) %>%
  ggplot(aes(x = smDiff, y = mean, color = Treatment, shape = Treatment, ymax = mean + se, ymin = mean - se)) +
  geom_hline(yintercept=0, color = "gray", linetype = "dashed") +
  geom_point(size = 3) +
  labs(y = "Difference in leaf size [cm] between\n treatment and origin-control", x = "Difference in SMT between\n origin and destination site [days]", title = "Phenotypic plasticity: origin-control") +
  scale_colour_manual(name = "Treatment", values = c("red", "blue", "purple")) +
  scale_shape_manual(name = "Treatment:", values = c(17,16,15)) +
  ylim(-2.5, 3) +
  geom_errorbar(width=0.2) +
  panel_border(colour = "black", remove = FALSE) +
  annotate(geom = "text", x = 32, y = 3, label = "larger", color = "grey20") +
  annotate(geom = "text", x = 32, y = -2.5, label = "smaller", color = "grey20") +
  theme(legend.position="none")
########################################################################
#### ADAPTATION #####
########################################################################
# Same pipeline as the plasticity section, but comparing each treatment
# against the control at the DESTINATION site (genetic differentiation).
# NOTE(review): here 'se' is 2*sd/sqrt(N) (~a 95% CI half-width), whereas
# the plasticity section used sd/sqrt(N); the two panels are plotted on
# the same scale -- confirm which is intended.
MeanSEAdapt <- size %>%
  filter(orig != "GUD" | trt != "Control") %>% # remove Control at Gudmedalen
  group_by(trt, site, orig) %>%
  summarise(N = sum(!is.na(size_end)), mean = mean(size_end, na.rm = TRUE), se = 2*sd(size_end, na.rm = TRUE)/sqrt(N))
# Calculate difference between Control and Treatment for SE
SEAdaptData <- MeanSEAdapt %>%
  ungroup() %>%
  select(-mean, -N, -orig) %>% # remove origin, because it causes problems
  spread(key = trt, value = se) %>% # spread Treatments
  mutate(Warmer = sqrt(Control^2 + Warmer^2), LaterSM = sqrt(Control^2 + LaterSM^2), WarmLate = sqrt(Control^2 + WarmLate^2)) %>%
  select(-Control) %>%
  gather(key = Treatment, value = SE, -site) %>% # gather Treatments
  filter(!is.na(SE)) %>% # remove e.g. Warmer in RAM, no such treatment
  select(SE, site, Treatment)
# Calculate difference between Control and Treatment for Mean
# (SMDiffAdapt is defined elsewhere -- TODO confirm it is in scope)
MeanDataAdapt <- MeanSEAdapt %>%
  ungroup() %>%
  select(-se, -N, -orig) %>% # remove orig, because it causes problems
  spread(key = trt, value = mean) %>% # spread Treatments
  mutate(Warmer = Warmer - Control, LaterSM = LaterSM - Control, WarmLate = WarmLate - Control) %>% # Difference Treatment - Control
  select(-Control) %>%
  gather(key = Treatment, value = Effect, -site) %>% # gather Treatments
  filter(!is.na(Effect)) %>% # remove e.g. Warmer in RAM, no such treatment
  left_join(SEAdaptData, by = c("site" = "site", "Treatment" = "Treatment")) %>% # join SE
  left_join(MeanSEAdapt, by = c("site" = "site", "Treatment" = "trt")) %>%
  select(-mean, -se) %>%
  mutate(site = plyr::mapvalues(site, c("RAM", "VES", "SKJ"), c("Subalpine-early", "Subalpine-late", "Alpine-late"))) %>%
  mutate(pheno.stage = "Leaf length") %>%
  mutate(pheno.unit = "cm") %>%
  rename(mean = Effect, se = SE) %>%
  mutate(newname = NA) %>%
  left_join(SMDiffAdapt, by = c("orig", "Treatment", "site")) %>%
  select(orig, pheno.stage, pheno.unit, Treatment, mean, se, newname, N, site, sm, smOrig, smDiff) %>%
  mutate(Treatment = factor(Treatment, levels = c("Warmer", "LaterSM", "WarmLate")))
# Adaptation panel (open symbols mirror the plasticity panel's shapes).
# NOTE(review): 'Pvalue' is created below but never mapped in the plot --
# possibly leftover from a significance-alpha mapping.
LeafSizePlotAdapt <- MeanDataAdapt %>%
  mutate(Pvalue = factor(1)) %>%
  ggplot(aes(x = smDiff, y = mean, color = Treatment, shape = Treatment, ymax = mean + se, ymin = mean - se)) +
  geom_hline(yintercept=0, color = "gray", linetype = "dashed") +
  geom_point(size = 3) +
  labs(y = "Difference in leaf size [cm] between\n treatment and destination-control", x = "Difference in SMT between\n origin and destination site [days]", title = "Genetic differentiation: destination-control") +
  scale_colour_manual(values = c("red", "blue", "purple")) +
  scale_shape_manual(name = "Treatment:", values = c(2,1,0)) +
  #scale_alpha_manual(values = c(0, 1)) +
  ylim(-2.5, 3) +
  geom_errorbar(width=0.2) +
  panel_border(colour = "black", remove = FALSE) +
  annotate(geom = "text", x = 32, y = 3, label = "larger", color = "grey20") +
  annotate(geom = "text", x = 32, y = -2.5, label = "smaller", color = "grey20") +
  theme(legend.position="none")
# Combine the two panels side by side and write to PDF.
LeafSizes <- plot_grid(LeafSizePlot, LeafSizePlotAdapt, nrow = 1, align = "h")
ggsave(LeafSizes, filename = "LeafSizes.pdf", height = 6)
| /Old code/LeafSize.R | no_license | audhalbritter/PollenLimitation | R | false | false | 7,210 | r | # plant size data
size <- read.table("size.csv", header = TRUE, sep = ";")
head(size)
size <- size %>%
filter(sp == "RAN") %>%
select(-Torig, -Porig, -Tdest, -Pdest, -DOY, -blk) %>%
mutate(trt = plyr::mapvalues(trt, c("c", "wa", "we", "ww"), c("Control", "Warmer", "LaterSM", "WarmLate")))
size %>%
ggplot() +
ggtitle("First flowering") +
geom_boxplot(aes(x= orig, y = size_end, fill=trt)) +
scale_fill_manual(values=c("white", "red", "blue", "purple"))
SizeMeanSE <- size %>%
filter(orig != "VES" | trt != "Control") %>% # remove Control at Veskre
group_by(trt, site, orig) %>%
summarise(N = sum(!is.na(size_end)), mean = mean(size_end, na.rm = TRUE), se = sd(size_end, na.rm = TRUE)/sqrt(N))
# only for checking results
MeanSE %>% filter(trt %in% c("Control", "Warmer")) %>%
ggplot(aes(x = trt, y = mean, color = orig, group = orig)) +
geom_point() +
geom_line()
SizeMeanSE %>%
ggplot(aes(x = site, y = mean, color = trt)) +
geom_point() +
facet_grid(~ orig)
# Calculate difference between Control and Treatment for SE
# SEs of the (treatment - control) differences are combined in quadrature:
# se_diff = sqrt(se_control^2 + se_treatment^2).
SizeSEData <- SizeMeanSE %>%
ungroup() %>%
select(-mean, -N, -site) %>% # remove site, because it causes problems
spread(key = trt, value = se) %>% # spread Treatments
mutate(Warmer = sqrt(Control^2 + Warmer^2), LaterSM = sqrt(Control^2 + LaterSM^2), WarmLate = sqrt(Control^2 + WarmLate^2)) %>%
select(-Control) %>%
gather(key = Treatment, value = SE, -orig) %>% # gather Treatments
filter(!is.na(SE)) %>% # remove e.g. Warmer in RAM, no such treatment
select(SE, orig, Treatment)
# Calculate difference between Control and Treatment for Mean
# Effect = treatment mean - origin-control mean, then joined with the
# combined SEs above and snowmelt-timing differences.
# NOTE(review): SMDiff is not created in this file -- it must exist in the
# session from another script; confirm before running standalone.
SizeMeanData <- SizeMeanSE %>%
ungroup() %>%
select(-se, -N, -site) %>% # remove site, because it causes problems
spread(key = trt, value = mean) %>% # spread Treatments
mutate(Warmer = Warmer - Control, LaterSM = LaterSM - Control, WarmLate = WarmLate - Control) %>% # Difference Treatment - Control
#mutate(Warmer.prop = Warmer * 100 / Control, LaterSM.prop = LaterSM * 100 / Control, WarmLate.prop = WarmLate * 100 / Control) %>% # Difference Treatment - Control
select(-Control) %>%
gather(key = Treatment, value = Effect, -orig) %>% # gather Treatments
filter(!is.na(Effect)) %>% # remove e.g. Warmer in RAM, no such treatment
left_join(SizeSEData, by = c("orig" = "orig", "Treatment" = "Treatment")) %>%
left_join(SizeMeanSE, by = c("orig" = "orig", "Treatment" = "trt")) %>%
select(-mean, -se) %>%
mutate(orig = plyr::mapvalues(orig, c("GUD", "RAM", "SKJ"), c("Alpine-early", "Subalpine-early", "Alpine-late"))) %>%
mutate(pheno.stage = "Leaf length") %>%
mutate(pheno.unit = "cm") %>%
rename(mean = Effect, se = SE) %>%
mutate(newname = NA) %>%
left_join(SMDiff, by = c("orig", "Treatment", "site")) %>%
select(orig, pheno.stage, pheno.unit, Treatment, mean, se, newname, N, site, sm, smOrig, smDiff)
# Plasticity plot: effect of each treatment (vs origin-control) on leaf
# size, against the snowmelt-timing difference between sites. Error bars
# are the quadrature-combined SEs computed above.
LeafSizePlot <- SizeMeanData %>%
mutate(Treatment = factor(Treatment, levels = c("Warmer", "LaterSM", "WarmLate"))) %>%
ggplot(aes(x = smDiff, y = mean, color = Treatment, shape = Treatment, ymax = mean + se, ymin = mean - se)) +
geom_hline(yintercept=0, color = "gray", linetype = "dashed") +
geom_point(size = 3) +
labs(y = "Difference in leaf size [cm] between\n treatment and origin-control", x = "Difference in SMT between\n origin and destination site [days]", title = "Phenotypic plasticity: origin-control") +
scale_colour_manual(name = "Treatment", values = c("red", "blue", "purple")) +
scale_shape_manual(name = "Treatment:", values = c(17,16,15)) +
ylim(-2.5, 3) +
geom_errorbar(width=0.2) +
panel_border(colour = "black", remove = FALSE) + # panel_border() is from cowplot
annotate(geom = "text", x = 32, y = 3, label = "larger", color = "grey20") +
annotate(geom = "text", x = 32, y = -2.5, label = "smaller", color = "grey20") +
theme(legend.position="none")
########################################################################
#### ADAPTATION #####
########################################################################
# Same pipeline as above, but differences are taken against the
# destination-site control (genetic differentiation / adaptation).
# NOTE(review): here se = 2*sd/sqrt(N) (roughly a 95% CI half-width)
# whereas the plasticity section used 1*SE -- confirm this is deliberate.
MeanSEAdapt <- size %>%
filter(orig != "GUD" | trt != "Control") %>% # remove Control at Gudmedalen
group_by(trt, site, orig) %>%
summarise(N = sum(!is.na(size_end)), mean = mean(size_end, na.rm = TRUE), se = 2*sd(size_end, na.rm = TRUE)/sqrt(N))
# Calculate difference between Control and Treatment for SE
# Combined in quadrature per destination site.
SEAdaptData <- MeanSEAdapt %>%
ungroup() %>%
select(-mean, -N, -orig) %>% # remove origin, because it causes problems
spread(key = trt, value = se) %>% # spread Treatments
mutate(Warmer = sqrt(Control^2 + Warmer^2), LaterSM = sqrt(Control^2 + LaterSM^2), WarmLate = sqrt(Control^2 + WarmLate^2)) %>%
select(-Control) %>%
gather(key = Treatment, value = SE, -site) %>% # gather Treatments
filter(!is.na(SE)) %>% # remove e.g. Warmer in RAM, no such treatment
select(SE, site, Treatment)
# Calculate difference between Control and Treatment for Mean
# NOTE(review): SMDiffAdapt is not created in this file -- it must exist
# in the session from another script; confirm before running standalone.
MeanDataAdapt <- MeanSEAdapt %>%
ungroup() %>%
select(-se, -N, -orig) %>% # remove orig, because it causes problems
spread(key = trt, value = mean) %>% # spread Treatments
mutate(Warmer = Warmer - Control, LaterSM = LaterSM - Control, WarmLate = WarmLate - Control) %>% # Difference Treatment - Control
select(-Control) %>%
gather(key = Treatment, value = Effect, -site) %>% # gather Treatments
filter(!is.na(Effect)) %>% # remove e.g. Warmer in RAM, no such treatment
left_join(SEAdaptData, by = c("site" = "site", "Treatment" = "Treatment")) %>% # join SE
left_join(MeanSEAdapt, by = c("site" = "site", "Treatment" = "trt")) %>% # brings orig back in
select(-mean, -se) %>%
mutate(site = plyr::mapvalues(site, c("RAM", "VES", "SKJ"), c("Subalpine-early", "Subalpine-late", "Alpine-late"))) %>%
mutate(pheno.stage = "Leaf length") %>%
mutate(pheno.unit = "cm") %>%
rename(mean = Effect, se = SE) %>%
mutate(newname = NA) %>%
left_join(SMDiffAdapt, by = c("orig", "Treatment", "site")) %>%
select(orig, pheno.stage, pheno.unit, Treatment, mean, se, newname, N, site, sm, smOrig, smDiff) %>%
mutate(Treatment = factor(Treatment, levels = c("Warmer", "LaterSM", "WarmLate")))
# Adaptation plot: open symbols (shapes 2,1,0) distinguish it from the
# plasticity panel's filled symbols.
LeafSizePlotAdapt <- MeanDataAdapt %>%
mutate(Pvalue = factor(1)) %>%
ggplot(aes(x = smDiff, y = mean, color = Treatment, shape = Treatment, ymax = mean + se, ymin = mean - se)) +
geom_hline(yintercept=0, color = "gray", linetype = "dashed") +
geom_point(size = 3) +
labs(y = "Difference in leaf size [cm] between\n treatment and destination-control", x = "Difference in SMT between\n origin and destination site [days]", title = "Genetic differentiation: destination-control") +
scale_colour_manual(values = c("red", "blue", "purple")) +
scale_shape_manual(name = "Treatment:", values = c(2,1,0)) +
#scale_alpha_manual(values = c(0, 1)) +
ylim(-2.5, 3) +
geom_errorbar(width=0.2) +
panel_border(colour = "black", remove = FALSE) +
annotate(geom = "text", x = 32, y = 3, label = "larger", color = "grey20") +
annotate(geom = "text", x = 32, y = -2.5, label = "smaller", color = "grey20") +
theme(legend.position="none")
# Combine the two panels side by side and save.
LeafSizes <- plot_grid(LeafSizePlot, LeafSizePlotAdapt, nrow = 1, align = "h")
ggsave(LeafSizes, filename = "LeafSizes.pdf", height = 6)
|
###########################################################
#
# MSC 301: Introduction to Physical Oceanography
# Homework 01 Sample Code
#
# Author: mgrossi
# Date: 01 Sep 2018
#
###########################################################
# =========================================================
# QUESTION 2d:
# Carefully plot the temperature and salinity profiles from
# the accompanying data file, being sure to label all axes.
# First move into the directory (folder) in which the files
# are saved and then read in the csv file.
setwd('/path/to/wherever/you/saved/the/homework/files/') # This is the FOLDER!
dat = read.csv(file='HW01_GoM_data.csv', header=TRUE, row.names=1) # This is the FILE!
# Remember that by default read.csv() assumes that the first
# row in the file contains data. In our case, the first
# row contains column names, so we specify this by passing
# header=TRUE. Similarly, row.names=1 tells R that the
# first column are actually row names, not data.
#
# Now to make the plots!
#
# We need to tell R what to plot. Just like in Excel, where
# we would specify which column contains the x values and
# which column contains the y values, we need to do the
# same thing for the R function plot().
#
# We have saved our data to a variable we called 'dat'.
# Recall that there are two ways to index specific columns
# in R: we can either specify row and column using
# dat[row#,column#], or we can use a column name, such as
# dat$Depth_m. (Hint: Use colnames(dat) to see what the
# column names are, if you are unsure.)
#
# To make a temperature profile, we want temperature as a
# function of depth. For visualization purposes, we
# typically plot depth on the y axis with 0 at the top.
#
# In order to compare the profiles, we need the x axis
# ranges to be the same on both temperature plots. We can
# do this either by combining the two temperature profiles
# together and then using the range() function to extract
# the joint min and max, or by indexing the two
# temperature columns and using range() to find the max
# and min. See below to see what I mean.
#
# Tip: The function png() is used to save a plot as a png
# file. I'll demonstrate it here, along with some other
# handy options, for your future use.
# Temperature vs. Depth for profile 1
png(filename='P1_TvsD.png') # save plot to file (optional)
plot(x=dat$Prof1_Temp_degC, y=dat$Depth_m, # x, y values
xlim=range(dat[,c(2,4)]), # set x axis range (see comments above)
ylim=rev(range(dat$Depth_m)), # set y axis range to have 0 at the top
type='l', col='blue', lwd=3, # plot as a blue line of width 3
main='Profile 1: Temperature vs. Depth', # add a title
xlab='Temp (deg C)', ylab='Depth (m)') # set x, y axis labels
dev.off() # write plot to file (used with png())
# png() opens a file and dev.off() closes the file
# (literally, 'turn the plotting device off.')
# Everything in between actually writes the file.
# Note that the file will be saved in your current
# working directory. If you're not sure what this
# is, use getwd().
# Now for profile 2:
png(filename='P2_TvsD.png')
plot(x=dat$Prof2_Temp_degC, y=dat$Depth_m,
xlim=range(dat[,c(2,4)]),
ylim=rev(range(dat$Depth_m)),
type='l', col='blue', lwd=3,
main='Profile 2: Temperature vs. Depth',
xlab='Temp (deg C)', ylab='Depth (m)')
dev.off()
# Salinity vs. Depth
png(filename='P1_SvsD.png')
plot(x=dat$Prof1_Sal_psu, y=dat$Depth_m,
xlim=range(dat[,c(3,5)]),
ylim=rev(range(dat$Depth_m)),
type='l', lwd=3, col='blue',
main='Profile 1: Salinity vs. Depth',
xlab='Salinity (psu)', ylab='Depth (m)')
dev.off()
png(filename='P2_SvsD.png')
plot(x=dat$Prof2_Sal_psu, y=dat$Depth_m,
     xlim=range(dat[,c(3,5)]),
     ylim=rev(range(dat$Depth_m)),
     type='l', lwd=3, col='blue',
     main='Profile 2: Salinity vs. Depth',
     xlab='Salinity (psu)', ylab='Depth (m)')
# BUG FIX: the original never closed this png device, so the combined
# plots below would be silently drawn into P2_SvsD.png and the file left
# open/incomplete. Close it here.
dev.off()
# There are always many ways to do the same thing! This is
# just one way. You may discover alternative options.
# You might, for example, make the depths negative instead
# of setting the ylim to get 0m at the top of the graph.
#
# Since we're comparing two profiles, it may be helpful to
# show both temperature profiles on one plot, and
# similarly, both salinity profiles on one plot. How might
# we do that in R?
# Temperature vs. Depth
plot( x=dat$Prof1_Temp_degC, y=dat$Depth_m, # Plot one profile the same we
xlim=range(dat[,c(2,4)]), # did above. It doesn't matter
ylim=rev(range(dat$Depth_m)), # which one (profile 1 or 2)
type='l', lwd=3, col='blue', # you do first.
main='Temperature vs. Depth',
xlab='Temp (deg C)', ylab='Depth (m)')
points(y=dat$Depth_m, x=dat$Prof2_Temp_degC, # Add the second profile using points()
type='l', lwd=3, col='red') # function with the same settings.
legend('topleft', # Add a legend. Notice we used different colors
legend=c('Profile 1', 'Profile 2'), # series labels
col=c('blue', 'red'), lty=1, lwd=3) # color and line type
# Salinity vs. Depth
plot(x=dat$Prof1_Sal_psu, y=dat$Depth_m,
xlim=range(dat[,c(3,5)]),
ylim=rev(range(dat$Depth_m)),
type='l', lwd=3, col='blue',
main='Salinity vs. Depth',
xlab='Salinity (psu)', ylab='Depth (m)')
points(y=dat$Depth_m, x=dat$Prof2_Sal_psu,
type='l', lwd=3, col='red')
legend('bottomleft', legend=c('Profile 1', 'Profile 2'),
col=c('blue', 'red'), lty=1, lwd=3)
# =========================================================
# QUESTION 2e:
# Calculate the density profile at each station. Plot your
# results, being careful to label all axes.
# First load the equation of state function, and then use
# it to calculate the two density profiles.
source('EOS80.R')
# Some of you discovered another way of loading the function:
# Double-clicking the file opens it in RStudio as an R
# script, which you can then run. Both methods work. One way
# to be sure the function loaded is to look for water.density
# under "Functions" in your Global Environment in the upper
# right panel of RStudio.
#
# Remember that water.density() is a function, like plot().
# Just like a math function, for which one needs to provide
# numerical values for each variable in order to obtain the
# value of the function, we need to pass values to an R
# function in order to get anything out. water.density() has
# three variables: temp, sal, and pres. We need to supply a
# value (or a vector of values) for each of these variables
# in order to get an output.
#
# For convienence, let's define two new variables, one for
# each profile. What does the function water.density()
# produce?
dens1 = water.density(temp = dat$Prof1_Temp_degC,
sal = dat$Prof1_Sal_psu,
pres = dat$Depth_m)
dens2 = water.density(temp = dat$Prof2_Temp_degC,
sal = dat$Prof2_Sal_psu,
pres = dat$Depth_m)
# Make the plots, as above.
# Option 1: Individual plots
# Density vs. Depth
plot(x=dens1, y=dat$Depth_m,
xlim=range(c(dens1, dens2)),
ylim=rev(range(dat$Depth_m)),
type='l', lwd=3, col='blue',
main='Profile 1: Density vs. Depth',
xlab='Density (kg/m^3)', ylab='Depth (m)')
plot(x=dens2, y=dat$Depth_m,
xlim=range(c(dens1, dens2)),
ylim=rev(range(dat$Depth_m)),
type='l', lwd=3, col='blue',
main='Profile 2: Density vs. Depth',
xlab='Density (kg/m^3)', ylab='Depth (m)')
# Option 2: Both profiles on one plot
# Density vs. Depth
plot(x=dens1, y=dat$Depth_m,
xlim=range(c(dens1, dens2)),
ylim=rev(range(dat$Depth_m)),
type='l', lwd=3, col='blue',
main='Density vs. Depth',
xlab='Density (kg/m^3)', ylab='Depth (m)')
points(x=dens2, y=dat$Depth_m,
type='l', lwd=3, col='red')
legend('bottomleft', legend=c('Profile 1', 'Profile 2'),
col=c('blue', 'red'), lty=1, lwd=3)
# That's all there is to it!
#
# Take some time to play around with these samples and
# become comfortable with setting plot parameters, indexing
# data, and extracting information from the data. Remember,
# practice makes perfect!
# =========================================================
| /teaching_hw_examples/HW01_sample_code.R | no_license | mdgrossi/code-samples | R | false | false | 8,385 | r | ###########################################################
#
# MSC 301: Introduction to Physical Oceanography
# Homework 01 Sample Code
#
# Author: mgrossi
# Date: 01 Sep 2018
#
###########################################################
# =========================================================
# QUESTION 2d:
# Carefully plot the temperature and salinity profiles from
# the accompanying data file, being sure to label all axes.
# First move into the directory (folder) in which the files
# are saved and then read in the csv file.
setwd('/path/to/wherever/you/saved/the/homework/files/') # This is the FOLDER!
dat = read.csv(file='HW01_GoM_data.csv', header=TRUE, row.names=1) # This is the FILE!
# Remember that by default read.csv() assumes that the first
# row in the file contains data. In our case, the first
# row contains column names, so we specify this by passing
# header=TRUE. Similarly, row.names=1 tells R that the
# first column are actually row names, not data.
#
# Now to make the plots!
#
# We need to tell R what to plot. Just like in Excel, where
# we would specify which column contains the x values and
# which column contains the y values, we need to do the
# same thing for the R function plot().
#
# We have saved our data to a variable we called 'dat'.
# Recall that there are two ways to index specific columns
# in R: we can either specify row and column using
# dat[row#,column#], or we can use a column name, such as
# dat$Depth_m. (Hint: Use colnames(dat) to see what the
# column names are, if you are unsure.)
#
# To make a temperature profile, we want temperature as a
# function of depth. For visualization purposes, we
# typically plot depth on the y axis with 0 at the top.
#
# In order to compare the profiles, we need the x axis
# ranges to be the same on both temperature plots. We can
# do this either by combining the two temperature profiles
# together and then using the range() function to extract
# the joint min and max, or by indexing the two
# temperature columns and using range() to find the max
# and min. See below to see what I mean.
#
# Tip: The function png() is used to save a plot as a png
# file. I'll demonstrate it here, along with some other
# handy options, for your future use.
# Temperature vs. Depth for profile 1
png(filename='P1_TvsD.png') # save plot to file (optional)
plot(x=dat$Prof1_Temp_degC, y=dat$Depth_m, # x, y values
xlim=range(dat[,c(2,4)]), # set x axis range (see comments above)
ylim=rev(range(dat$Depth_m)), # set y axis range to have 0 at the top
type='l', col='blue', lwd=3, # plot as a blue line of width 3
main='Profile 1: Temperature vs. Depth', # add a title
xlab='Temp (deg C)', ylab='Depth (m)') # set x, y axis labels
dev.off() # write plot to file (used with png())
# png() opens a file and dev.off() closes the file
# (literally, 'turn the plotting device off.')
# Everything in between actually writes the file.
# Note that the file will be saved in your current
# working directory. If you're not sure what this
# is, use getwd().
# Now for profile 2:
png(filename='P2_TvsD.png')
plot(x=dat$Prof2_Temp_degC, y=dat$Depth_m,
xlim=range(dat[,c(2,4)]),
ylim=rev(range(dat$Depth_m)),
type='l', col='blue', lwd=3,
main='Profile 2: Temperature vs. Depth',
xlab='Temp (deg C)', ylab='Depth (m)')
dev.off()
# Salinity vs. Depth
png(filename='P1_SvsD.png')
plot(x=dat$Prof1_Sal_psu, y=dat$Depth_m,
xlim=range(dat[,c(3,5)]),
ylim=rev(range(dat$Depth_m)),
type='l', lwd=3, col='blue',
main='Profile 1: Salinity vs. Depth',
xlab='Salinity (psu)', ylab='Depth (m)')
dev.off()
png(filename='P2_SvsD.png')
plot(x=dat$Prof2_Sal_psu, y=dat$Depth_m,
     xlim=range(dat[,c(3,5)]),
     ylim=rev(range(dat$Depth_m)),
     type='l', lwd=3, col='blue',
     main='Profile 2: Salinity vs. Depth',
     xlab='Salinity (psu)', ylab='Depth (m)')
# BUG FIX: the original never closed this png device, so the combined
# plots below would be silently drawn into P2_SvsD.png and the file left
# open/incomplete. Close it here.
dev.off()
# There are always many ways to do the same thing! This is
# just one way. You may discover alternative options.
# You might, for example, make the depths negative instead
# of setting the ylim to get 0m at the top of the graph.
#
# Since we're comparing two profiles, it may be helpful to
# show both temperature profiles on one plot, and
# similarly, both salinity profiles on one plot. How might
# we do that in R?
# Temperature vs. Depth
plot( x=dat$Prof1_Temp_degC, y=dat$Depth_m, # Plot one profile the same we
xlim=range(dat[,c(2,4)]), # did above. It doesn't matter
ylim=rev(range(dat$Depth_m)), # which one (profile 1 or 2)
type='l', lwd=3, col='blue', # you do first.
main='Temperature vs. Depth',
xlab='Temp (deg C)', ylab='Depth (m)')
points(y=dat$Depth_m, x=dat$Prof2_Temp_degC, # Add the second profile using points()
type='l', lwd=3, col='red') # function with the same settings.
legend('topleft', # Add a legend. Notice we used different colors
legend=c('Profile 1', 'Profile 2'), # series labels
col=c('blue', 'red'), lty=1, lwd=3) # color and line type
# Salinity vs. Depth
plot(x=dat$Prof1_Sal_psu, y=dat$Depth_m,
xlim=range(dat[,c(3,5)]),
ylim=rev(range(dat$Depth_m)),
type='l', lwd=3, col='blue',
main='Salinity vs. Depth',
xlab='Salinity (psu)', ylab='Depth (m)')
points(y=dat$Depth_m, x=dat$Prof2_Sal_psu,
type='l', lwd=3, col='red')
legend('bottomleft', legend=c('Profile 1', 'Profile 2'),
col=c('blue', 'red'), lty=1, lwd=3)
# =========================================================
# QUESTION 2e:
# Calculate the density profile at each station. Plot your
# results, being careful to label all axes.
# First load the equation of state function, and then use
# it to calculate the two density profiles.
source('EOS80.R')
# Some of you discovered another way of loading the function:
# Double-clicking the file opens it in RStudio as an R
# script, which you can then run. Both methods work. One way
# to be sure the function loaded is to look for water.density
# under "Functions" in your Global Environment in the upper
# right panel of RStudio.
#
# Remember that water.density() is a function, like plot().
# Just like a math function, for which one needs to provide
# numerical values for each variable in order to obtain the
# value of the function, we need to pass values to an R
# function in order to get anything out. water.density() has
# three variables: temp, sal, and pres. We need to supply a
# value (or a vector of values) for each of these variables
# in order to get an output.
#
# For convienence, let's define two new variables, one for
# each profile. What does the function water.density()
# produce?
dens1 = water.density(temp = dat$Prof1_Temp_degC,
sal = dat$Prof1_Sal_psu,
pres = dat$Depth_m)
dens2 = water.density(temp = dat$Prof2_Temp_degC,
sal = dat$Prof2_Sal_psu,
pres = dat$Depth_m)
# Make the plots, as above.
# Option 1: Individual plots
# Density vs. Depth
plot(x=dens1, y=dat$Depth_m,
xlim=range(c(dens1, dens2)),
ylim=rev(range(dat$Depth_m)),
type='l', lwd=3, col='blue',
main='Profile 1: Density vs. Depth',
xlab='Density (kg/m^3)', ylab='Depth (m)')
plot(x=dens2, y=dat$Depth_m,
xlim=range(c(dens1, dens2)),
ylim=rev(range(dat$Depth_m)),
type='l', lwd=3, col='blue',
main='Profile 2: Density vs. Depth',
xlab='Density (kg/m^3)', ylab='Depth (m)')
# Option 2: Both profiles on one plot
# Density vs. Depth
plot(x=dens1, y=dat$Depth_m,
xlim=range(c(dens1, dens2)),
ylim=rev(range(dat$Depth_m)),
type='l', lwd=3, col='blue',
main='Density vs. Depth',
xlab='Density (kg/m^3)', ylab='Depth (m)')
points(x=dens2, y=dat$Depth_m,
type='l', lwd=3, col='red')
legend('bottomleft', legend=c('Profile 1', 'Profile 2'),
col=c('blue', 'red'), lty=1, lwd=3)
# That's all there is to it!
#
# Take some time to play around with these samples and
# become comfortable with setting plot parameters, indexing
# data, and extracting information from the data. Remember,
# practice makes perfect!
# =========================================================
|
#Julian Ramirez-Villegas
#February 2012
#CIAT / CCAFS / UoL
stop("Do not run whole thing")
#libraries
library(maptools); library(rgdal); library(raster)
data(wrld_simpl)
#sourcing important functions
src.dir <- "D:/_tools/dapa-climate-change/trunk/EcoCrop/src"
#src.dir <- "/home/jramirez/dapa-climate-change/EcoCrop/src"
source(paste(src.dir,"/createMask.R",sep=""))
src.dir2<-"D:/_tools/dapa-climate-change/trunk/PhD/0007-crop-modelling/scripts"
#src.dir2 <- "/home/jramirez/dapa-climate-change/PhD/0007-crop-modelling/scripts"
source(paste(src.dir2,"/detrender-functions.R",sep=""))
#set the working folder
bDir <- "F:/PhD-work/crop-modelling/GLAM/climate-signals-yield"
#bDir <- "/andromeda_data1/jramirez/crop-modelling/GLAM/climate-signals-yield"
cropName <- "rice"
cd <- paste(bDir,"/",toupper(cropName),sep="")
#set DOS emulator folder
eDir <- "C:/Program Files (x86)/DOSBox-0.74"
dDir <- "F:/PhD-work/GLAM/detrending/smoothr"
#load shapefile and define characteristics
shp <- paste(cd,"/shp/IND2-",tolower(cropName),".shp",sep="")
shp <- readShapePoly(shp)
relField <- "DISID" #relational field
adm1Field <- "NAME_1" #field of adm1 names
adm2Field <- "NAME_2" #field of adm2 names
#read historic yield data
yieldData <- read.table(paste(cd,"/data/IND2-",cropName,".tab",sep=""),sep="\t",header=T)
#1. Detrend each of the districts data using lowess, linear or polynomial regression and do summaries
# Two-digit start/end years of the yield series (1966 .. 2004).
iyr <- 66; fyr <- 04
# BUG FIX: the original used `if (fyr <- iyr)`, which *assigns* iyr into
# fyr (always truthy here) instead of comparing. The intent: when the
# end year wraps past 2000 (fyr < iyr), span the century boundary.
if (fyr < iyr) {
  tser <- (1900+iyr):(2000+fyr)
} else {
  tser <- 1900+(iyr:fyr)
}
tser <- substr(tser,3,4)
#important fields
yfds <- paste("Y",tser,sep="") #yield
hfds <- paste("H",tser,sep="") #area harvested
pfds <- paste("T",tser,sep="") #total production
#detrend all districts
x <- detrendAll(yieldData,"DISID",yfds,iyr,fyr,cd,cropName)
# ###############################
# #list features
# nPol <- length(shp@polygons)
# for (p in 1:nPol) {
# cat("Pol", p, "\n")
# cname <- shp@data$COUNTRY[p]
# pol <- shp@polygons[p] #extract single polygon
# sh <- SpatialPolygons(pol) #create SP object from extracted feature
# rs <- createMask(sh, res) #create a raster from the SP object
# }
| /PhD/0007-crop-modelling/scripts/signals/detrender-rice.R | no_license | CIAT-DAPA/dapa-climate-change | R | false | false | 2,214 | r | #Julian Ramirez-Villegas
#February 2012
#CIAT / CCAFS / UoL
stop("Do not run whole thing")
#libraries
library(maptools); library(rgdal); library(raster)
data(wrld_simpl)
#sourcing important functions
src.dir <- "D:/_tools/dapa-climate-change/trunk/EcoCrop/src"
#src.dir <- "/home/jramirez/dapa-climate-change/EcoCrop/src"
source(paste(src.dir,"/createMask.R",sep=""))
src.dir2<-"D:/_tools/dapa-climate-change/trunk/PhD/0007-crop-modelling/scripts"
#src.dir2 <- "/home/jramirez/dapa-climate-change/PhD/0007-crop-modelling/scripts"
source(paste(src.dir2,"/detrender-functions.R",sep=""))
#set the working folder
bDir <- "F:/PhD-work/crop-modelling/GLAM/climate-signals-yield"
#bDir <- "/andromeda_data1/jramirez/crop-modelling/GLAM/climate-signals-yield"
cropName <- "rice"
cd <- paste(bDir,"/",toupper(cropName),sep="")
#set DOS emulator folder
eDir <- "C:/Program Files (x86)/DOSBox-0.74"
dDir <- "F:/PhD-work/GLAM/detrending/smoothr"
#load shapefile and define characteristics
shp <- paste(cd,"/shp/IND2-",tolower(cropName),".shp",sep="")
shp <- readShapePoly(shp)
relField <- "DISID" #relational field
adm1Field <- "NAME_1" #field of adm1 names
adm2Field <- "NAME_2" #field of adm2 names
#read historic yield data
yieldData <- read.table(paste(cd,"/data/IND2-",cropName,".tab",sep=""),sep="\t",header=T)
#1. Detrend each of the districts data using lowess, linear or polynomial regression and do summaries
# Two-digit start/end years of the yield series (1966 .. 2004).
iyr <- 66; fyr <- 04
# BUG FIX: the original used `if (fyr <- iyr)`, which *assigns* iyr into
# fyr (always truthy here) instead of comparing. The intent: when the
# end year wraps past 2000 (fyr < iyr), span the century boundary.
if (fyr < iyr) {
  tser <- (1900+iyr):(2000+fyr)
} else {
  tser <- 1900+(iyr:fyr)
}
tser <- substr(tser,3,4)
#important fields
yfds <- paste("Y",tser,sep="") #yield
hfds <- paste("H",tser,sep="") #area harvested
pfds <- paste("T",tser,sep="") #total production
#detrend all districts
x <- detrendAll(yieldData,"DISID",yfds,iyr,fyr,cd,cropName)
# ###############################
# #list features
# nPol <- length(shp@polygons)
# for (p in 1:nPol) {
# cat("Pol", p, "\n")
# cname <- shp@data$COUNTRY[p]
# pol <- shp@polygons[p] #extract single polygon
# sh <- SpatialPolygons(pol) #create SP object from extracted feature
# rs <- createMask(sh, res) #create a raster from the SP object
# }
|
library(UsingR)
### Name: maydow
### Title: Dow Jones industrial average and May maximum temperature
### Aliases: maydow
### Keywords: datasets
### ** Examples
data(maydow)
# Avoid attach(): it leaves `maydow` on the search path (never detached
# here). with() gives the same plots without modifying global state.
with(maydow, plot(max.temp, DJA))
with(maydow, plot(max.temp[-1], diff(DJA)))
| /data/genthat_extracted_code/UsingR/examples/maydow.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 243 | r | library(UsingR)
### Name: maydow
### Title: Dow Jones industrial average and May maximum temperature
### Aliases: maydow
### Keywords: datasets
### ** Examples
data(maydow)
# Avoid attach(): it leaves `maydow` on the search path (never detached
# here). with() gives the same plots without modifying global state.
with(maydow, plot(max.temp, DJA))
with(maydow, plot(max.temp[-1], diff(DJA)))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.R
\name{predict.blm}
\alias{predict.blm}
\title{Predict}
\usage{
\method{predict}{blm}(object, newdata, beta, ...)
}
\arguments{
\item{object}{The Bayesian linear regression model}
\item{newdata}{New data set}
\item{beta}{Number}
\item{...}{Additional data, for example a data frame.}
}
\value{
fitted model
}
\description{
Makes predictions from the fitted Bayesian linear model using new data. Without new data, it returns predictions on the data used to fit the model.
}
| /man/predict.blm.Rd | no_license | laurenejames/blm | R | false | true | 565 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.R
\name{predict.blm}
\alias{predict.blm}
\title{Predict}
\usage{
\method{predict}{blm}(object, newdata, beta, ...)
}
\arguments{
\item{object}{The Bayesian linear regression model}
\item{newdata}{New data set}
\item{beta}{Number}
\item{...}{Additional data, for example a data frame.}
}
\value{
fitted model
}
\description{
Makes predictions from the fitted Bayesian linear model using new data. Without new data, it returns predictions on the data used to fit the model.
}
|
IWLS_CorrZIP <-
function(Loadings,Correlations,corrModel,YList=NULL,
XList=NULL,ZZIndep=NULL,indepModel=NULL,SSIndep=NULL,BetaList=NULL,Vstart=NULL,OFFSETList=NULL,LinkList=c("Log"),DDRIndep=NULL,DRgammaIndep=NULL,
RespDist=c("Normal","Normal"),RandDistIndep=NULL,DDY=NULL,DYgamma=NULL,
FactDist=NULL,FF=NULL,SSF=NULL,CorrMat=NULL,ZZCorr=NULL,RandDistCorr=NULL,DDRCorr=NULL,DRCorrgamma=NULL,CustomVarMat=NULL,SSC=NULL,
EstimateOverDisp=c(FALSE,FALSE),LaplaceFixed=c(FALSE,FALSE),EstimateCorrelations=TRUE, EstimateVariances=TRUE,StandardErrors=TRUE,
Info=FALSE,DEBUG=FALSE,CONV=CONV,DRFgamma=NULL,APMethod="REML"){
# Compose all the designs first #
## if (!require(Matrix)) stop("Package matrix not installed")
# From design matrix into the two #
# Maximize TP and BN under independence #
# Estimate correlation #
# Pad matrix `a` with zeros to dimensions nr x nc. By default the zero
# padding goes below and to the right of `a`; `up`/`left` move it above
# and/or to the left.
FillZeros <- function(a, nr, nc, up = FALSE, left = FALSE) {
  if (nr < nrow(a)) stop("number of specified rows is less than the matrix rows")
  if (nc < ncol(a)) stop("number of specified columns is less than the matrix columns")
  extra.r <- nr - nrow(a)
  extra.c <- nc - ncol(a)
  # NULL pads vanish inside rbind()/cbind(), so no-padding cases need no
  # special handling.
  pad.rows <- if (extra.r > 0) matrix(0, extra.r, ncol(a)) else NULL
  pad.cols <- if (extra.c > 0) matrix(0, nrow(a), extra.c) else NULL
  pad.corner <- if (extra.r > 0 && extra.c > 0) matrix(0, extra.r, extra.c) else NULL
  if (!up && !left) {
    out <- rbind(cbind(a, pad.cols), cbind(pad.rows, pad.corner))
  } else if (up && !left) {
    out <- rbind(cbind(pad.rows, pad.corner), cbind(a, pad.cols))
  } else if (!up && left) {
    out <- rbind(cbind(pad.cols, a), cbind(pad.corner, pad.rows))
  } else {
    out <- rbind(cbind(pad.corner, pad.rows), cbind(pad.cols, a))
  }
  out
}
# Block-diagonal bind: returns the matrix [a 0; 0 b].
dbind <- function(a, b) {
  top <- cbind(a, matrix(0, nrow(a), ncol(b)))
  bottom <- cbind(matrix(0, nrow(b), ncol(a)), b)
  rbind(top, bottom)
}
# Creator of U the inverse of the link for V #
# Inverse link for the random effects: maps v (linear-predictor scale)
# to u for the given random-effect distribution.
LinkR <- function(x, RandDist) {
  if (RandDist == "Normal") {
    u <- x
  } else if (RandDist == "Gamma") {
    u <- exp(x)
  } else if (RandDist == "IGamma") {
    u <- -1 / x
  } else if (RandDist == "Beta") {
    u <- exp(x) / (1 + exp(x))
  }
  u
}
# Random effects W vector creator - this takes as an argument u vector#
# W vector for the random effects, evaluated on the u scale:
# du/dv for each supported random-effect distribution.
WRVC <- function(x, RandDist) {
  if (RandDist == "Normal") {
    w <- rep(1, length(x))
  } else if (RandDist == "Gamma") {
    w <- x
  } else if (RandDist == "IGamma") {
    w <- x^2
  } else if (RandDist == "Beta") {
    w <- x * (1 - x)
  }
  w
}
# x- vscale y- uscale - computes deviances for the estimation of the lambda paramters #
# Random-effect deviance contributions used when estimating the lambda
# (dispersion) parameters. x is on the v scale, y on the u scale.
DevRand <- function(x, y, RandDist) {
  if (RandDist == "Normal") {
    dev <- y^2
  } else if (RandDist == "Gamma") {
    dev <- 2 * (y - x - 1)
  } else if (RandDist == "IGamma") {
    dev <- 2 * (log(y) - x - 1)
  } else if (RandDist == "Beta") {
    dev <- -log(4 * y * (1 - y))
  }
  dev
}
# First derivative dW/du of the random-effect W vector, evaluated at u.
DWRDU <- function(x, RandDist) {
  if (RandDist == "Normal") {
    d1 <- rep(0, length(x))
  } else if (RandDist == "Gamma") {
    d1 <- rep(1, length(x))
  } else if (RandDist == "IGamma") {
    d1 <- 2 * x
  } else if (RandDist == "Beta") {
    d1 <- 1 - 2 * x
  }
  d1
}
# Second derivative d2W/du2 of the random-effect W vector, evaluated at u.
D2WRDU2 <- function(x, RandDist) {
  n <- length(x)
  if (RandDist == "Normal") {
    d2 <- rep(0, n)
  } else if (RandDist == "Gamma") {
    d2 <- rep(0, n)
  } else if (RandDist == "IGamma") {
    d2 <- rep(2, n)
  } else if (RandDist == "Beta") {
    d2 <- rep(2, n)
  }
  return(d2)
}
# link of the main distribution part - choice between canonical inverse and logarithm #
# Mean-to-linear-predictor link for the response: eta = g(mu).
LinkY <- function(mu, Link) {
  if (Link == "Inverse") {
    eta <- -(1 / mu)
  } else if (Link == "Log") {
    eta <- log(mu)
  } else if (Link == "Identity") {
    eta <- mu
  } else if (Link == "Logit") {
    eta <- log(mu / (1 - mu))
  } else if (Link == "Probit") {
    eta <- qnorm(mu)
  } else if (Link == "CLogLog") {
    eta <- log(-log(1 - mu))
  }
  eta
}
# Inverse of the link #
# Inverse of LinkY: mu = g^{-1}(eta) for the response distribution.
InvLinkY <- function(eta, Link) {
  if (Link == "Inverse") {
    mu <- -(1 / eta)
  } else if (Link == "Log") {
    mu <- exp(eta)
  } else if (Link == "Identity") {
    mu <- eta
  } else if (Link == "Logit") {
    mu <- exp(eta) / (1 + exp(eta))
  } else if (Link == "Probit") {
    mu <- pnorm(eta)
  } else if (Link == "CLogLog") {
    mu <- 1 - exp(-exp(eta))
  }
  mu
}
# Generation of the weight matrix W# # This now has two arguments first one says what is the distribution assumed the second one what is the link #
# Also added parameter B for binomial distribution #
# WARNING !!!! _ currently only canonical links !!!!!!!!!!!!!!!! #
# These functions Wmatgen and dWdmugen should be ammended with B factor for the binomial distibution !!!!!!!!!!!!#
# Weight generator for the response: returns the variance function V(mu)
# of the chosen distribution. NOTE: as written, the returned value is just
# V(mu) -- the link-dependent factor is commented out below, so the `Link`
# argument is currently unused and the `B<-1` reset has no further effect
# in this function (B only enters the Binomial variance above it).
Wmatgen<-function(mu,B,Link,Dist){
if (Dist=="Normal") Vmat<-rep(1,length(mu))
if (Dist=="Poisson") Vmat<-mu
if (Dist=="Binomial") Vmat<-(B-mu)*(mu/B) # In binomial models mu=p*B therefore the transformation is used g(mu/B)=eta #
if (Dist=="Gamma") Vmat<-mu^2
if (Dist!="Binomial") B<-1 # This makes sure offset is not used here if distribution is different than binomial #
# Include B everywhere and set it to one for different then binomial distribution 3
#if (Link=="Inverse") Wvec<-(1/Vmat)
#if (Link=="Log") Wvec<-(1/Vmat)*(mu^2)
#if (Link=="Identity") Wvec<-(1/Vmat)*rep(1,length(mu))
#if (Link=="Logit") Wvec<-(1/Vmat)*
Wmat<-Vmat
Wmat
}
# Generation of bfuncv #
# Cumulant-type function b(v) for the random-effect distribution on the
# v = g_R(u) scale, used in the hierarchical-likelihood terms.
bfuncvgen <- function(Vvec, Dist) {
switch(Dist,
Normal = (Vvec^2) / 2,
Gamma  = exp(Vvec),
Beta   = log(1 + exp(Vvec)),
IGamma = -log(-Vvec))
}
# Still the problem with B in link and variance function for binomial seems not to be solved !!!!!!! #
# Derivative dW/dmu of the GLM iterative weight W = 1/(V(mu)*(deta/dmu)^2),
# assembled by the chain rule from the variance function V(mu), its
# derivative, and the first two derivatives of the link eta(mu).
# mu: mean vector; B: binomial denominator (only used by Binomial/Logit
# branches); Link/Dist: character labels as in Wmatgen.
# NOTE(review): the Logit branch mixes mu and mu1 = mu/B
# (detadmu<-1/(mu*(1-mu1))) - confirm the intended B scaling before
# reusing with B != 1.
dWdmugen<-function(mu,B,Link,Dist){
mu1<-mu/B
# Variance function V(mu) and dV/dmu for each response family #
if (Dist=="Normal") {
Vmat<-rep(1,length(mu))
dVmatdmu<-rep(0,length(mu))
}
if (Dist=="Poisson") {
Vmat<-mu
dVmatdmu<-rep(1,length(mu))
}
if (Dist=="Binomial") {
Vmat<-(B-mu)*(mu/B)
dVmatdmu<-1-2*(mu/B)
}
if (Dist=="Gamma") {
Vmat<-mu^2
dVmatdmu<-2*mu
}
# B only matters for Binomial; neutralise it otherwise #
if (Dist!="Binomial") B<-1
# First and second derivatives of the link eta(mu) #
if (Link=="Inverse") {
detadmu <- 1/(mu^2)
d2etadmu2 <- -2/(mu^3)
}
if (Link=="Log") {
detadmu<-1/mu
d2etadmu2<--1/(mu^2)
}
if (Link=="Identity") {
detadmu<-rep(1,length(mu))
d2etadmu2<-rep(0,length(mu))
}
if (Link=="Logit") {
detadmu<-1/(mu*(1-mu1))
d2etadmu2<--(1-2*mu1)/((mu*(1-mu1))^2)
}
# Chain rule: d/dmu [ 1/(V * detadmu^2) ] #
dWdmu<--(1/Vmat^2)*dVmatdmu*((1/detadmu)^2)+2*(1/Vmat)*(1/detadmu)*(-1/detadmu^2)*d2etadmu2
dWdmu
}
# Second derivative d2W/dmu2 of the GLM iterative weight, needed for the
# second-order Laplace corrections. Uses V(mu) with its first two
# derivatives and the link eta(mu) with its first three derivatives.
# Arguments as in dWdmugen.
d2Wdmu2gen<-function(mu,B,Link,Dist){
mu1<-mu/B
# Variance function and its first two derivatives per response family #
if (Dist=="Normal") {
Vmat<-rep(1,length(mu))
dVmatdmu<-rep(0,length(mu))
d2Vmatdmu2<-rep(0,length(mu))
}
if (Dist=="Poisson") {
Vmat<-mu
dVmatdmu<-rep(1,length(mu))
d2Vmatdmu2<-rep(0,length(mu))
}
if (Dist=="Binomial") {
Vmat<-(B-mu)*(mu/B)
dVmatdmu<-1-2*(mu/B)
d2Vmatdmu2<--2*(1/B)
}
if (Dist=="Gamma") {
Vmat<-mu^2
dVmatdmu<-2*mu
d2Vmatdmu2<-2
}
# B only matters for Binomial; neutralise it otherwise #
if (Dist!="Binomial") B<-1
# First three derivatives of the link eta(mu) #
if (Link=="Inverse") {
detadmu <- 1/(mu^2)
d2etadmu2 <- -2/(mu^3)
d3etadmu3 <- 6/(mu^4)
}
if (Link=="Log") {
detadmu<-1/mu
d2etadmu2<--1/(mu^2)
d3etadmu3<-2/(mu^3)
}
if (Link=="Identity") {
detadmu<-rep(1,length(mu))
d2etadmu2<-rep(0,length(mu))
d3etadmu3<-rep(0,length(mu))
}
if (Link=="Logit") {
detadmu<-1/(mu*(1-mu1))
d2etadmu2<--(1-2*mu1)/((mu*(1-mu1))^2)
d3etadmu3<-((2/B)*((mu*(1-mu1))^2)+2*(1-2*mu1)*mu*(1-2*mu1)*(1-mu1))/(mu*(1-mu1))^4
}
# Add d2Vmatdmu2 and d3etadmu3 to all the functions #
# Full product/chain-rule expansion of d2/dmu2 [ 1/(V * detadmu^2) ] #
d2Wdmu2<-2*(1/Vmat^3)*(dVmatdmu^2)*((1/detadmu)^2)-(1/Vmat^2)*d2Vmatdmu2*((1/detadmu)^2)+2*(1/Vmat^2)*dVmatdmu*((1/detadmu)^3)*(d2etadmu2)-
2*(1/Vmat^2)*(dVmatdmu)*(1/detadmu)*(-1/detadmu^2)*d2etadmu2-2*(1/Vmat)*(1/detadmu^2)*d2etadmu2*(-1/detadmu^2)*d2etadmu2-
4*(1/Vmat)*(1/detadmu)*(-1/detadmu^3)*(d2etadmu2^2)+2*(1/Vmat)*(1/detadmu)*(-1/detadmu^2)*d3etadmu3
return(d2Wdmu2)
}
# Copy of a function for canonical links - direct computation much easier #
#d2Wdmu2gen<-function(mu,B,Link,Dist){
# if (Dist=="Normal") Vmat<-rep(0,length(mu))
# if (Dist=="Poisson") Vmat<-rep(0,length(mu))
# if (Dist=="Binomial") Vmat<--2/B # In binomial models mu=p*B therefore the transformation is used g(mu/B)=eta #
# if (Dist=="Gamma") Vmat<-rep(2,length(mu))
# if (Dist!="Binomial") B<-1 # This makes sure offset is not used here if distribution is different than binomial #
# Include B everywhere and set it to one for different then binomial distribution #
#if (Link=="Inverse") Wvec<-(1/Vmat)
#if (Link=="Log") Wvec<-(1/Vmat)*(mu^2)
#if (Link=="Identity") Wvec<-(1/Vmat)*rep(1,length(mu))
#if (Link=="Logit") Wvec<-(1/Vmat)*
# Wmat<-Vmat
# Wmat
#}
# Generation of the derivative dmudeta #
# Derivative dmu/deta of the inverse link, per link function. B is the
# binomial denominator (used by the Logit branch, where mu = B*p); Dist is
# accepted for interface compatibility but unused here.
dmudetagen <- function(mu, B, Link, Dist) {
switch(Link,
Inverse  = mu^2,
Log      = mu,
Identity = rep(1, length(mu)),
Logit    = (B - mu) * (mu / B))
}
# Generation of the derivative dAdmu (y-mu)/Phi is outside#
#dAdmugen<-function(mu,Link){
# if (Link=="Inverse") dAdmu<-rep(0,length(mu))
# if (Link=="Log") dAdmu<-(1/mu^2)
# dAdmu
#}
# These functions are with the factor index # - let's keep it for now
# These functions must be modified for large matrices
# These matrices must be reprogrammed, e.g. nrand and nfact must be replaced by nrandcor and nrandind
# Solve the random-effects-only augmented system.
# ISIGMAMvec: stacked weights - first nrow(Z) data-level entries, then
# ncol(Z) random-effect entries (treated as a diagonal; correlated random
# effects would need a full matrix here).
# Returns the inverse of Z'W_E Z + diag(W_R) and the diagonal of the
# weighted augmented projection ("hat") matrix.
SolverShort<-function(ISIGMAMvec,Z){
nr<-nrow(Z)
nc<-ncol(Z)
wE<-ISIGMAMvec[seq_len(nr)]        # data-level weights
wR<-ISIGMAMvec[nr+seq_len(nc)]     # random-effect weights (diagonal)
Inverse<-solve(t(Z*wE)%*%Z+diag(wR))
# Augmented projection matrix, columns rescaled by the weights #
topRows<-cbind(Z%*%Inverse%*%t(Z),Z%*%Inverse)
botRows<-cbind(Inverse%*%t(Z),Inverse)
PP2<-t(t(rbind(topRows,botRows))*ISIGMAMvec)
list(Inverse=Inverse,DiagPP2=diag(PP2))
}
# Solve the full augmented system including the fixed effects X.
# Partitions the augmented information matrix as [A B; C D] with
# A = X'W X, and inverts it via the Schur complement of the random-effects
# block (INV1 = (Z'W Z + SigmaR)^-1 from SolverShort).
# ISIGMAMvec: stacked data-level and random-effect weights; zTot: augmented
# working response. Returns the updated parameter vector and the diagonal
# of the weighted projection matrix (used as leverages).
# NOTE(review): relies on lexical/global scoping for ntot, qcum, nrandcor,
# nrandind, X, Z, ptot, and on a possibly pre-existing INV1 found by
# exists("INV1") - fragile; confirm the calling environment provides these.
SolverLong<-function(ISIGMAMvec,zTot){
SigmaE<-ISIGMAMvec[1:ntot]
SigmaR<-ISIGMAMvec[(ntot+1):(ntot+qcum[nrandcor+nrandind+1])]
# Reuse a cached inverse of the random-effects block when available #
if (!exists("INV1")) {INV1<-SolverShort(ISIGMAMvec,Z)$Inverse}
INV1<-as.matrix(INV1)
AA<-as.matrix(t(X*SigmaE)%*%X)
BB<-as.matrix(t(X*SigmaE)%*%Z)
CC<-as.matrix(t(Z*SigmaE)%*%X)
# Schur-complement block inversion of [AA BB; CC DD] #
AA1<-as.matrix(solve(AA-BB%*%INV1%*%CC))
BB1<--AA1%*%BB%*%INV1
CC1<--INV1%*%CC%*%AA1
DD1<-INV1+INV1%*%CC%*%AA1%*%BB%*%INV1
Inverse<-rbind(cbind(AA1,BB1),cbind(CC1,DD1))
DPMAT<-rep(0,ntot+qcum[nrandcor+nrandind+1])
# If n is large do the iteration over the row index #
# Leverages (diagonal of the weighted hat matrix) for the data rows #
DPMAT[1:ntot]<-diag(X%*%AA1%*%t(X*SigmaE)+Z%*%(CC1)%*%t(X*SigmaE)+X%*%(BB1)%*%t(Z*SigmaE)+Z%*%(DD1)%*%t(Z*SigmaE))
# For the random part #
DPMAT[(ntot+1):length(DPMAT)]<-diag(DD1)*SigmaR
rm(AA);rm(BB);rm(CC);rm(INV1)
# Augmented design [X Z; 0 I] for the parameter update #
tempmat<-rbind(cbind(X,as.matrix(Z)),cbind(matrix(0,qcum[nrandcor+nrandind+1],ptot),diag(qcum[nrandcor+nrandind+1])))
HELP1<-Inverse%*%t(tempmat*ISIGMAMvec)
NewParms<-HELP1%*%zTot
rm(Inverse);rm(HELP1);rm(tempmat)
rm(AA1);rm(BB1);rm(CC1);rm(DD1)
list(NewParms=NewParms,DiagPMAT=DPMAT)
}
# Nearest positive-definite matrix by Higham-style alternating projections
# (project onto the PSD cone via an eigendecomposition, with a Dykstra
# correction U), followed by an eigenvalue floor to enforce strict positive
# definiteness. Appears adapted from Matrix::nearPD - TODO confirm origin.
# M: symmetric numeric matrix; eig.tol: relative eigenvalue cutoff for the
# PSD projection; conv.tol: relative convergence tolerance; posd.tol:
# relative floor for the smallest eigenvalue; maxits: iteration cap.
nearPD<-function (M, eig.tol = 1e-06, conv.tol = 1e-07, posd.tol = 1e-08,
maxits = 100)
{
if (!(is.numeric(M) && is.matrix(M) && identical(M, t(M))))
stop("Input matrix M must be square and symmetric.\n")
# Infinity (max absolute row sum) norm, used for the convergence test #
inorm <- function(x) max(rowSums(abs(x)))
n <- ncol(M)
U <- matrix(0, n, n)
X <- M
iter <- 0
converged <- FALSE
while (iter < maxits && !converged) {
Y <- X
# Dykstra correction: subtract the previous projection increment #
T <- Y - U
e <- eigen(Y, symmetric = TRUE)
Q <- e$vectors
d <- e$values
D <- if (length(d) > 1)
diag(d)
else as.matrix(d)
# Keep only eigenvalues above the relative tolerance #
p <- (d > eig.tol * d[1])
QQ <- Q[, p, drop = FALSE]
X <- QQ %*% D[p, p, drop = FALSE] %*% t(QQ)
U <- X - T
# Re-symmetrise to control floating-point asymmetry #
X <- (X + t(X))/2
conv <- inorm(Y - X)/inorm(Y)
iter <- iter + 1
converged <- conv <= conv.tol
}
X <- (X + t(X))/2
# Final step: force eigenvalues up to a strict positive floor, then
# rescale so the original diagonal is (approximately) preserved #
e <- eigen(X, symmetric = TRUE)
d <- e$values
Eps <- posd.tol * abs(d[1])
if (d[n] < Eps) {
d[d < Eps] <- Eps
Q <- e$vectors
o.diag <- diag(X)
X <- Q %*% (d * t(Q))
D <- sqrt(pmax(Eps, o.diag)/diag(X))
X[] <- D * X * rep(D, each = n)
}
(X + t(X))/2
}
# First derivative of the SigmaA matrix #
# invSigmaMat and dSigmadlambda is a matrix #
# First derivative of the augmented information matrix DD with respect to
# one random-effect variance-component parameter lambda.
# The data-level contribution enters through the weight derivative
# dW/dmu * W * Z dvhat/dlambda; the random-effect block additionally gets
# -invSigma dSigma/dlambda invSigma*WR plus the W_R derivative term.
# invSigmaMat and dSigmadlambda are matrices; the remaining derivative
# arguments are vectors.
dDDdranmat <- function(X,Z,dWdmu,Wvec,dvhatdlambda,invSigmaMat,dSigmadlambda,WR,dWRdu){
wDeriv <- dWdmu*Wvec*as.vector(Z%*%dvhatdlambda)
ranBlock <- -invSigmaMat%*%dSigmadlambda%*%(invSigmaMat*WR)+(invSigmaMat*dWRdu*WR*dvhatdlambda)
topLeft  <- t(X*wDeriv)%*%X
topRight <- t(X*wDeriv)%*%Z
botLeft  <- t(Z*wDeriv)%*%X
botRight <- t(Z*wDeriv)%*%Z+ranBlock
rbind(cbind(topLeft,topRight),cbind(botLeft,botRight))
}
# Second derivative of the SigmaA matrix with respect to random effects parameters #
# Second derivative of the augmented information matrix DD with respect to
# two random-effect variance-component parameters (lambda1, lambda2).
# Mirrors dDDdranmat: the data-level piece comes from the product rule on
# dW/dmu * W * Z dvhat/dlambda; the random-effect block differentiates
# -invSigma dSigma invSigma*WR a second time, generating the paired
# dSigmadlambda1/dSigmadlambda2 and dWRdu/d2WRdu2 cross-terms.
d2DDdranmat2 <- function(X,Z,d2Wdmu2,dWdmu,Wvec,dvhatdlambda1,dvhatdlambda2,d2vhatdlambda12,invSigmaMat,dSigmadlambda1,dSigmadlambda2,d2Sigmadlambda12,WR,dWRdu,d2WRdu2){
# Product rule on the data-level weight derivative #
uprow <- d2Wdmu2*Wvec*as.vector(Z%*%dvhatdlambda1)*as.vector(Z%*%dvhatdlambda2)+dWdmu*dWdmu*as.vector(Z%*%dvhatdlambda1)*as.vector(Z%*%dvhatdlambda2)+
dWdmu*Wvec*as.vector(Z%*%d2vhatdlambda12)
# Second derivative of the random-effect block of DD #
downrow <- invSigmaMat%*%dSigmadlambda1%*%invSigmaMat%*%dSigmadlambda2%*%(invSigmaMat*WR)+invSigmaMat%*%dSigmadlambda2%*%invSigmaMat%*%dSigmadlambda1%*%(invSigmaMat*WR)-
invSigmaMat%*%d2Sigmadlambda12%*%(invSigmaMat*WR)-invSigmaMat%*%dSigmadlambda1%*%(invSigmaMat*dWRdu*WR*dvhatdlambda2)-
invSigmaMat%*%dSigmadlambda2%*%(invSigmaMat*dWRdu*WR*dvhatdlambda1)+(invSigmaMat*d2WRdu2*WR*dvhatdlambda1*WR*dvhatdlambda2)+
(invSigmaMat*dWRdu*dWRdu*WR*dvhatdlambda1*dvhatdlambda2)+(invSigmaMat*dWRdu*WR*d2vhatdlambda12)
uprow1 <- t(X*uprow)%*%X
uprow2 <- t(X*uprow)%*%Z
dorow1 <- t(Z*uprow)%*%X
dorow2 <- t(Z*uprow)%*%Z+downrow
out <- rbind(cbind(uprow1,uprow2),cbind(dorow1,dorow2))
return(out)
}
# First derivative of the random-effect mode vhat with respect to one
# variance-component parameter:
#   dvhat/dlambda = -invTT2 invSigma dSigma/dlambda invSigma (Psi - Uvec).
dvhatdranmat <- function(invTT2, invSigmaMat, dSigmadlambda, Psi, Uvec) {
scaledResid <- invSigmaMat %*% (Psi - Uvec)
-invTT2 %*% (invSigmaMat %*% (dSigmadlambda %*% scaledResid))
}
# Second derivative of vhat with respect to the random-effects variance-component parameters #
# Second derivative of the random-effect mode vhat with respect to two
# variance-component parameters (lambda1, lambda2). out1 collects the
# derivative of the estimating equation; the final answer is -invTT2 %*% out1,
# mirroring the implicit-function differentiation used in dvhatdranmat.
d2vhatdranmat2 <- function(invTT2,Z,Phi,dWdmu,Wvec,dvhatdlambda1,dvhatdlambda2,invSigmaMat,dWRdu,WR,dSigmadlambda1,dSigmadlambda2,Psi,Uvec,d2Sigmadlambda12){
out1 <- (t(Z*as.vector(1/Phi)*dWdmu*Wvec*as.vector(Z%*%dvhatdlambda1))%*%as.vector(Z%*%dvhatdlambda2)) + invSigmaMat%*%(dWRdu*WR*dvhatdlambda1*dvhatdlambda2) -
invSigmaMat%*%(dSigmadlambda1%*%(invSigmaMat%*%as.vector(WR*dvhatdlambda2))) - invSigmaMat%*%(dSigmadlambda2%*%(invSigmaMat%*%as.vector(WR*dvhatdlambda1))) -
invSigmaMat%*%(dSigmadlambda1%*%(invSigmaMat%*%(dSigmadlambda2%*%(invSigmaMat%*%(Psi-Uvec))))) - invSigmaMat%*%(dSigmadlambda2%*%(invSigmaMat%*%(dSigmadlambda1%*%(invSigmaMat%*%(Psi-Uvec))))) +
invSigmaMat%*%(d2Sigmadlambda12%*%(invSigmaMat%*%(Psi-Uvec)))
out <- -invTT2%*%out1
return(out)
}
# First derivative of h with respect to random-effects variance components #
# This function needs to be amended for different random effects using the c(psi,lambda) terms #
# First derivative of the h-likelihood with respect to a variance-component
# parameter of an INDEPENDENT random effect. Combines the chain-rule term
# through vhat with the direct derivative of the random-effect log-density
# written in exponential-family form (Psi*V - b(V)).
# NOTE(review): the randist argument is currently unused - the c(psi,lambda)
# residual term it presumably selects is still missing (see comment below).
dhdranmatInd <- function(Z,y,mu,Phi,dvhatdlambda,invSigmaMat,Psi,Uvec,Vvec,bfuncv,dSigmadlambda,randist){
out1 <- dvhatdlambda%*%(t(Z*as.vector(1/Phi))%*%(y-mu)+invSigmaMat%*%(Psi-Uvec))+(Psi*Vvec-bfuncv)%*%(invSigmaMat^2)%*%rep(1,length(Vvec))
# depending on distribution a residual needs to added form c(psi,lambda) #
return(out1)
}
# Temporary first derivative #
# First derivative of the h-likelihood with respect to a variance-component
# parameter of a CORRELATED (multivariate normal) random effect. The last
# two terms are the derivative of the Gaussian log-density:
# 0.5 v' invSigma dSigma invSigma v - 0.5 tr(invSigma dSigma).
dhdranmatCorr <- function(Z,y,mu,Phi,dvhatdlambda,invSigmaMat,Psi,Uvec,Vvec,bfuncv,dSigmadlambda){
out1 <- dvhatdlambda%*%(t(Z*as.vector(1/Phi))%*%(y-mu)+invSigmaMat%*%(Psi-Uvec))+0.5*Vvec%*%invSigmaMat%*%dSigmadlambda%*%invSigmaMat%*%Vvec-
0.5*sum(diag(invSigmaMat%*%dSigmadlambda))
return(out1)
}
# Second derivative of h with respect to random effects variance components #
# Second derivative of the h-likelihood with respect to two correlated-
# random-effect variance-component parameters. The sqrt(abs(.))*sign(.)
# construction rewrites a weighted sum over (Psi*V - b(V)) as a quadratic
# form while preserving the sign of each element's contribution.
d2hdranmatCorrCorr <- function(Z,y,mu,Phi,d2vhatdlambda12,dvhatdlambda1,dvhatdlambda2,Wvec,invSigmaMat,dSigmadlambda1,dSigmadlambda2,d2Sigmadlambda12,Psi,Uvec,Vvec,bfuncv,WR){
out <- d2vhatdlambda12%*%t(Z*as.vector(1/Phi))%*%(y-mu)-dvhatdlambda1%*%t(Z*as.vector(Wvec/Phi))%*%Z%*%dvhatdlambda2 +
d2vhatdlambda12%*%invSigmaMat%*%(Psi-Uvec)-dvhatdlambda1%*%invSigmaMat%*%dSigmadlambda2%*%invSigmaMat%*%(Psi-Uvec)-dvhatdlambda2%*%invSigmaMat%*%dSigmadlambda1%*%invSigmaMat%*%(Psi-Uvec)-
dvhatdlambda1%*%(invSigmaMat)%*%(WR*dvhatdlambda2)+
(sqrt(abs(Psi*Vvec-bfuncv))*sign(Psi*Vvec-bfuncv))%*%(invSigmaMat%*%dSigmadlambda1%*%invSigmaMat%*%dSigmadlambda2%*%invSigmaMat+invSigmaMat%*%dSigmadlambda2%*%invSigmaMat%*%dSigmadlambda1%*%invSigmaMat-
invSigmaMat%*%d2Sigmadlambda12%*%invSigmaMat)%*%(sqrt(abs(Psi*Vvec-bfuncv))*sign(Psi*Vvec-bfuncv))-
# Trace terms from differentiating -0.5 log|Sigma| twice #
0.5*sum(diag(invSigmaMat%*%d2Sigmadlambda12))+0.5*sum(diag(invSigmaMat%*%dSigmadlambda1%*%invSigmaMat%*%dSigmadlambda2))
return(out)
}
#################################################################################
# We need to include a profile likelihood function for correlations p_v_beta(h) #
#################################################################################
#####
##### Maybe this function needs to update the random effects estimates #
#####
# Adjusted profile likelihood p_v,beta(h) evaluated at a candidate set of
# Fisher-z correlation parameters ZF for the correlated random effects.
# Re-estimates the random-effect modes for the new correlation matrix,
# accumulates the h-likelihood over all sub-models, and returns
# list(AdjProfLike, hlikelihood2, hlikelihood3, caic).
# Relies on lexical scoping for the higher-level objects (CorrMat, qcorr,
# cumqcorr, DDRCorr, DRCorrgamma, ZOriginal, Wvec, Phi, Beta, VTCorrTot,
# zmain, X, TTOriginal, YList, RespDist, LinkList, B, mu, qcum, nrandind,
# ISIGMAMvec, lcorr, dbind, dmvnorm, ...).
# FIX: the Gamma branch of the response likelihood now uses log=TRUE, so it
# adds log-densities like every other family (Normal/Poisson/Binomial);
# previously raw densities were summed, corrupting hlikelihood and caic.
AdjProfCorrelations<-function(ZF){
# Map Fisher-z parameters to correlations.
# NOTE(review): (1-exp(2z))/(1+exp(2z)) equals -tanh(z), i.e. the NEGATIVE
# of the usual inverse Fisher transform - confirm this sign convention
# against the optimiser that supplies ZF.
Correls<-list(0)
ZF<-list(ZF)
for (i in 1:length(ZF)){
Correls[[i]]<-(1-exp(2*ZF[[i]]))/(1+exp(2*ZF[[i]]))
}
# Unfold CorrMat: replace each integer code by its correlation value #
TempCorrMat<-list(0)
for (i in 1:length(CorrMat)){
TempCorrMat[[i]]<-CorrMat[[i]]
for (j in 1:length(Correls[[i]])){
TempCorrMat[[i]][CorrMat[[i]]==j]<-Correls[[i]][j]
}
diag(TempCorrMat[[i]])<-1
CorrMatOut[[i]]<-TempCorrMat[[i]]
}
LambdaCorr<-exp(DDRCorr%*%DRCorrgamma)
# Scale correlations by standard deviations to get covariance matrices #
for (i in 1:length(CorrMat)){
LambdaLocal<-rep(0,qcorr[i])
for (j in 1:qcorr[i]){
LambdaLocal[j]<-LambdaCorr[cumindCorrIndex[cumqcorr[i]+j]+1]
}
SigmaMat[[i]]<-sqrt(LambdaLocal)*t(CorrMatOut[[i]]*sqrt(LambdaLocal))
}
# Merge the per-group covariance matrices block-diagonally #
for (i in 1:length(CorrMat)){
if (i==1) {
SigmaTot<-SigmaMat[[1]]%x%diag(lcorr[1])
invSigmaTot<-solve(SigmaMat[[1]])%x%diag(lcorr[1])
}
else {
SigmaTot<-dbind(SigmaTot,SigmaMat[[i]]%x%diag(lcorr[i]))
invSigmaTot<-dbind(invSigmaTot,solve(SigmaMat[[i]])%x%diag(lcorr[i]))
}
}
# Re-estimate the random-effect modes V for the new Sigma (Beta fixed) #
invSigmaTotIn<-invSigmaTot
if (nrandind>0) invSigmaTotIn<-dbind(invSigmaTotIn,diag(ISIGMAMvec[(qcum[cumqcorr[length(CorrMat)+1]+1]+1):qcum[length(qcum)]]))
convIn<-10
VTCorrTotIn<-VTCorrTot
while (convIn>0.01) {
OldVTCorrTotIn<-VTCorrTotIn
VTCorrTotIn<-solve(t(ZOriginal)%*%(ZOriginal*as.vector(Wvec/Phi))+invSigmaTotIn)%*%t(ZOriginal*as.vector(Wvec/Phi))%*%(zmain-X%*%Beta)
convIn<-sum(abs(OldVTCorrTotIn-VTCorrTotIn))
}
VTCorrTot<-VTCorrTotIn
# h-likelihood: response part, one family per sub-model (all on log scale) #
hlikelihood<-0
eta<-TTOriginal[1:ntot,]%*%as.matrix(c(Beta,VTCorrTot))
for (i in 1:nModels){
mu[(cModelsDims[i]+1):cModelsDims[i+1]]<-B[(cModelsDims[i]+1):cModelsDims[i+1]]*InvLinkY(eta[(cModelsDims[i]+1):cModelsDims[i+1]],LinkList[[i]])
if (RespDist[i]=="Normal") hlikelihood<-hlikelihood+sum(dnorm(YList[[i]],mu[(cModelsDims[i]+1):cModelsDims[i+1]],sd=sqrt(Phi[(cModelsDims[i]+1):cModelsDims[i+1]]),log=TRUE))
if (RespDist[i]=="Poisson") {
# Poisson log-likelihood written out so it can be scaled by Phi #
temp<-sum((-mu[(cModelsDims[i]+1):cModelsDims[i+1]]+YList[[i]]*log(mu[(cModelsDims[i]+1):cModelsDims[i+1]])-lgamma(YList[[i]]+1))/Phi[(cModelsDims[i]+1):cModelsDims[i+1]])
hlikelihood<-hlikelihood+temp
}
if (RespDist[i]=="Binomial") hlikelihood<-hlikelihood+sum(dbinom(YList[[i]],B[(cModelsDims[i]+1):cModelsDims[i+1]],(mu[(cModelsDims[i]+1):cModelsDims[i+1]]/B[(cModelsDims[i]+1):cModelsDims[i+1]]),log=TRUE))
# FIX: log=TRUE added so Gamma contributes log-densities like the others #
if (RespDist[i]=="Gamma") hlikelihood<-hlikelihood+sum(dgamma(YList[[i]],shape=(1/Phi[(cModelsDims[i]+1):cModelsDims[i+1]]),scale=(mu[(cModelsDims[i]+1):cModelsDims[i+1]]*Phi[(cModelsDims[i]+1):cModelsDims[i+1]]),log=TRUE))
}
hlikelihood1<-hlikelihood
# Random-effect part: multivariate normal density of the correlated modes #
# (dmvnorm comes from the mvtnorm package, loaded by the caller) #
for (i in 1:length(CorrMat)){
VTemp<-unlist(VTCorrTot)[(qcum[cumqcorr[i]+1]+1):qcum[cumqcorr[i+1]+1]] # Extract empirical bayes corresponding to the correlated effects of CorrMat[[i]]
noraneff<-cumqcorr[i+1]-cumqcorr[i]
VTemp<-matrix(VTemp,length(VTemp)/noraneff,noraneff)
hlikelihood<-hlikelihood+sum(dmvnorm(VTemp,mean=rep(0,noraneff),sigma=SigmaMat[[i]],log=TRUE))
}
if (nrandind>0) {
for (i in 1:nrandind) {
# NOTE(review): the index (a+i):b+i+1 parses as ((a+i):b)+i+1 in R -
# this looks like a missing-parentheses bug; confirm the intended range.
if (RandDistIndep[i]=="Normal") hlikelihood<-hlikelihood+sum(dnorm(VTCorrTot[(qcum[cumqcorr[length(CorrMat)+1]]+i):qcum[cumqcorr[length(CorrMat)+1]]+i+1],log=TRUE))
}
}
# REMARK: There was a problem with invSigmaTot - it was set to independent unit matrix which is not true #
for (i in 1:length(CorrMat)){
if (i==1) {
SigmaTot<-SigmaMat[[1]]%x%diag(lcorr[1])
invSigmaTot<-solve(SigmaMat[[1]])%x%diag(lcorr[1])
}
else {
SigmaTot<-dbind(SigmaTot,SigmaMat[[i]]%x%diag(lcorr[i]))
invSigmaTot<-dbind(invSigmaTot,solve(SigmaMat[[i]])%x%diag(lcorr[i]))
}
}
hlikelihood2<-hlikelihood
# Laplace adjustment terms: -0.5 log det(D/(2*pi)) for the profiled blocks #
MIDMAT<-dbind(diag(as.vector(Wvec/Phi)),invSigmaTot)
if ((qcum[cumqcorr[length(CorrMat)+1]+1]+1)-qcum[length(qcum)]<0) MIDMAT<-dbind(MIDMAT,diag(ISIGMAMvec[(qcum[cumqcorr[length(CorrMat)+1]+1]+1):qcum[length(qcum)]]))
DD<-t(TTOriginal)%*%MIDMAT%*%TTOriginal
TTOriginal3<-rbind(ZOriginal,diag(ncol(ZOriginal)))
DD1<-t(TTOriginal3)%*%MIDMAT%*%TTOriginal3
hlikelihood3<-hlikelihood-0.5*determinant((DD1/(2*pi)),logarithm=TRUE)$modulus
hlikelihood4<-hlikelihood-0.5*determinant((DD/(2*pi)),logarithm=TRUE)$modulus
AdjProfLike<--hlikelihood4
# Effective number of parameters pd and conditional AIC #
MIDMAT1<-dbind(diag(as.vector(Wvec/Phi)),0*invSigmaTot)
BB<-t(TTOriginal)%*%MIDMAT1%*%TTOriginal
pd<- sum(diag(solve(DD) %*% BB))
caic<--2*hlikelihood1+2*pd
res<-list(AdjProfLike,hlikelihood2,hlikelihood3,caic)
return(res)
}
##################################################################################
# Create the design system of all #
# Check if there are correlated random effects #
# if (is.null(CorrMat)) EstimCorrelation<-FALSE - this condition is not really necessary
# Therefore we dont need a newton-raphson to estimate correlations if corrmat does not exist #
# YBN<-(Y==0)
# YTP<-Y
# YTP[Y==0]<-NA
# nBN<-nrow(YBN)
# nTP<-nrow(YTP)
# pBN<-ncol(XBN)
# pTP<-ncol(XTP)
# This is a total X matrix #
# First model goes Binomial, second Truncated Poisson - or extensions to Poisson etc #
# First also we model the correlated random effects
# XTT<-dbind(XBN,XTP)
# nrandTP<-length(ZZTP) # number of truncate poisson components
# nrandBN<-length(ZZBN) # number of binomial
# nrandCR<-length(ZZCorr) # number of correlated components
# nrandTT<-nrandTP+nrandBN
# Dimensions and bookkeeping for the joint multi-model fit.
# YList/XList etc. are expected to exist in the calling environment.
nModels<-length(YList)
# Rows (observations) per sub-model and their cumulative offsets #
ModelsDims<-sapply(YList,nrow)
cModelsDims<-cumsum(c(0,ModelsDims))
RandDist<-c(RandDistCorr,RandDistIndep) # This specifies the distribution of all random effects: correlated first, then independent
ntot<-sum(ModelsDims)
# Number of free correlation parameters per correlated group #
qcorrels<-sapply(Correlations,length)
cumqcorrels<-cumsum(c(0,qcorrels))
# Design for the matrices of random effects ##
# Combine dispersion designs for correlated and independent random effects
# block-diagonally (dbind); at least one of them must be supplied.
# NOTE(review): scalar conditions use & rather than && - works, but &&
# would be the idiomatic scalar operator here.
if (!is.null(DDRCorr) & !is.null(DDRIndep)) DDR<-dbind(DDRCorr,DDRIndep)
if (is.null(DDRCorr) & !is.null(DDRIndep)) DDR<-DDRIndep
if (!is.null(DDRCorr) & is.null(DDRIndep)) DDR<-DDRCorr
if (is.null(DDRCorr) & is.null(DDRIndep)) stop("You did not specify any design matrix for random effects!")
# Create the design matrices of X covariates and Y #
# Stack all sub-model responses into one Y vector and build a
# block-diagonal fixed-effects design X (dbind = diagonal bind, defined
# elsewhere in this package).
for (i in 1:length(YList)){
if (i==1) {
Y<-YList[[1]]
X<-XList[[1]]
}
else {
# NOTE(review): growing Y/X in the loop copies on every iteration -
# fine for a few sub-models, costly for many.
Y<-rbind(Y,YList[[i]])
X<-dbind(X,XList[[i]])
}
}
# Index of the covariates over all the models #
p<-sapply(XList,ncol)
ptot<-sum(p)
pcum<-cumsum(c(0,p))
# Create the matrix of covariances - first multiply correlations by standard deviations to create variance covariance matrix#
# This whole branch builds the (Cholesky-decorrelated) design matrices for
# the CORRELATED random effects. It runs only when CorrMat is supplied.
if(!is.null(CorrMat)) {
# Unfold CorrMat: replace each integer code by its correlation value #
TempCorrMat<-list(0)
CorrMatOut<-list(0)
for (i in 1:length(CorrMat)){
TempCorrMat[[i]]<-CorrMat[[i]]
for (j in 1:length(Correlations[[i]])){
TempCorrMat[[i]][CorrMat[[i]]==j]<-Correlations[[i]][j]
}
diag(TempCorrMat[[i]])<-1
CorrMatOut[[i]]<-TempCorrMat[[i]]
}
# Random-effect variances from the dispersion linear predictor #
LambdaCorr<-exp(DDRCorr%*%DRCorrgamma)
# Per correlated group: number of effects (qcorr) and number of levels
# of the grouping factor (lcorr) #
qcorr<-rep(0,length(CorrMat))
lcorr<-rep(0,length(CorrMat)) # in this vector we store the number of levels per group
for (i in 1:length(CorrMat)){
if (i==1) {
qcorr[1]<-nrow(CorrMat[[1]])
}
else qcorr[i]<-nrow(CorrMat[[i]])
cumqcorr<-cumsum(c(0,qcorr))
lcorr[i]<-ncol(ZZCorr[[cumqcorr[i]+1]])
}
# Sanity check: all effects correlated together must share one LAPFIX flag #
for (i in 1:length(CorrMat)){
tempindex<-as.numeric(names(table(corrModel[(cumqcorr[i]+1):cumqcorr[i+1]])))
if (sum(LaplaceFixed[tempindex]==FALSE)!=length(LaplaceFixed[tempindex]) & sum(LaplaceFixed[tempindex]==TRUE)!=length(LaplaceFixed[tempindex]))
stop("You choose for some correlated effect LAPFIX=TRUE while others LAPFIX=FALSE this is not permitted!")
}
# create index of individual random effects #
indCorrIndex<-rep(0,length(ZZCorr))
for (i in 1:length(ZZCorr)){
indCorrIndex[i]<-ncol(ZZCorr[[i]])
}
cumindCorrIndex<-cumsum(c(0,indCorrIndex))
# Covariance matrix per group: sqrt(Lambda) * Corr * sqrt(Lambda) #
SigmaMat<-list(0)
for (i in 1:length(CorrMat)){
LambdaLocal<-rep(0,qcorr[i])
for (j in 1:qcorr[i]){
LambdaLocal[j]<-LambdaCorr[cumindCorrIndex[cumqcorr[i]+j]+1]
}
SigmaMat[[i]]<-sqrt(LambdaLocal)*t(CorrMatOut[[i]]*sqrt(LambdaLocal))
}
# merging the matrices by diagonals #
# Kronecker with diag(lcorr) replicates each group covariance per level #
for (i in 1:length(CorrMat)){
if (i==1) {
SigmaTot<-SigmaMat[[1]]%x%diag(lcorr[1])
invSigmaTot<-solve(SigmaMat[[1]])%x%diag(lcorr[1])
}
else {
SigmaTot<-dbind(SigmaTot,SigmaMat[[i]]%x%diag(lcorr[i]))
invSigmaTot<-dbind(invSigmaTot,solve(SigmaMat[[i]])%x%diag(lcorr[i]))
}
}
# Matrix SigmaTot is the resulting matrix #
# We have to make the random effects independent via Cholesky decomposition #
# We need a kronecker product of cholesky matrix times the SigmaTot #
# You can do the cholesky on the total sigma matrix - the problem is the dimension is greater so maybe we loose computational efficiency #
# DO cholesky on SigmaMat list #
# Vector (short) form of each correlated design: row sums of ZZCorr #
ZZCorrVec<-list(0)
for (i in 1:length(ZZCorr)){
ZZCorrVec[[i]]<-ZZCorr[[i]]%*%rep(1,ncol(ZZCorr[[i]]))
}
# Now we modify the design matrix via cholesky decompositions so the
# transformed random effects are independent #
ZZCorrUpd<-list(0)
DiagDesign<-list(0)
CholeskyMatrices<-list(0)
ZZShort<-list(0)
for (i in 1:length(CorrMat)){
itchol<-t(chol(SigmaMat[[i]])) # This is actually cholesky decomposition instead of inverse, there was before inverse which was wrong
CholeskyMatrices[[i]]<-itchol
currentindex<-seq(cumqcorr[i]+1,cumqcorr[i+1])
lengthcorrModelCur<-length(RespDist)
valuescorrModelCur<-as.numeric(names(table(corrModel)))
# For each sub-model j, collect the columns of the short design that
# belong to it; zero columns where an effect belongs to another model #
ZZCorrTemp<-rep(list(0),lengthcorrModelCur)
for (j in 1:lengthcorrModelCur){
for (k in currentindex) {
if (ZZCorrTemp[[j]][1]==0 & length(ZZCorrTemp[[j]])==1){
if (corrModel[k]==valuescorrModelCur[j]) ZZCorrTemp[[j]]<-ZZCorrVec[[k]] # If the observation belongs to this model than make it #
else ZZCorrTemp[[j]]<-rep(0,ModelsDims[j])
}
else {
if (corrModel[k]==valuescorrModelCur[j]) ZZCorrTemp[[j]]<-cbind(ZZCorrTemp[[j]],ZZCorrVec[[k]])
else ZZCorrTemp[[j]]<-cbind(ZZCorrTemp[[j]],rep(0,ModelsDims[j]))
}
}
}
# Binding it all together #
for (j in 1:length(ZZCorrTemp)){
if (j==1) {
ZZCorrTempTot<-ZZCorrTemp[[j]]
nrowtot<-nrow(ZZCorrTemp[[j]])
}
else {
ZZCorrTempTot<-rbind(ZZCorrTempTot,ZZCorrTemp[[j]])
nrowtot<-c(nrowtot,nrow(ZZCorrTemp[[j]]))
}
}
cnrowtot<-cumsum(c(0,nrowtot))
# Now we use cholesky transform on the design matrix #
ZZCorrTempTotUpd<-ZZCorrTempTot%*%itchol
ZZShort[[i]]<-ZZCorrTempTot
# ZZCorrTempTotUpd is the new design matrix for the joint model from the correlated part #
# This design matrix is in the short form (vector form) - we need to expand it to the diagonal form #
# Expansion to diagonal form by the subject factor SSC (a matrix package
# would be more efficient here) #
for (j in currentindex){
DiagDesign[[j]]<-model.matrix(~SSC[[j]]-1)
DiagDesign[[j]]<-DiagDesign[[j]]*ZZCorrTempTotUpd[,(1+j-currentindex[1])]
if (j==currentindex[1]) ZZCorrTotUpd<-DiagDesign[[j]]
else ZZCorrTotUpd<-cbind(ZZCorrTotUpd,DiagDesign[[j]])
}
ZZCorrUpd[[i]]<-ZZCorrTotUpd
if (i==1) ZZCorrDesign<-ZZCorrUpd[[1]]
else ZZCorrDesign<-cbind(ZZCorrDesign,ZZCorrUpd[[i]])
}
# Column counts of the per-effect diagonal designs #
q<-sapply(DiagDesign,ncol)
}
# DiagDesign contains individual design matrix for each random effects - which are now independent !!!!!!!!!!!!! #
# The models must be fitted jointly if there are correlations between random effects between different models #
# !!!!! It works !!!!! #
# Now we need to add independent design matricies and then create the vector of random effect corresponding to the design matrices #
# Independent random effects are in ZZIndep indexed by
# Handle independent random effects here #
# Counts of independent and correlated random-effect terms.
# NOTE(review): exists() searches the calling/global environment - whether
# ZZIndep/ZZCorr were created depends on earlier (unseen) code.
if (exists("ZZIndep")) nrandind<-length(ZZIndep)
else nrandind<-0
if (exists("ZZCorr")) nrandcor<-length(ZZCorr)
else nrandcor<-0
Beta<-unlist(BetaList)
# Pad each independent design with zero-rows so it aligns with the stacked
# multi-model response, then append it to the joint design ZZDesign.
if (nrandind>0) {
ZZIndepTemp<-list(0)
for (i in 1:nrandind){
# Rows belonging to sub-models below / above this effect's model #
if (indepModel[i]==length(ModelsDims)) rowsdown<-0
else rowsdown<-sum(ModelsDims[(indepModel[i]+1):length(ModelsDims)][!is.na(ModelsDims[(indepModel[i]+1):length(ModelsDims)])])
if (indepModel[i]==1) rowsup<-0
else rowsup<-sum(ModelsDims[1:(indepModel[i]-1)])
# First expand up if possible #
ZZIndepTemp[[i]]<-FillZeros(ZZIndep[[i]],nr=(nrow(ZZIndep[[i]])+rowsup),nc=ncol(ZZIndep[[i]]),up=TRUE)
ZZIndepTemp[[i]]<-FillZeros(ZZIndepTemp[[i]],nr=(nrow(ZZIndepTemp[[i]])+rowsdown),nc=ncol(ZZIndepTemp[[i]]))
if (!exists("ZZCorrDesign") & i==1) ZZDesign<-ZZIndepTemp[[i]]
if (exists("ZZCorrDesign") & i==1) ZZDesign<-cbind(ZZCorrDesign,ZZIndepTemp[[1]])
if (i>1) ZZDesign<-cbind(ZZDesign,ZZIndepTemp[[i]])
if (i==1) ZZIndepDesign<-ZZIndepTemp[[1]]
if (i>1) ZZIndepDesign<-cbind(ZZIndepDesign,ZZIndepTemp[[i]])
}
}
else if(nrandcor>0) ZZDesign<-ZZCorrDesign
# Extend the per-effect column counts q with the independent designs #
if (nrandind>0) {
if (!is.null(CorrMat)) q<-c(q,sapply(ZZIndepTemp,ncol))
else q<-sapply(ZZIndepTemp,ncol)
}
# Create ZOriginal and TTOriginal #
# ZOriginal is not well defined here !!!!!! - it must be corrected #
# Build ZOriginal: the UNtransformed (pre-Cholesky) random-effects design,
# aligned row-wise with the stacked response, and the augmented design
# TTOriginal = [X Z; 0 I].
ZOriginal<-NULL
if (nrandcor>0) {
for (i in 1:nrandcor) {# These also dont need to be from different models #
if (corrModel[i]==length(ModelsDims)) rowsdown<-0
else rowsdown<-sum(ModelsDims[(corrModel[i]+1):length(ModelsDims)][!is.na(ModelsDims[(corrModel[i]+1):length(ModelsDims)])])
if (corrModel[i]==1) rowsup<-0
else rowsup<-sum(ModelsDims[1:(corrModel[i]-1)])
# Zero-pad above then below so the block sits at its model's rows #
ZZCorrTemp<-FillZeros(ZZCorr[[i]],nr=(nrow(ZZCorr[[i]])+rowsup),nc=ncol(ZZCorr[[i]]),up=TRUE)
ZZCorrTemp<-FillZeros(ZZCorrTemp,nr=(nrow(ZZCorrTemp)+rowsdown),nc=ncol(ZZCorrTemp))
if (i==1) ZOriginal<-ZZCorrTemp
else {
ZOriginal<-cbind(ZOriginal,ZZCorrTemp)
}
}
}
if (nrandind>0) {
# NOTE(review): this condition reads the leftover loop variable i from the
# loop above (or errors if nrandcor==0 so i never existed) - looks like a
# bug; the is.null(ZOriginal) part alone is presumably what was intended.
if (i==1 & is.null(ZOriginal)) ZOriginal<-ZZIndepDesign
else ZOriginal<-cbind(ZOriginal,ZZIndepDesign)
}
TTOriginal1<-cbind(X,ZOriginal)
TTOriginal2<-cbind(matrix(0,ncol(ZOriginal),ptot),diag(ncol(ZOriginal)))
TTOriginal<-rbind(TTOriginal1,TTOriginal2)
###################
# Cumulative random-effect dimensions, starting values, the augmented
# (decorrelated) design TT = [X ZZDesign; 0 I], and the pseudo-response
# PsiM for the random-effect part of the augmented model.
qcum<-cumsum(c(0,q))
qtot<-qcum[length(qcum)]
if (is.null(Vstart)) Vstart<-rep(0,sum(q))
# index of correlated random effects
V<-list(0)
U<-list(0)
## print(nrow(ZZDesign))
## print(ncol(ZZDesign))
## print(nrow(X))
TT<-cbind(X,ZZDesign)
PsiM<-rep(0,sum(q))
# Correlated effects first: append identity rows and set PsiM to the
# pseudo-response of each random-effect distribution (0 Normal, 1 Gamma /
# IGamma, 0.5 Beta) #
if (nrandcor>0){
for (i in 1:nrandcor){
TT<-rbind(TT,cbind(matrix(0,q[i],ptot+qcum[i]),diag(q[i]),matrix(0,q[i],qcum[nrandcor+nrandind+1]-qcum[i+1])))
V[[i]]<-as.matrix(Vstart[(qcum[i]+1):(qcum[i+1])])
if (i==1) VT<-V[[1]]
else VT<-c(VT,list(V[[i]]))
if (RandDistCorr[i]=="Normal") PsiM[(qcum[i]+1):qcum[i+1]]<-0
if (RandDistCorr[i]=="Gamma") PsiM[(qcum[i]+1):qcum[i+1]]<-1
if (RandDistCorr[i]=="IGamma") PsiM[(qcum[i]+1):qcum[i+1]]<-1
if (RandDistCorr[i]=="Beta") PsiM[(qcum[i]+1):qcum[i+1]]<-0.5
}
}
# Then the independent effects, offset by nrandcor in all indices #
if (nrandind>0){
for (i in 1:nrandind){
TT<-rbind(TT,cbind(matrix(0,q[i+nrandcor],ptot+qcum[i+nrandcor]),diag(q[i+nrandcor]),matrix(0,q[i+nrandcor],qcum[nrandind+nrandcor+1]-qcum[nrandcor+i+1])))
V[[i+nrandcor]]<-as.matrix(Vstart[(qcum[i+nrandcor]+1):(qcum[i+nrandcor+1])])
if ((i+nrandcor)==1) VT<-V[[i]]
else VT<-c(VT,list(V[[i+nrandcor]]))
if (RandDistIndep[i]=="Normal") PsiM[(qcum[i+nrandcor]+1):qcum[i+nrandcor+1]]<-0
if (RandDistIndep[i]=="Gamma") PsiM[(qcum[i+nrandcor]+1):qcum[i+nrandcor+1]]<-1
if (RandDistIndep[i]=="IGamma") PsiM[(qcum[i+nrandcor]+1):qcum[i+nrandcor+1]]<-1
if (RandDistIndep[i]=="Beta") PsiM[(qcum[i+nrandcor]+1):qcum[i+nrandcor+1]]<-0.5
}
}
# OFFSET and Binomial denominators management - i am not sure how to do it - keep it for later while we develop the code #
# The offset needs to be done for the whole vector corresponding to Y - such as all models are considered Poisson or Binomial -begrijp je? #
# Extract the random-effects columns of the augmented design for the data rows #
Z<-TT[(1:sum(ModelsDims)),((ptot+1):ncol(TT))]
B<-0
# Binomial denominators / offsets per observation; default to 1 #
if (is.null(OFFSETList)) B<-rep(1,nrow(Y))
else {
for (i in 1:nModels){
# A scalar 0 entry means "no offset" for that sub-model #
if (length(OFFSETList[[i]])==1 & OFFSETList[[i]]==0) B[(cModelsDims[i]+1):(cModelsDims[i+1])]<-1
else B[(cModelsDims[i]+1):(cModelsDims[i+1])]<-OFFSETList[[i]]
}
}
# Starting values for the random-effect dispersion parameters #
if (nrandcor>0) DRgamma<-rep(0,ncol(DDRCorr))
else DRgamma<-NULL
if (nrandind>0) DRgamma<-c(DRgamma,DRgammaIndep)
# Counters for the main estimation loop that follows #
Iteration<-0
Convergence<-100
while (Convergence>CONV){
# for (iii in 1:1) {
Iteration<-Iteration+1
# if (Info) cat("\n Iteration: ",Iteration," Convergence: ",Convergence,"\n")
###############################################
# PLAN: #
# Update Mean Structure #
# Laplace approximation to the mean structure #
# Variances of random components #
# Overdispersions #
# Correlations #
###############################################
# Snapshot of the current mean-structure parameters (fixed effects followed by
# all random effects); compared against the updated values below to measure
# convergence of this iteration.
MeanParmsLast<-c(Beta,unlist(VT))
# Lambda of correlated random effects is equal to one - these after cholesky transformation are denoted LambdaChol#
if (nrandcor>0) {
DRgamma[1:cumqcorr[length(cumqcorr)]]<-0
}
# Random-effect dispersions via a log-linear model: Lambda = exp(DDR %*% DRgamma).
Lambda<-exp(DDR%*%DRgamma)
# Overdispersion - now for all models overdispersion is coded in one matrix DDY and one vector DYgamma FOR ALL MODELS AT THE SAME TIME #
Phi<-exp(DDY%*%DYgamma)
# Stacked dispersion vector: response overdispersions first, then random-effect
# dispersions — same ordering as the augmented weight vector WTotvec below.
GammaMvec<-c(Phi,Lambda)
# Mean values #
# Linear predictor for the data rows (first ntot rows) of the augmented design TT.
eta<-TT[1:ntot,]%*%as.matrix(c(Beta,unlist(VT)))
# Here different link is applied to different model #
mu<-0
Wvec<-0
dWdmu<-0
dmudeta<-0
d2Wdmu2<-0
# Per-model inverse link, IWLS weights and weight derivatives, filled block-wise
# into flat vectors indexed by the cumulative model dimensions cModelsDims.
for (i in 1:nModels){
mu[(cModelsDims[i]+1):cModelsDims[i+1]]<-B[(cModelsDims[i]+1):cModelsDims[i+1]]*InvLinkY(eta[(cModelsDims[i]+1):cModelsDims[i+1]],LinkList[[i]])
Wvec[(cModelsDims[i]+1):cModelsDims[i+1]]<-Wmatgen(mu[(cModelsDims[i]+1):cModelsDims[i+1]],B[(cModelsDims[i]+1):cModelsDims[i+1]],LinkList[[i]],RespDist[i])
dWdmu[(cModelsDims[i]+1):cModelsDims[i+1]]<-dWdmugen(mu[(cModelsDims[i]+1):cModelsDims[i+1]],B[(cModelsDims[i]+1):cModelsDims[i+1]],LinkList[[i]],RespDist[i])
d2Wdmu2[(cModelsDims[i]+1):cModelsDims[i+1]]<-d2Wdmu2gen(mu[(cModelsDims[i]+1):cModelsDims[i+1]],B[(cModelsDims[i]+1):cModelsDims[i+1]],LinkList[[i]],RespDist[i])
dmudeta[(cModelsDims[i]+1):cModelsDims[i+1]]<-dmudetagen(mu[(cModelsDims[i]+1):cModelsDims[i+1]],B[(cModelsDims[i]+1):cModelsDims[i+1]],LinkList[[i]],RespDist[i])
}
# So far the Link and Dist are equivalent as only canonical links are allowed #
# Transform random effects v -> u via the random-effect link and collect their
# weights, concatenated term-by-term (correlated terms first, then independent).
WR<-list(0)
UT<-0
WRT<-list(0)
if (nrandcor+nrandind>0) {
for (i in 1:(nrandcor+nrandind)) {
U[[i]]<-LinkR(V[[i]],RandDist[i])
if (i==1) UT<-U[[1]]
else UT<-c(UT,U[[i]])
WR[[i]]<-WRVC(U[[i]],RandDist[i])
if (i==1) WRT<-WR[[1]]
else WRT<-c(WRT,WR[[i]])
}
}
# Augmented weight vector (data rows, then random-effect rows) and the inverse
# variance of the augmented working response.
WTotvec<-c(Wvec,WRT)
ISIGMAMvec<-as.vector((1/GammaMvec)*WTotvec)
#dAdmu<-dAdmugen(mu,Link)
#Amat<-Amatgen(mu,Link)
# Adjustment computation for the Laplace Approximation to the mean #
# Now a Laplace Approximation to the mean can be used for one model but not for another therefore LaplaceFixed is a vector #
# We will not make a LAPFIX by model, instead we adjust all at the same time:
# Those models which are LAPFIX=FALSE need to have Design matrix in this part Z equal to zero for random effects corresponding to these models
# The adjustment terms are of length n1+n2+.... so for all the models, if one model is LAPFIX=FALSE then the adjustment terms for those models are evenly
# Redistributed among the rest of the models which are LAPFIX=TRUE
# Laplace-approximation correction term; stays zero unless computed in the
# LaplaceFixed branch below.
Scorr<-rep(0,sum(ModelsDims))
################################################
# Consider the revision of the procedure below #
# By adding the factor random effects #
################################################
# There are two options : one is that there are too many adjustments than the ys - so they have to be redistributed
# : two is that rows are deleted and there are as many adjustments as the ys but instead all the dimension change
# : total matrices must be used in computation of the derivitive vhatbeta
# PART OF CREATING DESIGN MATRICES FOR LAPFIX CAN BE MOVED OUT OF THE ITERATIONS AS THE SAME MATRICES WILL BE USED - BUT ONLY CORRELATION CHANGES THE
# CORRELATED PART - THINK HOW TO DO IT
CorrTerms<-0
if (any(LaplaceFixed)==TRUE){
# Exclude from the Z matrix the columns which are not integrated out - as this model is not LAPFIX=TRUE #
# We need an index to denote which random effects are integrated out #
ZLapFix<-Z
VLapFix<-unlist(VT)
qLapFix<-q
ISIGMAMvecLapFix<-ISIGMAMvec
WvecLapFix<-Wvec
dWdmuLapFix<-dWdmu
dmudetaLapFix<-dmudeta
integratedModels<-seq(1,nModels)
ModelsDimsLapFix<-ModelsDims
if (nrandcor>0) {
for (i in 1:nrandcor){
if (LaplaceFixed[corrModel[i]]==FALSE) {
ZLapFix[,(qcum[i]+1):(qcum[i+1])]<-0
VLapFix[(qcum[i]+1):(qcum[i+1])]<-NA
qLapFix[i]<-NA
ISIGMAMvecLapFix[(ntot+qcum[i]+1):(ntot+qcum[i+1])]<-NA
}
}
}
if (nrandind>0) {
for (i in 1:nrandind){
if (LaplaceFixed[indepModel[i]]==FALSE) {
ZLapFix[,(qcum[i+nrandcor]+1):(qcum[i+nrandcor+1])]<-0
VLapFix[(qcum[i+nrandcor]+1):(qcum[i+nrandcor+1])]<-NA
qLapFix[i+nrandcor]<-NA
ISIGMAMvecLapFix[(ntot+qcum[i+nrandcor]+1):(ntot+qcum[i+nrandcor+1])]<-NA
}
}
}
nrandint<-0
integratedIndex<-NA
if (nrandcor>0) {
for (i in 1:nrandcor){
if (LaplaceFixed[corrModel[i]]==TRUE) {
nrandint<-nrandint+1
if (is.na(integratedIndex[1])) integratedIndex<-i
else integratedIndex<-c(integratedIndex,i)
}
}
}
if (nrandind>0) {
for (i in 1:nrandind){
if (LaplaceFixed[indepModel[i]]==TRUE) {
nrandint<-nrandint+1
if (is.na(integratedIndex[1])) integratedIndex<-i
else integratedIndex<-c(integratedIndex,i+nrandcor) # integratedIndex contains the number of random effect which is integrated out its position in original design matrix#
}
}
}
# Remove all the columns which are of zero - but question should we keep track which columns are removed ? #
ZLapFix<-ZLapFix[,(apply(ZLapFix,2,sum)!=0)]
VLapFix<-VLapFix[!is.na(VLapFix)]
qLapFix<-qLapFix[!is.na(qLapFix)]
qcumLapFix<-cumsum(c(0,qLapFix))
# Remove Rows if are all zeros - and create an index which rows are removed - note that we remove the rows for Y response together with #
rowLapFix<-rep(1,nModels) # This index will determine which rows are deleted for which model if index is set to zero it means that the rows for that model
# are deleted
obsLapFix<-rep(1,ntot) # This vector will select which adjustments are assigned to ys and which need to be redistributed at the moment i am doing it, it is
# still unclear how it will work
for (i in 1:nModels) {
if (all(apply(ZLapFix[(cModelsDims[i]+1):cModelsDims[i+1],],1,sum)==0)) {
integratedModels[i]<-NA
rowLapFix[i]<-0
ISIGMAMvecLapFix[(cModelsDims[i]+1):cModelsDims[i+1]]<-NA # This removes the main effects - we still need to remove the random effects matrix #
WvecLapFix[(cModelsDims[i]+1):cModelsDims[i+1]]<-NA
dWdmuLapFix[(cModelsDims[i]+1):cModelsDims[i+1]]<-NA
dmudetaLapFix[(cModelsDims[i]+1):cModelsDims[i+1]]<-NA
ModelsDimsLapFix[i]<-NA
if (nrandcor>0){
for (j in 1:nrandcor){
if (corrModel[j]==i) ISIGMAMvecLapFix[(ntot+qcum[j]+1):(ntot+qcum[j+1])]<-NA
}
}
if (nrandind>0){
for (j in 1:nrandind){
if (indepModel[j]==i) ISIGMAMvecLapFix[(ntot+qcum[j+nrandcor]+1):(ntot+qcum[j+nrandcor+1])]<-NA
}
}
}
if (LaplaceFixed[i]==FALSE) obsLapFix[(cModelsDims[i]+1):cModelsDims[i+1]]<-0
}
ZLapFix<-ZLapFix[apply(ZLapFix,1,sum)!=0,]
ISIGMAMvecLapFix<-ISIGMAMvecLapFix[!is.na(ISIGMAMvecLapFix)]
WvecLapFix<-WvecLapFix[!is.na(WvecLapFix)]
dWdmuLapFix<-dWdmuLapFix[!is.na(dWdmuLapFix)]
dmudetaLapFix<-dmudetaLapFix[!is.na(dmudetaLapFix)]
ntotLapFix<-length(WvecLapFix)
integratedModels<-integratedModels[!is.na(integratedModels)]
ModelsDimsLapFix<-ModelsDimsLapFix[!is.na(ModelsDimsLapFix)]
cModelsDimsLapFix<-cumsum(c(0,ModelsDimsLapFix))
nintMod<-length(integratedModels)
# We need to separate the design matrix for the derivative with respect to beta with the design which is used in the trace part #
TT2<-TT[,(ptot+1):(ptot+sum(q))]
TT2LapFix<-rbind(ZLapFix,diag(ncol(ZLapFix)))
# The function Solver Short needs to be reprogrammed - but how can we invert a matrix if there is a zero row or zero column #
# One thing is to use a generalized inverse, however other thing is to - do we need zeros in the inverse function #
# This is for the derivative #
OUT1<-SolverShort(ISIGMAMvec,Z)
OUTLapFix<-SolverShort(ISIGMAMvecLapFix,ZLapFix) # This needs to be adjusted to accomodate the zero rows, than the diagonal matrix is reduced #
# A new argument Z is added.as the design matrix in this part might be different than the general Z
#print("DimTT2");print(dim(TT2));print("Dim2");print(length(rep(ISIGMAMvec,each=nrow(t(TT2)))))
INV1<-OUT1$Inverse # These are used for derivative of random effects #
DiagPP2<-OUT1$DiagPP2
INV1LapFix<-OUTLapFix$Inverse # These are used for the correction factor determinant over integrated random effects #
DiagPP2LapFix<-OUTLapFix$DiagPP2 # This will not work because ntot and dimensions are given globally we need to change it !#
rm(OUT1)
rm(OUTLapFix)
MOD<-INV1%*%(t(Z)*rep((1/Phi),each=ncol(Z))) # This is for random effects derivative
ADJDER1<-list(0)
ADJDER2<-list(0)
# How to modify this? #
# Now we need to iterate over the random effects which are integrated out #
for (i in 1:nrandint){
ADJDER1[[i]]<--ZLapFix[,(qcumLapFix[i]+1):qcumLapFix[i+1]]%*%(MOD[(qcum[integratedIndex[i]]+1):(qcum[integratedIndex[i]+1]),])#*rep(Wvec,each=nrow(MOD[(qcum[i]+1):(qcum[i+1]),])))
ADJDER2[[i]]<--MOD[(qcum[integratedIndex[i]]+1):(qcum[integratedIndex[i]+1]),]
}
# Computing correction quantities for the Laplace Approximation of fixed effects #
# This here gets difficult ntot and ntotLapFix - how to make sure we do the correct thing #
CorrTermsTemp<-list(0)
CorrTermsTemp[[1]]<-as.vector(DiagPP2LapFix[1:ntotLapFix])*as.vector(1/WvecLapFix)*as.vector(1/WvecLapFix)*(dWdmuLapFix)*dmudetaLapFix
# We need to scale CorrTerms[[1]] onto the whole ntot #
CorrTerms<-list(0)
CorrTerms[[1]]<-rep(0,ntot)
for (i in 1:nintMod){
CorrTerms[[1]][(cModelsDims[integratedModels[i]]+1):cModelsDims[integratedModels[i]+1]]<-CorrTermsTemp[[1]][(cModelsDimsLapFix[i]+1):cModelsDimsLapFix[i+1]]
}
for (i in 1:nrandint){
ADJ1<-rep(0,ntot)
ADJ2<-rep(0,ntot)
ADJ1<-t(ADJDER1[[i]])%*%(as.vector(DiagPP2LapFix[1:ntotLapFix])*as.vector(1/WvecLapFix)*as.vector(dWdmuLapFix)*as.vector(dmudetaLapFix)) # Check this one
if (RandDist[integratedIndex[i]]=="Gamma") ADJ2<-t(ADJDER2[[i]])%*%as.vector(DiagPP2LapFix[(ntotLapFix+qcumLapFix[i]+1):(ntotLapFix+qcumLapFix[1+i])])
if (RandDist[integratedIndex[i]]=="IGamma") ADJ2<-t(ADJDER2[[i]])%*%(as.vector(DiagPP2LapFix[(ntotLapFix+qcumLapFix[i]+1):(ntotLapFix+qcumLapFix[1+i])])*as.vector(2*U[[integratedIndex[i]]]))
if (RandDist[integratedIndex[i]]=="Beta") ADJ2<-t(ADJDER2[[i]])%*%(as.vector(DiagPP2LapFix[(ntotLapFix+qcumLapFix[i]+1):(ntotLapFix+qcumLapFix[1+i])])*as.vector(1-2*U[[integratedIndex[i]]]))
CorrTerms<-c(CorrTerms,list(ADJ1,ADJ2))
}
# Here we need to have in mind two scenarions, when there is cholesky correlation therefore design over n1+n2 but integrated only the one model #
# And scenario two when only one model is integrated but there are no cholesky correlation #
CorrTermsLength<-length(CorrTerms)
CorrTerms<-as.matrix(unlist(CorrTerms))
dim(CorrTerms)<-c(ntot,CorrTermsLength)
Scorr<-0.5*Phi*dmudeta*apply(CorrTerms,1,sum)
# Now we need to take out the corrections for which ys are not adjusted and split them evenly over those who are #
}
# IWLS working response for the data rows: response corrected by the Laplace
# adjustment Scorr, linearized around the current linear predictor.
Ystar<-Y-Scorr
zmain<-eta+(Ystar-mu)/dmudeta
# Pseudo-response for the random-effect rows, with the same correction
# propagated through the random-effects design Z.
PsiMstar<-PsiM+(Lambda*crossprod(Z,as.matrix(Wvec*Scorr*(1/Phi)/dmudeta)))
zrand<-list(0)
# Working responses for the correlated random-effect terms (first qcum blocks)...
if (nrandcor>0){
for (i in 1:nrandcor){
zrand[[i]]<-V[[i]]+(PsiMstar[(qcum[i]+1):qcum[i+1]]-U[[i]])/WR[[i]]
}
}
# ...and for the independent random-effect terms (remaining blocks).
if (nrandind>0){
for (i in 1:nrandind){
zrand[[i+nrandcor]]<-V[[i+nrandcor]]+(PsiMstar[(qcum[i+nrandcor]+1):qcum[i+nrandcor+1]]-U[[i+nrandcor]])/WR[[i+nrandcor]]
}
}
zrand<-as.matrix(unlist(zrand))
# Full augmented working response: data rows followed by random-effect rows.
zTot<-as.matrix(c(zmain,zrand))
# Updating Equations #
MeanParmsLast<-c(Beta,unlist(VT))
# Solve the augmented weighted least-squares system. DiagPMAT holds diagonal
# elements of the resulting projection matrix — used below as leverage-type
# corrections (1 - q) for the dispersion estimates.
OUT1<-SolverLong(ISIGMAMvec,zTot)
MeanParms<-OUT1$NewParms
DiagPMAT<-OUT1$DiagPMAT
rm(OUT1)
if (exists("INV1")) rm(INV1)
#print("Block 2");print(CPTISIGMAM);#print("EISIGMAMvec");#print(EISIGMAMvec);print("zTot");print(zTot)
# Split the solution back into fixed effects and per-term random-effect blocks.
Beta<-MeanParms[1:ptot]
if (nrandcor>0){
for (i in 1:nrandcor){
V[[i]]<-MeanParms[(ptot+qcum[i]+1):(ptot+qcum[i+1])]
if (i==1) VT<-V[[i]]
else VT<-c(VT,V[[i]])
}
}
# Extract the updated independent random effects from the solved parameter
# vector and append them to VT.
# BUG FIX: when correlated random effects are also present (nrandcor>0), VT has
# already been filled by the preceding correlated-effects loop; reinitializing
# it at i==1 silently dropped those components, making c(Beta,unlist(VT)) too
# short for the augmented design TT. Only start a fresh VT when there are no
# correlated random effects.
if (nrandind>0){
for (i in 1:nrandind){
V[[i+nrandcor]]<-MeanParms[(ptot+qcum[i+nrandcor]+1):(ptot+qcum[i+nrandcor+1])]
if (i==1 && nrandcor==0) VT<-V[[i+nrandcor]]
else VT<-c(VT,V[[i+nrandcor]])
}
}
# L1 change in the mean-structure parameters over this iteration; further
# dispersion-parameter changes are accumulated into Convergence below.
Convergence<-sum(abs(MeanParms-MeanParmsLast))
## if (DEBUG==TRUE) print("Convergence Mean");print(Convergence)
# Now we move to the estimation of dispersion and overdispersion given the correlation parameters rho based on transformed multivariate random effects to the
# independence scenario - so we continue the procedure, first dispersion for each random effect, then overdispersion for each model
# After that we reestimate the correlation and construct updated variance covariance matrix this ends the joint model
###############################
##### Variance Components #####
###############################
if (EstimateVariances==TRUE) {
# Reevaluation of mean and u #
eta<-TT[1:ntot,]%*%as.matrix(c(Beta,unlist(VT)))
# Here different link is applied to different model #
for (i in 1:nModels){
mu[(cModelsDims[i]+1):cModelsDims[i+1]]<-B[(cModelsDims[i]+1):cModelsDims[i+1]]*InvLinkY(eta[(cModelsDims[i]+1):cModelsDims[i+1]],LinkList[[i]])
Wvec[(cModelsDims[i]+1):cModelsDims[i+1]]<-Wmatgen(mu[(cModelsDims[i]+1):cModelsDims[i+1]],B[(cModelsDims[i]+1):cModelsDims[i+1]],LinkList[[i]],RespDist[i])
dWdmu[(cModelsDims[i]+1):cModelsDims[i+1]]<-dWdmugen(mu[(cModelsDims[i]+1):cModelsDims[i+1]],B[(cModelsDims[i]+1):cModelsDims[i+1]],LinkList[[i]],RespDist[i])
dmudeta[(cModelsDims[i]+1):cModelsDims[i+1]]<-dmudetagen(mu[(cModelsDims[i]+1):cModelsDims[i+1]],B[(cModelsDims[i]+1):cModelsDims[i+1]],LinkList[[i]],RespDist[i])
}
if (DEBUG==TRUE) {
muout <- mu
Wvecout <- Wvec
dWdmuout <- dWdmu
dmudetaout <- dmudeta
}
# So far the Link and Dist are equivalent as only canonical links are allowed #
WR<-list(0)
UT<-0
WRT<-0
dWRdu<-0
if (nrandcor+nrandind>0) {
for (i in 1:(nrandcor+nrandind)) {
U[[i]]<-LinkR(V[[i]],RandDist[i])
if (i==1) UT<-U[[1]]
else UT<-c(UT,U[[i]])
WR[[i]]<-WRVC(U[[i]],RandDist[i])
if (i==1) WRT<-WR[[1]]
else WRT<-c(WRT,WR[[i]])
if (i==1) dWRdu<-DWRDU(U[[i]],RandDist[i])
else dWRdu<-c(dWRdu,DWRDU(U[[i]],RandDist[i]))
}
}
WTotvec<-c(Wvec,WRT)
ISIGMAMvec<-as.vector((1/GammaMvec)*WTotvec)
#Computing Deviances : this must be adjusted for correlated and independent random effects #
DevianceRand<-list(0)
if (nrandcor>0) {
for (i in 1:nrandcor){
DevianceRand[[i]]<-DevRand(V[[i]],U[[i]],RandDist[i])
}
}
if (nrandind>0) {
for (i in 1:nrandind){
DevianceRand[[i+nrandcor]]<-DevRand(V[[i+nrandcor]],U[[i+nrandcor]],RandDist[i+nrandcor])
}
}
# Truncated Computations #
#MTheta<-1-exp(-mu)
#M1Theta<-exp(-mu)*mu
#M2Theta<-exp(-mu)*mu*(1-mu)
#M3Theta<-M2Theta*(1-mu)-mu*M1Theta
#WTildevec<-as.vector(Wvec+((M2Theta/MTheta)-(M1Theta/MTheta)^2))
DVhatDlambda<-list(0)
# ERROR - This part needs to be adjusted !!!!!!! #
# OLD CODE FOR DERIVATIVE #
if (nrandcor>0) {
for (i in 1:nrandcor){
DVhatDlambda[[i]]<--solve((t(Z[,(qcum[i]+1):qcum[i+1]])*rep(Wvec/Phi,each=ncol(Z[,(qcum[i]+1):qcum[i+1]])))%*%Z[,(qcum[i]+1):qcum[i+1]]+diag(WR[[i]]/Lambda[(1+qcum[i]):qcum[i+1]]))%*%(PsiM[(qcum[i]+1):(qcum[i+1])]-U[[i]])/(Lambda[(1+qcum[i]):qcum[i+1]]^2)
if (i==1) derold<-DVhatDlambda[[i]]
}
}
if (nrandind>0) {
for (i in 1:nrandind){
DVhatDlambda[[i+nrandcor]]<--solve((t(Z[,(qcum[i+nrandcor]+1):qcum[i+nrandcor+1]])*rep(Wvec/Phi,each=ncol(Z[,(qcum[i+nrandcor]+1):qcum[i+nrandcor+1]])))%*%Z[,(qcum[i+nrandcor]+1):qcum[i+nrandcor+1]]+diag(WR[[i+nrandcor]]/Lambda[(1+qcum[i+nrandcor]):qcum[i+nrandcor+1]]))%*%(PsiM[(qcum[i+nrandcor]+1):(qcum[i+nrandcor+1])]-U[[i+nrandcor]])/(Lambda[(1+qcum[i+nrandcor]):qcum[i+nrandcor+1]]^2)
}
}
# New code for derivative #
# We need to create the lambda vector #
#if (nrandcor>0) {
# for (i in 1:nrandcor) {
# LambdaTemp<-rep(0,length(Lambda))
# LambdaTemp[(qcum[i]+1):qcum[i+1]]<-1
# DVhatDlambda[[i]]<--solve(t(Z*as.vector(Wvec/Phi))%*%Z+diag(as.vector(WRT/Lambda)))%*%(((PsiM-UT)/(Lambda^2))*LambdaTemp)
# if (i==1) dernew<-DVhatDlambda[[i]]
# }
#}
#if (nrandind>0) {
# for (i in 1:nrandind) {
# LambdaTemp<-rep(0,length(Lambda))
# LambdaTemp[(qcum[i+nrandcor]+1):qcum[i+nrandcor+1]]<-Lambda[(qcum[i+nrandcor]+1):qcum[i+nrandcor+1]]
# DVhatDlambda[[i+nrandcor]]<--solve(t(Z*as.vector(Wvec/Phi))%*%Z+diag(as.vector(WRT/Lambda)))%*%(((PsiM-UT)/(Lambda^2))*LambdaTemp)
# }
#}
#DWTildeDthetavec<-as.vector(WTildevec-(M2Theta/MTheta)+(M3Theta/MTheta)-3*((M1Theta*M2Theta)/(MTheta^2))+((M1Theta^2)/(MTheta^2))+2*((M1Theta^3)/(MTheta^3)))
#DWTildeDmuvec<-as.vector(DWTildeDthetavec/mu)
qmod<-list(0)
qCur<-list(0)
qrr<-list(0)
if (nrandcor>0) {
for (i in 1:nrandcor){
SSCur<-rep(0,ntot+sum(q))
SSCur[1:ntot]<-SSC[[i]]
SSCur[(ntot+1+qcum[i]):(ntot+qcum[i+1])]<-(1:(qcum[i+1]-qcum[i]))
DlogWDloglambda<-matrix(0,ntot+sum(q),ntot+sum(q))
##### This here can be replaced the diagonal matrix dlogwdloglambda #####
DlogWDloglambda[1:ntot,1:ntot]<-diag(as.vector(1/Wvec)*as.vector(dWdmu)*as.vector(dmudeta)*(as.vector(Z[,(1+qcum[i]):qcum[i+1]]%*%(DVhatDlambda[[i]]*Lambda[(1+qcum[i]):qcum[i+1]])))) # RRRRR
DlogWDloglambda[(ntot+1+qcum[i]):(ntot+qcum[i+1]),(ntot+1+qcum[i]):(ntot+qcum[i+1])]<-diag(DWRDU(U[[i]],RandDist[i])*(as.vector(DVhatDlambda[[i]])*as.vector(Lambda[(1+qcum[i]):qcum[i+1]]))) # RRRRR
qmod[[i]]<-DiagPMAT*diag(DlogWDloglambda)
qCur[[i]]<-cbind(qmod[[i]],SSCur)
qCur[[i]]<-tapply(qCur[[i]][,1],qCur[[i]][,2],sum)
qCur[[i]]<-qCur[[i]][row.names(qCur[[i]])!="0"]
qrr[[i]]<-DiagPMAT[(ntot+1+qcum[i]):(ntot+qcum[i+1])]
## if (DEBUG) {print("Basis qrr");print(qrr);}
qrr[[i]]<-qrr[[i]]-qCur[[i]]
# Correction to estimate the true likelihood instead of EQL #
if (RandDist[i]=="Gamma") qrr[[i]]<-qrr[[i]]+1+2*(log(Lambda[(1+qcum[i]):(qcum[i+1])])/Lambda[(1+qcum[i]):(qcum[i+1])])+2*(digamma(1/Lambda[(1+qcum[i]):(qcum[i+1])])/Lambda[(1+qcum[i]):(qcum[i+1])])
if (RandDist[i]=="IGamma") qrr[[i]]<-qrr[[i]]+1+(2/Lambda[(1+qcum[i]):(qcum[i+1])])-2*(log(1/Lambda[(1+qcum[i]):(qcum[i+1])])/Lambda[(1+qcum[i]):(qcum[i+1])])-2*((1+Lambda[(1+qcum[i]):(qcum[i+1])])/Lambda[(1+qcum[i]):(qcum[i+1])])+2*(digamma(1+(1/Lambda[(1+qcum[i]):(qcum[i+1])]))/Lambda[(1+qcum[i]):(qcum[i+1])])
if (RandDist[i]=="Beta") qrr[[i]]<-qrr[[i]]+1-2*(digamma(1/Lambda[(1+qcum[i]):(qcum[i+1])])/Lambda[(1+qcum[i]):(qcum[i+1])])+2*(digamma(1/(2*Lambda[(1+qcum[i]):(qcum[i+1])]))/Lambda[(1+qcum[i]):(qcum[i+1])])+log(4)/Lambda[(1+qcum[i]):(qcum[i+1])]
# Applying the correction for the deviances #
DevianceRand[[i]]<-DevianceRand[[i]]/(1-qrr[[i]])
}
}
if (nrandind>0) {
for (i in 1:nrandind){
SSCur<-rep(0,ntot+sum(q))
SSCur[1:ntot]<-SSIndep[[i]]
SSCur[(ntot+1+qcum[i+nrandcor]):(ntot+qcum[i+nrandcor+1])]<-(1:(qcum[i+nrandcor+1]-qcum[i+nrandcor]))
DlogWDloglambda<-matrix(0,ntot+sum(q),ntot+sum(q))
##### This here can be replaced the diagonal matrix dlogwdloglambda #####
DlogWDloglambda[1:ntot,1:ntot]<-diag(as.vector(1/Wvec)*as.vector(dWdmu)*as.vector(dmudeta)*(as.vector(Z[,(qcum[i+nrandcor]+1):qcum[i+nrandcor+1]]%*%(DVhatDlambda[[i+nrandcor]]*Lambda[(1+qcum[i+nrandcor]):qcum[i+nrandcor+1]])))) # RRRRR
DlogWDloglambda[(ntot+1+qcum[i+nrandcor]):(ntot+qcum[i+nrandcor+1]),(ntot+1+qcum[i+nrandcor]):(ntot+qcum[i+nrandcor+1])]<-diag(DWRDU(U[[i+nrandcor]],RandDist[i+nrandcor])*(as.vector(DVhatDlambda[[i+nrandcor]])*as.vector(Lambda[(1+qcum[i+nrandcor]):qcum[i+nrandcor+1]]))) # RRRRR
qmod[[i+nrandcor]]<-DiagPMAT*diag(DlogWDloglambda)
qCur[[i+nrandcor]]<-cbind(qmod[[i+nrandcor]],SSCur)
qCur[[i+nrandcor]]<-tapply(qCur[[i+nrandcor]][,1],qCur[[i+nrandcor]][,2],sum)
qCur[[i+nrandcor]]<-qCur[[i+nrandcor]][row.names(qCur[[i+nrandcor]])!="0"]
qrr[[i+nrandcor]]<-DiagPMAT[(ntot+1+qcum[i+nrandcor]):(ntot+qcum[i+nrandcor+1])]
## if (DEBUG) {print("Basis qrr");print(qrr);}
qrr[[i+nrandcor]]<-qrr[[i+nrandcor]]-qCur[[i+nrandcor]]
# Correction to estimate the true likelihood instead of EQL #
if (RandDist[i+nrandcor]=="Gamma") qrr[[i+nrandcor]]<-qrr[[i+nrandcor]]+1+2*(log(Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])+2*(digamma(1/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])
if (RandDist[i+nrandcor]=="IGamma") qrr[[i+nrandcor]]<-qrr[[i+nrandcor]]+1+(2/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])-2*(log(1/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])-2*((1+Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])+2*(digamma(1+(1/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])]))/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])
if (RandDist[i+nrandcor]=="Beta") qrr[[i+nrandcor]]<-qrr[[i+nrandcor]]+1-2*(digamma(1/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])+2*(digamma(1/(2*Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])]))/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])+log(4)/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])]
# Applying the correction for the deviances #
DevianceRand[[i+nrandcor]]<-DevianceRand[[i+nrandcor]]/(1-qrr[[i+nrandcor]])
}
}
## if (DEBUG) {print("Qrr");print(qrr);}
######################################
##### Gamma model for dispersion #####
######################################
# To speed up we can separate the models for dispersion by component instead of inverting the whole matrix #
# However if there are not so many dispersion components it should not be a problem #
invSigmaGammaR<-list(0)
if (nrandcor>0){
for (i in 1:nrandcor){
invSigmaGammaR[[i]]<-((1-qrr[[i]])/4)
}
}
if (nrandind>0){
for (i in 1:nrandind){
invSigmaGammaR[[i+nrandcor]]<-((1-qrr[[i+nrandcor]])/4)
}
}
# Gamma GLM (log link) for the random-effect dispersions: response = the
# leverage-corrected deviances, mean = current Lambda, prior weights = (1-q)/4
# packed into invSigmaGammaR above.
muGammaR<-Lambda
oldDRgamma<-DRgamma
ksiR<-DDR%*%DRgamma
DevianceRand<-unlist(DevianceRand)
# Working response of the dispersion GLM (linearization on the log scale).
ZRresp<-ksiR+(DevianceRand-muGammaR)/muGammaR
invSigmaGammaR<-diag(unlist(invSigmaGammaR))
if (nrandcor==0) cumqcorr <- 0
# Coefficients for the independent-effect dispersions, kept to track convergence.
OldIndepgamma <- DRgamma[(cumqcorr[length(cumqcorr)]+1):length(DRgamma)]
# One weighted least-squares step updating all dispersion coefficients jointly.
DRgamma<-solve(crossprod(DDR,invSigmaGammaR)%*%DDR,crossprod(DDR,invSigmaGammaR)%*%ZRresp)
StdErrDRgamma<-sqrt(diag(solve(crossprod(DDR,invSigmaGammaR)%*%DDR)))
NewIndepgamma <-DRgamma[(cumqcorr[length(cumqcorr)]+1):length(DRgamma)]
if (DEBUG==TRUE){
## print("DRgamma");print(DRgamma)
## print("Beta");print(Beta)
## print("VS");print(VT)
}
#if (DEBUG) {print("Phi End");print(Phi)}
# Only the independent-effect part enters the convergence criterion here; the
# correlated part is re-derived from the Cholesky factors further below.
if (nrandind>0) Convergence <- Convergence + sum(abs(OldIndepgamma-NewIndepgamma))
# Maybe the piece of code below should be moved after the correlation is estimated #
####################################################################################################################
##### In this part we redesign the Z matrix - update with new correlation and new variances for the correlated #####
####################################################################################################################
# We need to add the construction of correlation matrix after the correlations have been upadted in front of this piece code #
if (nrandcor>0){
for (i in 1:length(CorrMat)){
currentGamma<-DRgamma[(cumqcorr[i]+1):cumqcorr[i+1]]
DiagVar<-diag(exp(currentGamma))
## if (DEBUG) print("Correlation Matrix");print(DiagVar)
# Apply inverse cholesky #
currentMat<-CholeskyMatrices[[i]]%*%DiagVar%*%t(CholeskyMatrices[[i]])
# Now update with correlations #
currentMat<-diag(currentMat)
currentMat<-sqrt(currentMat)*t(CorrMatOut[[i]]*sqrt(currentMat))
CholeskyMatrices[[i]]<-t(chol(currentMat))
# Now current mat is the the new varcov matrix with the same correlation as before #
ZZCorrTempTotUpd<-ZZShort[[i]]%*%CholeskyMatrices[[i]]
currentindex<-seq(cumqcorr[i]+1,cumqcorr[i+1])
for (j in currentindex) {
DiagDesign[[j]]<-model.matrix(~SSC[[j]]-1)
DiagDesign[[j]]<-DiagDesign[[j]]*ZZCorrTempTotUpd[,(1+j-currentindex[1])]
if (j==currentindex[1]) ZZCorrTotUpd<-DiagDesign[[j]]
else ZZCorrTotUpd<-cbind(ZZCorrTotUpd,DiagDesign[[j]])
}
ZZCorrUpd[[i]]<-ZZCorrTotUpd
if (i==1) ZZCorrDesign<-ZZCorrUpd[[1]]
else ZZCorrDesign<-cbind(ZZCorrDesign,ZZCorrUpd[[i]])
}
}
if (nrandind>0) {
ZZIndepTemp<-list(0)
for (i in 1:nrandind){
if (indepModel[i]==length(ModelsDims)) rowsdown<-0
else rowsdown<-sum(ModelsDims[(indepModel[i]+1):length(ModelsDims)])
if (indepModel[i]==1) rowsup<-0
else rowsup<-sum(ModelsDims[1:(indepModel[i]-1)])
# First expand up if possible #
ZZIndepTemp[[i]]<-FillZeros(ZZIndep[[i]],nr=(nrow(ZZIndep[[i]])+rowsup),nc=ncol(ZZIndep[[i]]),up=TRUE)
ZZIndepTemp[[i]]<-FillZeros(ZZIndepTemp[[i]],nr=(nrow(ZZIndepTemp[[i]])+rowsdown),nc=ncol(ZZIndepTemp[[i]]))
if (!exists("ZZCorrDesign") & i==1) ZZDesign<-ZZIndepTemp[[i]]
if (exists("ZZCorrDesign") & i==1) ZZDesign<-cbind(ZZCorrDesign,ZZIndepTemp[[1]])
if (i>1) ZZDesign<-cbind(ZZDesign,ZZIndepTemp[[i]])
}
}
else if(nrandcor>0) ZZDesign<-ZZCorrDesign
TT<-cbind(X,ZZDesign)
if (nrandcor>0){
for (i in 1:nrandcor){
TT<-rbind(TT,cbind(matrix(0,q[i],ptot+qcum[i]),diag(q[i]),matrix(0,q[i],qcum[nrandcor+nrandind+1]-qcum[i+1])))
}
}
if (nrandind>0){
for (i in 1:nrandind){
TT<-rbind(TT,cbind(matrix(0,q[i+nrandcor],ptot+qcum[i+nrandcor]),diag(q[i+nrandcor]),matrix(0,q[i+nrandcor],qcum[nrandind+nrandcor+1]-qcum[nrandcor+i+1])))
}
}
Z<-TT[(1:sum(ModelsDims)),((ptot+1):ncol(TT))]
# End of the respecification of the random-effects design matrix in the part corresponding to the correlated effects #
#Convergence<-Convergence+sum(abs(DRgamma-oldDRgamma))
}
##########################
##### Overdispersion #####
##########################
# now work on overdispersion #
# we need to select the parameters which are supposed to be EstimateOverDisp=TRUE not all models have estimated overdispersion true #
if (any(EstimateOverDisp)){
# First select the matrix which will be used for the estimation #
for (i in 1:nModels){
if (EstimateOverDisp[i]==TRUE) {
if (i==1) indexODEstim<-seq(cModelsDims[i]+1,cModelsDims[i+1])
else indexODEstim<-c(indexODEstim,seq(cModelsDims[i]+1,cModelsDims[i+1]))
ntotODEstim<-length(indexODEstim)
}
}
#PMAT<-TT%*%solve(CPTISIGMAM%*%TT)%*%CPTISIGMAM # This stays the same - just a selection need to be done of the diagonals corresponding to the models which are #
# EstimOverDisp = TRUE #
qrrO<-rep(0,ntotODEstim)
# Applying the correction for the deviances #
DevianceResp<-rep(0,ntotODEstim)
DiagPMATODEstim<-DiagPMAT
for (i in 1:nModels){
if (EstimateOverDisp[i]==TRUE){
DevianceRespTemp<-rep(0,cModelsDims[i+1]-cModelsDims[i])
YTemp<-YList[[i]]
BTemp<-B[(cModelsDims[i]+1):cModelsDims[i+1]]
muTemp<-mu[(cModelsDims[i]+1):cModelsDims[i+1]]
PhiTemp<-Phi[(cModelsDims[i]+1):cModelsDims[i+1]]
if (RespDist[i]=="Binomial") {
DevianceRespTemp[YTemp!=0 & YTemp!=BTemp]<-2*(YTemp[YTemp!=0 & YTemp!=BTemp]*log(YTemp[YTemp!=0 & YTemp!=BTemp]/muTemp[YTemp!=0 & YTemp!=BTemp])-(YTemp[YTemp!=0 & YTemp!=BTemp]-BTemp[YTemp!=0 & YTemp!=BTemp])*log((BTemp[YTemp!=0 & YTemp!=BTemp]-YTemp[YTemp!=0 & YTemp!=BTemp])/(BTemp[YTemp!=0 & YTemp!=BTemp]-muTemp[YTemp!=0 & YTemp!=BTemp])))
DevianceRespTemp[YTemp==0]<-2*(BTemp[YTemp==0]*log((BTemp[YTemp==0])/(BTemp[YTemp==0]-muTemp[YTemp==0])))
DevianceRespTemp[YTemp==BTemp]<-2*(YTemp[YTemp==BTemp]*log(YTemp[YTemp==BTemp]/muTemp[YTemp==BTemp]))
}
if (RespDist[i]=="Poisson"){
DevianceRespTemp[YTemp!=0]<-2*(YTemp[YTemp!=0]*log(YTemp[YTemp!=0]/muTemp[YTemp!=0])-(YTemp[YTemp!=0]-muTemp[YTemp!=0]))
DevianceRespTemp[YTemp==0]<-2*muTemp[YTemp==0]
}
if (RespDist[i]=="Normal"){
DevianceRespTemp<-(YTemp-muTemp)^2
}
if (RespDist[i]=="Gamma"){
DevianceRespTemp<-2*(-log(YTemp/muTemp)+(YTemp-muTemp)/muTemp)
DiagPMATODEstim[(cModelsDims[i]+1):cModelsDims[i+1]]<-DiagPMATODEstim[(cModelsDims[i]+1):cModelsDims[i+1]]+1+as.vector(2*log(PhiTemp)/PhiTemp)+as.vector(2*digamma(1/PhiTemp)/PhiTemp)
}
DevianceResp[(cModelsDims[i]+1):cModelsDims[i+1]]<-DevianceRespTemp
}
}
qrrO<-DiagPMAT[indexODEstim]
## if (DEBUG) {print("qrrO");print(qrrO)}
DevianceResp<-DevianceResp/(1-qrrO)
## if (DEBUG) {print("DevResp");print(DevianceResp)}
# Algorithm for Gamma model #
invSigmaGammaO<-((1-qrrO)/4)
muGammaO<-Phi[indexODEstim]
oldDYgamma<-DYgamma
# select DDY which are to estimate #
DDYODEstim<-DDY[indexODEstim,,drop=F]
# select columns which are not zero #
tempind<-apply(matrix(as.logical(DDYODEstim),nrow(DDYODEstim),ncol(DDYODEstim)),2,any)
indexgammaODEstim<-which(tempind==TRUE)
# remove columns with all zeros from the design matrix #
DDYODEstim<-DDYODEstim[,indexgammaODEstim,drop=F]
DYgammaODEstim<-DYgamma[indexgammaODEstim]
ksiO<-DDYODEstim%*%DYgammaODEstim
ZOresp<-ksiO+(DevianceResp-muGammaO)/muGammaO
## if (DEBUG) {print("ZOresp");print(ZOresp)}
invSigmaGammaO<-diag(invSigmaGammaO)
DYgammaODEstim<-solve(crossprod(DDYODEstim,invSigmaGammaO)%*%DDYODEstim,crossprod(DDYODEstim,invSigmaGammaO)%*%ZOresp)
DYgamma[indexgammaODEstim]<-DYgammaODEstim
## if (DEBUG) {print("DYgamma");print(DYgamma)}
Convergence<-Convergence+sum(abs(DYgamma-oldDYgamma))
## if (DEBUG) {print("Convergence overdisp");print(Convergence)}
}
# Re-synchronize DRgamma with the updated Cholesky factors and transform the
# estimated (independence-scale) random effects back to the correlated scale,
# producing VTCorrTot for the correlation-estimation step below.
VTCorr<-list(0)
if (nrandcor>0){
for (i in 1:length(CorrMat)){
currentindex<-seq((cumqcorr[i]+1),cumqcorr[i+1])
# Log-variances implied by the current Cholesky factor of matrix i.
DRgamma[currentindex]<-log(diag(CholeskyMatrices[[i]]%*%t(CholeskyMatrices[[i]])))
DRCorrgammaOld<-DRCorrgamma
DRCorrgamma<-DRgamma[currentindex] #HERE is probably an error if more than one correlation matrix is provided #
## print("DRCorrgamma");print(DRCorrgamma)
Convergence<-Convergence+sum(abs(DRCorrgammaOld-DRCorrgamma))
# Transform the random effects back to the scale of correlated #
VTemp<-unlist(VT)[(qcum[cumqcorr[i]+1]+1):qcum[cumqcorr[i+1]+1]] # Extract empirical bayes corresponding to the correlated effects of CorrMat[[i]]
noraneff<-cumqcorr[i+1]-cumqcorr[i]
VTemp<-matrix(VTemp,length(VTemp)/noraneff,noraneff)
VTCorr[[i]]<-CholeskyMatrices[[i]]%*%t(VTemp)
VTCorr[[i]]<-matrix(t(VTCorr[[i]]),nrow(VTemp)*noraneff,1)
if (i==1) VTCorrTot<-VTCorr[[1]]
else VTCorrTot<-c(VTCorrTot,VTCorr[[i]])
}
# BUG FIX: the independent-effects slice previously started at
# qcum[cumqcorr[length(CorrMat)+1]]+1, i.e. at the start of the LAST correlated
# block, duplicating it. Independent effects start one random-effect block
# later — qcum[cumqcorr[length(CorrMat)+1]+1]+1 — matching the
# qcum[cumqcorr[i]+1]+1 indexing convention used for VTemp above.
if (nrandind>0) VTCorrTot<-c(VTCorrTot,unlist(VT)[(qcum[cumqcorr[length(CorrMat)+1]+1]+1):qcum[length(qcum)]]) # This adds up independent empirical bayes to VTCorrTot
}
# In the estimation of the correlation the original design matrix is used #
################################################################
##### Estimate Correlations - Final Step in the Algorithm #####
################################################################
# How will we approach this ? #
# NOTES NOTES NOTES #
# 1. We need to transform the design to the correlated desging #
# 2. Compute first and second order derivatives #
# 3. Update the correlations #
# 4. Transform back to the independent design #
# It is not that easy all this #
# Iterate over correlations - for each compute the derivative #
# double iterate over correlations for each pair compute hessian #
# Strategy 1 - Use the previous variances values not the updated ones #
# We update the whole matrix of correlations after we have variances and correlations updated #
# Strategy 2 - use already here the updated matrix of variances - WHICH IS BETTER ? #
# We try strategy 1 it seems faster to program but is it faster to work? #
if (!is.null(CorrMat)) {
ncorr<-length(unlist(Correlations))
# How many random effects correlated there are - nrandcor #
ScoreCorr<-rep(0,ncorr)
HessCorr<-matrix(0,ncorr,ncorr)
dvhatdcorr<-list(0)
dSigmadcorr<-list(0) # Derivative of variance covariance matrix with respect to rho #
# Note the derivative of Sigma with respect to two correlations (second order derivative) is equal to zero #
# We need to remember that the change is made not for the correlation but on Fishers z transform -Inf/Inf #
ZF <-list(0) #This is Fishers Z
dCorrdZF<-list(0)
OldCorrelations<-Correlations
# Compute score equations #
# The order in the program is as in the Correlations #
# Iterated over the number of correlations not random effects which are correlated #
# REMARK : It is wrong here in the way we treat correlations - it is not a vector but a list !#
# Fisher z-transform of every correlation parameter and its derivative.
# NOTE: the transform used here is z = 0.5*log((1-r)/(1+r)) = -atanh(r); the
# derivative below (dr/dz = -4*exp(2z)/(1+exp(2z))^2) matches this sign
# convention exactly.
for (i in 1:length(CorrMat)) {
# Compute current Fisher Z #
# if (Correlations[i]==1) ZF[i]<--Inf
# else if (Correlations[i]==-1) ZF[i]<-Inf
# Guard: ZF and dCorrdZF are created as length-1 lists, so grow them
# explicitly when more than one correlation matrix is supplied (otherwise
# ZF[[i]][j]<- would fail with a subscript-out-of-bounds error for i>1).
if (length(ZF) < i) ZF[[i]] <- numeric(0)
if (length(dCorrdZF) < i) dCorrdZF[[i]] <- numeric(0)
for (j in 1:length(Correlations[[i]])){
# Make ZF as a vector not as a list here #
ZF[[i]][j]<-0.5*log((1-Correlations[[i]][j])/(1+Correlations[[i]][j]))
# BUG FIX: the denominator previously indexed ZF[[i]][i] instead of
# ZF[[i]][j], giving a wrong derivative whenever one correlation matrix
# holds more than one correlation parameter.
dCorrdZF[[i]][j] <- - 4 * exp(2*ZF[[i]][j]) /((1+exp(2*ZF[[i]][j]))^2)
}
}
# Compute derivative of Total Sigma matrix with respect to rho #
# Here the creation of CorrMatDer #
# Build dSigma/drho_i for every correlation parameter i as a block-diagonal
# matrix over the correlation matrices: zero everywhere except in the entries
# of the matrix that contains correlation i.
for (i in 1:ncorr){ # This says which derivative
# Which correlation matrix holds parameter i, and its index within that matrix.
CurrentMatrix<-sum(cumqcorr<i)
# BUG FIX (generalization): the previous formula
# i - sum(cumqcorr[cumqcorr<i]) only gives the within-matrix index for up to
# two correlation matrices; i - cumqcorr[CurrentMatrix] is correct for any
# number and is identical for the one- and two-matrix cases. Also hoisted out
# of the j loop (invariant in j).
currentindex<-i-cumqcorr[CurrentMatrix]
for (j in 1:length(CorrMat)){
if (j==CurrentMatrix) {
TempCorrDerMat<-SigmaMat[[j]]
# d sigma_kl / d rho = sigma_kl / rho when rho != 0; at rho == 0 the
# limit is sd_k * sd_l.
if (Correlations[[j]][currentindex]!=0) TempCorrDerMat[CorrMat[[j]]==currentindex]<-SigmaMat[[j]][CorrMat[[j]]==currentindex]/(Correlations[[j]][currentindex])
else {
standardDeviations<-as.vector(sqrt(diag(SigmaMat[[j]])))
standardDeviations<-standardDeviations%*%t(standardDeviations)
TempCorrDerMat[CorrMat[[j]]==currentindex]<-standardDeviations[CorrMat[[j]]==currentindex]
}
TempCorrDerMat[CorrMat[[j]]!=currentindex]<-0
}
else {
# Blocks belonging to other correlation matrices contribute zeros.
TempCorrDerMat<-SigmaMat[[j]]*0
}
# Expand to the per-level scale and append as a diagonal block.
TempCorrDerMat<-TempCorrDerMat%x%diag(lcorr[j])
# BUG FIX: previously the j>1 branch appended TempCorrDerMat[[j]] — a single
# scalar element of the matrix — instead of the whole matrix block.
if (j==1) dSigmadcorr[[i]]<-TempCorrDerMat
else dSigmadcorr[[i]]<-dbind(dSigmadcorr[[i]],TempCorrDerMat)
}
}
# Stack the random-effect link weights and their first/second derivatives over
# all random-effect terms, in the same term order as U and WR.
for (i in 1:length(RandDist)){
if (i==1) { dWRdUTot<-DWRDU(U[[i]],RandDist[i])
d2WRdU2Tot<-D2WRDU2(U[[i]],RandDist[i])
WRTot<-WR[[1]]
UTot<-U[[1]]
}
else { dWRdUTot<-c(dWRdUTot,DWRDU(U[[i]],RandDist[i]))
d2WRdU2Tot<-c(d2WRdU2Tot,D2WRDU2(U[[i]],RandDist[i]))
WRTot<-c(WRTot,WR[[i]])
UTot<-c(UTot,U[[i]])
}
}
# We need to add the zeros for uncorrelated random effects #
# NOTE(review): 'i' here is the stale index left over from the loop above
# (i == length(RandDist)), so the zero padding is applied to only ONE
# dSigmadcorr element; presumably every dSigmadcorr[[1..ncorr]] needs the
# padding when nrandind>0 — TODO confirm against the intended algorithm.
if (nrandind>0) dSigmadcorr[[i]]<-dbind(dSigmadcorr[[i]],diag(qcum[length(qcum)]-qcum[cumqcorr[length(cumqcorr)]])*0)
invSigmaTotComp<-invSigmaTot
# Extend the inverse covariance with the independent-effect block when present.
# NOTE(review): the slice start qcum[cumqcorr[length(CorrMat)+1]]+1 differs from
# the analogous MIDMAT line below (which has an extra inner +1); one of the two
# indexings is off by one random-effect block — verify which is intended.
if ((qcum[cumqcorr[length(CorrMat)+1]+1]+1)-qcum[length(qcum)]<0) invSigmaTotComp<-dbind(invSigmaTot,diag(ISIGMAMvec[(qcum[cumqcorr[length(CorrMat)+1]]+1):qcum[length(qcum)]]))
# Curvature of the (penalized) likelihood in v, used for dvhat/drho below.
invTT2temp<-solve(t(ZOriginal)%*%(ZOriginal*as.vector(Wvec/Phi))+invSigmaTotComp)
# within this iteration nest another iteration where hessian and second derivative of vhat is computed #
# Is this derivative for exponential family or only for normal distribution #
# First-order sensitivity of the random-effect modes to each correlation rho_i.
for (i in 1:ncorr){
dvhatdcorr[[i]]<-as.vector(invTT2temp%*%(invSigmaTotComp%*%(dSigmadcorr[[i]]%*%(invSigmaTotComp%*%VTCorrTot)))) # This seems to be okay
}
VTCorrTot<-as.vector(VTCorrTot)
# DD matrix #
# Augmented information matrix DD = t(T) %*% MIDMAT %*% T on the original
# (correlated-scale) design; its inverse enters the trace terms of the score.
MIDMAT<-dbind(diag(as.vector(Wvec/Phi)),invSigmaTot)
if ((qcum[cumqcorr[length(CorrMat)+1]+1]+1)-qcum[length(qcum)]<0) MIDMAT<-dbind(MIDMAT,diag(ISIGMAMvec[(qcum[cumqcorr[length(CorrMat)+1]+1]+1):qcum[length(qcum)]]))
DD<-t(TTOriginal)%*%MIDMAT%*%TTOriginal
invDD<-solve(DD)
# Computing score and hessian with respect to the correlation parameters #
for (i in 1:ncorr) {
#dDDdrho1<-dbind(matrix(0,ptot,ptot),-invSigmaTotComp%*%dSigmadcorr[[i]]%*%invSigmaTotComp) ##### ERROR / FIXED - This implementation is only for normal random effects #####
# The below version is for GLM distribution #
dDDdrho1 <- dDDdranmat(X,ZOriginal,dWdmu,Wvec,dvhatdlambda=dvhatdcorr[[i]],invSigmaMat=invSigmaTotComp,dSigmadlambda=dSigmadcorr[[i]],WR=WRTot,dWRdu=dWRdUTot)
# The below score is okay but must be adjusted for indepnedent random effects #
ScoreCorr[i]<-(dvhatdcorr[[i]])%*%t(ZOriginal)%*%((Y-mu)*(1/Phi))-0.5*sum(diag(invSigmaTotComp%*%dSigmadcorr[[i]]))-dvhatdcorr[[i]]%*%invSigmaTotComp%*%VTCorrTot+
0.5*VTCorrTot%*%invSigmaTotComp%*%dSigmadcorr[[i]]%*%invSigmaTotComp%*%VTCorrTot-0.5*sum(diag(invDD%*%dDDdrho1)) # Score is okay ! #
#ScoreCorr[i]<--0.5*sum(diag(invSigmaTotComp%*%dSigmadcorr[[i]]))+0.5*VTCorrTot%*%invSigmaTotComp%*%dSigmadcorr[[i]]%*%invSigmaTotComp%*%VTCorrTot-0.5*sum(diag(invDD%*%dDDdrho1))
# Now proceed to the Hessian computation #
for (j in 1:ncorr) {
#dDDdrho2<-dbind(matrix(0,ptot,ptot),-invSigmaTotComp%*%dSigmadcorr[[j]]%*%invSigmaTotComp)
dDDdrho2 <- dDDdranmat(X,ZOriginal,dWdmu,Wvec,dvhatdlambda=dvhatdcorr[[j]],invSigmaMat=invSigmaTotComp,dSigmadlambda=dSigmadcorr[[j]],WR=WRTot,dWRdu=dWRdUTot)
d2vhatdcorr2<--invTT2temp%*%((t(ZOriginal*as.vector(1/Phi)*dWdmu*Wvec*as.vector(ZOriginal%*%dvhatdcorr[[i]]))%*%as.vector(ZOriginal%*%dvhatdcorr[[j]]))+invSigmaTotComp%*%(dSigmadcorr[[j]]%*%(invSigmaTotComp%*%(dSigmadcorr[[i]]%*%(invSigmaTotComp%*%VTCorrTot))))+invSigmaTotComp%*%(dSigmadcorr[[i]]%*%
(invSigmaTotComp%*%(dSigmadcorr[[j]]%*%(invSigmaTotComp%*%VTCorrTot))))-invSigmaTotComp%*%(dSigmadcorr[[j]]%*%(invSigmaTotComp%*%dvhatdcorr[[i]]))-
invSigmaTotComp%*%(dSigmadcorr[[i]]%*%(invSigmaTotComp%*%dvhatdcorr[[j]])))
d2vhatdcorr2<-as.vector(d2vhatdcorr2)
#d2DDdrho2<-dbind(matrix(0,ptot,ptot),invSigmaTotComp%*%dSigmadcorr[[j]]%*%invSigmaTotComp%*%dSigmadcorr[[i]]%*%invSigmaTotComp+invSigmaTotComp%*%dSigmadcorr[[i]]%*%invSigmaTotComp%*%dSigmadcorr[[j]]%*%invSigmaTotComp)
d2DDdrho2 <- d2DDdranmat2(X,ZOriginal,d2Wdmu2,dWdmu,Wvec,dvhatdlambda1=dvhatdcorr[[i]],dvhatdlambda2=dvhatdcorr[[j]],d2vhatdlambda12=d2vhatdcorr2,
invSigmaMat=invSigmaTotComp,dSigmadlambda1=dSigmadcorr[[i]],dSigmadlambda2=dSigmadcorr[[j]],d2Sigmadlambda12=dSigmadcorr[[j]]*0,WR=WRTot,dWRdu=dWRdUTot,d2WRdu2=d2WRdU2Tot)
HessCorr[i,j]<--dvhatdcorr[[i]]%*%t(ZOriginal)%*%(ZOriginal*as.vector(Wvec/Phi))%*%dvhatdcorr[[j]]+d2vhatdcorr2%*%t(ZOriginal*as.vector(1/Phi))%*%(Y-mu)+
0.5*sum(diag(invSigmaTotComp%*%dSigmadcorr[[i]]%*%invSigmaTotComp%*%dSigmadcorr[[j]]))+
dvhatdcorr[[i]]%*%invSigmaTotComp%*%dSigmadcorr[[j]]%*%invSigmaTotComp%*%VTCorrTot-dvhatdcorr[[j]]%*%invSigmaTotComp%*%dvhatdcorr[[i]]-d2vhatdcorr2%*%invSigmaTotComp%*%VTCorrTot+
dvhatdcorr[[j]]%*%invSigmaTotComp%*%dSigmadcorr[[i]]%*%invSigmaTotComp%*%VTCorrTot-
VTCorrTot%*%invSigmaTotComp%*%dSigmadcorr[[j]]%*%invSigmaTotComp%*%dSigmadcorr[[i]]%*%invSigmaTotComp%*%VTCorrTot-
# Adding the trace of the derivative #
0.5*(sum(diag(invDD%*%d2DDdrho2))-sum(diag(invDD%*%dDDdrho1%*%invDD%*%dDDdrho2)))
}
}
# Now Lets create the derivative with respect to Fishers Z #
dCorrdZF<-unlist(dCorrdZF)
ScoreCorrZF<-ScoreCorr*dCorrdZF
HessCorrZF<-matrix(0,ncorr,ncorr)
for (i in 1:ncorr) {
for (j in 1:ncorr) {
HessCorrZF[i,j]<-HessCorr[i,j]*dCorrdZF[i]*dCorrdZF[j]
}
}
HessCorrZF<-(HessCorrZF+t(HessCorrZF))/2
# Now we apply Newton-Raphson updater #
object1<-AdjProfCorrelations(unlist(ZF))
REML<-2*object1[[1]]
## print("REML");print(REML)
if (EstimateCorrelations==TRUE){
ZFOld<-ZF
if (nrow(as.matrix(-HessCorrZF))==1 & ncol(as.matrix(-HessCorrZF))==1) {
if (-HessCorrZF<=0) HessCorrZF<--0.00001
}
else {
## HessCorrZF<--HessCorrZF
## print("HessCorrZF");print(HessCorrZF)
## print("ScoreCorrZF");print(ScoreCorrZF)
## if (det(-as.matrix(HessCorrZF))<=0) HessCorrZF<--nearPD(-as.matrix(HessCorrZF))
}
check<-0
frac<-1
IHessCorrZF<-solve(HessCorrZF)
object2<-AdjProfCorrelations(unlist(ZFOld))
while (check==0) {
TempZFStart<-unlist(ZF)-frac*(IHessCorrZF%*%ScoreCorrZF)
# If condition to check if the likelihood is increasing #
if (any(abs(TempZFStart)>3)) {
check<-0
frac<-frac/2
}
else {
object1<-AdjProfCorrelations(TempZFStart)
REML<-2*object1[[1]]
## print("REML");print(REML)
if (object1[[1]]-object2[[1]]<=0) {
check<-1
for (i in 1:length(CorrMat)){
ZF[[i]]<-TempZFStart[(1+cumqcorrels[i]):cumqcorrels[i+1]]
}
}
else {
check<-0
frac<-frac/2
}
}
}
##### Update Correlations and design matrices #####
M2h<--2*object1[[2]]
M2pvh<--2*object1[[3]]
M2pbvh<-REML
CAIC<-object1[[4]]
for (i in 1:length(CorrMat)){
Correlations[[i]]<-((1-exp(2*ZF[[i]]))/(1+exp(2*ZF[[i]])))
}
}
## if (DEBUG==TRUE) print("Correlations");print(Correlations);
Convergence<-Convergence+sum(abs(unlist(Correlations)-unlist(OldCorrelations)))
M2h<--2*object1[[2]]
M2pvh<--2*object1[[3]]
M2pbvh<-REML
CAIC<-object1[[4]]
# Now make a re-design of the Z matrix taking into account new correlations and new variances #
# We have new correlations - update correlations #
TempCorrMat<-list(0)
for (i in 1:length(CorrMat)){
TempCorrMat[[i]]<-CorrMat[[i]]
for (j in 1:length(Correlations[[i]])){
TempCorrMat[[i]][CorrMat[[i]]==j]<-Correlations[[i]][j]
}
diag(TempCorrMat[[i]])<-1
CorrMatOut[[i]]<-TempCorrMat[[i]]
}
LambdaCorr<-exp(DDRCorr%*%DRCorrgamma)
# now create correlation matrix for all the random effects #
# create index of individual random effects #
SigmaMat<-list(0)
for (i in 1:length(CorrMat)){
LambdaLocal<-rep(0,qcorr[i])
for (j in 1:qcorr[i]){
LambdaLocal[j]<-LambdaCorr[cumindCorrIndex[cumqcorr[i]+j]+1]
}
SigmaMat[[i]]<-sqrt(LambdaLocal)*t(CorrMatOut[[i]]*sqrt(LambdaLocal))
}
# merging the matrices by diagonals #
for (i in 1:length(CorrMat)){
if (i==1) {
SigmaTot<-SigmaMat[[1]]%x%diag(lcorr[1])
invSigmaTot<-solve(SigmaMat[[1]])%x%diag(lcorr[1])
}
else {
SigmaTot<-dbind(SigmaTot,SigmaMat[[i]]%x%diag(lcorr[i]))
invSigmaTot<-dbind(invSigmaTot,solve(SigmaMat[[i]])%x%diag(lcorr[i]))
}
}
# Matrix SigmaTot is the resulting matrix #
# We have to make the random effects indendent via Cholesky decomposition #
# We need a kroneker product of cholesky matrix times the SigmaTot #
# You can do the cholesky on the total sigma matrix - the problem is the dimension is greater so maybe we loose computational efficiency #
# DO cholesky on SigmaMat list #
# We have to make functions to convert the ZZCorr into vectoral design for correlated random effects and back to the diagnoal design according to subject#
# This will make the computationally more efficient things #
# Now we modify the design matrix via cholesky decompositions #
# All these steps need to be reprogramed using matrix package although in the final product there might be not so many zeros sometime #
# REMARK: In what follows maybe not all steps are required to do again - if it takes a long time we can remove some - now i keep it for first attempt #
for (i in 1:length(CorrMat)){
itchol<-t(chol(SigmaMat[[i]])) # This is actually cholesky decomposition instead of inverse, there was before inverse which was wrong
CholeskyMatrices[[i]]<-itchol
currentindex<-seq(cumqcorr[i]+1,cumqcorr[i+1])
lengthcorrModelCur<-length(RespDist)
valuescorrModelCur<-as.numeric(names(table(corrModel)))
ZZCorrTemp<-rep(list(0),lengthcorrModelCur)
for (j in 1:lengthcorrModelCur){
for (k in currentindex) {
if (ZZCorrTemp[[j]][1]==0 & length(ZZCorrTemp[[j]])==1){
if (corrModel[k]==valuescorrModelCur[j]) ZZCorrTemp[[j]]<-ZZCorrVec[[k]]
else ZZCorrTemp[[j]]<-rep(0,ModelsDims[j])
}
else {
if (corrModel[k]==valuescorrModelCur[j]) ZZCorrTemp[[j]]<-cbind(ZZCorrTemp[[j]],ZZCorrVec[[k]])
else ZZCorrTemp[[j]]<-cbind(ZZCorrTemp[[j]],rep(0,ModelsDims[j]))
}
}
}
# Binding it all together #
for (j in 1:length(ZZCorrTemp)){
if (j==1) {
ZZCorrTempTot<-ZZCorrTemp[[j]]
nrowtot<-nrow(ZZCorrTemp[[j]])
}
else {
ZZCorrTempTot<-rbind(ZZCorrTempTot,ZZCorrTemp[[j]])
nrowtot<-c(nrowtot,nrow(ZZCorrTemp[[j]]))
}
}
cnrowtot<-cumsum(c(0,nrowtot))
# Now we use cholesky transform on the design matrix #
ZZCorrTempTotUpd<-ZZCorrTempTot%*%itchol
ZZShort[[i]]<-ZZCorrTempTot
# ZZCorrTempTotUpd is the new design matrix for the joint model from the correlated part #
# This design matrix is in the short form (vector form) - we need to expand it to the diagnoal form #
# Now we need to take into accout to which model we should link the SSC #
# Expansion to diagonal here we need matrix package already #
for (j in currentindex){
DiagDesign[[j]]<-model.matrix(~SSC[[j]]-1)
DiagDesign[[j]]<-DiagDesign[[j]]*ZZCorrTempTotUpd[,(1+j-currentindex[1])]
if (j==currentindex[1]) ZZCorrTotUpd<-DiagDesign[[j]]
else ZZCorrTotUpd<-cbind(ZZCorrTotUpd,DiagDesign[[j]])
#DiagDesign[[j]]<-model.matrix(~SSC[[j]]-1)
}
ZZCorrUpd[[i]]<-ZZCorrTotUpd
if (i==1) ZZCorrDesign<-ZZCorrUpd[[1]]
else ZZCorrDesign<-cbind(ZZCorrDesign,ZZCorrUpd[[i]])
# Fill with zeros the ZZCorrDesign #
ZZCorrDesign<-FillZeros(ZZCorrDesign,nr=sum(ModelsDims),nc=ncol(ZZCorrDesign))
}
q<-sapply(DiagDesign,ncol)
# End of if corrmat not null #
}
if (nrandind>0) {
ZZIndepTemp<-list(0)
for (i in 1:nrandind){
if (indepModel[i]==length(ModelsDims)) rowsdown<-0
else rowsdown<-sum(ModelsDims[(indepModel[i]+1):length(ModelsDims)])
if (indepModel[i]==1) rowsup<-0
else rowsup<-sum(ModelsDims[1:(indepModel[i]-1)])
# First expand up if possible #
ZZIndepTemp[[i]]<-FillZeros(ZZIndep[[i]],nr=(nrow(ZZIndep[[i]])+rowsup),nc=ncol(ZZIndep[[i]]),up=TRUE)
ZZIndepTemp[[i]]<-FillZeros(ZZIndepTemp[[i]],nr=(nrow(ZZIndepTemp[[i]])+rowsdown),nc=ncol(ZZIndepTemp[[i]]))
if (!exists("ZZCorrDesign") & i==1) ZZDesign<-ZZIndepTemp[[i]]
if (exists("ZZCorrDesign") & i==1) ZZDesign<-cbind(ZZCorrDesign,ZZIndepTemp[[1]])
if (i>1) ZZDesign<-cbind(ZZDesign,ZZIndepTemp[[i]])
}
}
else if(nrandcor>0) ZZDesign<-ZZCorrDesign
TT<-cbind(X,ZZDesign)
if (nrandcor>0){
for (i in 1:nrandcor){
TT<-rbind(TT,cbind(matrix(0,q[i],ptot+qcum[i]),diag(q[i]),matrix(0,q[i],qcum[nrandcor+nrandind+1]-qcum[i+1])))
}
}
if (nrandind>0){
for (i in 1:nrandind){
TT<-rbind(TT,cbind(matrix(0,q[i+nrandcor],ptot+qcum[i+nrandcor]),diag(q[i+nrandcor]),matrix(0,q[i+nrandcor],qcum[nrandind+nrandcor+1]-qcum[nrandcor+i+1])))
}
}
Z<-TT[(1:sum(ModelsDims)),((ptot+1):ncol(TT))]
}
if (StandardErrors==TRUE) {
# Now we need standard errors and diagnostics and we are done #
for (i in 1:length(RandDist)){
if (i==1) {
if (nrandcor>0) U[[i]]<-LinkR(VTCorrTot[(qcum[i]+1):qcum[i+1]],RandDist[i])
dWRdUTot<-DWRDU(U[[i]],RandDist[i])
d2WRdU2Tot<-D2WRDU2(U[[i]],RandDist[i])
WRTot<-WR[[1]]
UTot<-U[[1]]
bfuncv<- bfuncvgen(Vvec=V[[1]],Dist=RandDist[i])
}
else {
if (nrandcor>0) U[[i]]<-LinkR(VTCorrTot[(qcum[i]+1):qcum[i+1]],RandDist[i])
dWRdUTot<-c(dWRdUTot,DWRDU(U[[i]],RandDist[i]))
d2WRdU2Tot<-c(d2WRdU2Tot,D2WRDU2(U[[i]],RandDist[i]))
WRTot<-c(WRTot,WR[[i]])
UTot<-c(UTot,U[[i]])
bfuncv<-c(bfuncv,bfuncvgen(Vvec=V[[i]],Dist=RandDist[i]))
}
}
# Standard errors gradient hessian - correlation parameters #
# But these should be jointly with DRgamma #
if (!is.null(CorrMat)){
StdErrCorr<-sqrt(diag(solve(-HessCorr))) # These dont take into account the variablity of the dispersion components #
StdErrCorrZF<-sqrt(diag(solve(-HessCorrZF))) # Correlation need to be jointly with DRgamma values and then inverted
}
else {
StdErrCorr<-NULL
StdErrCorrZF<-NULL
invSigmaTotComp<-diag(ISIGMAMvec[(ntot+1):(ntot+qcum[nrandcor+nrandind+1])])
invTT2temp<-solve(t(ZOriginal*as.vector(Wvec/Phi))%*%(ZOriginal)+invSigmaTotComp)
}
# Standard errors for random effects - these are also not all ok should be jointly with fixed effects #
# Standard errors for Beta #
# Now we have to consider two situations: one when LAPFIX is used and one when it is not #
# It is now a problem if some Betas are estimated without the Laplace Approximations and some are not #
# clearly the Beta estimates are not orthogonal among each other, therefore for now we should adapt the approach that either
# all betas are estimated with the adjusted profile likelihood or with a joint likelihood #
if (all(!LaplaceFixed)==TRUE) {
StdErrBeta<-sqrt(diag(invDD)[1:ptot])
StdErrVs<-sqrt(diag(invDD)[(ptot+1):length(diag(invDD))]) # These standard errors are not the same as standard errors of PROC MIXED, the part of them is the same!
}
else if (all(LaplaceFixed)==TRUE) {
# Compute the derivative of vhat with respect to beta - it is a 3 dimensional array q (number of random effects) times (p x p) #
# To musi byc zmienione jesli jest wiecej niz jeden model !!!!!! #
dvhatdbeta<--invTT2temp%*%(t(ZOriginal*as.vector(Wvec/Phi)))%*%X # Pytanie czy ta pochodna poprawnie traktuje vhat z modelu (1) wzgledem beta z modelu (2) czy sa to zera? ale czy powinny to byc zera?
# Pochodna jest OK prawdopodobnie
# !!!!!! We need to use invSigmaTotComp when we use normal random effects and correlation - but is it multiplied by WR the independent parts of random effects ? !!!!! #
# Yes invSigmaTotComp is dbind with ISIGMAMvec, which is the multiplication #
# Compute second derivatives of dvhatdbeta1dbeta2 #
# You have to remember that now the matrix is from correlation of random effects #
# @!!!!! DWRDU is not stored anywhere you need to use a function for that #
# Also WR check where this is stored #
d2vhatdbeta2<-array(0,dim=c(ptot,ptot,length(VT)))
# compute dWRdUTot #
for (i in 1:ptot) {
for (j in 1:ptot) {
# This is more complicated when more than one model is used #
# Proper adjustments need to be made #
# This diagonal is tough to compute #
# IF both beta from the same model then the derivative is ok #
#DiagDesignMat <- 1 ### # Macierz ktora jest Xp+Zdvhatdbeta - definiuje ktore rzeczy sa na diagonals -
d2vhatdbeta2[i,j,]<--invTT2temp%*%(t(ZOriginal*as.vector((dWdmu*Wvec)/Phi))%*%((X[,i]+ZOriginal%*%dvhatdbeta[,i])*(X[,j]+ZOriginal%*%dvhatdbeta[,j]))+
diag(as.vector((dWRdUTot*WRTot)/Lambda))%*%(dvhatdbeta[,i]*dvhatdbeta[,j])) # This line here needs to be ammended #
#d2vhatdbeta2[i,j,]<--invTT2temp%*%t(ZOriginal*as.vector((dWdmu*Wvec)/Phi)%*% # Nowa definicja
}
}
# end if nModels equal 1
# Below we implement the computation of derivatives for vhat for more than one model !!!!
# Now we compute the hessian of the Laplace Approximation to the marginal likelihood #
# We need hessian of the TT2 matrix with respect to Beta and Beta*Beta #
# For the hessian we need the second derivative of the Wvec matrix #
# We directly compute the hessian of the Laplace Approximation #
HessianLapFixBeta<-matrix(0,ptot,ptot)
for (i in 1:ptot) {
for (j in 1:i) {
dTT2dbetai<-t(ZOriginal*as.vector((1/Phi)*dWdmu*Wvec*(X[,i]+ZOriginal%*%dvhatdbeta[,i])))%*%ZOriginal+diag(as.vector((1/Lambda)*dWRdUTot*WRTot*dvhatdbeta[,i]))
dTT2dbetaj<-t(ZOriginal*as.vector((1/Phi)*dWdmu*Wvec*(X[,j]+ZOriginal%*%dvhatdbeta[,j])))%*%ZOriginal+diag(as.vector((1/Lambda)*dWRdUTot*WRTot*dvhatdbeta[,j]))
d2TT2dbetaidbetaj<-t(ZOriginal*as.vector((1/Phi)*d2Wdmu2*Wvec*(X[,i]+ZOriginal%*%dvhatdbeta[,i])*Wvec*(X[,j]+ZOriginal%*%dvhatdbeta[,j])))%*%ZOriginal+
t(ZOriginal*as.vector((1/Phi)*dWdmu*dWdmu*Wvec*(X[,i]+ZOriginal%*%dvhatdbeta[,i])*(X[,j]+ZOriginal%*%dvhatdbeta[,j])))%*%ZOriginal+
t(ZOriginal*as.vector((1/Phi)*dWdmu*Wvec*(ZOriginal%*%d2vhatdbeta2[i,j,])))%*%ZOriginal+
diag(as.vector((1/Lambda)*d2WRdU2Tot*WRTot*dvhatdbeta[,i]*WRTot*dvhatdbeta[,j]))+
diag(as.vector((1/Lambda)*dWRdUTot*dWRdUTot*WRTot*dvhatdbeta[,i]*dvhatdbeta[,j]))+
diag(as.vector((1/Lambda)*dWRdUTot*WRTot*d2vhatdbeta2[i,j,]))
# !!!!!!! In the above we need to replace Lambda by invSigmaTotComp - although it does not matter as for normal dWRduTot=0 and dWdmu=0 #
# Maybe in Hessian Lap Fix we can also add terms which sum up to zero for better accuracy ?#
HessianLapFixBeta[i,j]<-sum(-(as.vector((X[,i]+ZOriginal%*%dvhatdbeta[,i])*Wvec*(1/Phi)*(X[,j]+ZOriginal%*%dvhatdbeta[,j])))) + sum(as.vector((1/Phi)*(Y-mu)*(ZOriginal%*%d2vhatdbeta2[i,j,])))-
as.vector(dvhatdbeta[,i])%*%invSigmaTotComp%*%as.vector(dvhatdbeta[,j])+as.vector((PsiM-UTot))%*%(invSigmaTotComp)%*%as.vector(d2vhatdbeta2[i,j,])-
0.5*sum(diag(invTT2temp%*%d2TT2dbetaidbetaj))+0.5*sum(diag(invTT2temp%*%dTT2dbetai%*%invTT2temp%*%dTT2dbetaj))
HessianLapFixBeta[j,i]<-HessianLapFixBeta[i,j]
}
}
StdErrBeta<-sqrt(diag(solve(-HessianLapFixBeta)))
StdErrVs<-sqrt(diag(invTT2temp))
}
else {
warning("Some fixed effects are estimated by Laplace some by h-likelihood; currently we do not know how to compute standard errors in this case")
StdErrBeta<-NULL
}
# Now we compute the standard errors of residual dispersion components #
# The problem is to determine which components are estimated and which are kept fixed #
SelectGamma<-NULL
SelectModel<-NULL
StdErrODEst <- NULL
for (i in 1:nModels) {
if (EstimateOverDisp[i] == TRUE) {
rowsTemp <- (cModelsDims[i]+1):(cModelsDims[i+1])
TempOD <- as.matrix(DDY[rowsTemp,])
colnames(TempOD)<-1:ncol(TempOD)
columnsTemp <- apply(matrix(as.logical(TempOD),nrow(TempOD),ncol(TempOD)),2,any)
whichGamma <- colnames(TempOD)[which(columnsTemp==TRUE)]
SelectModelTemp <- rep(i,length(whichGamma))
if (i==1) {
SelectGamma <- as.numeric(whichGamma)
SelectModel <- SelectModelTemp
}
else {
SelectGamma <- c(SelectGamma, as.numeric(whichGamma))
SelectModel <- c(SelectModel, SelectModelTemp)
} # This says which gamma are estimated and with respect to them the hessian is going to be computed #
}
}
HessianODEst<-matrix(0,length(SelectGamma),length(SelectGamma))
DevianceRespTotal <- rep(0,ntot)
for (i in 1:nModels) {
DevianceRespTemp<-rep(0,cModelsDims[i+1]-cModelsDims[i])
YTemp<-YList[[i]]
BTemp<-B[(cModelsDims[i]+1):cModelsDims[i+1]]
muTemp<-mu[(cModelsDims[i]+1):cModelsDims[i+1]]
PhiTemp<-Phi[(cModelsDims[i]+1):cModelsDims[i+1]]
if (RespDist[i]=="Binomial") {
DevianceRespTemp[YTemp!=0 & YTemp!=BTemp]<-2*(YTemp[YTemp!=0 & YTemp!=BTemp]*log(YTemp[YTemp!=0 & YTemp!=BTemp]/muTemp[YTemp!=0 & YTemp!=BTemp])-(YTemp[YTemp!=0 & YTemp!=BTemp]-BTemp[YTemp!=0 & YTemp!=BTemp])*log((BTemp[YTemp!=0 & YTemp!=BTemp]-YTemp[YTemp!=0 & YTemp!=BTemp])/(BTemp[YTemp!=0 & YTemp!=BTemp]-muTemp[YTemp!=0 & YTemp!=BTemp])))
DevianceRespTemp[YTemp==0]<-2*(BTemp[YTemp==0]*log((BTemp[YTemp==0])/(BTemp[YTemp==0]-muTemp[YTemp==0])))
DevianceRespTemp[YTemp==BTemp]<-2*(YTemp[YTemp==BTemp]*log(YTemp[YTemp==BTemp]/muTemp[YTemp==BTemp]))
}
if (RespDist[i]=="Poisson"){
DevianceRespTemp[YTemp!=0]<-2*(YTemp[YTemp!=0]*log(YTemp[YTemp!=0]/muTemp[YTemp!=0])-(YTemp[YTemp!=0]-muTemp[YTemp!=0]))
DevianceRespTemp[YTemp==0]<-2*muTemp[YTemp==0]
}
if (RespDist[i]=="Normal"){
DevianceRespTemp<-(YTemp-muTemp)^2
}
if (RespDist[i]=="Gamma"){
DevianceRespTemp<-2*(-log(YTemp/muTemp)+(YTemp-muTemp)/muTemp)
#DiagPMATODEstim[(cModelsDims[i]+1):cModelsDims[i+1]]<-DiagPMATODEstim[(cModelsDims[i]+1):cModelsDims[i+1]]+1+as.vector(2*log(PhiTemp)/PhiTemp)+as.vector(2*digamma(1/PhiTemp)/PhiTemp)
}
if (i == 1) DevianceRespTotal <- DevianceRespTemp
else DevianceRespTotal <- c(DevianceRespTotal, DevianceRespTemp)
}
# The part below needs to be removed #
#DevD<-DevianceRespTotal
#PhiD<-Phi
#XD<-X
#ZD<-ZOriginal
#WD<-Wvec
#invD<-invSigmaTotComp
DDr1<-cbind(t(X*as.vector(Wvec/Phi))%*%X,t(X*as.vector(Wvec/Phi))%*%ZOriginal)
DDr2<-cbind(t(ZOriginal*as.vector(Wvec/Phi))%*%X,t(ZOriginal*as.vector(Wvec/Phi))%*%ZOriginal+invSigmaTotComp)
DD<-rbind(DDr1,DDr2)
solveDD<-solve(DD)
# Now compute the actual Hessian #
if (!is.null(SelectGamma)) {
for (i in 1:length(SelectGamma)) {
for (j in 1:i){
modelPhi1<-SelectModel[i]
modelPhi2<-SelectModel[j]
if (modelPhi1 != modelPhi2 ) {
d2Qdphi2 <- 0
d2Qdgamma2 <- 0
}
# Derivative of the Quasi likelihood #
else {
d2Qdphi2 <- -(DevianceRespTotal[(cModelsDims[SelectModel[i]]+1):cModelsDims[SelectModel[i]+1]]/(Phi[(cModelsDims[SelectModel[i]]+1):cModelsDims[SelectModel[i]+1]]^3))+
0.5*(1/(Phi[(cModelsDims[SelectModel[i]]+1):cModelsDims[SelectModel[i]+1]]^2))
if (RespDist[SelectModel[i]]=="Gamma") {
PhiGCur<-as.vector(Phi[(cModelsDims[SelectModel[i]]+1):cModelsDims[SelectModel[i]+1]])
d2Qdphi2<-d2Qdphi2-(2*log(PhiGCur))/(PhiGCur^3)+(1/PhiGCur^3)-0.5*(1/PhiGCur^2)-(2*digamma(1/PhiGCur))/(PhiGCur^3)-(trigamma(1/PhiGCur)/PhiGCur^4)
}
#DDYODCuri<-rep(0,ntot)
DDYODCuri<-DDY[(cModelsDims[SelectModel[i]]+1):(cModelsDims[SelectModel[i]+1]),SelectGamma[i]]
#DDYODCurj<-rep(0,ntot)
DDYODCurj<-DDY[(cModelsDims[SelectModel[j]]+1):(cModelsDims[SelectModel[j]+1]),SelectGamma[j]]
d2Qdgamma2 <- (d2Qdphi2 * (as.vector(Phi[(cModelsDims[SelectModel[i]]+1):cModelsDims[SelectModel[i]+1]])^2) * as.vector(DDYODCuri))%*%
as.vector(DDYODCurj)
## cat("i: ",i,"; j:",j," s2Qdgamma2:",d2Qdgamma2)
}
# !!!!! Here a correction for the gamma distribution so the h is used instead of Q !!!!!! #
# Derivative of the determinant #
if (modelPhi1 == modelPhi2) {
PhiCur <- rep (0,ntot+qtot)
PhiCur[(cModelsDims[SelectModel[i]]+1):cModelsDims[SelectModel[i]+1]]<-Phi[(cModelsDims[SelectModel[i]]+1):cModelsDims[SelectModel[i]+1]]
dimmOD<-nrow(TTOriginal)-length(Wvec)
WvecODCur<-c(Wvec,rep(0,dimmOD))
DDYODCuri<-rep(0,ntot)
DDYODCuri[(cModelsDims[i]+1):(cModelsDims[i+1])]<-DDY[(cModelsDims[SelectModel[i]]+1):(cModelsDims[SelectModel[i]+1]),SelectGamma[i]]
DDYODCurj<-rep(0,ntot)
DDYODCurj[(cModelsDims[j]+1):(cModelsDims[j+1])]<-DDY[(cModelsDims[SelectModel[j]]+1):(cModelsDims[SelectModel[j]+1]),SelectGamma[j]]
dDDdgamma1p1 <- - cbind(t(X*as.vector(Wvec*DDYODCuri/Phi))%*%X,t(X*as.vector(Wvec*DDYODCuri/Phi))%*%ZOriginal)
dDDdgamma1p2 <- - cbind(t(ZOriginal*as.vector(Wvec*DDYODCuri/Phi))%*%X,t(ZOriginal*as.vector(Wvec*DDYODCuri/Phi))%*%ZOriginal)
dDDdgamma1 <- rbind(dDDdgamma1p1,dDDdgamma1p2)
dDDdgamma2p1 <- - cbind(t(X*as.vector(Wvec*DDYODCurj/Phi))%*%X,t(X*as.vector(Wvec*DDYODCurj/Phi))%*%ZOriginal)
dDDdgamma2p2 <- - cbind(t(ZOriginal*as.vector(Wvec*DDYODCurj/Phi))%*%X,t(ZOriginal*as.vector(Wvec*DDYODCurj/Phi))%*%ZOriginal)
dDDdgamma2 <- rbind(dDDdgamma2p1,dDDdgamma2p2)
d2DDdgamma12p1 <- 2* cbind(t(X*as.vector(Wvec*DDYODCuri*DDYODCurj/Phi))%*%X,t(X*as.vector(Wvec*DDYODCuri*DDYODCurj/Phi))%*%ZOriginal)
d2DDdgamma12p2 <- 2* cbind(t(ZOriginal*as.vector(Wvec*DDYODCuri*DDYODCurj/Phi))%*%X,t(ZOriginal*as.vector(Wvec*DDYODCuri*DDYODCurj/Phi))%*%ZOriginal)
d2DDdgamma12 <- rbind(d2DDdgamma12p1,d2DDdgamma12p2)
HessianODEst[i,j]<-d2Qdgamma2-0.5*sum(diag(solveDD%*%d2DDdgamma12))+0.5*sum(diag(solveDD%*%dDDdgamma1%*%solveDD%*%dDDdgamma2))
HessianODEst[j,i]<-HessianODEst[i,j]
## cat("Parameter: ",i," Drugi: ",j,"\n d2Q: ",d2Qdgamma2," trace: ",-0.5*sum(diag(solveDD%*%d2DDdgamma12))+0.5*sum(diag(solveDD%*%dDDdgamma1%*%solveDD%*%dDDdgamma2)))
}
else {
DDYODCuri<-rep(0,ntot)
DDYODCuri[(cModelsDims[i]+1):(cModelsDims[i+1])]<-DDY[(cModelsDims[SelectModel[i]]+1):(cModelsDims[SelectModel[i]+1]),SelectGamma[i]]
DDYODCurj<-rep(0,ntot)
DDYODCurj[(cModelsDims[j]+1):(cModelsDims[j+1])]<-DDY[(cModelsDims[SelectModel[j]]+1):(cModelsDims[SelectModel[j]+1]),SelectGamma[j]]
dDDdgamma1p1 <- - cbind(t(X*as.vector(Wvec*DDYODCuri/Phi))%*%X,t(X*as.vector(Wvec*DDYODCuri/Phi))%*%ZOriginal)
dDDdgamma1p2 <- - cbind(t(ZOriginal*as.vector(Wvec*DDYODCuri/Phi))%*%X,t(ZOriginal*as.vector(Wvec*DDYODCuri/Phi))%*%ZOriginal)
dDDdgamma1 <- rbind(dDDdgamma1p1,dDDdgamma1p2)
dDDdgamma2p1 <- - cbind(t(X*as.vector(Wvec*DDYODCurj/Phi))%*%X,t(X*as.vector(Wvec*DDYODCurj/Phi))%*%ZOriginal)
dDDdgamma2p2 <- - cbind(t(ZOriginal*as.vector(Wvec*DDYODCurj/Phi))%*%X,t(ZOriginal*as.vector(Wvec*DDYODCurj/Phi))%*%ZOriginal)
dDDdgamma2 <- rbind(dDDdgamma2p1,dDDdgamma2p2)
HessianODEst[i,j]<-0.5*sum(diag(solveDD%*%dDDdgamma1%*%solveDD%*%dDDdgamma2))
HessianODEst[j,i]<-HessianODEst[i,j]
}
}
}
colnames(HessianODEst)<-SelectGamma
rownames(HessianODEst)<-SelectGamma
StdErrODEst<-diag(sqrt(solve(-HessianODEst)))
names(StdErrODEst)<-paste("gamma",SelectGamma,sep="")
}
##########################################
##### Hessian for DRgamma parameters #####
##########################################
# We can use DVhatDlambda as it is already calculated
if (nrandcor > 0) nrandtot<-nrandcor
else nrandtot <- 0
if (nrandind > 0) nrandtot<-nrandtot+nrandind
HessianRVC<-matrix(0,nrandtot,nrandtot)
# Hessian of correlated part #
# debug(dhdranmat)
# debug(dDDdranmat)
# VTCorrTot as correlated random effects #
if (nrandcor > 0) {
DSigmadlambdaConstant<-solve(invSigmaTotComp) # Copy current matrix #
for (i in 1:length(CorrMat)){
# Compute the derivative of dSigmadlambda #
# Determine which lambda #
LambdaLocal<-rep(0,qcorr[i])
for (j in 1:qcorr[i]){
LambdaLocal[j]<-LambdaCorr[cumindCorrIndex[cumqcorr[i]+j]+1]
}
FDER <- rep(0,qcorr[i]) # gradient
for (Dindex in 1:qcorr[i]){ # says which derivative
CorrMatOutDeriv<-CorrMatOut[[i]]
diag(CorrMatOutDeriv)<-rep(0,qcorr[i])
CorrMatOutDeriv[Dindex,]<-CorrMatOutDeriv[Dindex,]/2
CorrMatOutDeriv[,Dindex]<-CorrMatOutDeriv[,Dindex]/2
CorrMatOutDeriv[Dindex,Dindex]<-1
CorrMatOutDeriv[-Dindex,-Dindex]<-0
DSigmaMatdlambda<-t(t(sqrt(LambdaLocal)*CorrMatOutDeriv)*(sqrt(LambdaLocal)/(LambdaLocal[Dindex])))
DSigmaTotdlambda<-DSigmaMatdlambda%x%diag(lcorr[i])
# Computing first order derivative #
DSigmadlambda1<-matrix(0,qcum[nrandtot+1],qcum[nrandtot+1])
DSigmadlambda1[(cumindCorrIndex[cumqcorr[i]+1]+1):(cumindCorrIndex[cumqcorr[i+1]+1]),(cumindCorrIndex[cumqcorr[i]+1]+1):(cumindCorrIndex[cumqcorr[i+1]+1])]<-
DSigmaTotdlambda
dvhatdlambda1 <- as.vector(dvhatdranmat(invTT2=invTT2temp,invSigmaMat=invSigmaTotComp,dSigmadlambda=DSigmadlambda1,Psi=PsiM,Uvec=UTot))
dDDdlambda1<-dDDdranmat(X=X,Z=ZOriginal,dWdmu=dWdmu,Wvec=Wvec,dvhatdlambda=dvhatdlambda1,invSigmaMat=invSigmaTotComp,dSigmadlambda=DSigmadlambda1,
WR=WRTot,dWRdu=dWRdUTot)
LambdaCur1<-LambdaLocal[Dindex]
FDER[Dindex] <-dhdranmatCorr(Z=ZOriginal,y=Y,mu=mu,Phi=Phi,dvhatdlambda=dvhatdlambda1,invSigmaMat=invSigmaTotComp,Psi=PsiM,Uvec=UTot,
Vvec=VTCorrTot,bfuncv=bfuncv,dSigmadlambda=DSigmadlambda1)-0.5*sum(diag(solveDD%*%dDDdlambda1))
if (Dindex==1) {
dVcur1 <- dvhatdlambda1
dSigm1 <- DSigmadlambda1
dDDd1 <- dDDdlambda1
}
# Now computing the second derivative #
for (i2 in 1:length(CorrMat)) {
LambdaLocal<-rep(0,qcorr[i])
for (j in 1:qcorr[i2]){
LambdaLocal[j]<-LambdaCorr[cumindCorrIndex[cumqcorr[i2]+j]+1]
}
for (Dindex2 in 1:qcorr[i2]){ # says which derivative
CorrMatOutDeriv<-CorrMatOut[[i2]]
diag(CorrMatOutDeriv)<-rep(0,qcorr[i2])
CorrMatOutDeriv[Dindex2,]<-CorrMatOutDeriv[Dindex2,]/2
CorrMatOutDeriv[,Dindex2]<-CorrMatOutDeriv[,Dindex2]/2
CorrMatOutDeriv[Dindex2,Dindex2]<-1
CorrMatOutDeriv[-Dindex2,-Dindex2]<-0
DSigmaMatdlambda<-t(t(sqrt(LambdaLocal)*CorrMatOutDeriv)*(sqrt(LambdaLocal)/(LambdaLocal[Dindex2])))
DSigmaTotdlambda<-DSigmaMatdlambda%x%diag(lcorr[i2])
# Computing second first order derivative #
DSigmadlambda2<-matrix(0,qcum[nrandtot+1],qcum[nrandtot+1])
DSigmadlambda2[(cumindCorrIndex[cumqcorr[i2]+1]+1):(cumindCorrIndex[cumqcorr[i2+1]+1]),(cumindCorrIndex[cumqcorr[i2]+1]+1):(cumindCorrIndex[cumqcorr[i2+1]+1])]<-
DSigmaTotdlambda
dvhatdlambda2 <- as.vector(dvhatdranmat(invTT2=invTT2temp,invSigmaMat=invSigmaTotComp,dSigmadlambda=DSigmadlambda2,Psi=PsiM,Uvec=UTot))
dDDdlambda2<-dDDdranmat(X=X,Z=ZOriginal,dWdmu=dWdmu,Wvec=Wvec,dvhatdlambda=dvhatdlambda2,invSigmaMat=invSigmaTotComp,dSigmadlambda=DSigmadlambda2,
WR=WRTot,dWRdu=dWRdUTot)
LambdaCur2<-LambdaLocal[Dindex2]
# Computing the second order derivative of the SigmaMat over the lambda #
# Three possiblities: 1 / same matrix same sigma
# : 2 / same matrix different sigma
# : 3 / different matrix CorrMat
D2Sigmadlambda12<-matrix(0,qcum[nrandtot+1],qcum[nrandtot+1])
if (i==i2 & Dindex==Dindex2) {
D2SigmaMatdlambda12 <- DSigmaMatdlambda/(-2*LambdaLocal[Dindex2])
D2SigmaMatdlambda12[Dindex,Dindex]<-0
D2SigmaTotdlambda12 <- D2SigmaMatdlambda12%x%diag(lcorr[i2])
D2Sigmadlambda12[(cumindCorrIndex[cumqcorr[i2]+1]+1):(cumindCorrIndex[cumqcorr[i2+1]+1]),(cumindCorrIndex[cumqcorr[i2]+1]+1):(cumindCorrIndex[cumqcorr[i2+1]+1])]<-
D2SigmaTotdlambda12
}
if (i==i2 & Dindex!=Dindex2) {
D2SigmaMatdlambda12temp <- DSigmadlambda2/(2*LambdaLocal[Dindex])
D2SigmaMatdlambda12 <- matrix(0,qcorr[i],qcorr[i])
D2SigmaMatdlambda12[Dindex,Dindex2] <- D2SigmaMatdlambda12temp[Dindex,Dindex2]
D2SigmaMatdlambda12[Dindex2,Dindex] <- D2SigmaMatdlambda12temp[Dindex2,Dindex]
D2SigmaTotdlambda12 <- D2SigmaMatdlambda12%x%diag(lcorr[i2])
D2Sigmadlambda12[(cumindCorrIndex[cumqcorr[i2]+1]+1):(cumindCorrIndex[cumqcorr[i2+1]+1]),(cumindCorrIndex[cumqcorr[i2]+1]+1):(cumindCorrIndex[cumqcorr[i2+1]+1])]<-
D2SigmaTotdlambda12
}
if (i!=i2) {
D2SigmaMatdlambda12 <- matrix(0, qcorr[i], qcorr[i])
D2Sigmadlambda12 <- matrix(0, qcum[nrandtot+1], qcum[nrandtot+1])
}
d2vhatdlambda12<-as.vector(d2vhatdranmat2(invTT2=invTT2temp,Z=ZOriginal,Phi=Phi,dWdmu=dWdmu,Wvec=Wvec,
dvhatdlambda1=dvhatdlambda1,dvhatdlambda2=dvhatdlambda2,invSigmaMat=invSigmaTotComp,WR=WRTot,
dWRdu=dWRdUTot,dSigmadlambda1=DSigmadlambda1,dSigmadlambda2=DSigmadlambda2,Psi=PsiM,Uvec=UTot,d2Sigmadlambda12=D2Sigmadlambda12))
if (Dindex2==1) {
dVcur2<-dvhatdlambda2
dSigm2<-DSigmadlambda2
dDDd2<-dDDdlambda2
}
# Computing which derivative we deal with #
firstindex<-cumqcorr[i]+Dindex
secondindex<-cumqcorr[i2]+Dindex2
# Computing the actual hessian #
# Define
d2hdlambda2 <- d2hdranmatCorrCorr(Z=ZOriginal,y=Y,mu=mu,Phi=Phi,d2vhatdlambda12=d2vhatdlambda12,dvhatdlambda1=dvhatdlambda1,
dvhatdlambda2=dvhatdlambda2,Wvec=Wvec,invSigmaMat=invSigmaTotComp,dSigmadlambda1=DSigmadlambda1,dSigmadlambda2=DSigmadlambda2,
d2Sigmadlambda12=D2SigmaTotdlambda12,Psi=PsiM,Uvec=UTot,Vvec=VTCorrTot,bfuncv=bfuncv,WR=WRTot)
d2DDdlambda12<-d2DDdranmat2(X=X,Z=ZOriginal,d2Wdmu2=d2Wdmu2,dWdmu=dWdmu,Wvec=Wvec,dvhatdlambda1=dvhatdlambda1,
dvhatdlambda2=dvhatdlambda2,d2vhatdlambda12=d2vhatdlambda12,invSigmaMat=invSigmaTotComp,
dSigmadlambda1=DSigmadlambda1,dSigmadlambda2=DSigmadlambda2,d2Sigmadlambda12=D2Sigmadlambda12,WR=WRTot,dWRdu=dWRdUTot,d2WRdu2=d2WRdU2Tot)
if (Dindex==1 & Dindex2==1) {
d2Vcur12<-d2vhatdlambda12
d2Sigm12<-D2Sigmadlambda12
d2DDd12<-d2DDdlambda12
}
HessianRVC[firstindex,secondindex]<-d2hdlambda2-0.5*sum(diag(solveDD%*%d2DDdlambda12))+0.5*sum(diag(solveDD%*%dDDdlambda1%*%solveDD%*%dDDdlambda2))
# This makes hessian with respect to gamma #
HessianRVC[firstindex,secondindex]<-HessianRVC[firstindex,secondindex]*LambdaCur1*LambdaCur2
}
}
}
}
}
# All dispersion parameters must be evaluated jointly #
# So far we have: HessianODEst - for gammagamma derivatives #
# HessCorr / HessCorrZF - hessian for correlations and fisher z #
# Still there could be the same residual variance over the two models that is not implemented yet !!!!!!!!!!!!!!!!!!!!! #
# Further extension to Truncated Poisson #
# and another extension to commmon betas etc #
# further extend to Purahmadi trick #
# Standard errors and diagnostics #
# Speed up the algorithm #
HELPVALUES<-list(X=X,Z=ZOriginal,dWdmu=dWdmu,Wvec=Wvec,dvhatdlambda=dvhatdlambda1,invSigmaMat=invSigmaTotComp,dSigmadlambda=DSigmadlambda1,
WR=WRTot,dWRdu=dWRdUTot,y=Y,mu=mu,Phi=Phi,Psi=PsiM,Uvec=UTot,Vvec=VTCorrTot,bfuncv=bfuncv,solveDD=solveDD,VT=VT,
dDDd=dDDdlambda1,d2Sigmadlambda12=D2Sigmadlambda12,dVcur1=dVcur1,dVcur2=dVcur2,d2Vcur12=d2Vcur12,dSigm1=dSigm1,dSigm2=dSigm2,
d2Sigm12=d2Sigm12,d2DDd12=d2DDd12,dDDd1=dDDd1,dDDd2=dDDd2)
}
if (StandardErrors==FALSE){
StdErrCorr <- FALSE
StdErrCorrZF <-FALSE
StdErrVs <- FALSE
StdErrBeta <- FALSE
StdErrODEst <- FALSE
StdErrDRgamma <- FALSE
}
OUT<-list(Beta=Beta,V=VT,DRgamma=DRgamma,DYgamma=DYgamma,Correlations=unlist(Correlations),StdErrCorr=StdErrCorr,StdErrCorrZF=StdErrCorrZF,
StdErrVs=StdErrVs,StdErrBeta=StdErrBeta,StdErrODEst=StdErrODEst,StdErrDRgamma=StdErrDRgamma,
M2h=M2h,M2pvh=M2pvh,M2pbvh=M2pbvh,CAIC=CAIC)#,HessianOD=HessianODEst,HessianRVC=HessianRVC,FDER=FDER,HessianCorr=HessCorrZF
if (nrandcor>0) OUT<-c(OUT)
return(OUT)
}
| /R/IWLS_CorrZIP.R | no_license | cran/hsem | R | false | false | 131,741 | r | IWLS_CorrZIP <-
function(Loadings,Correlations,corrModel,YList=NULL,
XList=NULL,ZZIndep=NULL,indepModel=NULL,SSIndep=NULL,BetaList=NULL,Vstart=NULL,OFFSETList=NULL,LinkList=c("Log"),DDRIndep=NULL,DRgammaIndep=NULL,
RespDist=c("Normal","Normal"),RandDistIndep=NULL,DDY=NULL,DYgamma=NULL,
FactDist=NULL,FF=NULL,SSF=NULL,CorrMat=NULL,ZZCorr=NULL,RandDistCorr=NULL,DDRCorr=NULL,DRCorrgamma=NULL,CustomVarMat=NULL,SSC=NULL,
EstimateOverDisp=c(FALSE,FALSE),LaplaceFixed=c(FALSE,FALSE),EstimateCorrelations=TRUE, EstimateVariances=TRUE,StandardErrors=TRUE,
Info=FALSE,DEBUG=FALSE,CONV=CONV,DRFgamma=NULL,APMethod="REML"){
# Compose all the designs first #
## if (!require(Matrix)) stop("Package matrix not installed")
# From design matrix into the two #
# Maximize TP and BN under independence #
# Estimate correlation #
# Helper for padding a matrix with zero rows and columns #
# Pad matrix `a` with zeros so the result has dimensions nr x nc.
# By default the zero rows go below and the zero columns to the right of `a`;
# `up = TRUE` puts the zero rows above, `left = TRUE` puts the zero columns
# to the left. Errors if the requested dimensions are smaller than `a`.
FillZeros<-function(a,nr,nc,up=FALSE,left=FALSE){
  extra_rows <- nr - nrow(a)
  extra_cols <- nc - ncol(a)
  if (extra_rows < 0) stop("number of specified rows is less than the matrix rows")
  if (extra_cols < 0) stop("number of specified columns is less than the matrix columns")
  # NULL pads vanish inside rbind()/cbind(), so a zero-sized pad is a no-op.
  row_pad <- if (extra_rows > 0) matrix(0, extra_rows, ncol(a)) else NULL
  col_pad <- if (extra_cols > 0) matrix(0, nrow(a), extra_cols) else NULL
  corner  <- if (extra_rows > 0 && extra_cols > 0) matrix(0, extra_rows, extra_cols) else NULL
  if (up) {
    if (left) rbind(cbind(corner, row_pad), cbind(col_pad, a))
    else      rbind(cbind(row_pad, corner), cbind(a, col_pad))
  } else {
    if (left) rbind(cbind(col_pad, a), cbind(corner, row_pad))
    else      rbind(cbind(a, col_pad), cbind(row_pad, corner))
  }
}
# Helper that joins two matrices along the diagonal (block-diagonal bind) #
# Combine two matrices into a block-diagonal matrix: `a` in the upper-left
# block, `b` in the lower-right block, zeros elsewhere.
dbind<-function(a,b){
  upper <- cbind(a, matrix(0, nrow(a), ncol(b)))
  lower <- cbind(matrix(0, nrow(b), ncol(a)), b)
  rbind(upper, lower)
}
# Creator of U the inverse of the link for V #
# Inverse link for random effects: map v (linear-predictor scale) to
# u (random-effect scale) for the given random-effect distribution.
#   Normal: identity; Gamma: exp; IGamma: -1/v; Beta: logistic.
LinkR<-function(x,RandDist){
  out <- switch(RandDist,
                Normal = x,
                Gamma  = exp(x),
                IGamma = -1 / x,
                Beta   = exp(x) / (1 + exp(x)))
  # Fail loudly on an unrecognized distribution instead of the cryptic
  # "object 'out' not found" error the original if-chain produced.
  if (is.null(out)) stop("Unknown random-effect distribution: ", RandDist)
  out
}
# Random effects W vector creator - this takes as an argument u vector#
# Weight vector for random effects, evaluated at u (the random-effect
# scale): du/dv for each supported random-effect distribution.
#   Normal: 1; Gamma: u; IGamma: u^2; Beta: u*(1-u).
WRVC<-function(x,RandDist){
  out <- switch(RandDist,
                Normal = rep(1, length(x)),
                Gamma  = x,
                IGamma = x^2,
                Beta   = x * (1 - x))
  # Informative error instead of "object 'out' not found" on a bad input.
  if (is.null(out)) stop("Unknown random-effect distribution: ", RandDist)
  out
}
# x- vscale y- uscale - computes deviances for the estimation of the lambda paramters #
# Deviance contribution of a random effect, used when estimating the
# dispersion (lambda) parameters. `x` is on the v (linear-predictor)
# scale, `y` on the u (random-effect) scale.
DevRand<-function(x,y,RandDist){
  out <- switch(RandDist,
                Normal = y^2,
                Gamma  = 2 * (y - x - 1),
                IGamma = 2 * (log(y) - x - 1),
                Beta   = -log(4 * y * (1 - y)))
  # Informative error instead of "object 'out' not found" on a bad input.
  if (is.null(out)) stop("Unknown random-effect distribution: ", RandDist)
  out
}
# First derivative dW/du of the random-effect weight WRVC with respect
# to u: Normal: 0; Gamma: 1; IGamma: 2u; Beta: 1-2u.
DWRDU<-function(x,RandDist){
  out <- switch(RandDist,
                Normal = rep(0, length(x)),
                Gamma  = rep(1, length(x)),
                IGamma = 2 * x,
                Beta   = 1 - 2 * x)
  # Informative error instead of "object 'out' not found" on a bad input.
  if (is.null(out)) stop("Unknown random-effect distribution: ", RandDist)
  out
}
# Second derivative d2W/du2 of the random-effect weight WRVC with
# respect to u: Normal: 0; Gamma: 0; IGamma: 2; Beta: -2.
D2WRDU2<-function(x,RandDist){
  out <- switch(RandDist,
                Normal = rep(0, length(x)),
                Gamma  = rep(0, length(x)),
                IGamma = rep(2, length(x)),
                # For Beta, W = u*(1-u), dW/du = 1-2u, so d2W/du2 = -2.
                # The original returned +2 here, inconsistent with the
                # straight-derivative convention used by WRVC/DWRDU and
                # by the IGamma case above - sign corrected.
                Beta   = rep(-2, length(x)))
  # Informative error instead of "object 'out' not found" on a bad input.
  if (is.null(out)) stop("Unknown random-effect distribution: ", RandDist)
  return(out)
}
# link of the main distribution part - choice between canonical inverse and logarithm #
# Response link function: map the mean mu to the linear predictor eta.
# Note the "Inverse" link is the negative canonical inverse, eta = -1/mu.
LinkY<-function(mu,Link){
  eta <- switch(Link,
                Inverse  = -(1 / mu),
                Log      = log(mu),
                Identity = mu,
                Logit    = log(mu / (1 - mu)),
                Probit   = qnorm(mu),
                CLogLog  = log(-log(1 - mu)))
  # Informative error instead of "object 'eta' not found" on a bad input.
  if (is.null(eta)) stop("Unknown link function: ", Link)
  eta
}
# Inverse of the link #
# Inverse of the response link: map the linear predictor eta back to
# the mean mu. Mirrors LinkY above ("Inverse" is mu = -1/eta).
InvLinkY<-function(eta,Link){
  mu <- switch(Link,
               Inverse  = -(1 / eta),
               Log      = exp(eta),
               Identity = eta,
               Logit    = exp(eta) / (1 + exp(eta)),
               Probit   = pnorm(eta),
               CLogLog  = 1 - exp(-exp(eta)))
  # Informative error instead of "object 'mu' not found" on a bad input.
  if (is.null(mu)) stop("Unknown link function: ", Link)
  mu
}
# Generation of the weight matrix W# # This now has two arguments first one says what is the distribution assumed the second one what is the link #
# Also added parameter B for binomial distribution #
# WARNING !!!! _ currently only canonical links !!!!!!!!!!!!!!!! #
# These functions Wmatgen and dWdmugen should be amended with the B factor for the binomial distribution !!!!!!!!!!!!#
# GLM variance function V(mu) for the response distribution, used as the
# IWLS weight. For Binomial responses mu = B*p, so V = B*p*(1-p) = mu*(B-mu)/B;
# B is ignored for the other distributions.
# NOTE(review): the `Link` argument is accepted but unused here - per the
# comments above, only canonical links are currently supported.
Wmatgen<-function(mu,B,Link,Dist){
  if (Dist == "Normal")   Vmat <- rep(1, length(mu))
  if (Dist == "Poisson")  Vmat <- mu
  if (Dist == "Binomial") Vmat <- (B - mu) * (mu / B)
  if (Dist == "Gamma")    Vmat <- mu^2
  Vmat
}
# Generation of bfuncv #
# Cumulant-type function b(v) of the random-effect linear predictor Vvec,
# selected by the random-effect distribution.  Vectorized over Vvec.
bfuncvgen <- function(Vvec, Dist) {
  switch(Dist,
    Normal = (Vvec^2) / 2,
    Gamma  = exp(Vvec),
    Beta   = log(1 + exp(Vvec)),
    IGamma = -log(-Vvec))
}
# Still the problem with B in link and variance function for binomial seems not to be solved !!!!!!! #
# Derivative with respect to mu of the iterative weight
#   W = (1/V(mu)) * (dmu/deta)^2,
# needed for the Laplace (second-order) adjustment terms.
# mu   : mean on the response scale (for Binomial, mu = B*p).
# B    : binomial denominators; reset to 1 below for non-binomial families.
# Link : "Inverse", "Log", "Identity" or "Logit".
# Dist : "Normal", "Poisson", "Binomial" or "Gamma".
# WARNING (from the file): only canonical link/distribution pairs are supported.
dWdmugen<-function(mu,B,Link,Dist){
# Proportion scale for the binomial family (mu1 = mu/B = p).
mu1<-mu/B
# V(mu) and dV/dmu for each response distribution.
if (Dist=="Normal") {
Vmat<-rep(1,length(mu))
dVmatdmu<-rep(0,length(mu))
}
if (Dist=="Poisson") {
Vmat<-mu
dVmatdmu<-rep(1,length(mu))
}
if (Dist=="Binomial") {
Vmat<-(B-mu)*(mu/B)
dVmatdmu<-1-2*(mu/B)
}
if (Dist=="Gamma") {
Vmat<-mu^2
dVmatdmu<-2*mu
}
# Offset/denominator B is only meaningful for the binomial family.
if (Dist!="Binomial") B<-1
# deta/dmu and d2eta/dmu2 for each link (eta = g(mu), binomial: eta = g(mu/B)).
if (Link=="Inverse") {
detadmu <- 1/(mu^2)
d2etadmu2 <- -2/(mu^3)
}
if (Link=="Log") {
detadmu<-1/mu
d2etadmu2<--1/(mu^2)
}
if (Link=="Identity") {
detadmu<-rep(1,length(mu))
d2etadmu2<-rep(0,length(mu))
}
if (Link=="Logit") {
detadmu<-1/(mu*(1-mu1))
d2etadmu2<--(1-2*mu1)/((mu*(1-mu1))^2)
}
# Product/chain rule for d/dmu [ (1/V) * (1/detadmu)^2 ]:
# first term differentiates 1/V, second differentiates (dmu/deta)^2.
dWdmu<--(1/Vmat^2)*dVmatdmu*((1/detadmu)^2)+2*(1/Vmat)*(1/detadmu)*(-1/detadmu^2)*d2etadmu2
dWdmu
}
# Second derivative with respect to mu of the iterative weight
#   W = (1/V(mu)) * (dmu/deta)^2,
# used in the higher-order Laplace adjustment.  Same argument conventions as
# dWdmugen (mu is on the response scale; B only matters for Binomial).
# WARNING (from the file): only canonical link/distribution pairs are supported.
d2Wdmu2gen<-function(mu,B,Link,Dist){
# Proportion scale for the binomial family (mu1 = mu/B = p).
mu1<-mu/B
# V(mu), dV/dmu and d2V/dmu2 for each response distribution.
if (Dist=="Normal") {
Vmat<-rep(1,length(mu))
dVmatdmu<-rep(0,length(mu))
d2Vmatdmu2<-rep(0,length(mu))
}
if (Dist=="Poisson") {
Vmat<-mu
dVmatdmu<-rep(1,length(mu))
d2Vmatdmu2<-rep(0,length(mu))
}
if (Dist=="Binomial") {
Vmat<-(B-mu)*(mu/B)
dVmatdmu<-1-2*(mu/B)
d2Vmatdmu2<--2*(1/B)
}
if (Dist=="Gamma") {
Vmat<-mu^2
dVmatdmu<-2*mu
d2Vmatdmu2<-2
}
# Offset/denominator B is only meaningful for the binomial family.
if (Dist!="Binomial") B<-1
# First three derivatives of eta with respect to mu for each link.
if (Link=="Inverse") {
detadmu <- 1/(mu^2)
d2etadmu2 <- -2/(mu^3)
d3etadmu3 <- 6/(mu^4)
}
if (Link=="Log") {
detadmu<-1/mu
d2etadmu2<--1/(mu^2)
d3etadmu3<-2/(mu^3)
}
if (Link=="Identity") {
detadmu<-rep(1,length(mu))
d2etadmu2<-rep(0,length(mu))
d3etadmu3<-rep(0,length(mu))
}
if (Link=="Logit") {
detadmu<-1/(mu*(1-mu1))
d2etadmu2<--(1-2*mu1)/((mu*(1-mu1))^2)
d3etadmu3<-((2/B)*((mu*(1-mu1))^2)+2*(1-2*mu1)*mu*(1-2*mu1)*(1-mu1))/(mu*(1-mu1))^4
}
# Add d2Vmatdmu2 and d3etadmu3 to all the functions #
# Full second-order product/chain rule for d2/dmu2 [ (1/V) * (1/detadmu)^2 ].
d2Wdmu2<-2*(1/Vmat^3)*(dVmatdmu^2)*((1/detadmu)^2)-(1/Vmat^2)*d2Vmatdmu2*((1/detadmu)^2)+2*(1/Vmat^2)*dVmatdmu*((1/detadmu)^3)*(d2etadmu2)-
2*(1/Vmat^2)*(dVmatdmu)*(1/detadmu)*(-1/detadmu^2)*d2etadmu2-2*(1/Vmat)*(1/detadmu^2)*d2etadmu2*(-1/detadmu^2)*d2etadmu2-
4*(1/Vmat)*(1/detadmu)*(-1/detadmu^3)*(d2etadmu2^2)+2*(1/Vmat)*(1/detadmu)*(-1/detadmu^2)*d3etadmu3
return(d2Wdmu2)
}
# Copy of a function for canonical links - direct computation much easier #
#d2Wdmu2gen<-function(mu,B,Link,Dist){
# if (Dist=="Normal") Vmat<-rep(0,length(mu))
# if (Dist=="Poisson") Vmat<-rep(0,length(mu))
# if (Dist=="Binomial") Vmat<--2/B # In binomial models mu=p*B therefore the transformation is used g(mu/B)=eta #
# if (Dist=="Gamma") Vmat<-rep(2,length(mu))
# if (Dist!="Binomial") B<-1 # This makes sure offset is not used here if distribution is different than binomial #
# Include B everywhere and set it to one for different then binomial distribution #
#if (Link=="Inverse") Wvec<-(1/Vmat)
#if (Link=="Log") Wvec<-(1/Vmat)*(mu^2)
#if (Link=="Identity") Wvec<-(1/Vmat)*rep(1,length(mu))
#if (Link=="Logit") Wvec<-(1/Vmat)*
# Wmat<-Vmat
# Wmat
#}
# Generation of the derivative dmudeta #
# Derivative dmu/deta of the inverse link, by link function.  'Dist' is part
# of the interface but unused; for Logit the binomial denominator B enters
# because eta = logit(mu/B).  Vectorized over mu.
dmudetagen <- function(mu, B, Link, Dist) {
  switch(Link,
    Inverse  = mu^2,
    Log      = mu,
    Identity = rep(1, length(mu)),
    Logit    = (B - mu) * (mu / B))
}
# Generation of the derivative dAdmu (y-mu)/Phi is outside#
#dAdmugen<-function(mu,Link){
# if (Link=="Inverse") dAdmu<-rep(0,length(mu))
# if (Link=="Log") dAdmu<-(1/mu^2)
# dAdmu
#}
# These functions are with fact index # - let's keep it for now
# These functions must be modified for large matrices
# These matrices must be reprogrammed, e.g. nrand and nfact must be replaced by nrandcor and nrandind
# Random-effects-only solve of the augmented mixed-model equations.
# ISIGMAMvec : stacked precision weights -- first nrow(Z) entries for the data
#              part, next ncol(Z) entries for the random-effect part.
# Z          : random-effects design matrix.
# Returns the inverse of t(Z) W Z + diag(w_ran) and the diagonal of the
# weighted projection ("hat"-type) matrix over (data, random) rows.
# NOTE (from original): the random part will not stay diagonal once
# correlated random effects are introduced.
SolverShort <- function(ISIGMAMvec, Z) {
  n_obs <- nrow(Z)
  n_ran <- ncol(Z)
  w_err <- ISIGMAMvec[seq_len(n_obs)]               # data-part weights
  w_ran <- ISIGMAMvec[(n_obs + 1):(n_obs + n_ran)]  # random-part weights
  Inverse <- solve(t(Z * w_err) %*% Z + diag(w_ran))
  top <- cbind(Z %*% Inverse %*% t(Z), Z %*% Inverse)
  bottom <- cbind(Inverse %*% t(Z), Inverse)
  proj <- rbind(top, bottom)
  # Scale the columns by the full weight vector, then take the diagonal.
  DiagPP2 <- diag(t(t(proj) * ISIGMAMvec))
  list(Inverse = Inverse, DiagPP2 = DiagPP2)
}
# Full solve of the augmented mixed-model equations for fixed effects (X) and
# random effects (Z) jointly, via a 2x2 block (Schur-complement) inverse that
# reuses the random-effects-only inverse from SolverShort.
# ISIGMAMvec : stacked precision weights (data part, then random part).
# zTot       : augmented working response vector.
# Returns the updated parameter vector and the diagonal of the weighted
# projection matrix (used for leverages / effective dof).
# NOTE(review): relies on names from the enclosing/global environment --
# ntot, qcum, nrandcor, nrandind, X, Z, ptot, and possibly a pre-computed
# INV1 found via exists("INV1") -- confirm these are defined by the caller.
SolverLong<-function(ISIGMAMvec,zTot){
# Split the weight vector into data-part and random-part precisions.
SigmaE<-ISIGMAMvec[1:ntot]
SigmaR<-ISIGMAMvec[(ntot+1):(ntot+qcum[nrandcor+nrandind+1])] # random-part weights
# Reuse a previously computed random-block inverse if one is in scope;
# otherwise compute it from scratch.
if (!exists("INV1")) {INV1<-SolverShort(ISIGMAMvec,Z)$Inverse}
INV1<-as.matrix(INV1)
# Blocks of the joint information matrix: AA = X'WX, BB = X'WZ, CC = Z'WX.
AA<-as.matrix(t(X*SigmaE)%*%X)
BB<-as.matrix(t(X*SigmaE)%*%Z)
CC<-as.matrix(t(Z*SigmaE)%*%X)
# Block inverse via the Schur complement of the random-effects block.
AA1<-as.matrix(solve(AA-BB%*%INV1%*%CC))
BB1<--AA1%*%BB%*%INV1
CC1<--INV1%*%CC%*%AA1
DD1<-INV1+INV1%*%CC%*%AA1%*%BB%*%INV1
Inverse<-rbind(cbind(AA1,BB1),cbind(CC1,DD1))
# Diagonal of the weighted projection matrix over data and random rows.
DPMAT<-rep(0,ntot+qcum[nrandcor+nrandind+1])
# If n is large do the iteration over the row index #
DPMAT[1:ntot]<-diag(X%*%AA1%*%t(X*SigmaE)+Z%*%(CC1)%*%t(X*SigmaE)+X%*%(BB1)%*%t(Z*SigmaE)+Z%*%(DD1)%*%t(Z*SigmaE))
# For the random part #
DPMAT[(ntot+1):length(DPMAT)]<-diag(DD1)*SigmaR
rm(AA);rm(BB);rm(CC);rm(INV1)
# Augmented design [X Z; 0 I] for the working-response solve.
tempmat<-rbind(cbind(X,as.matrix(Z)),cbind(matrix(0,qcum[nrandcor+nrandind+1],ptot),diag(qcum[nrandcor+nrandind+1])))
HELP1<-Inverse%*%t(tempmat*ISIGMAMvec)
NewParms<-HELP1%*%zTot
rm(Inverse);rm(HELP1);rm(tempmat)
rm(AA1);rm(BB1);rm(CC1);rm(DD1)
list(NewParms=NewParms,DiagPMAT=DPMAT)
}
# Nearest positive-definite matrix, by alternating projections with a Dykstra
# correction (Higham-style algorithm; local copy apparently adapted from
# Matrix::nearPD), followed by an eigenvalue floor to force strict positive
# definiteness while approximately preserving the original diagonal.
# M        : square symmetric numeric matrix.
# eig.tol  : relative tolerance below which eigenvalues are treated as zero.
# conv.tol : relative convergence tolerance on the iterates.
# posd.tol : relative floor for the smallest eigenvalue of the result.
# maxits   : maximum number of alternating-projection iterations.
nearPD<-function (M, eig.tol = 1e-06, conv.tol = 1e-07, posd.tol = 1e-08,
maxits = 100)
{
if (!(is.numeric(M) && is.matrix(M) && identical(M, t(M))))
stop("Input matrix M must be square and symmetric.\n")
# Infinity (max absolute row sum) norm, used for the convergence test.
inorm <- function(x) max(rowSums(abs(x)))
n <- ncol(M)
U <- matrix(0, n, n)  # Dykstra correction term
X <- M
iter <- 0
converged <- FALSE
while (iter < maxits && !converged) {
Y <- X
T <- Y - U  # subtract the correction before projecting
e <- eigen(Y, symmetric = TRUE)
Q <- e$vectors
d <- e$values
# diag() on a length-1 vector would build an identity matrix, hence the guard.
D <- if (length(d) > 1)
diag(d)
else as.matrix(d)
# Keep only eigenvalues above the relative tolerance (projection onto PSD cone).
p <- (d > eig.tol * d[1])
QQ <- Q[, p, drop = FALSE]
X <- QQ %*% D[p, p, drop = FALSE] %*% t(QQ)
U <- X - T  # update the Dykstra correction
X <- (X + t(X))/2  # re-symmetrize against round-off
conv <- inorm(Y - X)/inorm(Y)
iter <- iter + 1
converged <- conv <= conv.tol
}
X <- (X + t(X))/2
# Final eigenvalue floor: lift any eigenvalue below Eps to Eps, then rescale
# so the diagonal approximately matches the pre-floor diagonal.
e <- eigen(X, symmetric = TRUE)
d <- e$values
Eps <- posd.tol * abs(d[1])
if (d[n] < Eps) {
d[d < Eps] <- Eps
Q <- e$vectors
o.diag <- diag(X)
X <- Q %*% (d * t(Q))
D <- sqrt(pmax(Eps, o.diag)/diag(X))
X[] <- D * X * rep(D, each = n)
}
(X + t(X))/2
}
# First derivative of the SigmaA matrix #
# invSigmaMat and dSigmadlambda is a matrix #
# First derivative of the augmented information matrix DD with respect to one
# random-effects variance-component parameter lambda.  Assembled as a 2x2
# block matrix over (fixed, random) effects; only the random-random block
# receives the extra 'downrow' term from the Sigma and WR derivatives.
# invSigmaMat and dSigmadlambda are matrices; the remaining weight arguments
# are vectors recycled down the rows of X and Z.
dDDdranmat <- function(X,Z,dWdmu,Wvec,dvhatdlambda,invSigmaMat,dSigmadlambda,WR,dWRdu){
# Data-part weight derivative: dW/dmu * W * (Z dvhat/dlambda), per observation.
uprow <- dWdmu*Wvec*as.vector(Z%*%dvhatdlambda)
# Random-part derivative: Sigma^{-1} dSigma Sigma^{-1} WR term plus dWR/du term.
downrow <- -invSigmaMat%*%dSigmadlambda%*%(invSigmaMat*WR)+(invSigmaMat*dWRdu*WR*dvhatdlambda)
uprow1 <- t(X*uprow)%*%X
uprow2 <- t(X*uprow)%*%Z
dorow1 <- t(Z*uprow)%*%X
dorow2 <- t(Z*uprow)%*%Z+downrow
out <- rbind(cbind(uprow1,uprow2),cbind(dorow1,dorow2))
return(out)
}
# Second derivative of the SigmaA matrix with respect to random effects parameters #
# Second (mixed) derivative of the augmented information matrix DD with
# respect to two variance-component parameters lambda1 and lambda2.  Same
# block structure as dDDdranmat: 'uprow' carries the data-part weight
# derivatives (product rule over d2W/dmu2, (dW/dmu)^2 and d2vhat terms),
# 'downrow' the random-part Sigma/WR derivatives, added to the Z'Z block.
d2DDdranmat2 <- function(X,Z,d2Wdmu2,dWdmu,Wvec,dvhatdlambda1,dvhatdlambda2,d2vhatdlambda12,invSigmaMat,dSigmadlambda1,dSigmadlambda2,d2Sigmadlambda12,WR,dWRdu,d2WRdu2){
# Data part: full product rule in mu along the two lambda directions.
uprow <- d2Wdmu2*Wvec*as.vector(Z%*%dvhatdlambda1)*as.vector(Z%*%dvhatdlambda2)+dWdmu*dWdmu*as.vector(Z%*%dvhatdlambda1)*as.vector(Z%*%dvhatdlambda2)+
dWdmu*Wvec*as.vector(Z%*%d2vhatdlambda12)
# Random part: second derivative of Sigma^{-1}*WR in both lambda directions.
downrow <- invSigmaMat%*%dSigmadlambda1%*%invSigmaMat%*%dSigmadlambda2%*%(invSigmaMat*WR)+invSigmaMat%*%dSigmadlambda2%*%invSigmaMat%*%dSigmadlambda1%*%(invSigmaMat*WR)-
invSigmaMat%*%d2Sigmadlambda12%*%(invSigmaMat*WR)-invSigmaMat%*%dSigmadlambda1%*%(invSigmaMat*dWRdu*WR*dvhatdlambda2)-
invSigmaMat%*%dSigmadlambda2%*%(invSigmaMat*dWRdu*WR*dvhatdlambda1)+(invSigmaMat*d2WRdu2*WR*dvhatdlambda1*WR*dvhatdlambda2)+
(invSigmaMat*dWRdu*dWRdu*WR*dvhatdlambda1*dvhatdlambda2)+(invSigmaMat*dWRdu*WR*d2vhatdlambda12)
uprow1 <- t(X*uprow)%*%X
uprow2 <- t(X*uprow)%*%Z
dorow1 <- t(Z*uprow)%*%X
dorow2 <- t(Z*uprow)%*%Z+downrow
out <- rbind(cbind(uprow1,uprow2),cbind(dorow1,dorow2))
return(out)
}
# Derivative of the random-effect mode vhat with respect to one variance
# component:  -invTT2 * Sigma^{-1} * (dSigma/dlambda) * Sigma^{-1} * (Psi - U).
dvhatdranmat <- function(invTT2, invSigmaMat, dSigmadlambda, Psi, Uvec) {
  resid <- invSigmaMat %*% (Psi - Uvec)
  -invTT2 %*% (invSigmaMat %*% (dSigmadlambda %*% resid))
}
# Second derivative of vhat with respect to random effects varaince components parameters #
# Second (mixed) derivative of the random-effect mode vhat with respect to two
# variance-component parameters lambda1 and lambda2, obtained by implicit
# differentiation of the score equations: out = -invTT2 %*% out1, where out1
# collects the data-part curvature term and the Sigma^{-1}/WR derivative terms.
d2vhatdranmat2 <- function(invTT2,Z,Phi,dWdmu,Wvec,dvhatdlambda1,dvhatdlambda2,invSigmaMat,dWRdu,WR,dSigmadlambda1,dSigmadlambda2,Psi,Uvec,d2Sigmadlambda12){
out1 <- (t(Z*as.vector(1/Phi)*dWdmu*Wvec*as.vector(Z%*%dvhatdlambda1))%*%as.vector(Z%*%dvhatdlambda2)) + invSigmaMat%*%(dWRdu*WR*dvhatdlambda1*dvhatdlambda2) -
invSigmaMat%*%(dSigmadlambda1%*%(invSigmaMat%*%as.vector(WR*dvhatdlambda2))) - invSigmaMat%*%(dSigmadlambda2%*%(invSigmaMat%*%as.vector(WR*dvhatdlambda1))) -
invSigmaMat%*%(dSigmadlambda1%*%(invSigmaMat%*%(dSigmadlambda2%*%(invSigmaMat%*%(Psi-Uvec))))) - invSigmaMat%*%(dSigmadlambda2%*%(invSigmaMat%*%(dSigmadlambda1%*%(invSigmaMat%*%(Psi-Uvec))))) +
invSigmaMat%*%(d2Sigmadlambda12%*%(invSigmaMat%*%(Psi-Uvec)))
out <- -invTT2%*%out1
return(out)
}
# First derivative of h with respect to random effects variance components #
# This function need to be ammended for different random effects using the c(psi,lambda) terms #
# First derivative of the h-likelihood with respect to a variance component of
# an INDEPENDENT (uncorrelated) random effect.  Combines the chain-rule term
# through vhat with the direct derivative of the random-effect log-density.
# NOTE (from the file): this needs to be amended per distribution with the
# c(psi,lambda) residual terms; 'randist' is accepted but not yet used.
dhdranmatInd <- function(Z,y,mu,Phi,dvhatdlambda,invSigmaMat,Psi,Uvec,Vvec,bfuncv,dSigmadlambda,randist){
out1 <- dvhatdlambda%*%(t(Z*as.vector(1/Phi))%*%(y-mu)+invSigmaMat%*%(Psi-Uvec))+(Psi*Vvec-bfuncv)%*%(invSigmaMat^2)%*%rep(1,length(Vvec))
# depending on distribution a residual needs to added form c(psi,lambda) #
return(out1)
}
# Temporary first derivative #
# First derivative of the h-likelihood with respect to a variance component of
# a CORRELATED (multivariate normal) random effect.  The quadratic-form and
# trace terms are the derivative of the MVN log-density in Sigma; the first
# term is the chain rule through vhat.  (Marked "temporary" in the original.)
dhdranmatCorr <- function(Z,y,mu,Phi,dvhatdlambda,invSigmaMat,Psi,Uvec,Vvec,bfuncv,dSigmadlambda){
out1 <- dvhatdlambda%*%(t(Z*as.vector(1/Phi))%*%(y-mu)+invSigmaMat%*%(Psi-Uvec))+0.5*Vvec%*%invSigmaMat%*%dSigmadlambda%*%invSigmaMat%*%Vvec-
0.5*sum(diag(invSigmaMat%*%dSigmadlambda))
return(out1)
}
# Second derivative of h with respect to random effects variance components #
# Second (mixed) derivative of the h-likelihood with respect to two variance
# components of CORRELATED random effects.  Combines: curvature of the data
# part through vhat, cross terms between dvhat and dSigma, the second
# derivative of the MVN quadratic form (written via the signed square root of
# Psi*Vvec - bfuncv), and the trace terms from the log-determinant.
d2hdranmatCorrCorr <- function(Z,y,mu,Phi,d2vhatdlambda12,dvhatdlambda1,dvhatdlambda2,Wvec,invSigmaMat,dSigmadlambda1,dSigmadlambda2,d2Sigmadlambda12,Psi,Uvec,Vvec,bfuncv,WR){
out <- d2vhatdlambda12%*%t(Z*as.vector(1/Phi))%*%(y-mu)-dvhatdlambda1%*%t(Z*as.vector(Wvec/Phi))%*%Z%*%dvhatdlambda2 +
d2vhatdlambda12%*%invSigmaMat%*%(Psi-Uvec)-dvhatdlambda1%*%invSigmaMat%*%dSigmadlambda2%*%invSigmaMat%*%(Psi-Uvec)-dvhatdlambda2%*%invSigmaMat%*%dSigmadlambda1%*%invSigmaMat%*%(Psi-Uvec)-
dvhatdlambda1%*%(invSigmaMat)%*%(WR*dvhatdlambda2)+
(sqrt(abs(Psi*Vvec-bfuncv))*sign(Psi*Vvec-bfuncv))%*%(invSigmaMat%*%dSigmadlambda1%*%invSigmaMat%*%dSigmadlambda2%*%invSigmaMat+invSigmaMat%*%dSigmadlambda2%*%invSigmaMat%*%dSigmadlambda1%*%invSigmaMat-
invSigmaMat%*%d2Sigmadlambda12%*%invSigmaMat)%*%(sqrt(abs(Psi*Vvec-bfuncv))*sign(Psi*Vvec-bfuncv))-
0.5*sum(diag(invSigmaMat%*%d2Sigmadlambda12))+0.5*sum(diag(invSigmaMat%*%dSigmadlambda1%*%invSigmaMat%*%dSigmadlambda2))
return(out)
}
#################################################################################
# We need to include a profile likelihood function for correlations p_v_beta(h) #
#################################################################################
#####
##### Maybe this function needs to update the random effects estimates #
#####
# Adjusted profile h-likelihood evaluated at candidate correlation parameters
# ZF (on the unconstrained real scale).  Re-estimates the correlated random
# effects under the implied covariance, then returns a list:
#   [[1]] negative adjusted profile likelihood, -(h - 0.5*log det(DD/2pi))
#   [[2]] the joint h-likelihood (data part + random-effect part)
#   [[3]] h adjusted by the random-effects-only information DD1
#   [[4]] the conditional AIC
# NOTE(review): this function relies on lexical/global scoping for many
# objects (CorrMat, CorrMatOut, DDRCorr, DRCorrgamma, qcorr, cumqcorr,
# cumindCorrIndex, SigmaMat, lcorr, qcum, nrandind, ISIGMAMvec, ZOriginal,
# Wvec, Phi, VTCorrTot, zmain, X, Beta, TTOriginal, ntot, nModels,
# cModelsDims, B, LinkList, RespDist, YList, RandDistIndep, mu, dbind,
# dmvnorm) -- confirm all are defined in the enclosing environment.
AdjProfCorrelations<-function(ZF){
# Map the unconstrained ZF to correlations in (-1,1) (tanh-type transform).
Correls<-list(0)
ZF<-list(ZF)
for (i in 1:length(ZF)){
Correls[[i]]<-(1-exp(2*ZF[[i]]))/(1+exp(2*ZF[[i]]))
}
# Unfold CorrMat: replace each integer label by its candidate correlation.
TempCorrMat<-list(0)
for (i in 1:length(CorrMat)){
TempCorrMat[[i]]<-CorrMat[[i]]
for (j in 1:length(Correls[[i]])){
TempCorrMat[[i]][CorrMat[[i]]==j]<-Correls[[i]][j]
}
diag(TempCorrMat[[i]])<-1
CorrMatOut[[i]]<-TempCorrMat[[i]]
}
LambdaCorr<-exp(DDRCorr%*%DRCorrgamma)
# Scale each correlation matrix by the standard deviations to get SigmaMat.
for (i in 1:length(CorrMat)){
LambdaLocal<-rep(0,qcorr[i])
for (j in 1:qcorr[i]){
LambdaLocal[j]<-LambdaCorr[cumindCorrIndex[cumqcorr[i]+j]+1]
}
SigmaMat[[i]]<-sqrt(LambdaLocal)*t(CorrMatOut[[i]]*sqrt(LambdaLocal))
}
# Merge the per-block covariances (and inverses) block-diagonally.
for (i in 1:length(CorrMat)){
if (i==1) {
SigmaTot<-SigmaMat[[1]]%x%diag(lcorr[1])
invSigmaTot<-solve(SigmaMat[[1]])%x%diag(lcorr[1])
}
else {
SigmaTot<-dbind(SigmaTot,SigmaMat[[i]]%x%diag(lcorr[i]))
invSigmaTot<-dbind(invSigmaTot,solve(SigmaMat[[i]])%x%diag(lcorr[i]))
}
}
# Re-estimate the random effects V under the new covariance, iterating the
# (fixed-point) ridge solve to convergence.
invSigmaTotIn<-invSigmaTot
if (nrandind>0) invSigmaTotIn<-dbind(invSigmaTotIn,diag(ISIGMAMvec[(qcum[cumqcorr[length(CorrMat)+1]+1]+1):qcum[length(qcum)]]))
convIn<-10
# The solve() below does not depend on VTCorrTotIn, so it is hoisted out of
# the while loop and reused on every iteration.
INVTEMP<-solve(t(ZOriginal)%*%(ZOriginal*as.vector(Wvec/Phi))+invSigmaTotIn)%*%t(ZOriginal*as.vector(Wvec/Phi))
VTCorrTotIn<-VTCorrTot
while (convIn>0.01) {
OldVTCorrTotIn<-VTCorrTotIn
VTCorrTotIn<-INVTEMP%*%(zmain-X%*%Beta)
convIn<-sum(abs(OldVTCorrTotIn-VTCorrTotIn))
}
VTCorrTot<-VTCorrTotIn
# Data part of the h-likelihood: per-model log-likelihood at the new modes.
hlikelihood<-0
eta<-TTOriginal[1:ntot,]%*%as.matrix(c(Beta,VTCorrTot))
for (i in 1:nModels){
mu[(cModelsDims[i]+1):cModelsDims[i+1]]<-B[(cModelsDims[i]+1):cModelsDims[i+1]]*InvLinkY(eta[(cModelsDims[i]+1):cModelsDims[i+1]],LinkList[[i]])
if (RespDist[i]=="Normal") hlikelihood<-hlikelihood+sum(dnorm(YList[[i]],mu[(cModelsDims[i]+1):cModelsDims[i+1]],sd=sqrt(Phi[(cModelsDims[i]+1):cModelsDims[i+1]]),log=TRUE))
if (RespDist[i]=="Poisson") {
# Poisson log-likelihood written out so it can be divided by the dispersion.
temp<-sum((-mu[(cModelsDims[i]+1):cModelsDims[i+1]]+YList[[i]]*log(mu[(cModelsDims[i]+1):cModelsDims[i+1]])-lgamma(YList[[i]]+1))/Phi[(cModelsDims[i]+1):cModelsDims[i+1]])
hlikelihood<-hlikelihood+temp
}
if (RespDist[i]=="Binomial") hlikelihood<-hlikelihood+sum(dbinom(YList[[i]],B[(cModelsDims[i]+1):cModelsDims[i+1]],(mu[(cModelsDims[i]+1):cModelsDims[i+1]]/B[(cModelsDims[i]+1):cModelsDims[i+1]]),log=TRUE))
# FIX: log=TRUE was missing here -- raw gamma densities were being summed
# into a LOG-likelihood accumulator, unlike every other distribution above.
if (RespDist[i]=="Gamma") hlikelihood<-hlikelihood+sum(dgamma(YList[[i]],shape=(1/Phi[(cModelsDims[i]+1):cModelsDims[i+1]]),scale=(mu[(cModelsDims[i]+1):cModelsDims[i+1]]*Phi[(cModelsDims[i]+1):cModelsDims[i+1]]),log=TRUE))
}
hlikelihood1<-hlikelihood
# Random-effect part: multivariate normal density for each correlated block.
for (i in 1:length(CorrMat)){
VTemp<-unlist(VTCorrTot)[(qcum[cumqcorr[i]+1]+1):qcum[cumqcorr[i+1]+1]] # modes of this correlated block
noraneff<-cumqcorr[i+1]-cumqcorr[i]
VTemp<-matrix(VTemp,length(VTemp)/noraneff,noraneff)
hlikelihood<-hlikelihood+sum(dmvnorm(VTemp,mean=rep(0,noraneff),sigma=SigmaMat[[i]],log=TRUE))
}
if (nrandind>0) {
for (i in 1:nrandind) {
# NOTE(review): ':' binds tighter than '+', so this index evaluates as
# ((a+i):b)+i+1 rather than (a+i):(b+i+1) -- verify the intended range
# before changing it.
if (RandDistIndep[i]=="Normal") hlikelihood<-hlikelihood+sum(dnorm(VTCorrTot[(qcum[cumqcorr[length(CorrMat)+1]]+i):qcum[cumqcorr[length(CorrMat)+1]]+i+1],log=TRUE))
}
}
# REMARK (from original): invSigmaTot must NOT be an identity matrix here;
# rebuild the block-diagonal covariance and its inverse.
for (i in 1:length(CorrMat)){
if (i==1) {
SigmaTot<-SigmaMat[[1]]%x%diag(lcorr[1])
invSigmaTot<-solve(SigmaMat[[1]])%x%diag(lcorr[1])
}
else {
SigmaTot<-dbind(SigmaTot,SigmaMat[[i]]%x%diag(lcorr[i]))
invSigmaTot<-dbind(invSigmaTot,solve(SigmaMat[[i]])%x%diag(lcorr[i]))
}
}
hlikelihood2<-hlikelihood
# Laplace adjustments: DD is the information over (beta, v); DD1 over v only.
MIDMAT<-dbind(diag(as.vector(Wvec/Phi)),invSigmaTot)
if ((qcum[cumqcorr[length(CorrMat)+1]+1]+1)-qcum[length(qcum)]<0) MIDMAT<-dbind(MIDMAT,diag(ISIGMAMvec[(qcum[cumqcorr[length(CorrMat)+1]+1]+1):qcum[length(qcum)]]))
DD<-t(TTOriginal)%*%MIDMAT%*%TTOriginal
TTOriginal3<-rbind(ZOriginal,diag(ncol(ZOriginal)))
DD1<-t(TTOriginal3)%*%MIDMAT%*%TTOriginal3
hlikelihood3<-hlikelihood-0.5*determinant((DD1/(2*pi)),logarithm=TRUE)$modulus
hlikelihood4<-hlikelihood-0.5*determinant((DD/(2*pi)),logarithm=TRUE)$modulus
AdjProfLike<--hlikelihood4
# Conditional AIC: effective dof pd = trace(DD^{-1} %*% data-part information).
MIDMAT1<-dbind(diag(as.vector(Wvec/Phi)),0*invSigmaTot)
BB<-t(TTOriginal)%*%MIDMAT1%*%TTOriginal
pd<- sum(diag(solve(DD) %*% BB))
caic<--2*hlikelihood1+2*pd
res<-list(AdjProfLike,hlikelihood2,hlikelihood3,caic)
return(res)
}
##################################################################################
# Create the design system of all #
# Check if there are correlated random effects #
# if (is.null(CorrMat)) EstimCorrelation<-FALSE - this condition is not really necessary
# Therefore we dont need a newton-raphson to estimate correlations if corrmat does not exist #
# YBN<-(Y==0)
# YTP<-Y
# YTP[Y==0]<-NA
# nBN<-nrow(YBN)
# nTP<-nrow(YTP)
# pBN<-ncol(XBN)
# pTP<-ncol(XTP)
# This is a total X matrix #
# First model goes Binomial, second Truncated Poisson - or extensions to Poisson etc #
# First also we model the correlated random effects
# XTT<-dbind(XBN,XTP)
# nrandTP<-length(ZZTP) # number of truncate poisson components
# nrandBN<-length(ZZBN) # number of binomial
# nrandCR<-length(ZZCorr) # number of correlated components
# nrandTT<-nrandTP+nrandBN
# ---- Joint-model bookkeeping: dimensions and stacked design matrices ----
# Number of sub-models (responses) fitted jointly.
nModels<-length(YList)
# Observations per sub-model and their cumulative row offsets.
ModelsDims<-sapply(YList,nrow)
cModelsDims<-cumsum(c(0,ModelsDims))
RandDist<-c(RandDistCorr,RandDistIndep) # This specified the distribution of all random effects
# Total number of observations across all sub-models.
ntot<-sum(ModelsDims)
# Number of correlation parameters per correlated block, with cumulative index.
qcorrels<-sapply(Correlations,length)
cumqcorrels<-cumsum(c(0,qcorrels))
# Design for the matrices of random effects ##
# Variance-model design matrix DDR: correlated part, independent part, or both.
if (!is.null(DDRCorr) & !is.null(DDRIndep)) DDR<-dbind(DDRCorr,DDRIndep)
if (is.null(DDRCorr) & !is.null(DDRIndep)) DDR<-DDRIndep
if (!is.null(DDRCorr) & is.null(DDRIndep)) DDR<-DDRCorr
if (is.null(DDRCorr) & is.null(DDRIndep)) stop("You did not specify any design matrix for random effects!")
# Create the design matrices of X covariates and Y #
# Responses are stacked row-wise; fixed-effect designs block-diagonally (dbind).
for (i in 1:length(YList)){
if (i==1) {
Y<-YList[[1]]
X<-XList[[1]]
}
else {
Y<-rbind(Y,YList[[i]])
X<-dbind(X,XList[[i]])
}
}
# Index of the covariates over all the models #
p<-sapply(XList,ncol)
ptot<-sum(p)
pcum<-cumsum(c(0,p))
# Create the matrix of covariances - first multiply correlations by standard deviations to create variance covariance matrix#
# ---- Correlated random effects: covariance construction and Cholesky ----
# Builds the per-block covariance matrices SigmaMat from the correlation
# templates in CorrMat, then transforms the random-effects design so the
# transformed effects are independent (via Cholesky factors of SigmaMat).
if(!is.null(CorrMat)) {
# Unfold CorrMat #
# Replace each integer label in the template by its correlation value.
TempCorrMat<-list(0)
CorrMatOut<-list(0)
for (i in 1:length(CorrMat)){
TempCorrMat[[i]]<-CorrMat[[i]]
for (j in 1:length(Correlations[[i]])){
TempCorrMat[[i]][CorrMat[[i]]==j]<-Correlations[[i]][j]
}
diag(TempCorrMat[[i]])<-1
CorrMatOut[[i]]<-TempCorrMat[[i]]
}
# Variance components on the log-linear scale of the variance model.
LambdaCorr<-exp(DDRCorr%*%DRCorrgamma)
# now create correlation matrix for all the random effects #
qcorr<-rep(0,length(CorrMat))
lcorr<-rep(0,length(CorrMat)) # in this vector we store
# qcorr[i]: number of correlated effects in block i; lcorr[i]: levels per effect.
for (i in 1:length(CorrMat)){
if (i==1) {
qcorr[1]<-nrow(CorrMat[[1]])
}
else qcorr[i]<-nrow(CorrMat[[i]])
cumqcorr<-cumsum(c(0,qcorr))
lcorr[i]<-ncol(ZZCorr[[cumqcorr[i]+1]])
}
# Guard: within one correlated block, all effects must share the same
# Laplace-fixing choice (all LAPFIX=TRUE or all LAPFIX=FALSE).
for (i in 1:length(CorrMat)){
tempindex<-as.numeric(names(table(corrModel[(cumqcorr[i]+1):cumqcorr[i+1]])))
if (sum(LaplaceFixed[tempindex]==FALSE)!=length(LaplaceFixed[tempindex]) & sum(LaplaceFixed[tempindex]==TRUE)!=length(LaplaceFixed[tempindex]))
stop("You choose for some correlated effect LAPFIX=TRUE while others LAPFIX=FALSE this is not permitted!")
}
# create index of individual random effects #
indCorrIndex<-rep(0,length(ZZCorr))
for (i in 1:length(ZZCorr)){
indCorrIndex[i]<-ncol(ZZCorr[[i]])
}
cumindCorrIndex<-cumsum(c(0,indCorrIndex))
# Scale each correlation matrix by the standard deviations -> covariance.
SigmaMat<-list(0)
for (i in 1:length(CorrMat)){
LambdaLocal<-rep(0,qcorr[i])
for (j in 1:qcorr[i]){
LambdaLocal[j]<-LambdaCorr[cumindCorrIndex[cumqcorr[i]+j]+1]
}
SigmaMat[[i]]<-sqrt(LambdaLocal)*t(CorrMatOut[[i]]*sqrt(LambdaLocal))
}
# merging the matrices by diagonals #
# Total covariance and its inverse, expanded over levels via Kronecker products.
for (i in 1:length(CorrMat)){
if (i==1) {
SigmaTot<-SigmaMat[[1]]%x%diag(lcorr[1])
invSigmaTot<-solve(SigmaMat[[1]])%x%diag(lcorr[1])
}
else {
SigmaTot<-dbind(SigmaTot,SigmaMat[[i]]%x%diag(lcorr[i]))
invSigmaTot<-dbind(invSigmaTot,solve(SigmaMat[[i]])%x%diag(lcorr[i]))
}
}
# Matrix SigmaTot is the resulting matrix #
# We have to make the random effects indendent via Cholesky decomposition #
# We need a kroneker product of cholesky matrix times the SigmaTot #
# You can do the cholesky on the total sigma matrix - the problem is the dimension is greater so maybe we loose computational efficiency #
# DO cholesky on SigmaMat list #
# We have to make functions to convert the ZZCorr into vectoral design for correlated random effects and back to the diagnoal design according to subject#
# This will make the computationally more efficient things #
# Collapse each correlated design matrix to its row-sum vector (vector form).
ZZCorrVec<-list(0)
for (i in 1:length(ZZCorr)){
ZZCorrVec[[i]]<-ZZCorr[[i]]%*%rep(1,ncol(ZZCorr[[i]]))
}
# Determine how many models we have linked by correlation #
# Now we modify the design matrix via cholesky decompositions #
# All these steps need to be reprogramed using matrix package although in the final product there might be not so many zeros sometime #
ZZCorrUpd<-list(0)
DiagDesign<-list(0)
CholeskyMatrices<-list(0)
ZZShort<-list(0)
for (i in 1:length(CorrMat)){
itchol<-t(chol(SigmaMat[[i]])) # This is actually cholesky decomposition instead of inverse, there was before inverse which was wrong
CholeskyMatrices[[i]]<-itchol
currentindex<-seq(cumqcorr[i]+1,cumqcorr[i+1])
lengthcorrModelCur<-length(RespDist)
valuescorrModelCur<-as.numeric(names(table(corrModel)))
# Assemble, per sub-model, the columns of the correlated design (zero-filled
# where an effect does not belong to that sub-model).
ZZCorrTemp<-rep(list(0),lengthcorrModelCur)
for (j in 1:lengthcorrModelCur){
for (k in currentindex) {
if (ZZCorrTemp[[j]][1]==0 & length(ZZCorrTemp[[j]])==1){
if (corrModel[k]==valuescorrModelCur[j]) ZZCorrTemp[[j]]<-ZZCorrVec[[k]] # If the observation belongs to this model than make it #
else ZZCorrTemp[[j]]<-rep(0,ModelsDims[j])
}
else {
if (corrModel[k]==valuescorrModelCur[j]) ZZCorrTemp[[j]]<-cbind(ZZCorrTemp[[j]],ZZCorrVec[[k]])
else ZZCorrTemp[[j]]<-cbind(ZZCorrTemp[[j]],rep(0,ModelsDims[j]))
}
}
}
# Binding it all together #
for (j in 1:length(ZZCorrTemp)){
if (j==1) {
ZZCorrTempTot<-ZZCorrTemp[[j]]
nrowtot<-nrow(ZZCorrTemp[[j]])
}
else {
ZZCorrTempTot<-rbind(ZZCorrTempTot,ZZCorrTemp[[j]])
nrowtot<-c(nrowtot,nrow(ZZCorrTemp[[j]]))
}
}
cnrowtot<-cumsum(c(0,nrowtot))
# Now we use cholesky transform on the design matrix #
ZZCorrTempTotUpd<-ZZCorrTempTot%*%itchol
ZZShort[[i]]<-ZZCorrTempTot
# ZZCorrTempTotUpd is the new design matrix for the joint model from the correlated part #
# This design matrix is in the short form (vector form) - we need to expand it to the diagnoal form #
# Now we need to take into accout to which model we should link the SSC #
# Expansion to diagnoal here we need matrix package already #
# Expand the vector-form design back to subject-level indicator columns (SSC).
for (j in currentindex){
DiagDesign[[j]]<-model.matrix(~SSC[[j]]-1)
DiagDesign[[j]]<-DiagDesign[[j]]*ZZCorrTempTotUpd[,(1+j-currentindex[1])]
if (j==currentindex[1]) ZZCorrTotUpd<-DiagDesign[[j]]
else ZZCorrTotUpd<-cbind(ZZCorrTotUpd,DiagDesign[[j]])
#DiagDesign[[j]]<-model.matrix(~SSC[[j]]-1)
}
ZZCorrUpd[[i]]<-ZZCorrTotUpd
if (i==1) ZZCorrDesign<-ZZCorrUpd[[1]]
else ZZCorrDesign<-cbind(ZZCorrDesign,ZZCorrUpd[[i]])
}
# Column counts of the (now independent) per-effect design matrices.
q<-sapply(DiagDesign,ncol)
}
# DiagDesign contains individual design matrix for each random effects - which are now independent !!!!!!!!!!!!! #
# The models must be fitted jointly if there are correlations between random effects between different models #
# !!!!! It works !!!!! #
# Now we need to add independent design matricies and then create the vector of random effect corresponding to the design matrices #
# Independent random effects are in ZZIndep indexed by
# Handle independent random effects here #
# ---- Assemble independent random effects and the augmented system ----
# Counts of independent and correlated random-effect terms (0 when absent).
if (exists("ZZIndep")) nrandind<-length(ZZIndep)
else nrandind<-0
if (exists("ZZCorr")) nrandcor<-length(ZZCorr)
else nrandcor<-0
# Stacked fixed-effect coefficients over all sub-models.
Beta<-unlist(BetaList)
if (nrandind>0) {
# Zero-pad each independent design so its rows align with the stacked Y.
ZZIndepTemp<-list(0)
for (i in 1:nrandind){
if (indepModel[i]==length(ModelsDims)) rowsdown<-0
else rowsdown<-sum(ModelsDims[(indepModel[i]+1):length(ModelsDims)][!is.na(ModelsDims[(indepModel[i]+1):length(ModelsDims)])])
if (indepModel[i]==1) rowsup<-0
else rowsup<-sum(ModelsDims[1:(indepModel[i]-1)])
# First expand up if possible #
ZZIndepTemp[[i]]<-FillZeros(ZZIndep[[i]],nr=(nrow(ZZIndep[[i]])+rowsup),nc=ncol(ZZIndep[[i]]),up=TRUE)
ZZIndepTemp[[i]]<-FillZeros(ZZIndepTemp[[i]],nr=(nrow(ZZIndepTemp[[i]])+rowsdown),nc=ncol(ZZIndepTemp[[i]]))
# Grow the joint design: correlated part first (if any), then independent parts.
if (!exists("ZZCorrDesign") & i==1) ZZDesign<-ZZIndepTemp[[i]]
if (exists("ZZCorrDesign") & i==1) ZZDesign<-cbind(ZZCorrDesign,ZZIndepTemp[[1]])
if (i>1) ZZDesign<-cbind(ZZDesign,ZZIndepTemp[[i]])
if (i==1) ZZIndepDesign<-ZZIndepTemp[[1]]
if (i>1) ZZIndepDesign<-cbind(ZZIndepDesign,ZZIndepTemp[[i]])
}
}
else if(nrandcor>0) ZZDesign<-ZZCorrDesign
# Append independent-effect column counts to q (set in the CorrMat block).
if (nrandind>0) {
if (!is.null(CorrMat)) q<-c(q,sapply(ZZIndepTemp,ncol))
else q<-sapply(ZZIndepTemp,ncol)
}
# Create ZOriginal and TTOriginal #
# ZOriginal is not well defined here !!!!!! - it must be corrected #
# ZOriginal: untransformed (pre-Cholesky) random-effects design.
ZOriginal<-NULL
if (nrandcor>0) {
for (i in 1:nrandcor) {# These also dont need to be from different models #
if (corrModel[i]==length(ModelsDims)) rowsdown<-0
else rowsdown<-sum(ModelsDims[(corrModel[i]+1):length(ModelsDims)][!is.na(ModelsDims[(corrModel[i]+1):length(ModelsDims)])])
if (corrModel[i]==1) rowsup<-0
else rowsup<-sum(ModelsDims[1:(corrModel[i]-1)])
ZZCorrTemp<-FillZeros(ZZCorr[[i]],nr=(nrow(ZZCorr[[i]])+rowsup),nc=ncol(ZZCorr[[i]]),up=TRUE)
ZZCorrTemp<-FillZeros(ZZCorrTemp,nr=(nrow(ZZCorrTemp)+rowsdown),nc=ncol(ZZCorrTemp))
if (i==1) ZOriginal<-ZZCorrTemp
else {
ZOriginal<-cbind(ZOriginal,ZZCorrTemp)
}
}
}
if (nrandind>0) {
# NOTE(review): 'i' here is a leftover from the previous loop (or is
# undefined when nrandcor==0) -- verify the intended condition.
if (i==1 & is.null(ZOriginal)) ZOriginal<-ZZIndepDesign
else ZOriginal<-cbind(ZOriginal,ZZIndepDesign)
}
# Augmented design [X Z; 0 I] over data rows plus one pseudo-row per effect.
TTOriginal1<-cbind(X,ZOriginal)
TTOriginal2<-cbind(matrix(0,ncol(ZOriginal),ptot),diag(ncol(ZOriginal)))
TTOriginal<-rbind(TTOriginal1,TTOriginal2)
###################
# Cumulative random-effect dimensions and starting values for the modes.
qcum<-cumsum(c(0,q))
qtot<-qcum[length(qcum)]
if (is.null(Vstart)) Vstart<-rep(0,sum(q))
# index of correlated random effects
V<-list(0)
U<-list(0)
## print(nrow(ZZDesign))
## print(ncol(ZZDesign))
## print(nrow(X))
# TT: augmented design using the Cholesky-transformed random-effects columns.
TT<-cbind(X,ZZDesign)
# PsiM: pseudo-response for the random part, set by random-effect distribution.
PsiM<-rep(0,sum(q))
if (nrandcor>0){
for (i in 1:nrandcor){
TT<-rbind(TT,cbind(matrix(0,q[i],ptot+qcum[i]),diag(q[i]),matrix(0,q[i],qcum[nrandcor+nrandind+1]-qcum[i+1])))
V[[i]]<-as.matrix(Vstart[(qcum[i]+1):(qcum[i+1])])
if (i==1) VT<-V[[1]]
else VT<-c(VT,list(V[[i]]))
if (RandDistCorr[i]=="Normal") PsiM[(qcum[i]+1):qcum[i+1]]<-0
if (RandDistCorr[i]=="Gamma") PsiM[(qcum[i]+1):qcum[i+1]]<-1
if (RandDistCorr[i]=="IGamma") PsiM[(qcum[i]+1):qcum[i+1]]<-1
if (RandDistCorr[i]=="Beta") PsiM[(qcum[i]+1):qcum[i+1]]<-0.5
}
}
if (nrandind>0){
for (i in 1:nrandind){
TT<-rbind(TT,cbind(matrix(0,q[i+nrandcor],ptot+qcum[i+nrandcor]),diag(q[i+nrandcor]),matrix(0,q[i+nrandcor],qcum[nrandind+nrandcor+1]-qcum[nrandcor+i+1])))
V[[i+nrandcor]]<-as.matrix(Vstart[(qcum[i+nrandcor]+1):(qcum[i+nrandcor+1])])
if ((i+nrandcor)==1) VT<-V[[i]]
else VT<-c(VT,list(V[[i+nrandcor]]))
if (RandDistIndep[i]=="Normal") PsiM[(qcum[i+nrandcor]+1):qcum[i+nrandcor+1]]<-0
if (RandDistIndep[i]=="Gamma") PsiM[(qcum[i+nrandcor]+1):qcum[i+nrandcor+1]]<-1
if (RandDistIndep[i]=="IGamma") PsiM[(qcum[i+nrandcor]+1):qcum[i+nrandcor+1]]<-1
if (RandDistIndep[i]=="Beta") PsiM[(qcum[i+nrandcor]+1):qcum[i+nrandcor+1]]<-0.5
}
}
# OFFSET and Binomial denominators management - i am not sure how to do it - keep it for later while we develop the code #
# The offset needs to be done for the whole vector corresponding to Y - such as all models are considered Poisson or Binomial -begrijp je? #
# Z: data-rows part of the random-effects design within TT.
Z<-TT[(1:sum(ModelsDims)),((ptot+1):ncol(TT))]
# B: binomial denominators / offsets per observation (1 when none supplied).
B<-0
if (is.null(OFFSETList)) B<-rep(1,nrow(Y))
else {
for (i in 1:nModels){
if (length(OFFSETList[[i]])==1 & OFFSETList[[i]]==0) B[(cModelsDims[i]+1):(cModelsDims[i+1])]<-1
else B[(cModelsDims[i]+1):(cModelsDims[i+1])]<-OFFSETList[[i]]
}
}
# Starting values for the variance-model coefficients.
if (nrandcor>0) DRgamma<-rep(0,ncol(DDRCorr))
else DRgamma<-NULL
if (nrandind>0) DRgamma<-c(DRgamma,DRgammaIndep)
# Main-loop state: iteration counter and convergence criterion sentinel.
Iteration<-0
Convergence<-100
while (Convergence>CONV){
# for (iii in 1:1) {
Iteration<-Iteration+1
# if (Info) cat("\n Iteration: ",Iteration," Convergence: ",Convergence,"\n")
###############################################
# PLAN: #
# Update Mean Structure #
# Laplace approximation to the mean structure #
# Variances of random components #
# Overdispersions #
# Correlations #
###############################################
MeanParmsLast<-c(Beta,unlist(VT))
# Lambda of correlated random effects is equal to one - these after cholesky transformation are denoted LambdaChol#
if (nrandcor>0) {
DRgamma[1:cumqcorr[length(cumqcorr)]]<-0
}
Lambda<-exp(DDR%*%DRgamma)
# Overdispersion - now for all models overdispersion is coded in one matrix DDY and one vector DYgamma FOR ALL MODELS AT THE SAME TIME #
Phi<-exp(DDY%*%DYgamma)
GammaMvec<-c(Phi,Lambda)
# Mean values #
eta<-TT[1:ntot,]%*%as.matrix(c(Beta,unlist(VT)))
# Here different link is applied to different model #
mu<-0
Wvec<-0
dWdmu<-0
dmudeta<-0
d2Wdmu2<-0
for (i in 1:nModels){
mu[(cModelsDims[i]+1):cModelsDims[i+1]]<-B[(cModelsDims[i]+1):cModelsDims[i+1]]*InvLinkY(eta[(cModelsDims[i]+1):cModelsDims[i+1]],LinkList[[i]])
Wvec[(cModelsDims[i]+1):cModelsDims[i+1]]<-Wmatgen(mu[(cModelsDims[i]+1):cModelsDims[i+1]],B[(cModelsDims[i]+1):cModelsDims[i+1]],LinkList[[i]],RespDist[i])
dWdmu[(cModelsDims[i]+1):cModelsDims[i+1]]<-dWdmugen(mu[(cModelsDims[i]+1):cModelsDims[i+1]],B[(cModelsDims[i]+1):cModelsDims[i+1]],LinkList[[i]],RespDist[i])
d2Wdmu2[(cModelsDims[i]+1):cModelsDims[i+1]]<-d2Wdmu2gen(mu[(cModelsDims[i]+1):cModelsDims[i+1]],B[(cModelsDims[i]+1):cModelsDims[i+1]],LinkList[[i]],RespDist[i])
dmudeta[(cModelsDims[i]+1):cModelsDims[i+1]]<-dmudetagen(mu[(cModelsDims[i]+1):cModelsDims[i+1]],B[(cModelsDims[i]+1):cModelsDims[i+1]],LinkList[[i]],RespDist[i])
}
# So far the Link and Dist are equivalent as only canonical links are allowed #
WR<-list(0)
UT<-0
WRT<-list(0)
if (nrandcor+nrandind>0) {
for (i in 1:(nrandcor+nrandind)) {
U[[i]]<-LinkR(V[[i]],RandDist[i])
if (i==1) UT<-U[[1]]
else UT<-c(UT,U[[i]])
WR[[i]]<-WRVC(U[[i]],RandDist[i])
if (i==1) WRT<-WR[[1]]
else WRT<-c(WRT,WR[[i]])
}
}
WTotvec<-c(Wvec,WRT)
ISIGMAMvec<-as.vector((1/GammaMvec)*WTotvec)
#dAdmu<-dAdmugen(mu,Link)
#Amat<-Amatgen(mu,Link)
# Adjustment computation for the Laplace Approximation to the mean #
# Now a Laplace Approximation to the mean can be used for one model but not for another therefore LaplaceFixed is a vector #
# We will not make a LAPFIX by model, instead we adjust all at the same time:
# Those models which are LAPFIX=FALSE need to have Design matrix in this part Z equal to zero for random effects corresponding to these models
# The adjustment terms are of length n1+n2+.... so for all the models, if one model is LAPFIX=FALSE then the adjustemt terms for those models are evenly
# Redistributed among the rest of the models which are LAPFIX=TRUE
# Per-observation Laplace correction to the mean; stays zero when no model
# uses the fixed-effect Laplace approximation.
Scorr<-rep(0,sum(ModelsDims))
################################################
# Consider the revision of the procedure below #
# By adding the factor random effects #
################################################
# There are two options : one is that there are too many adjustments than the ys - so they have to be redistributed
# : two is that rows are deleted and there are as many adjustments as the ys but instead all the dimension change
# : total matrices must be used in computation of the derivitive vhatbeta
# PART OF CREATING DESIGN MATRICES FOR LAPFIX CAN BE MOVED OUT OF THE ITERATIONS AS THE SAME MATRICES WILL BE USED - BUT ONLY CORRELATION CHANGES THE
# CORRELATED PART - THINK HOW TO DO IT
CorrTerms<-0
if (any(LaplaceFixed)==TRUE){
# Build "LapFix" copies of the design/weight objects that keep only the
# random effects (and observations) of the models that are integrated out
# by the Laplace approximation: entries to drop are first marked NA (or
# their Z columns zeroed) and physically removed further below.
# Exclude from the Z matrix the columns which are not integrated out - as this model is not LAPFIX=TRUE #
# We need an index to denote which random effects are integrated out #
ZLapFix<-Z
VLapFix<-unlist(VT)
qLapFix<-q
ISIGMAMvecLapFix<-ISIGMAMvec
WvecLapFix<-Wvec
dWdmuLapFix<-dWdmu
dmudetaLapFix<-dmudeta
integratedModels<-seq(1,nModels)
ModelsDimsLapFix<-ModelsDims
# Mark correlated random effects whose owning model is not Laplace-fixed.
if (nrandcor>0) {
for (i in 1:nrandcor){
if (LaplaceFixed[corrModel[i]]==FALSE) {
ZLapFix[,(qcum[i]+1):(qcum[i+1])]<-0
VLapFix[(qcum[i]+1):(qcum[i+1])]<-NA
qLapFix[i]<-NA
ISIGMAMvecLapFix[(ntot+qcum[i]+1):(ntot+qcum[i+1])]<-NA
}
}
}
# Same marking for the independent random effects.
if (nrandind>0) {
for (i in 1:nrandind){
if (LaplaceFixed[indepModel[i]]==FALSE) {
ZLapFix[,(qcum[i+nrandcor]+1):(qcum[i+nrandcor+1])]<-0
VLapFix[(qcum[i+nrandcor]+1):(qcum[i+nrandcor+1])]<-NA
qLapFix[i+nrandcor]<-NA
ISIGMAMvecLapFix[(ntot+qcum[i+nrandcor]+1):(ntot+qcum[i+nrandcor+1])]<-NA
}
}
}
# Count how many random effects are integrated out and remember their
# positions in the original design (correlated indices first).
nrandint<-0
integratedIndex<-NA
if (nrandcor>0) {
for (i in 1:nrandcor){
if (LaplaceFixed[corrModel[i]]==TRUE) {
nrandint<-nrandint+1
if (is.na(integratedIndex[1])) integratedIndex<-i
else integratedIndex<-c(integratedIndex,i)
}
}
}
if (nrandind>0) {
for (i in 1:nrandind){
if (LaplaceFixed[indepModel[i]]==TRUE) {
nrandint<-nrandint+1
if (is.na(integratedIndex[1])) integratedIndex<-i
else integratedIndex<-c(integratedIndex,i+nrandcor) # integratedIndex contains the number of random effect which is integrated out its position in original design matrix#
}
}
}
# Remove all the columns which are of zero - but question should we keep track which columns are removed ? #
ZLapFix<-ZLapFix[,(apply(ZLapFix,2,sum)!=0)]
VLapFix<-VLapFix[!is.na(VLapFix)]
qLapFix<-qLapFix[!is.na(qLapFix)]
qcumLapFix<-cumsum(c(0,qLapFix))
# Remove Rows if are all zeros - and create an index which rows are removed - note that we remove the rows for Y response together with #
rowLapFix<-rep(1,nModels) # This index will determine which rows are deleted for which model if index is set to zero it means that the rows for that model
# are deleted
obsLapFix<-rep(1,ntot) # This vector will select which adjustments are assigned to ys and which need to be redistributed at the moment i am doing it, it is
# still unclear how it will work
# A model whose remaining Z rows are all zero contributes nothing to the
# integrated part: drop its observations and its random-effect entries.
for (i in 1:nModels) {
if (all(apply(ZLapFix[(cModelsDims[i]+1):cModelsDims[i+1],],1,sum)==0)) {
integratedModels[i]<-NA
rowLapFix[i]<-0
ISIGMAMvecLapFix[(cModelsDims[i]+1):cModelsDims[i+1]]<-NA # This removes the main effects - we still need to remove the random effects matrix #
WvecLapFix[(cModelsDims[i]+1):cModelsDims[i+1]]<-NA
dWdmuLapFix[(cModelsDims[i]+1):cModelsDims[i+1]]<-NA
dmudetaLapFix[(cModelsDims[i]+1):cModelsDims[i+1]]<-NA
ModelsDimsLapFix[i]<-NA
if (nrandcor>0){
for (j in 1:nrandcor){
if (corrModel[j]==i) ISIGMAMvecLapFix[(ntot+qcum[j]+1):(ntot+qcum[j+1])]<-NA
}
}
if (nrandind>0){
for (j in 1:nrandind){
if (indepModel[j]==i) ISIGMAMvecLapFix[(ntot+qcum[j+nrandcor]+1):(ntot+qcum[j+nrandcor+1])]<-NA
}
}
}
if (LaplaceFixed[i]==FALSE) obsLapFix[(cModelsDims[i]+1):cModelsDims[i+1]]<-0
}
# Physically drop the NA-marked rows/entries and recompute the reduced dims.
ZLapFix<-ZLapFix[apply(ZLapFix,1,sum)!=0,]
ISIGMAMvecLapFix<-ISIGMAMvecLapFix[!is.na(ISIGMAMvecLapFix)]
WvecLapFix<-WvecLapFix[!is.na(WvecLapFix)]
dWdmuLapFix<-dWdmuLapFix[!is.na(dWdmuLapFix)]
dmudetaLapFix<-dmudetaLapFix[!is.na(dmudetaLapFix)]
ntotLapFix<-length(WvecLapFix)
integratedModels<-integratedModels[!is.na(integratedModels)]
ModelsDimsLapFix<-ModelsDimsLapFix[!is.na(ModelsDimsLapFix)]
cModelsDimsLapFix<-cumsum(c(0,ModelsDimsLapFix))
nintMod<-length(integratedModels)
# We need to separate the design matrix for the derivative with respect to beta with the design which is used in the trace part #
TT2<-TT[,(ptot+1):(ptot+sum(q))]
TT2LapFix<-rbind(ZLapFix,diag(ncol(ZLapFix)))
# The function Solver Short needs to be reprogrammed - but how can we invert a matrix if there is a zero row or zero collumn #
# One thing is to use a generalized inverse, however other thing is to - do we need zeros in the inverse function #
# This is for the derivative #
OUT1<-SolverShort(ISIGMAMvec,Z)
OUTLapFix<-SolverShort(ISIGMAMvecLapFix,ZLapFix) # This needs to be adjusted to accomodate the zero rows, than the diagonal matrix is reduced #
# A new argument Z is added.as the design matrix in this part might be different than the general Z
#print("DimTT2");print(dim(TT2));print("Dim2");print(length(rep(ISIGMAMvec,each=nrow(t(TT2)))))
INV1<-OUT1$Inverse # These are used for derivative of random effects #
DiagPP2<-OUT1$DiagPP2
INV1LapFix<-OUTLapFix$Inverse # These are used for the correction factor determinant over integrated random effects #
DiagPP2LapFix<-OUTLapFix$DiagPP2 # This will not work because ntot and dimensions are given globally we need to change it !#
rm(OUT1)
rm(OUTLapFix)
MOD<-INV1%*%(t(Z)*rep((1/Phi),each=ncol(Z))) # This is for random effects derivative
ADJDER1<-list(0)
ADJDER2<-list(0)
# How to modify this? #
# Now we need to iterate over the random effects which are integrated out #
for (i in 1:nrandint){
ADJDER1[[i]]<--ZLapFix[,(qcumLapFix[i]+1):qcumLapFix[i+1]]%*%(MOD[(qcum[integratedIndex[i]]+1):(qcum[integratedIndex[i]+1]),])#*rep(Wvec,each=nrow(MOD[(qcum[i]+1):(qcum[i+1]),])))
ADJDER2[[i]]<--MOD[(qcum[integratedIndex[i]]+1):(qcum[integratedIndex[i]+1]),]
}
# Computing correction quantities for the Laplace Approximation of fixed effects #
# This here gets difficult ntot and ntotLapFix - how to make sure we do the correct thing #
CorrTermsTemp<-list(0)
CorrTermsTemp[[1]]<-as.vector(DiagPP2LapFix[1:ntotLapFix])*as.vector(1/WvecLapFix)*as.vector(1/WvecLapFix)*(dWdmuLapFix)*dmudetaLapFix
# We need to scale CorrTerms[[1]] onto the whole ntot #
CorrTerms<-list(0)
CorrTerms[[1]]<-rep(0,ntot)
# Scatter the reduced-dimension correction back onto the full observation
# vector, model by integrated model.
for (i in 1:nintMod){
CorrTerms[[1]][(cModelsDims[integratedModels[i]]+1):cModelsDims[integratedModels[i]+1]]<-CorrTermsTemp[[1]][(cModelsDimsLapFix[i]+1):cModelsDimsLapFix[i+1]]
}
# Adjustment terms per integrated random effect; ADJ2 depends on the
# random-effect distribution through d(WR)/du evaluated at u.
for (i in 1:nrandint){
ADJ1<-rep(0,ntot)
ADJ2<-rep(0,ntot)
ADJ1<-t(ADJDER1[[i]])%*%(as.vector(DiagPP2LapFix[1:ntotLapFix])*as.vector(1/WvecLapFix)*as.vector(dWdmuLapFix)*as.vector(dmudetaLapFix)) # Check this one
if (RandDist[integratedIndex[i]]=="Gamma") ADJ2<-t(ADJDER2[[i]])%*%as.vector(DiagPP2LapFix[(ntotLapFix+qcumLapFix[i]+1):(ntotLapFix+qcumLapFix[1+i])])
if (RandDist[integratedIndex[i]]=="IGamma") ADJ2<-t(ADJDER2[[i]])%*%(as.vector(DiagPP2LapFix[(ntotLapFix+qcumLapFix[i]+1):(ntotLapFix+qcumLapFix[1+i])])*as.vector(2*U[[integratedIndex[i]]]))
if (RandDist[integratedIndex[i]]=="Beta") ADJ2<-t(ADJDER2[[i]])%*%(as.vector(DiagPP2LapFix[(ntotLapFix+qcumLapFix[i]+1):(ntotLapFix+qcumLapFix[1+i])])*as.vector(1-2*U[[integratedIndex[i]]]))
CorrTerms<-c(CorrTerms,list(ADJ1,ADJ2))
}
# Here we need to have in mind two scenarions, when there is cholesky correlation therefore design over n1+n2 but integrated only the one model #
# And scenario two when only one model is integrated but there are no cholesky correlation #
CorrTermsLength<-length(CorrTerms)
CorrTerms<-as.matrix(unlist(CorrTerms))
dim(CorrTerms)<-c(ntot,CorrTermsLength)
# Final per-observation correction used to shift the working response.
Scorr<-0.5*Phi*dmudeta*apply(CorrTerms,1,sum)
# Now we need to take out the corrections for which ys are not adjusted and split them evenly over those who are #
}
# --- Assemble the augmented working response and solve the IWLS system ----
# Laplace-corrected response and the working response for the mean part.
Ystar<-Y-Scorr
zmain<-eta+(Ystar-mu)/dmudeta
# Pseudo-response of the random-effect part, shifted by the same correction.
PsiMstar<-PsiM+(Lambda*crossprod(Z,as.matrix(Wvec*Scorr*(1/Phi)/dmudeta)))
zrand<-list(0)
if (nrandcor>0){
for (i in 1:nrandcor){
zrand[[i]]<-V[[i]]+(PsiMstar[(qcum[i]+1):qcum[i+1]]-U[[i]])/WR[[i]]
}
}
if (nrandind>0){
for (i in 1:nrandind){
zrand[[i+nrandcor]]<-V[[i+nrandcor]]+(PsiMstar[(qcum[i+nrandcor]+1):qcum[i+nrandcor+1]]-U[[i+nrandcor]])/WR[[i+nrandcor]]
}
}
zrand<-as.matrix(unlist(zrand))
zTot<-as.matrix(c(zmain,zrand))
# Updating Equations #
# NOTE: this recomputes the same snapshot taken earlier; it is the one
# actually compared against MeanParms for the convergence criterion.
MeanParmsLast<-c(Beta,unlist(VT))
OUT1<-SolverLong(ISIGMAMvec,zTot)
MeanParms<-OUT1$NewParms
DiagPMAT<-OUT1$DiagPMAT
rm(OUT1)
if (exists("INV1")) rm(INV1)
#print("Block 2");print(CPTISIGMAM);#print("EISIGMAMvec");#print(EISIGMAMvec);print("zTot");print(zTot)
# Unpack the solved mean parameters: fixed effects first, then the random
# effects in qcum order (correlated terms, then independent terms).
Beta<-MeanParms[1:ptot]
if (nrandcor>0){
for (i in 1:nrandcor){
V[[i]]<-MeanParms[(ptot+qcum[i]+1):(ptot+qcum[i+1])]
if (i==1) VT<-V[[i]]
else VT<-c(VT,V[[i]])
}
}
if (nrandind>0){
for (i in 1:nrandind){
V[[i+nrandcor]]<-MeanParms[(ptot+qcum[i+nrandcor]+1):(ptot+qcum[i+nrandcor+1])]
# FIX: only restart VT when no correlated effects were collected above.
# Previously "if (i==1)" discarded the correlated part of VT whenever
# nrandcor>0 and nrandind>0, leaving c(Beta,unlist(VT)) too short for
# the multiplication with TT in the re-evaluation of eta.
if (i==1 && nrandcor==0) VT<-V[[i+nrandcor]]
else VT<-c(VT,V[[i+nrandcor]])
}
}
# L1 distance between successive mean-parameter vectors drives convergence.
Convergence<-sum(abs(MeanParms-MeanParmsLast))
## if (DEBUG==TRUE) print("Convergence Mean");print(Convergence)
# Now we move to the estimation of dispersion and overdispersion given the correlation parameters rho based on transformed multivariat random effects to the
# independence scenario - so we continue the procedure, first dispersion for each random effect, then overdispersion for each model
# After that we reestimate the correlation and construct updated variance covariance matrix this ends the joint model
###############################
##### Variance Components #####
###############################
if (EstimateVariances==TRUE) {
# --- Variance-component step: re-evaluate means, weights and u ------------
# Reevaluation of mean and u #
eta<-TT[1:ntot,]%*%as.matrix(c(Beta,unlist(VT)))
# Here different link is applied to different model #
for (i in 1:nModels){
mu[(cModelsDims[i]+1):cModelsDims[i+1]]<-B[(cModelsDims[i]+1):cModelsDims[i+1]]*InvLinkY(eta[(cModelsDims[i]+1):cModelsDims[i+1]],LinkList[[i]])
Wvec[(cModelsDims[i]+1):cModelsDims[i+1]]<-Wmatgen(mu[(cModelsDims[i]+1):cModelsDims[i+1]],B[(cModelsDims[i]+1):cModelsDims[i+1]],LinkList[[i]],RespDist[i])
dWdmu[(cModelsDims[i]+1):cModelsDims[i+1]]<-dWdmugen(mu[(cModelsDims[i]+1):cModelsDims[i+1]],B[(cModelsDims[i]+1):cModelsDims[i+1]],LinkList[[i]],RespDist[i])
dmudeta[(cModelsDims[i]+1):cModelsDims[i+1]]<-dmudetagen(mu[(cModelsDims[i]+1):cModelsDims[i+1]],B[(cModelsDims[i]+1):cModelsDims[i+1]],LinkList[[i]],RespDist[i])
}
# Keep copies for external inspection when debugging.
if (DEBUG==TRUE) {
muout <- mu
Wvecout <- Wvec
dWdmuout <- dWdmu
dmudetaout <- dmudeta
}
# So far the Link and Dist are equivalent as only canonical links are allowed #
# Recompute u-scale effects, their weights and additionally dWR/du, which
# is needed for the leverage adjustments below.
WR<-list(0)
UT<-0
WRT<-0
dWRdu<-0
if (nrandcor+nrandind>0) {
for (i in 1:(nrandcor+nrandind)) {
U[[i]]<-LinkR(V[[i]],RandDist[i])
if (i==1) UT<-U[[1]]
else UT<-c(UT,U[[i]])
WR[[i]]<-WRVC(U[[i]],RandDist[i])
if (i==1) WRT<-WR[[1]]
else WRT<-c(WRT,WR[[i]])
if (i==1) dWRdu<-DWRDU(U[[i]],RandDist[i])
else dWRdu<-c(dWRdu,DWRDU(U[[i]],RandDist[i]))
}
}
WTotvec<-c(Wvec,WRT)
ISIGMAMvec<-as.vector((1/GammaMvec)*WTotvec)
# Random-effect deviance components per term; later divided by (1-qrr)
# leverages to feed the gamma model for the dispersions.
#Computing Deviances : this must be adjusted for correlated and independent random effects #
DevianceRand<-list(0)
if (nrandcor>0) {
for (i in 1:nrandcor){
DevianceRand[[i]]<-DevRand(V[[i]],U[[i]],RandDist[i])
}
}
if (nrandind>0) {
for (i in 1:nrandind){
DevianceRand[[i+nrandcor]]<-DevRand(V[[i+nrandcor]],U[[i+nrandcor]],RandDist[i+nrandcor])
}
}
# Truncated Computations #
#MTheta<-1-exp(-mu)
#M1Theta<-exp(-mu)*mu
#M2Theta<-exp(-mu)*mu*(1-mu)
#M3Theta<-M2Theta*(1-mu)-mu*M1Theta
#WTildevec<-as.vector(Wvec+((M2Theta/MTheta)-(M1Theta/MTheta)^2))
# Derivative of the random-effect modes vhat with respect to each lambda;
# one (block) solve per random-effect term.
DVhatDlambda<-list(0)
# ERROR - This part needs to be adjusted !!!!!!! #
# OLD CODE FOR DERIVATIVE #
if (nrandcor>0) {
for (i in 1:nrandcor){
DVhatDlambda[[i]]<--solve((t(Z[,(qcum[i]+1):qcum[i+1]])*rep(Wvec/Phi,each=ncol(Z[,(qcum[i]+1):qcum[i+1]])))%*%Z[,(qcum[i]+1):qcum[i+1]]+diag(WR[[i]]/Lambda[(1+qcum[i]):qcum[i+1]]))%*%(PsiM[(qcum[i]+1):(qcum[i+1])]-U[[i]])/(Lambda[(1+qcum[i]):qcum[i+1]]^2)
if (i==1) derold<-DVhatDlambda[[i]]
}
}
if (nrandind>0) {
for (i in 1:nrandind){
DVhatDlambda[[i+nrandcor]]<--solve((t(Z[,(qcum[i+nrandcor]+1):qcum[i+nrandcor+1]])*rep(Wvec/Phi,each=ncol(Z[,(qcum[i+nrandcor]+1):qcum[i+nrandcor+1]])))%*%Z[,(qcum[i+nrandcor]+1):qcum[i+nrandcor+1]]+diag(WR[[i+nrandcor]]/Lambda[(1+qcum[i+nrandcor]):qcum[i+nrandcor+1]]))%*%(PsiM[(qcum[i+nrandcor]+1):(qcum[i+nrandcor+1])]-U[[i+nrandcor]])/(Lambda[(1+qcum[i+nrandcor]):qcum[i+nrandcor+1]]^2)
}
}
# New code for derivative #
# We need to create the lambda vector #
#if (nrandcor>0) {
# for (i in 1:nrandcor) {
# LambdaTemp<-rep(0,length(Lambda))
# LambdaTemp[(qcum[i]+1):qcum[i+1]]<-1
# DVhatDlambda[[i]]<--solve(t(Z*as.vector(Wvec/Phi))%*%Z+diag(as.vector(WRT/Lambda)))%*%(((PsiM-UT)/(Lambda^2))*LambdaTemp)
# if (i==1) dernew<-DVhatDlambda[[i]]
# }
#}
#if (nrandind>0) {
# for (i in 1:nrandind) {
# LambdaTemp<-rep(0,length(Lambda))
# LambdaTemp[(qcum[i+nrandcor]+1):qcum[i+nrandcor+1]]<-Lambda[(qcum[i+nrandcor]+1):qcum[i+nrandcor+1]]
# DVhatDlambda[[i+nrandcor]]<--solve(t(Z*as.vector(Wvec/Phi))%*%Z+diag(as.vector(WRT/Lambda)))%*%(((PsiM-UT)/(Lambda^2))*LambdaTemp)
# }
#}
#DWTildeDthetavec<-as.vector(WTildevec-(M2Theta/MTheta)+(M3Theta/MTheta)-3*((M1Theta*M2Theta)/(MTheta^2))+((M1Theta^2)/(MTheta^2))+2*((M1Theta^3)/(MTheta^3)))
#DWTildeDmuvec<-as.vector(DWTildeDthetavec/mu)
# --- Leverage corrections qrr for the correlated random-effect terms ------
# qrr combines the hat-matrix diagonal with d(log W)/d(log lambda) terms,
# plus a distribution-specific correction so the gamma dispersion model
# targets the true likelihood rather than EQL.
qmod<-list(0)
qCur<-list(0)
qrr<-list(0)
if (nrandcor>0) {
for (i in 1:nrandcor){
# SSCur maps each augmented row to its cluster (0 = not in this term).
SSCur<-rep(0,ntot+sum(q))
SSCur[1:ntot]<-SSC[[i]]
SSCur[(ntot+1+qcum[i]):(ntot+qcum[i+1])]<-(1:(qcum[i+1]-qcum[i]))
DlogWDloglambda<-matrix(0,ntot+sum(q),ntot+sum(q))
##### This here can be replaced the diagonal matrix dlogwdloglambda #####
DlogWDloglambda[1:ntot,1:ntot]<-diag(as.vector(1/Wvec)*as.vector(dWdmu)*as.vector(dmudeta)*(as.vector(Z[,(1+qcum[i]):qcum[i+1]]%*%(DVhatDlambda[[i]]*Lambda[(1+qcum[i]):qcum[i+1]])))) # RRRRR
DlogWDloglambda[(ntot+1+qcum[i]):(ntot+qcum[i+1]),(ntot+1+qcum[i]):(ntot+qcum[i+1])]<-diag(DWRDU(U[[i]],RandDist[i])*(as.vector(DVhatDlambda[[i]])*as.vector(Lambda[(1+qcum[i]):qcum[i+1]]))) # RRRRR
qmod[[i]]<-DiagPMAT*diag(DlogWDloglambda)
# Aggregate the per-row contributions within each cluster via tapply.
qCur[[i]]<-cbind(qmod[[i]],SSCur)
qCur[[i]]<-tapply(qCur[[i]][,1],qCur[[i]][,2],sum)
qCur[[i]]<-qCur[[i]][row.names(qCur[[i]])!="0"]
qrr[[i]]<-DiagPMAT[(ntot+1+qcum[i]):(ntot+qcum[i+1])]
## if (DEBUG) {print("Basis qrr");print(qrr);}
qrr[[i]]<-qrr[[i]]-qCur[[i]]
# Correction to estimate the true likelihood instead of EQL #
if (RandDist[i]=="Gamma") qrr[[i]]<-qrr[[i]]+1+2*(log(Lambda[(1+qcum[i]):(qcum[i+1])])/Lambda[(1+qcum[i]):(qcum[i+1])])+2*(digamma(1/Lambda[(1+qcum[i]):(qcum[i+1])])/Lambda[(1+qcum[i]):(qcum[i+1])])
if (RandDist[i]=="IGamma") qrr[[i]]<-qrr[[i]]+1+(2/Lambda[(1+qcum[i]):(qcum[i+1])])-2*(log(1/Lambda[(1+qcum[i]):(qcum[i+1])])/Lambda[(1+qcum[i]):(qcum[i+1])])-2*((1+Lambda[(1+qcum[i]):(qcum[i+1])])/Lambda[(1+qcum[i]):(qcum[i+1])])+2*(digamma(1+(1/Lambda[(1+qcum[i]):(qcum[i+1])]))/Lambda[(1+qcum[i]):(qcum[i+1])])
if (RandDist[i]=="Beta") qrr[[i]]<-qrr[[i]]+1-2*(digamma(1/Lambda[(1+qcum[i]):(qcum[i+1])])/Lambda[(1+qcum[i]):(qcum[i+1])])+2*(digamma(1/(2*Lambda[(1+qcum[i]):(qcum[i+1])]))/Lambda[(1+qcum[i]):(qcum[i+1])])+log(4)/Lambda[(1+qcum[i]):(qcum[i+1])]
# Applying the correction for the deviances #
DevianceRand[[i]]<-DevianceRand[[i]]/(1-qrr[[i]])
}
}
# Same leverage computation as above, now for the independent random-effect
# terms (offset by nrandcor in all per-term containers).
if (nrandind>0) {
for (i in 1:nrandind){
SSCur<-rep(0,ntot+sum(q))
SSCur[1:ntot]<-SSIndep[[i]]
SSCur[(ntot+1+qcum[i+nrandcor]):(ntot+qcum[i+nrandcor+1])]<-(1:(qcum[i+nrandcor+1]-qcum[i+nrandcor]))
DlogWDloglambda<-matrix(0,ntot+sum(q),ntot+sum(q))
##### This here can be replaced the diagonal matrix dlogwdloglambda #####
DlogWDloglambda[1:ntot,1:ntot]<-diag(as.vector(1/Wvec)*as.vector(dWdmu)*as.vector(dmudeta)*(as.vector(Z[,(qcum[i+nrandcor]+1):qcum[i+nrandcor+1]]%*%(DVhatDlambda[[i+nrandcor]]*Lambda[(1+qcum[i+nrandcor]):qcum[i+nrandcor+1]])))) # RRRRR
DlogWDloglambda[(ntot+1+qcum[i+nrandcor]):(ntot+qcum[i+nrandcor+1]),(ntot+1+qcum[i+nrandcor]):(ntot+qcum[i+nrandcor+1])]<-diag(DWRDU(U[[i+nrandcor]],RandDist[i+nrandcor])*(as.vector(DVhatDlambda[[i+nrandcor]])*as.vector(Lambda[(1+qcum[i+nrandcor]):qcum[i+nrandcor+1]]))) # RRRRR
qmod[[i+nrandcor]]<-DiagPMAT*diag(DlogWDloglambda)
qCur[[i+nrandcor]]<-cbind(qmod[[i+nrandcor]],SSCur)
qCur[[i+nrandcor]]<-tapply(qCur[[i+nrandcor]][,1],qCur[[i+nrandcor]][,2],sum)
qCur[[i+nrandcor]]<-qCur[[i+nrandcor]][row.names(qCur[[i+nrandcor]])!="0"]
qrr[[i+nrandcor]]<-DiagPMAT[(ntot+1+qcum[i+nrandcor]):(ntot+qcum[i+nrandcor+1])]
## if (DEBUG) {print("Basis qrr");print(qrr);}
qrr[[i+nrandcor]]<-qrr[[i+nrandcor]]-qCur[[i+nrandcor]]
# Correction to estimate the true likelihood instead of EQL #
if (RandDist[i+nrandcor]=="Gamma") qrr[[i+nrandcor]]<-qrr[[i+nrandcor]]+1+2*(log(Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])+2*(digamma(1/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])
if (RandDist[i+nrandcor]=="IGamma") qrr[[i+nrandcor]]<-qrr[[i+nrandcor]]+1+(2/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])-2*(log(1/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])-2*((1+Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])+2*(digamma(1+(1/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])]))/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])
if (RandDist[i+nrandcor]=="Beta") qrr[[i+nrandcor]]<-qrr[[i+nrandcor]]+1-2*(digamma(1/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])+2*(digamma(1/(2*Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])]))/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])])+log(4)/Lambda[(1+qcum[i+nrandcor]):(qcum[i+nrandcor+1])]
# Applying the correction for the deviances #
DevianceRand[[i+nrandcor]]<-DevianceRand[[i+nrandcor]]/(1-qrr[[i+nrandcor]])
}
}
## if (DEBUG) {print("Qrr");print(qrr);}
######################################
##### Gamma model for dispersion #####
######################################
# Weighted gamma GLM of the leverage-corrected deviances on the dispersion
# design DDR; prior weights are (1-qrr)/4.
# To speed up we can separate the models for dispersion by component instead of inverting the whole matrix #
# However if there are not so many dispersion components it should not be a problem #
invSigmaGammaR<-list(0)
if (nrandcor>0){
for (i in 1:nrandcor){
invSigmaGammaR[[i]]<-((1-qrr[[i]])/4)
}
}
if (nrandind>0){
for (i in 1:nrandind){
invSigmaGammaR[[i+nrandcor]]<-((1-qrr[[i+nrandcor]])/4)
}
}
muGammaR<-Lambda
oldDRgamma<-DRgamma
ksiR<-DDR%*%DRgamma
DevianceRand<-unlist(DevianceRand)
# Working response of the gamma GLM (log link).
ZRresp<-ksiR+(DevianceRand-muGammaR)/muGammaR
invSigmaGammaR<-diag(unlist(invSigmaGammaR))
# With no correlated effects the cumulative offset degenerates to 0 so the
# "independent" slice below covers the whole DRgamma vector.
if (nrandcor==0) cumqcorr <- 0
OldIndepgamma <- DRgamma[(cumqcorr[length(cumqcorr)]+1):length(DRgamma)]
# Weighted least squares solve of the dispersion GLM update.
DRgamma<-solve(crossprod(DDR,invSigmaGammaR)%*%DDR,crossprod(DDR,invSigmaGammaR)%*%ZRresp)
StdErrDRgamma<-sqrt(diag(solve(crossprod(DDR,invSigmaGammaR)%*%DDR)))
NewIndepgamma <-DRgamma[(cumqcorr[length(cumqcorr)]+1):length(DRgamma)]
if (DEBUG==TRUE){
## print("DRgamma");print(DRgamma)
## print("Beta");print(Beta)
## print("VS");print(VT)
}
#if (DEBUG) {print("Phi End");print(Phi)}
# Only the independent-effect dispersions contribute to convergence here;
# the correlated ones are tracked after the Cholesky update below.
if (nrandind>0) Convergence <- Convergence + sum(abs(OldIndepgamma-NewIndepgamma))
# Maybe the piece of code below should be moved after the correlation is estimated #
####################################################################################################################
##### In this part we redesign the Z matrix - update with new correlation and new variances for the correlated #####
####################################################################################################################
# We need to add the construction of correlation matrix after the correlations have been upadted in front of this piece code #
if (nrandcor>0){
# Rebuild the Cholesky factor of each correlated block from the updated
# variances while keeping the current correlation structure, then rebuild
# the corresponding columns of the random-effect design.
for (i in 1:length(CorrMat)){
currentGamma<-DRgamma[(cumqcorr[i]+1):cumqcorr[i+1]]
DiagVar<-diag(exp(currentGamma))
## if (DEBUG) print("Correlation Matrix");print(DiagVar)
# Apply inverse cholesky #
currentMat<-CholeskyMatrices[[i]]%*%DiagVar%*%t(CholeskyMatrices[[i]])
# Now update with correlations #
currentMat<-diag(currentMat)
currentMat<-sqrt(currentMat)*t(CorrMatOut[[i]]*sqrt(currentMat))
CholeskyMatrices[[i]]<-t(chol(currentMat))
# Now current mat is the the new varcov matrix with the same correlation as before #
ZZCorrTempTotUpd<-ZZShort[[i]]%*%CholeskyMatrices[[i]]
currentindex<-seq(cumqcorr[i]+1,cumqcorr[i+1])
# Expand each transformed column into its cluster-indicator design.
for (j in currentindex) {
DiagDesign[[j]]<-model.matrix(~SSC[[j]]-1)
DiagDesign[[j]]<-DiagDesign[[j]]*ZZCorrTempTotUpd[,(1+j-currentindex[1])]
if (j==currentindex[1]) ZZCorrTotUpd<-DiagDesign[[j]]
else ZZCorrTotUpd<-cbind(ZZCorrTotUpd,DiagDesign[[j]])
}
ZZCorrUpd[[i]]<-ZZCorrTotUpd
if (i==1) ZZCorrDesign<-ZZCorrUpd[[1]]
else ZZCorrDesign<-cbind(ZZCorrDesign,ZZCorrUpd[[i]])
}
}
if (nrandind>0) {
# Pad each independent-effect design with zero rows so it aligns with the
# stacked multi-model response, then append it to the overall design.
ZZIndepTemp<-list(0)
for (i in 1:nrandind){
if (indepModel[i]==length(ModelsDims)) rowsdown<-0
else rowsdown<-sum(ModelsDims[(indepModel[i]+1):length(ModelsDims)])
if (indepModel[i]==1) rowsup<-0
else rowsup<-sum(ModelsDims[1:(indepModel[i]-1)])
# First expand up if possible #
ZZIndepTemp[[i]]<-FillZeros(ZZIndep[[i]],nr=(nrow(ZZIndep[[i]])+rowsup),nc=ncol(ZZIndep[[i]]),up=TRUE)
ZZIndepTemp[[i]]<-FillZeros(ZZIndepTemp[[i]],nr=(nrow(ZZIndepTemp[[i]])+rowsdown),nc=ncol(ZZIndepTemp[[i]]))
if (!exists("ZZCorrDesign") & i==1) ZZDesign<-ZZIndepTemp[[i]]
if (exists("ZZCorrDesign") & i==1) ZZDesign<-cbind(ZZCorrDesign,ZZIndepTemp[[1]])
if (i>1) ZZDesign<-cbind(ZZDesign,ZZIndepTemp[[i]])
}
}
else if(nrandcor>0) ZZDesign<-ZZCorrDesign
# Rebuild the augmented design TT = [X|Z ; 0|I] and recover the updated Z.
TT<-cbind(X,ZZDesign)
if (nrandcor>0){
for (i in 1:nrandcor){
TT<-rbind(TT,cbind(matrix(0,q[i],ptot+qcum[i]),diag(q[i]),matrix(0,q[i],qcum[nrandcor+nrandind+1]-qcum[i+1])))
}
}
if (nrandind>0){
for (i in 1:nrandind){
TT<-rbind(TT,cbind(matrix(0,q[i+nrandcor],ptot+qcum[i+nrandcor]),diag(q[i+nrandcor]),matrix(0,q[i+nrandcor],qcum[nrandind+nrandcor+1]-qcum[nrandcor+i+1])))
}
}
Z<-TT[(1:sum(ModelsDims)),((ptot+1):ncol(TT))]
# End of the re-specification of the random-effects design matrix in the part corresponding to the correlated effects #
#Convergence<-Convergence+sum(abs(DRgamma-oldDRgamma))
}
##########################
##### Overdispersion #####
##########################
# now work on overdispersion #
# we need to select the parameters which are supposed to be EstimateOverDisp=TRUE not all models have estimated overdispersion true #
if (any(EstimateOverDisp)){
# First select the matrix which will be used for the estimation #
# indexODEstim collects the observation positions (in the stacked
# response) of every model whose overdispersion is estimated.
for (i in 1:nModels){
if (EstimateOverDisp[i]==TRUE) {
if (i==1) indexODEstim<-seq(cModelsDims[i]+1,cModelsDims[i+1])
else indexODEstim<-c(indexODEstim,seq(cModelsDims[i]+1,cModelsDims[i+1]))
ntotODEstim<-length(indexODEstim)
}
}
#PMAT<-TT%*%solve(CPTISIGMAM%*%TT)%*%CPTISIGMAM # This stays the same - just a selection need to be done of the diagonals corresponding to the models which are #
# EstimOverDisp = TRUE #
qrrO<-rep(0,ntotODEstim)
# Applying the correction for the deviances #
DevianceResp<-rep(0,ntotODEstim)
DiagPMATODEstim<-DiagPMAT
# Per-model unit deviances of the response distributions.
# NOTE(review): DevianceResp has length ntotODEstim but is filled below at
# the ABSOLUTE positions (cModelsDims[i]+1):cModelsDims[i+1]; this is only
# aligned with qrrO<-DiagPMAT[indexODEstim] when the overdispersed models
# come first - confirm for configurations where an earlier model has
# EstimateOverDisp==FALSE.
for (i in 1:nModels){
if (EstimateOverDisp[i]==TRUE){
DevianceRespTemp<-rep(0,cModelsDims[i+1]-cModelsDims[i])
YTemp<-YList[[i]]
BTemp<-B[(cModelsDims[i]+1):cModelsDims[i+1]]
muTemp<-mu[(cModelsDims[i]+1):cModelsDims[i+1]]
PhiTemp<-Phi[(cModelsDims[i]+1):cModelsDims[i+1]]
# Binomial deviance handled in three cases to avoid log(0) at y=0, y=B.
if (RespDist[i]=="Binomial") {
DevianceRespTemp[YTemp!=0 & YTemp!=BTemp]<-2*(YTemp[YTemp!=0 & YTemp!=BTemp]*log(YTemp[YTemp!=0 & YTemp!=BTemp]/muTemp[YTemp!=0 & YTemp!=BTemp])-(YTemp[YTemp!=0 & YTemp!=BTemp]-BTemp[YTemp!=0 & YTemp!=BTemp])*log((BTemp[YTemp!=0 & YTemp!=BTemp]-YTemp[YTemp!=0 & YTemp!=BTemp])/(BTemp[YTemp!=0 & YTemp!=BTemp]-muTemp[YTemp!=0 & YTemp!=BTemp])))
DevianceRespTemp[YTemp==0]<-2*(BTemp[YTemp==0]*log((BTemp[YTemp==0])/(BTemp[YTemp==0]-muTemp[YTemp==0])))
DevianceRespTemp[YTemp==BTemp]<-2*(YTemp[YTemp==BTemp]*log(YTemp[YTemp==BTemp]/muTemp[YTemp==BTemp]))
}
if (RespDist[i]=="Poisson"){
DevianceRespTemp[YTemp!=0]<-2*(YTemp[YTemp!=0]*log(YTemp[YTemp!=0]/muTemp[YTemp!=0])-(YTemp[YTemp!=0]-muTemp[YTemp!=0]))
DevianceRespTemp[YTemp==0]<-2*muTemp[YTemp==0]
}
if (RespDist[i]=="Normal"){
DevianceRespTemp<-(YTemp-muTemp)^2
}
if (RespDist[i]=="Gamma"){
DevianceRespTemp<-2*(-log(YTemp/muTemp)+(YTemp-muTemp)/muTemp)
# Gamma responses also get the EQL-to-likelihood leverage correction.
DiagPMATODEstim[(cModelsDims[i]+1):cModelsDims[i+1]]<-DiagPMATODEstim[(cModelsDims[i]+1):cModelsDims[i+1]]+1+as.vector(2*log(PhiTemp)/PhiTemp)+as.vector(2*digamma(1/PhiTemp)/PhiTemp)
}
DevianceResp[(cModelsDims[i]+1):cModelsDims[i+1]]<-DevianceRespTemp
}
}
qrrO<-DiagPMAT[indexODEstim]
## if (DEBUG) {print("qrrO");print(qrrO)}
DevianceResp<-DevianceResp/(1-qrrO)
## if (DEBUG) {print("DevResp");print(DevianceResp)}
# Algorithm for Gamma model #
# Weighted gamma GLM of the corrected deviances on the overdispersion
# design DDY, restricted to the selected observations/columns.
invSigmaGammaO<-((1-qrrO)/4)
muGammaO<-Phi[indexODEstim]
oldDYgamma<-DYgamma
# select DDY which are to estimate #
DDYODEstim<-DDY[indexODEstim,,drop=F]
# select columns which are not zero #
tempind<-apply(matrix(as.logical(DDYODEstim),nrow(DDYODEstim),ncol(DDYODEstim)),2,any)
indexgammaODEstim<-which(tempind==TRUE)
# remove collums with all zeros from the design matrix #
DDYODEstim<-DDYODEstim[,indexgammaODEstim,drop=F]
DYgammaODEstim<-DYgamma[indexgammaODEstim]
ksiO<-DDYODEstim%*%DYgammaODEstim
ZOresp<-ksiO+(DevianceResp-muGammaO)/muGammaO
## if (DEBUG) {print("ZOresp");print(ZOresp)}
invSigmaGammaO<-diag(invSigmaGammaO)
DYgammaODEstim<-solve(crossprod(DDYODEstim,invSigmaGammaO)%*%DDYODEstim,crossprod(DDYODEstim,invSigmaGammaO)%*%ZOresp)
DYgamma[indexgammaODEstim]<-DYgammaODEstim
## if (DEBUG) {print("DYgamma");print(DYgamma)}
Convergence<-Convergence+sum(abs(DYgamma-oldDYgamma))
## if (DEBUG) {print("Convergence overdisp");print(Convergence)}
}
# --- Back-transform random effects to the correlated scale ----------------
# DRgamma is re-expressed as the log of the diagonal of L L' and the
# empirical Bayes estimates are mapped through the Cholesky factor so the
# correlation step below works on the correlated scale.
VTCorr<-list(0)
if (nrandcor>0){
for (i in 1:length(CorrMat)){
currentindex<-seq((cumqcorr[i]+1),cumqcorr[i+1])
DRgamma[currentindex]<-log(diag(CholeskyMatrices[[i]]%*%t(CholeskyMatrices[[i]])))
DRCorrgammaOld<-DRCorrgamma
DRCorrgamma<-DRgamma[currentindex] #HERE is probably an error if more than one correlation matrix is provided #
## print("DRCorrgamma");print(DRCorrgamma)
Convergence<-Convergence+sum(abs(DRCorrgammaOld-DRCorrgamma))
# Transform the random effects back to the scale of correlated #
# NOTE(review): the slice (qcum[cumqcorr[i]+1]+1):qcum[cumqcorr[i+1]+1]
# indexes qcum by cumqcorr - verify it selects the intended block when
# cumqcorr and qcum have different layouts.
VTemp<-unlist(VT)[(qcum[cumqcorr[i]+1]+1):qcum[cumqcorr[i+1]+1]] # Extract empirical bayes corresponding to the correlated effects of CorrMat[[i]]
noraneff<-cumqcorr[i+1]-cumqcorr[i]
VTemp<-matrix(VTemp,length(VTemp)/noraneff,noraneff)
VTCorr[[i]]<-CholeskyMatrices[[i]]%*%t(VTemp)
VTCorr[[i]]<-matrix(t(VTCorr[[i]]),nrow(VTemp)*noraneff,1)
if (i==1) VTCorrTot<-VTCorr[[1]]
else VTCorrTot<-c(VTCorrTot,VTCorr[[i]])
}
if (nrandind>0) VTCorrTot<-c(VTCorrTot,unlist(VT)[(qcum[cumqcorr[length(CorrMat)+1]]+1):qcum[length(qcum)]]) # This adds up independent empirical bayes to VTCorrTot
}
# In the estimation of the correlation the original design matrix is used #
################################################################
##### Estimate Correlations - Final Step in the Algorithm #####
################################################################
# How will we approach this ? #
# NOTES NOTES NOTES #
# 1. We need to transform the design to the correlated desging #
# 2. Compute first and second order derivatives #
# 3. Update the correlations #
# 4. Transform back to the independent design #
# It is not that easy all this #
# Iterate over correlations - for each compute the derivative #
# double iterate over correlations for each pair compute hessian #
# Strategy 1 - Use the previous variances values not the updated ones #
# We update the whole matrix of correlations after we have variances and correlations updated #
# Strategy 2 - use already here the updated matrix of variances - WHICH IS BETTER ? #
# We try strategy 1 it seems faster to program but is it faster to work? #
if (!is.null(CorrMat)) {
# --- Correlation estimation: allocate score/Hessian containers ------------
# One score entry and one Hessian row/column per correlation parameter
# across all correlation matrices; updates are done on Fisher's z scale.
ncorr<-length(unlist(Correlations))
# How many random effects correlated there are - nrandcor #
ScoreCorr<-rep(0,ncorr)
HessCorr<-matrix(0,ncorr,ncorr)
dvhatdcorr<-list(0)
dSigmadcorr<-list(0) # Derivative of variance covariance matrix with respect to rho #
# Note the derivative of Sigma with respect to two correlations (second order derivative) is equal to zero #
# We need to remember that the change is made not for the correlation but on Fishers z transform -Inf/Inf #
ZF <-list(0) #This is Fishers Z
dCorrdZF<-list(0)
OldCorrelations<-Correlations
# Compute score equations #
# The order in the program is as in the Correlations #
# Iterated over the number of correlations not random effects which are correlated #
# REMARK : It is wrong here in the way we treat correlations - it is not a vector but a list !#
# Map every correlation to Fisher's z scale and store the Jacobian
# d(rho)/d(z) needed by the chain rule in the score/Hessian computations.
# Sign convention here: z = 0.5*log((1-rho)/(1+rho)) = -atanh(rho), so
# rho = (1-e^{2z})/(1+e^{2z}) and drho/dz = -4*e^{2z}/(1+e^{2z})^2.
for (i in 1:length(CorrMat)) {
# Compute current Fisher Z #
# if (Correlations[i]==1) ZF[i]<--Inf
# else if (Correlations[i]==-1) ZF[i]<-Inf
for (j in 1:length(Correlations[[i]])){
# Make ZF as a vector not as a list here #
ZF[[i]][j]<-0.5*log((1-Correlations[[i]][j])/(1+Correlations[[i]][j]))
# FIX: denominator must use element j (was ZF[[i]][i], an index typo that
# evaluated the Jacobian at the wrong correlation whenever a matrix holds
# more than one correlation parameter).
dCorrdZF[[i]][j] <- - 4 * exp(2*ZF[[i]][j]) /((1+exp(2*ZF[[i]][j]))^2)
}
}
# Compute derivative of Total Sigma matrix with respect to rho #
# Here the creation of CorrMatDer #
# For each correlation parameter i, dSigma/drho_i is block-diagonal over
# the correlated random-effect groups: the group containing correlation i
# receives the derivative pattern (Kronecker-expanded by its number of
# levels lcorr[j]); every other group contributes a zero block.
for (i in 1:ncorr){ # This says which derivative
CurrentMatrix<-sum(cumqcorr<i)
for (j in 1:length(CorrMat)){
# Position of correlation i within its own correlation matrix.
# FIX: use max() (largest cumulative offset below i) instead of sum();
# sum() only coincides with the correct offset for at most two
# correlation matrices because cumqcorr starts with 0.
currentindex<-i-max(cumqcorr[cumqcorr<i])
if (j==CurrentMatrix) {
TempCorrDerMat<-SigmaMat[[j]]
if (Correlations[[j]][currentindex]!=0) TempCorrDerMat[CorrMat[[j]]==currentindex]<-SigmaMat[[j]][CorrMat[[j]]==currentindex]/(Correlations[[j]][currentindex])
else {
# At rho = 0 the ratio form is undefined; the derivative of
# sigma_a*sigma_b*rho with respect to rho is sigma_a*sigma_b.
standardDeviations<-as.vector(sqrt(diag(SigmaMat[[j]])))
standardDeviations<-standardDeviations%*%t(standardDeviations)
TempCorrDerMat[CorrMat[[j]]==currentindex]<-standardDeviations[CorrMat[[j]]==currentindex]
}
TempCorrDerMat[CorrMat[[j]]!=currentindex]<-0
TempCorrDerMat<-TempCorrDerMat%x%diag(lcorr[j])
if (j==1) dSigmadcorr[[i]]<-TempCorrDerMat
# FIX: bind the whole matrix; TempCorrDerMat[[j]] extracted a single
# element of the matrix (column-major [[ ]] indexing), not a block.
else dSigmadcorr[[i]]<-dbind(dSigmadcorr[[i]],TempCorrDerMat)
}
else {
TempCorrDerMat<-SigmaMat[[j]]*0
TempCorrDerMat<-TempCorrDerMat%x%diag(lcorr[j])
if (j==1) dSigmadcorr[[i]]<-TempCorrDerMat
# FIX: as above - bind the zero block itself, not a scalar element.
else dSigmadcorr[[i]]<-dbind(dSigmadcorr[[i]],TempCorrDerMat)
}
}
}
# Concatenate dWR/du, d2WR/du2, WR and U over all random-effect terms for
# use in the correlation score/Hessian computations below.
for (i in 1:length(RandDist)){
if (i==1) { dWRdUTot<-DWRDU(U[[i]],RandDist[i])
d2WRdU2Tot<-D2WRDU2(U[[i]],RandDist[i])
WRTot<-WR[[1]]
UTot<-U[[1]]
}
else { dWRdUTot<-c(dWRdUTot,DWRDU(U[[i]],RandDist[i]))
d2WRdU2Tot<-c(d2WRdU2Tot,D2WRDU2(U[[i]],RandDist[i]))
WRTot<-c(WRTot,WR[[i]])
UTot<-c(UTot,U[[i]])
}
}
# We need to add the zeros for uncorrelated random effects #
# NOTE(review): the statement below uses the loop index `i` left over from
# the preceding for-loop (i == length(RandDist)), so only that single
# element of dSigmadcorr is padded with the zero block for independent
# effects - likely this was meant to run over all ncorr elements; confirm.
if (nrandind>0) dSigmadcorr[[i]]<-dbind(dSigmadcorr[[i]],diag(qcum[length(qcum)]-qcum[cumqcorr[length(cumqcorr)]])*0)
# Extend the inverse variance matrix with the independent-effect part when
# independent random effects are present.
invSigmaTotComp<-invSigmaTot
if ((qcum[cumqcorr[length(CorrMat)+1]+1]+1)-qcum[length(qcum)]<0) invSigmaTotComp<-dbind(invSigmaTot,diag(ISIGMAMvec[(qcum[cumqcorr[length(CorrMat)+1]]+1):qcum[length(qcum)]]))
# (Z'WZ/phi + Sigma^-1)^-1, the curvature of the adjusted profile used for
# the derivatives of the random-effect modes.
invTT2temp<-solve(t(ZOriginal)%*%(ZOriginal*as.vector(Wvec/Phi))+invSigmaTotComp)
# within this iteration nest another iteration where hessian and second derivative of vhat is computed #
# Is this derivative for exponential family or only for normal distribution #
for (i in 1:ncorr){
dvhatdcorr[[i]]<-as.vector(invTT2temp%*%(invSigmaTotComp%*%(dSigmadcorr[[i]]%*%(invSigmaTotComp%*%VTCorrTot)))) # This seems to be okay
}
VTCorrTot<-as.vector(VTCorrTot)
# DD matrix #
# Adjusted-profile information matrix DD = T'(blockdiag(W/phi, Sigma^-1))T.
MIDMAT<-dbind(diag(as.vector(Wvec/Phi)),invSigmaTot)
if ((qcum[cumqcorr[length(CorrMat)+1]+1]+1)-qcum[length(qcum)]<0) MIDMAT<-dbind(MIDMAT,diag(ISIGMAMvec[(qcum[cumqcorr[length(CorrMat)+1]+1]+1):qcum[length(qcum)]]))
DD<-t(TTOriginal)%*%MIDMAT%*%TTOriginal
invDD<-solve(DD)
# Computing score and hessian with respect to the correlation parameters #
for (i in 1:ncorr) {
#dDDdrho1<-dbind(matrix(0,ptot,ptot),-invSigmaTotComp%*%dSigmadcorr[[i]]%*%invSigmaTotComp) ##### ERROR / FIXED - This implementation is only for normal random effects #####
# The below version is for GLM distribution #
dDDdrho1 <- dDDdranmat(X,ZOriginal,dWdmu,Wvec,dvhatdlambda=dvhatdcorr[[i]],invSigmaMat=invSigmaTotComp,dSigmadlambda=dSigmadcorr[[i]],WR=WRTot,dWRdu=dWRdUTot)
# The below score is okay but must be adjusted for indepnedent random effects #
ScoreCorr[i]<-(dvhatdcorr[[i]])%*%t(ZOriginal)%*%((Y-mu)*(1/Phi))-0.5*sum(diag(invSigmaTotComp%*%dSigmadcorr[[i]]))-dvhatdcorr[[i]]%*%invSigmaTotComp%*%VTCorrTot+
0.5*VTCorrTot%*%invSigmaTotComp%*%dSigmadcorr[[i]]%*%invSigmaTotComp%*%VTCorrTot-0.5*sum(diag(invDD%*%dDDdrho1)) # Score is okay ! #
#ScoreCorr[i]<--0.5*sum(diag(invSigmaTotComp%*%dSigmadcorr[[i]]))+0.5*VTCorrTot%*%invSigmaTotComp%*%dSigmadcorr[[i]]%*%invSigmaTotComp%*%VTCorrTot-0.5*sum(diag(invDD%*%dDDdrho1))
# Now proceed to the Hessian computation #
for (j in 1:ncorr) {
#dDDdrho2<-dbind(matrix(0,ptot,ptot),-invSigmaTotComp%*%dSigmadcorr[[j]]%*%invSigmaTotComp)
dDDdrho2 <- dDDdranmat(X,ZOriginal,dWdmu,Wvec,dvhatdlambda=dvhatdcorr[[j]],invSigmaMat=invSigmaTotComp,dSigmadlambda=dSigmadcorr[[j]],WR=WRTot,dWRdu=dWRdUTot)
d2vhatdcorr2<--invTT2temp%*%((t(ZOriginal*as.vector(1/Phi)*dWdmu*Wvec*as.vector(ZOriginal%*%dvhatdcorr[[i]]))%*%as.vector(ZOriginal%*%dvhatdcorr[[j]]))+invSigmaTotComp%*%(dSigmadcorr[[j]]%*%(invSigmaTotComp%*%(dSigmadcorr[[i]]%*%(invSigmaTotComp%*%VTCorrTot))))+invSigmaTotComp%*%(dSigmadcorr[[i]]%*%
(invSigmaTotComp%*%(dSigmadcorr[[j]]%*%(invSigmaTotComp%*%VTCorrTot))))-invSigmaTotComp%*%(dSigmadcorr[[j]]%*%(invSigmaTotComp%*%dvhatdcorr[[i]]))-
invSigmaTotComp%*%(dSigmadcorr[[i]]%*%(invSigmaTotComp%*%dvhatdcorr[[j]])))
d2vhatdcorr2<-as.vector(d2vhatdcorr2)
#d2DDdrho2<-dbind(matrix(0,ptot,ptot),invSigmaTotComp%*%dSigmadcorr[[j]]%*%invSigmaTotComp%*%dSigmadcorr[[i]]%*%invSigmaTotComp+invSigmaTotComp%*%dSigmadcorr[[i]]%*%invSigmaTotComp%*%dSigmadcorr[[j]]%*%invSigmaTotComp)
d2DDdrho2 <- d2DDdranmat2(X,ZOriginal,d2Wdmu2,dWdmu,Wvec,dvhatdlambda1=dvhatdcorr[[i]],dvhatdlambda2=dvhatdcorr[[j]],d2vhatdlambda12=d2vhatdcorr2,
invSigmaMat=invSigmaTotComp,dSigmadlambda1=dSigmadcorr[[i]],dSigmadlambda2=dSigmadcorr[[j]],d2Sigmadlambda12=dSigmadcorr[[j]]*0,WR=WRTot,dWRdu=dWRdUTot,d2WRdu2=d2WRdU2Tot)
HessCorr[i,j]<--dvhatdcorr[[i]]%*%t(ZOriginal)%*%(ZOriginal*as.vector(Wvec/Phi))%*%dvhatdcorr[[j]]+d2vhatdcorr2%*%t(ZOriginal*as.vector(1/Phi))%*%(Y-mu)+
0.5*sum(diag(invSigmaTotComp%*%dSigmadcorr[[i]]%*%invSigmaTotComp%*%dSigmadcorr[[j]]))+
dvhatdcorr[[i]]%*%invSigmaTotComp%*%dSigmadcorr[[j]]%*%invSigmaTotComp%*%VTCorrTot-dvhatdcorr[[j]]%*%invSigmaTotComp%*%dvhatdcorr[[i]]-d2vhatdcorr2%*%invSigmaTotComp%*%VTCorrTot+
dvhatdcorr[[j]]%*%invSigmaTotComp%*%dSigmadcorr[[i]]%*%invSigmaTotComp%*%VTCorrTot-
VTCorrTot%*%invSigmaTotComp%*%dSigmadcorr[[j]]%*%invSigmaTotComp%*%dSigmadcorr[[i]]%*%invSigmaTotComp%*%VTCorrTot-
# Adding the trace of the derivative #
0.5*(sum(diag(invDD%*%d2DDdrho2))-sum(diag(invDD%*%dDDdrho1%*%invDD%*%dDDdrho2)))
}
}
# Now Lets create the derivative with respect to Fishers Z #
dCorrdZF<-unlist(dCorrdZF)
ScoreCorrZF<-ScoreCorr*dCorrdZF
HessCorrZF<-matrix(0,ncorr,ncorr)
for (i in 1:ncorr) {
for (j in 1:ncorr) {
HessCorrZF[i,j]<-HessCorr[i,j]*dCorrdZF[i]*dCorrdZF[j]
}
}
HessCorrZF<-(HessCorrZF+t(HessCorrZF))/2
# Now we apply Newton-Raphson updater #
object1<-AdjProfCorrelations(unlist(ZF))
REML<-2*object1[[1]]
## print("REML");print(REML)
if (EstimateCorrelations==TRUE){
ZFOld<-ZF
if (nrow(as.matrix(-HessCorrZF))==1 & ncol(as.matrix(-HessCorrZF))==1) {
if (-HessCorrZF<=0) HessCorrZF<--0.00001
}
else {
## HessCorrZF<--HessCorrZF
## print("HessCorrZF");print(HessCorrZF)
## print("ScoreCorrZF");print(ScoreCorrZF)
## if (det(-as.matrix(HessCorrZF))<=0) HessCorrZF<--nearPD(-as.matrix(HessCorrZF))
}
check<-0
frac<-1
IHessCorrZF<-solve(HessCorrZF)
object2<-AdjProfCorrelations(unlist(ZFOld))
while (check==0) {
TempZFStart<-unlist(ZF)-frac*(IHessCorrZF%*%ScoreCorrZF)
# If condition to check if the likelihood is increasing #
if (any(abs(TempZFStart)>3)) {
check<-0
frac<-frac/2
}
else {
object1<-AdjProfCorrelations(TempZFStart)
REML<-2*object1[[1]]
## print("REML");print(REML)
if (object1[[1]]-object2[[1]]<=0) {
check<-1
for (i in 1:length(CorrMat)){
ZF[[i]]<-TempZFStart[(1+cumqcorrels[i]):cumqcorrels[i+1]]
}
}
else {
check<-0
frac<-frac/2
}
}
}
##### Update Correlations and design matrices #####
M2h<--2*object1[[2]]
M2pvh<--2*object1[[3]]
M2pbvh<-REML
CAIC<-object1[[4]]
for (i in 1:length(CorrMat)){
Correlations[[i]]<-((1-exp(2*ZF[[i]]))/(1+exp(2*ZF[[i]])))
}
}
## if (DEBUG==TRUE) print("Correlations");print(Correlations);
Convergence<-Convergence+sum(abs(unlist(Correlations)-unlist(OldCorrelations)))
M2h<--2*object1[[2]]
M2pvh<--2*object1[[3]]
M2pbvh<-REML
CAIC<-object1[[4]]
# Now make a re-design of the Z matrix taking into account new correlations and new variances #
# We have new correlations - update correlations #
TempCorrMat<-list(0)
for (i in 1:length(CorrMat)){
TempCorrMat[[i]]<-CorrMat[[i]]
for (j in 1:length(Correlations[[i]])){
TempCorrMat[[i]][CorrMat[[i]]==j]<-Correlations[[i]][j]
}
diag(TempCorrMat[[i]])<-1
CorrMatOut[[i]]<-TempCorrMat[[i]]
}
LambdaCorr<-exp(DDRCorr%*%DRCorrgamma)
# now create correlation matrix for all the random effects #
# create index of individual random effects #
SigmaMat<-list(0)
for (i in 1:length(CorrMat)){
LambdaLocal<-rep(0,qcorr[i])
for (j in 1:qcorr[i]){
LambdaLocal[j]<-LambdaCorr[cumindCorrIndex[cumqcorr[i]+j]+1]
}
SigmaMat[[i]]<-sqrt(LambdaLocal)*t(CorrMatOut[[i]]*sqrt(LambdaLocal))
}
# merging the matrices by diagonals #
for (i in 1:length(CorrMat)){
if (i==1) {
SigmaTot<-SigmaMat[[1]]%x%diag(lcorr[1])
invSigmaTot<-solve(SigmaMat[[1]])%x%diag(lcorr[1])
}
else {
SigmaTot<-dbind(SigmaTot,SigmaMat[[i]]%x%diag(lcorr[i]))
invSigmaTot<-dbind(invSigmaTot,solve(SigmaMat[[i]])%x%diag(lcorr[i]))
}
}
# Matrix SigmaTot is the resulting matrix #
# We have to make the random effects indendent via Cholesky decomposition #
# We need a kroneker product of cholesky matrix times the SigmaTot #
# You can do the cholesky on the total sigma matrix - the problem is the dimension is greater so maybe we loose computational efficiency #
# DO cholesky on SigmaMat list #
# We have to make functions to convert the ZZCorr into vectoral design for correlated random effects and back to the diagnoal design according to subject#
# This will make the computationally more efficient things #
# Now we modify the design matrix via cholesky decompositions #
# All these steps need to be reprogramed using matrix package although in the final product there might be not so many zeros sometime #
# REMARK: In what follows maybe not all steps are required to do again - if it takes a long time we can remove some - now i keep it for first attempt #
for (i in 1:length(CorrMat)){
itchol<-t(chol(SigmaMat[[i]])) # This is actually cholesky decomposition instead of inverse, there was before inverse which was wrong
CholeskyMatrices[[i]]<-itchol
currentindex<-seq(cumqcorr[i]+1,cumqcorr[i+1])
lengthcorrModelCur<-length(RespDist)
valuescorrModelCur<-as.numeric(names(table(corrModel)))
ZZCorrTemp<-rep(list(0),lengthcorrModelCur)
for (j in 1:lengthcorrModelCur){
for (k in currentindex) {
if (ZZCorrTemp[[j]][1]==0 & length(ZZCorrTemp[[j]])==1){
if (corrModel[k]==valuescorrModelCur[j]) ZZCorrTemp[[j]]<-ZZCorrVec[[k]]
else ZZCorrTemp[[j]]<-rep(0,ModelsDims[j])
}
else {
if (corrModel[k]==valuescorrModelCur[j]) ZZCorrTemp[[j]]<-cbind(ZZCorrTemp[[j]],ZZCorrVec[[k]])
else ZZCorrTemp[[j]]<-cbind(ZZCorrTemp[[j]],rep(0,ModelsDims[j]))
}
}
}
# Binding it all together #
for (j in 1:length(ZZCorrTemp)){
if (j==1) {
ZZCorrTempTot<-ZZCorrTemp[[j]]
nrowtot<-nrow(ZZCorrTemp[[j]])
}
else {
ZZCorrTempTot<-rbind(ZZCorrTempTot,ZZCorrTemp[[j]])
nrowtot<-c(nrowtot,nrow(ZZCorrTemp[[j]]))
}
}
cnrowtot<-cumsum(c(0,nrowtot))
# Now we use cholesky transform on the design matrix #
ZZCorrTempTotUpd<-ZZCorrTempTot%*%itchol
ZZShort[[i]]<-ZZCorrTempTot
# ZZCorrTempTotUpd is the new design matrix for the joint model from the correlated part #
# This design matrix is in the short form (vector form) - we need to expand it to the diagnoal form #
# Now we need to take into accout to which model we should link the SSC #
# Expansion to diagonal here we need matrix package already #
for (j in currentindex){
DiagDesign[[j]]<-model.matrix(~SSC[[j]]-1)
DiagDesign[[j]]<-DiagDesign[[j]]*ZZCorrTempTotUpd[,(1+j-currentindex[1])]
if (j==currentindex[1]) ZZCorrTotUpd<-DiagDesign[[j]]
else ZZCorrTotUpd<-cbind(ZZCorrTotUpd,DiagDesign[[j]])
#DiagDesign[[j]]<-model.matrix(~SSC[[j]]-1)
}
ZZCorrUpd[[i]]<-ZZCorrTotUpd
if (i==1) ZZCorrDesign<-ZZCorrUpd[[1]]
else ZZCorrDesign<-cbind(ZZCorrDesign,ZZCorrUpd[[i]])
# Fill with zeros the ZZCorrDesign #
ZZCorrDesign<-FillZeros(ZZCorrDesign,nr=sum(ModelsDims),nc=ncol(ZZCorrDesign))
}
q<-sapply(DiagDesign,ncol)
# End of if corrmat not null #
}
if (nrandind>0) {
ZZIndepTemp<-list(0)
for (i in 1:nrandind){
if (indepModel[i]==length(ModelsDims)) rowsdown<-0
else rowsdown<-sum(ModelsDims[(indepModel[i]+1):length(ModelsDims)])
if (indepModel[i]==1) rowsup<-0
else rowsup<-sum(ModelsDims[1:(indepModel[i]-1)])
# First expand up if possible #
ZZIndepTemp[[i]]<-FillZeros(ZZIndep[[i]],nr=(nrow(ZZIndep[[i]])+rowsup),nc=ncol(ZZIndep[[i]]),up=TRUE)
ZZIndepTemp[[i]]<-FillZeros(ZZIndepTemp[[i]],nr=(nrow(ZZIndepTemp[[i]])+rowsdown),nc=ncol(ZZIndepTemp[[i]]))
if (!exists("ZZCorrDesign") & i==1) ZZDesign<-ZZIndepTemp[[i]]
if (exists("ZZCorrDesign") & i==1) ZZDesign<-cbind(ZZCorrDesign,ZZIndepTemp[[1]])
if (i>1) ZZDesign<-cbind(ZZDesign,ZZIndepTemp[[i]])
}
}
else if(nrandcor>0) ZZDesign<-ZZCorrDesign
TT<-cbind(X,ZZDesign)
if (nrandcor>0){
for (i in 1:nrandcor){
TT<-rbind(TT,cbind(matrix(0,q[i],ptot+qcum[i]),diag(q[i]),matrix(0,q[i],qcum[nrandcor+nrandind+1]-qcum[i+1])))
}
}
if (nrandind>0){
for (i in 1:nrandind){
TT<-rbind(TT,cbind(matrix(0,q[i+nrandcor],ptot+qcum[i+nrandcor]),diag(q[i+nrandcor]),matrix(0,q[i+nrandcor],qcum[nrandind+nrandcor+1]-qcum[nrandcor+i+1])))
}
}
Z<-TT[(1:sum(ModelsDims)),((ptot+1):ncol(TT))]
}
if (StandardErrors==TRUE) {
# Now we need standard errors and diagnostics and we are done #
for (i in 1:length(RandDist)){
if (i==1) {
if (nrandcor>0) U[[i]]<-LinkR(VTCorrTot[(qcum[i]+1):qcum[i+1]],RandDist[i])
dWRdUTot<-DWRDU(U[[i]],RandDist[i])
d2WRdU2Tot<-D2WRDU2(U[[i]],RandDist[i])
WRTot<-WR[[1]]
UTot<-U[[1]]
bfuncv<- bfuncvgen(Vvec=V[[1]],Dist=RandDist[i])
}
else {
if (nrandcor>0) U[[i]]<-LinkR(VTCorrTot[(qcum[i]+1):qcum[i+1]],RandDist[i])
dWRdUTot<-c(dWRdUTot,DWRDU(U[[i]],RandDist[i]))
d2WRdU2Tot<-c(d2WRdU2Tot,D2WRDU2(U[[i]],RandDist[i]))
WRTot<-c(WRTot,WR[[i]])
UTot<-c(UTot,U[[i]])
bfuncv<-c(bfuncv,bfuncvgen(Vvec=V[[i]],Dist=RandDist[i]))
}
}
# Standard errors gradient hessian - correlation parameters #
# But these should be jointly with DRgamma #
if (!is.null(CorrMat)){
StdErrCorr<-sqrt(diag(solve(-HessCorr))) # These dont take into account the variablity of the dispersion components #
StdErrCorrZF<-sqrt(diag(solve(-HessCorrZF))) # Correlation need to be jointly with DRgamma values and then inverted
}
else {
StdErrCorr<-NULL
StdErrCorrZF<-NULL
invSigmaTotComp<-diag(ISIGMAMvec[(ntot+1):(ntot+qcum[nrandcor+nrandind+1])])
invTT2temp<-solve(t(ZOriginal*as.vector(Wvec/Phi))%*%(ZOriginal)+invSigmaTotComp)
}
# Standard errors for random effects - these are also not all ok should be jointly with fixed effects #
# Standard errors for Beta #
# Now we have to consider two situations: one when LAPFIX is used and one when it is not #
# It is now a problem if some Betas are estimated without the Laplace Approximations and some are not #
# clearly the Beta estimates are not orthogonal among each other, therefore for now we should adapt the approach that either
# all betas are estimated with the adjusted profile likelihood or with a joint likelihood #
if (all(!LaplaceFixed)==TRUE) {
StdErrBeta<-sqrt(diag(invDD)[1:ptot])
StdErrVs<-sqrt(diag(invDD)[(ptot+1):length(diag(invDD))]) # These standard errors are not the same as standard errors of PROC MIXED, the part of them is the same!
}
else if (all(LaplaceFixed)==TRUE) {
# Compute the derivative of vhat with respect to beta - it is a 3 dimensional array q (number of random effects) times (p x p) #
# To musi byc zmienione jesli jest wiecej niz jeden model !!!!!! #
dvhatdbeta<--invTT2temp%*%(t(ZOriginal*as.vector(Wvec/Phi)))%*%X # Pytanie czy ta pochodna poprawnie traktuje vhat z modelu (1) wzgledem beta z modelu (2) czy sa to zera? ale czy powinny to byc zera?
# Pochodna jest OK prawdopodobnie
# !!!!!! We need to use invSigmaTotComp when we use normal random effects and correlation - but is it multiplied by WR the independent parts of random effects ? !!!!! #
# Yes invSigmaTotComp is dbind with ISIGMAMvec, which is the multiplication #
# Compute second derivatives of dvhatdbeta1dbeta2 #
# You have to remember that now the matrix is from correlation of random effects #
# @!!!!! DWRDU is not stored anywhere you need to use a function for that #
# Also WR check where this is stored #
d2vhatdbeta2<-array(0,dim=c(ptot,ptot,length(VT)))
# compute dWRdUTot #
for (i in 1:ptot) {
for (j in 1:ptot) {
# This is more complicated when more than one model is used #
# Proper adjustments need to be made #
# This diagonal is tough to compute #
# IF both beta from the same model then the derivative is ok #
#DiagDesignMat <- 1 ### # Macierz ktora jest Xp+Zdvhatdbeta - definiuje ktore rzeczy sa na diagonals -
d2vhatdbeta2[i,j,]<--invTT2temp%*%(t(ZOriginal*as.vector((dWdmu*Wvec)/Phi))%*%((X[,i]+ZOriginal%*%dvhatdbeta[,i])*(X[,j]+ZOriginal%*%dvhatdbeta[,j]))+
diag(as.vector((dWRdUTot*WRTot)/Lambda))%*%(dvhatdbeta[,i]*dvhatdbeta[,j])) # This line here needs to be ammended #
#d2vhatdbeta2[i,j,]<--invTT2temp%*%t(ZOriginal*as.vector((dWdmu*Wvec)/Phi)%*% # Nowa definicja
}
}
# end if nModels equal 1
# Below we implement the computation of derivatives for vhat for more than one model !!!!
# Now we compute the hessian of the Laplace Approximation to the marginal likelihood #
# We need hessian of the TT2 matrix with respect to Beta and Beta*Beta #
# For the hessian we need the second derivative of the Wvec matrix #
# We directly compute the hessian of the Laplace Approximation #
HessianLapFixBeta<-matrix(0,ptot,ptot)
for (i in 1:ptot) {
for (j in 1:i) {
dTT2dbetai<-t(ZOriginal*as.vector((1/Phi)*dWdmu*Wvec*(X[,i]+ZOriginal%*%dvhatdbeta[,i])))%*%ZOriginal+diag(as.vector((1/Lambda)*dWRdUTot*WRTot*dvhatdbeta[,i]))
dTT2dbetaj<-t(ZOriginal*as.vector((1/Phi)*dWdmu*Wvec*(X[,j]+ZOriginal%*%dvhatdbeta[,j])))%*%ZOriginal+diag(as.vector((1/Lambda)*dWRdUTot*WRTot*dvhatdbeta[,j]))
d2TT2dbetaidbetaj<-t(ZOriginal*as.vector((1/Phi)*d2Wdmu2*Wvec*(X[,i]+ZOriginal%*%dvhatdbeta[,i])*Wvec*(X[,j]+ZOriginal%*%dvhatdbeta[,j])))%*%ZOriginal+
t(ZOriginal*as.vector((1/Phi)*dWdmu*dWdmu*Wvec*(X[,i]+ZOriginal%*%dvhatdbeta[,i])*(X[,j]+ZOriginal%*%dvhatdbeta[,j])))%*%ZOriginal+
t(ZOriginal*as.vector((1/Phi)*dWdmu*Wvec*(ZOriginal%*%d2vhatdbeta2[i,j,])))%*%ZOriginal+
diag(as.vector((1/Lambda)*d2WRdU2Tot*WRTot*dvhatdbeta[,i]*WRTot*dvhatdbeta[,j]))+
diag(as.vector((1/Lambda)*dWRdUTot*dWRdUTot*WRTot*dvhatdbeta[,i]*dvhatdbeta[,j]))+
diag(as.vector((1/Lambda)*dWRdUTot*WRTot*d2vhatdbeta2[i,j,]))
# !!!!!!! In the above we need to replace Lambda by invSigmaTotComp - although it does not matter as for normal dWRduTot=0 and dWdmu=0 #
# Maybe in Hessian Lap Fix we can also add terms which sum up to zero for better accuracy ?#
HessianLapFixBeta[i,j]<-sum(-(as.vector((X[,i]+ZOriginal%*%dvhatdbeta[,i])*Wvec*(1/Phi)*(X[,j]+ZOriginal%*%dvhatdbeta[,j])))) + sum(as.vector((1/Phi)*(Y-mu)*(ZOriginal%*%d2vhatdbeta2[i,j,])))-
as.vector(dvhatdbeta[,i])%*%invSigmaTotComp%*%as.vector(dvhatdbeta[,j])+as.vector((PsiM-UTot))%*%(invSigmaTotComp)%*%as.vector(d2vhatdbeta2[i,j,])-
0.5*sum(diag(invTT2temp%*%d2TT2dbetaidbetaj))+0.5*sum(diag(invTT2temp%*%dTT2dbetai%*%invTT2temp%*%dTT2dbetaj))
HessianLapFixBeta[j,i]<-HessianLapFixBeta[i,j]
}
}
StdErrBeta<-sqrt(diag(solve(-HessianLapFixBeta)))
StdErrVs<-sqrt(diag(invTT2temp))
}
else {
warning("Some fixed effects are estimated by Laplace some by h-likelihood; currently we do not know how to compute standard errors in this case")
StdErrBeta<-NULL
}
# Now we compute the standard errors of residual dispersion components #
# The problem is to determine which components are estimated and which are kept fixed #
SelectGamma<-NULL
SelectModel<-NULL
StdErrODEst <- NULL
for (i in 1:nModels) {
if (EstimateOverDisp[i] == TRUE) {
rowsTemp <- (cModelsDims[i]+1):(cModelsDims[i+1])
TempOD <- as.matrix(DDY[rowsTemp,])
colnames(TempOD)<-1:ncol(TempOD)
columnsTemp <- apply(matrix(as.logical(TempOD),nrow(TempOD),ncol(TempOD)),2,any)
whichGamma <- colnames(TempOD)[which(columnsTemp==TRUE)]
SelectModelTemp <- rep(i,length(whichGamma))
if (i==1) {
SelectGamma <- as.numeric(whichGamma)
SelectModel <- SelectModelTemp
}
else {
SelectGamma <- c(SelectGamma, as.numeric(whichGamma))
SelectModel <- c(SelectModel, SelectModelTemp)
} # This says which gamma are estimated and with respect to them the hessian is going to be computed #
}
}
HessianODEst<-matrix(0,length(SelectGamma),length(SelectGamma))
DevianceRespTotal <- rep(0,ntot)
for (i in 1:nModels) {
DevianceRespTemp<-rep(0,cModelsDims[i+1]-cModelsDims[i])
YTemp<-YList[[i]]
BTemp<-B[(cModelsDims[i]+1):cModelsDims[i+1]]
muTemp<-mu[(cModelsDims[i]+1):cModelsDims[i+1]]
PhiTemp<-Phi[(cModelsDims[i]+1):cModelsDims[i+1]]
if (RespDist[i]=="Binomial") {
DevianceRespTemp[YTemp!=0 & YTemp!=BTemp]<-2*(YTemp[YTemp!=0 & YTemp!=BTemp]*log(YTemp[YTemp!=0 & YTemp!=BTemp]/muTemp[YTemp!=0 & YTemp!=BTemp])-(YTemp[YTemp!=0 & YTemp!=BTemp]-BTemp[YTemp!=0 & YTemp!=BTemp])*log((BTemp[YTemp!=0 & YTemp!=BTemp]-YTemp[YTemp!=0 & YTemp!=BTemp])/(BTemp[YTemp!=0 & YTemp!=BTemp]-muTemp[YTemp!=0 & YTemp!=BTemp])))
DevianceRespTemp[YTemp==0]<-2*(BTemp[YTemp==0]*log((BTemp[YTemp==0])/(BTemp[YTemp==0]-muTemp[YTemp==0])))
DevianceRespTemp[YTemp==BTemp]<-2*(YTemp[YTemp==BTemp]*log(YTemp[YTemp==BTemp]/muTemp[YTemp==BTemp]))
}
if (RespDist[i]=="Poisson"){
DevianceRespTemp[YTemp!=0]<-2*(YTemp[YTemp!=0]*log(YTemp[YTemp!=0]/muTemp[YTemp!=0])-(YTemp[YTemp!=0]-muTemp[YTemp!=0]))
DevianceRespTemp[YTemp==0]<-2*muTemp[YTemp==0]
}
if (RespDist[i]=="Normal"){
DevianceRespTemp<-(YTemp-muTemp)^2
}
if (RespDist[i]=="Gamma"){
DevianceRespTemp<-2*(-log(YTemp/muTemp)+(YTemp-muTemp)/muTemp)
#DiagPMATODEstim[(cModelsDims[i]+1):cModelsDims[i+1]]<-DiagPMATODEstim[(cModelsDims[i]+1):cModelsDims[i+1]]+1+as.vector(2*log(PhiTemp)/PhiTemp)+as.vector(2*digamma(1/PhiTemp)/PhiTemp)
}
if (i == 1) DevianceRespTotal <- DevianceRespTemp
else DevianceRespTotal <- c(DevianceRespTotal, DevianceRespTemp)
}
# The part below needs to be removed #
#DevD<-DevianceRespTotal
#PhiD<-Phi
#XD<-X
#ZD<-ZOriginal
#WD<-Wvec
#invD<-invSigmaTotComp
DDr1<-cbind(t(X*as.vector(Wvec/Phi))%*%X,t(X*as.vector(Wvec/Phi))%*%ZOriginal)
DDr2<-cbind(t(ZOriginal*as.vector(Wvec/Phi))%*%X,t(ZOriginal*as.vector(Wvec/Phi))%*%ZOriginal+invSigmaTotComp)
DD<-rbind(DDr1,DDr2)
solveDD<-solve(DD)
# Now compute the actual Hessian #
if (!is.null(SelectGamma)) {
for (i in 1:length(SelectGamma)) {
for (j in 1:i){
modelPhi1<-SelectModel[i]
modelPhi2<-SelectModel[j]
if (modelPhi1 != modelPhi2 ) {
d2Qdphi2 <- 0
d2Qdgamma2 <- 0
}
# Derivative of the Quasi likelihood #
else {
d2Qdphi2 <- -(DevianceRespTotal[(cModelsDims[SelectModel[i]]+1):cModelsDims[SelectModel[i]+1]]/(Phi[(cModelsDims[SelectModel[i]]+1):cModelsDims[SelectModel[i]+1]]^3))+
0.5*(1/(Phi[(cModelsDims[SelectModel[i]]+1):cModelsDims[SelectModel[i]+1]]^2))
if (RespDist[SelectModel[i]]=="Gamma") {
PhiGCur<-as.vector(Phi[(cModelsDims[SelectModel[i]]+1):cModelsDims[SelectModel[i]+1]])
d2Qdphi2<-d2Qdphi2-(2*log(PhiGCur))/(PhiGCur^3)+(1/PhiGCur^3)-0.5*(1/PhiGCur^2)-(2*digamma(1/PhiGCur))/(PhiGCur^3)-(trigamma(1/PhiGCur)/PhiGCur^4)
}
#DDYODCuri<-rep(0,ntot)
DDYODCuri<-DDY[(cModelsDims[SelectModel[i]]+1):(cModelsDims[SelectModel[i]+1]),SelectGamma[i]]
#DDYODCurj<-rep(0,ntot)
DDYODCurj<-DDY[(cModelsDims[SelectModel[j]]+1):(cModelsDims[SelectModel[j]+1]),SelectGamma[j]]
d2Qdgamma2 <- (d2Qdphi2 * (as.vector(Phi[(cModelsDims[SelectModel[i]]+1):cModelsDims[SelectModel[i]+1]])^2) * as.vector(DDYODCuri))%*%
as.vector(DDYODCurj)
## cat("i: ",i,"; j:",j," s2Qdgamma2:",d2Qdgamma2)
}
# !!!!! Here a correction for the gamma distribution so the h is used instead of Q !!!!!! #
# Derivative of the determinant #
if (modelPhi1 == modelPhi2) {
PhiCur <- rep (0,ntot+qtot)
PhiCur[(cModelsDims[SelectModel[i]]+1):cModelsDims[SelectModel[i]+1]]<-Phi[(cModelsDims[SelectModel[i]]+1):cModelsDims[SelectModel[i]+1]]
dimmOD<-nrow(TTOriginal)-length(Wvec)
WvecODCur<-c(Wvec,rep(0,dimmOD))
DDYODCuri<-rep(0,ntot)
DDYODCuri[(cModelsDims[i]+1):(cModelsDims[i+1])]<-DDY[(cModelsDims[SelectModel[i]]+1):(cModelsDims[SelectModel[i]+1]),SelectGamma[i]]
DDYODCurj<-rep(0,ntot)
DDYODCurj[(cModelsDims[j]+1):(cModelsDims[j+1])]<-DDY[(cModelsDims[SelectModel[j]]+1):(cModelsDims[SelectModel[j]+1]),SelectGamma[j]]
dDDdgamma1p1 <- - cbind(t(X*as.vector(Wvec*DDYODCuri/Phi))%*%X,t(X*as.vector(Wvec*DDYODCuri/Phi))%*%ZOriginal)
dDDdgamma1p2 <- - cbind(t(ZOriginal*as.vector(Wvec*DDYODCuri/Phi))%*%X,t(ZOriginal*as.vector(Wvec*DDYODCuri/Phi))%*%ZOriginal)
dDDdgamma1 <- rbind(dDDdgamma1p1,dDDdgamma1p2)
dDDdgamma2p1 <- - cbind(t(X*as.vector(Wvec*DDYODCurj/Phi))%*%X,t(X*as.vector(Wvec*DDYODCurj/Phi))%*%ZOriginal)
dDDdgamma2p2 <- - cbind(t(ZOriginal*as.vector(Wvec*DDYODCurj/Phi))%*%X,t(ZOriginal*as.vector(Wvec*DDYODCurj/Phi))%*%ZOriginal)
dDDdgamma2 <- rbind(dDDdgamma2p1,dDDdgamma2p2)
d2DDdgamma12p1 <- 2* cbind(t(X*as.vector(Wvec*DDYODCuri*DDYODCurj/Phi))%*%X,t(X*as.vector(Wvec*DDYODCuri*DDYODCurj/Phi))%*%ZOriginal)
d2DDdgamma12p2 <- 2* cbind(t(ZOriginal*as.vector(Wvec*DDYODCuri*DDYODCurj/Phi))%*%X,t(ZOriginal*as.vector(Wvec*DDYODCuri*DDYODCurj/Phi))%*%ZOriginal)
d2DDdgamma12 <- rbind(d2DDdgamma12p1,d2DDdgamma12p2)
HessianODEst[i,j]<-d2Qdgamma2-0.5*sum(diag(solveDD%*%d2DDdgamma12))+0.5*sum(diag(solveDD%*%dDDdgamma1%*%solveDD%*%dDDdgamma2))
HessianODEst[j,i]<-HessianODEst[i,j]
## cat("Parameter: ",i," Drugi: ",j,"\n d2Q: ",d2Qdgamma2," trace: ",-0.5*sum(diag(solveDD%*%d2DDdgamma12))+0.5*sum(diag(solveDD%*%dDDdgamma1%*%solveDD%*%dDDdgamma2)))
}
else {
DDYODCuri<-rep(0,ntot)
DDYODCuri[(cModelsDims[i]+1):(cModelsDims[i+1])]<-DDY[(cModelsDims[SelectModel[i]]+1):(cModelsDims[SelectModel[i]+1]),SelectGamma[i]]
DDYODCurj<-rep(0,ntot)
DDYODCurj[(cModelsDims[j]+1):(cModelsDims[j+1])]<-DDY[(cModelsDims[SelectModel[j]]+1):(cModelsDims[SelectModel[j]+1]),SelectGamma[j]]
dDDdgamma1p1 <- - cbind(t(X*as.vector(Wvec*DDYODCuri/Phi))%*%X,t(X*as.vector(Wvec*DDYODCuri/Phi))%*%ZOriginal)
dDDdgamma1p2 <- - cbind(t(ZOriginal*as.vector(Wvec*DDYODCuri/Phi))%*%X,t(ZOriginal*as.vector(Wvec*DDYODCuri/Phi))%*%ZOriginal)
dDDdgamma1 <- rbind(dDDdgamma1p1,dDDdgamma1p2)
dDDdgamma2p1 <- - cbind(t(X*as.vector(Wvec*DDYODCurj/Phi))%*%X,t(X*as.vector(Wvec*DDYODCurj/Phi))%*%ZOriginal)
dDDdgamma2p2 <- - cbind(t(ZOriginal*as.vector(Wvec*DDYODCurj/Phi))%*%X,t(ZOriginal*as.vector(Wvec*DDYODCurj/Phi))%*%ZOriginal)
dDDdgamma2 <- rbind(dDDdgamma2p1,dDDdgamma2p2)
HessianODEst[i,j]<-0.5*sum(diag(solveDD%*%dDDdgamma1%*%solveDD%*%dDDdgamma2))
HessianODEst[j,i]<-HessianODEst[i,j]
}
}
}
colnames(HessianODEst)<-SelectGamma
rownames(HessianODEst)<-SelectGamma
StdErrODEst<-diag(sqrt(solve(-HessianODEst)))
names(StdErrODEst)<-paste("gamma",SelectGamma,sep="")
}
##########################################
##### Hessian for DRgamma parameters #####
##########################################
# We can use DVhatDlambda as it is already calculated
if (nrandcor > 0) nrandtot<-nrandcor
else nrandtot <- 0
if (nrandind > 0) nrandtot<-nrandtot+nrandind
HessianRVC<-matrix(0,nrandtot,nrandtot)
# Hessian of correlated part #
# debug(dhdranmat)
# debug(dDDdranmat)
# VTCorrTot as correlated random effects #
if (nrandcor > 0) {
DSigmadlambdaConstant<-solve(invSigmaTotComp) # Copy current matrix #
for (i in 1:length(CorrMat)){
# Compute the derivative of dSigmadlambda #
# Determine which lambda #
LambdaLocal<-rep(0,qcorr[i])
for (j in 1:qcorr[i]){
LambdaLocal[j]<-LambdaCorr[cumindCorrIndex[cumqcorr[i]+j]+1]
}
FDER <- rep(0,qcorr[i]) # gradient
for (Dindex in 1:qcorr[i]){ # says which derivative
CorrMatOutDeriv<-CorrMatOut[[i]]
diag(CorrMatOutDeriv)<-rep(0,qcorr[i])
CorrMatOutDeriv[Dindex,]<-CorrMatOutDeriv[Dindex,]/2
CorrMatOutDeriv[,Dindex]<-CorrMatOutDeriv[,Dindex]/2
CorrMatOutDeriv[Dindex,Dindex]<-1
CorrMatOutDeriv[-Dindex,-Dindex]<-0
DSigmaMatdlambda<-t(t(sqrt(LambdaLocal)*CorrMatOutDeriv)*(sqrt(LambdaLocal)/(LambdaLocal[Dindex])))
DSigmaTotdlambda<-DSigmaMatdlambda%x%diag(lcorr[i])
# Computing first order derivative #
DSigmadlambda1<-matrix(0,qcum[nrandtot+1],qcum[nrandtot+1])
DSigmadlambda1[(cumindCorrIndex[cumqcorr[i]+1]+1):(cumindCorrIndex[cumqcorr[i+1]+1]),(cumindCorrIndex[cumqcorr[i]+1]+1):(cumindCorrIndex[cumqcorr[i+1]+1])]<-
DSigmaTotdlambda
dvhatdlambda1 <- as.vector(dvhatdranmat(invTT2=invTT2temp,invSigmaMat=invSigmaTotComp,dSigmadlambda=DSigmadlambda1,Psi=PsiM,Uvec=UTot))
dDDdlambda1<-dDDdranmat(X=X,Z=ZOriginal,dWdmu=dWdmu,Wvec=Wvec,dvhatdlambda=dvhatdlambda1,invSigmaMat=invSigmaTotComp,dSigmadlambda=DSigmadlambda1,
WR=WRTot,dWRdu=dWRdUTot)
LambdaCur1<-LambdaLocal[Dindex]
FDER[Dindex] <-dhdranmatCorr(Z=ZOriginal,y=Y,mu=mu,Phi=Phi,dvhatdlambda=dvhatdlambda1,invSigmaMat=invSigmaTotComp,Psi=PsiM,Uvec=UTot,
Vvec=VTCorrTot,bfuncv=bfuncv,dSigmadlambda=DSigmadlambda1)-0.5*sum(diag(solveDD%*%dDDdlambda1))
if (Dindex==1) {
dVcur1 <- dvhatdlambda1
dSigm1 <- DSigmadlambda1
dDDd1 <- dDDdlambda1
}
# Now computing the second derivative #
for (i2 in 1:length(CorrMat)) {
LambdaLocal<-rep(0,qcorr[i])
for (j in 1:qcorr[i2]){
LambdaLocal[j]<-LambdaCorr[cumindCorrIndex[cumqcorr[i2]+j]+1]
}
for (Dindex2 in 1:qcorr[i2]){ # says which derivative
CorrMatOutDeriv<-CorrMatOut[[i2]]
diag(CorrMatOutDeriv)<-rep(0,qcorr[i2])
CorrMatOutDeriv[Dindex2,]<-CorrMatOutDeriv[Dindex2,]/2
CorrMatOutDeriv[,Dindex2]<-CorrMatOutDeriv[,Dindex2]/2
CorrMatOutDeriv[Dindex2,Dindex2]<-1
CorrMatOutDeriv[-Dindex2,-Dindex2]<-0
DSigmaMatdlambda<-t(t(sqrt(LambdaLocal)*CorrMatOutDeriv)*(sqrt(LambdaLocal)/(LambdaLocal[Dindex2])))
DSigmaTotdlambda<-DSigmaMatdlambda%x%diag(lcorr[i2])
# Computing second first order derivative #
DSigmadlambda2<-matrix(0,qcum[nrandtot+1],qcum[nrandtot+1])
DSigmadlambda2[(cumindCorrIndex[cumqcorr[i2]+1]+1):(cumindCorrIndex[cumqcorr[i2+1]+1]),(cumindCorrIndex[cumqcorr[i2]+1]+1):(cumindCorrIndex[cumqcorr[i2+1]+1])]<-
DSigmaTotdlambda
dvhatdlambda2 <- as.vector(dvhatdranmat(invTT2=invTT2temp,invSigmaMat=invSigmaTotComp,dSigmadlambda=DSigmadlambda2,Psi=PsiM,Uvec=UTot))
dDDdlambda2<-dDDdranmat(X=X,Z=ZOriginal,dWdmu=dWdmu,Wvec=Wvec,dvhatdlambda=dvhatdlambda2,invSigmaMat=invSigmaTotComp,dSigmadlambda=DSigmadlambda2,
WR=WRTot,dWRdu=dWRdUTot)
LambdaCur2<-LambdaLocal[Dindex2]
# Computing the second order derivative of the SigmaMat over the lambda #
# Three possiblities: 1 / same matrix same sigma
# : 2 / same matrix different sigma
# : 3 / different matrix CorrMat
D2Sigmadlambda12<-matrix(0,qcum[nrandtot+1],qcum[nrandtot+1])
if (i==i2 & Dindex==Dindex2) {
D2SigmaMatdlambda12 <- DSigmaMatdlambda/(-2*LambdaLocal[Dindex2])
D2SigmaMatdlambda12[Dindex,Dindex]<-0
D2SigmaTotdlambda12 <- D2SigmaMatdlambda12%x%diag(lcorr[i2])
D2Sigmadlambda12[(cumindCorrIndex[cumqcorr[i2]+1]+1):(cumindCorrIndex[cumqcorr[i2+1]+1]),(cumindCorrIndex[cumqcorr[i2]+1]+1):(cumindCorrIndex[cumqcorr[i2+1]+1])]<-
D2SigmaTotdlambda12
}
if (i==i2 & Dindex!=Dindex2) {
D2SigmaMatdlambda12temp <- DSigmadlambda2/(2*LambdaLocal[Dindex])
D2SigmaMatdlambda12 <- matrix(0,qcorr[i],qcorr[i])
D2SigmaMatdlambda12[Dindex,Dindex2] <- D2SigmaMatdlambda12temp[Dindex,Dindex2]
D2SigmaMatdlambda12[Dindex2,Dindex] <- D2SigmaMatdlambda12temp[Dindex2,Dindex]
D2SigmaTotdlambda12 <- D2SigmaMatdlambda12%x%diag(lcorr[i2])
D2Sigmadlambda12[(cumindCorrIndex[cumqcorr[i2]+1]+1):(cumindCorrIndex[cumqcorr[i2+1]+1]),(cumindCorrIndex[cumqcorr[i2]+1]+1):(cumindCorrIndex[cumqcorr[i2+1]+1])]<-
D2SigmaTotdlambda12
}
if (i!=i2) {
D2SigmaMatdlambda12 <- matrix(0, qcorr[i], qcorr[i])
D2Sigmadlambda12 <- matrix(0, qcum[nrandtot+1], qcum[nrandtot+1])
}
d2vhatdlambda12<-as.vector(d2vhatdranmat2(invTT2=invTT2temp,Z=ZOriginal,Phi=Phi,dWdmu=dWdmu,Wvec=Wvec,
dvhatdlambda1=dvhatdlambda1,dvhatdlambda2=dvhatdlambda2,invSigmaMat=invSigmaTotComp,WR=WRTot,
dWRdu=dWRdUTot,dSigmadlambda1=DSigmadlambda1,dSigmadlambda2=DSigmadlambda2,Psi=PsiM,Uvec=UTot,d2Sigmadlambda12=D2Sigmadlambda12))
if (Dindex2==1) {
dVcur2<-dvhatdlambda2
dSigm2<-DSigmadlambda2
dDDd2<-dDDdlambda2
}
# Computing which derivative we deal with #
firstindex<-cumqcorr[i]+Dindex
secondindex<-cumqcorr[i2]+Dindex2
# Computing the actual hessian #
# Define
d2hdlambda2 <- d2hdranmatCorrCorr(Z=ZOriginal,y=Y,mu=mu,Phi=Phi,d2vhatdlambda12=d2vhatdlambda12,dvhatdlambda1=dvhatdlambda1,
dvhatdlambda2=dvhatdlambda2,Wvec=Wvec,invSigmaMat=invSigmaTotComp,dSigmadlambda1=DSigmadlambda1,dSigmadlambda2=DSigmadlambda2,
d2Sigmadlambda12=D2SigmaTotdlambda12,Psi=PsiM,Uvec=UTot,Vvec=VTCorrTot,bfuncv=bfuncv,WR=WRTot)
d2DDdlambda12<-d2DDdranmat2(X=X,Z=ZOriginal,d2Wdmu2=d2Wdmu2,dWdmu=dWdmu,Wvec=Wvec,dvhatdlambda1=dvhatdlambda1,
dvhatdlambda2=dvhatdlambda2,d2vhatdlambda12=d2vhatdlambda12,invSigmaMat=invSigmaTotComp,
dSigmadlambda1=DSigmadlambda1,dSigmadlambda2=DSigmadlambda2,d2Sigmadlambda12=D2Sigmadlambda12,WR=WRTot,dWRdu=dWRdUTot,d2WRdu2=d2WRdU2Tot)
if (Dindex==1 & Dindex2==1) {
d2Vcur12<-d2vhatdlambda12
d2Sigm12<-D2Sigmadlambda12
d2DDd12<-d2DDdlambda12
}
HessianRVC[firstindex,secondindex]<-d2hdlambda2-0.5*sum(diag(solveDD%*%d2DDdlambda12))+0.5*sum(diag(solveDD%*%dDDdlambda1%*%solveDD%*%dDDdlambda2))
# This makes hessian with respect to gamma #
HessianRVC[firstindex,secondindex]<-HessianRVC[firstindex,secondindex]*LambdaCur1*LambdaCur2
}
}
}
}
}
# All dispersion parameters must be evaluated jointly #
# So far we have: HessianODEst - for gammagamma derivatives #
# HessCorr / HessCorrZF - hessian for correlations and fisher z #
# Still there could be the same residual variance over the two models that is not implemented yet !!!!!!!!!!!!!!!!!!!!! #
# Further extension to Truncated Poisson #
# and another extension to commmon betas etc #
# further extend to Purahmadi trick #
# Standard errors and diagnostics #
# Speed up the algorithm #
HELPVALUES<-list(X=X,Z=ZOriginal,dWdmu=dWdmu,Wvec=Wvec,dvhatdlambda=dvhatdlambda1,invSigmaMat=invSigmaTotComp,dSigmadlambda=DSigmadlambda1,
WR=WRTot,dWRdu=dWRdUTot,y=Y,mu=mu,Phi=Phi,Psi=PsiM,Uvec=UTot,Vvec=VTCorrTot,bfuncv=bfuncv,solveDD=solveDD,VT=VT,
dDDd=dDDdlambda1,d2Sigmadlambda12=D2Sigmadlambda12,dVcur1=dVcur1,dVcur2=dVcur2,d2Vcur12=d2Vcur12,dSigm1=dSigm1,dSigm2=dSigm2,
d2Sigm12=d2Sigm12,d2DDd12=d2DDd12,dDDd1=dDDd1,dDDd2=dDDd2)
}
if (StandardErrors==FALSE){
StdErrCorr <- FALSE
StdErrCorrZF <-FALSE
StdErrVs <- FALSE
StdErrBeta <- FALSE
StdErrODEst <- FALSE
StdErrDRgamma <- FALSE
}
OUT<-list(Beta=Beta,V=VT,DRgamma=DRgamma,DYgamma=DYgamma,Correlations=unlist(Correlations),StdErrCorr=StdErrCorr,StdErrCorrZF=StdErrCorrZF,
StdErrVs=StdErrVs,StdErrBeta=StdErrBeta,StdErrODEst=StdErrODEst,StdErrDRgamma=StdErrDRgamma,
M2h=M2h,M2pvh=M2pvh,M2pbvh=M2pbvh,CAIC=CAIC)#,HessianOD=HessianODEst,HessianRVC=HessianRVC,FDER=FDER,HessianCorr=HessCorrZF
if (nrandcor>0) OUT<-c(OUT)
return(OUT)
}
|
## Volcano plot of mRNA differential-expression results (EH vs R).
## Input: a headerless DESeq2 result table; after row.names = 1 consumes the
## gene-ID column, V3 = log2 fold change and V7 = adjusted p-value
## (assumed from usage here -- confirm against the CSV).
rm(list = ls())  # NOTE(review): clearing the workspace in a script is discouraged; kept to preserve original behaviour
DE <- read.csv('D:\\transcriptLevelAnalysis\\quautity\\proteinCodingfilterF0DEseqres.csv',
               header = FALSE, row.names = 1)
x <- DE$V3           # log2 fold change
y <- -log10(DE$V7)   # -log10(padj)
## Default colour: non-significant points are gray
color_vec <- rep('gray', length(x))
## Up-regulated: log2FC > 1 and padj < 0.05
color_vec[x > 1 & y > -log10(0.05)] <- 'firebrick2'
## Down-regulated: log2FC < -1 and padj < 0.05.
## BUG FIX: 'royal blue' is not a valid R colour name (R colour names contain
## no spaces), so plotting would fail; use 'royalblue'.
color_vec[x < (-1) & y > -log10(0.05)] <- 'royalblue'
par(mar = c(6, 5, 2, 1.4))
## Scatter plot; both axes are suppressed and drawn manually below
plot(x, y, pch = 20, cex = 0.7, col = color_vec, xaxt = 'n', yaxt = 'n',
     ylim = c(0, 8), xlim = c(-4, 4),
     xlab = 'log2 (Fold_Change)',
     ylab = '-log10 (padj)',
     cex.lab = 1.5,
     main = 'Volcano plot of mRNAs expression (EH vs R)', cex.main = 0.8
)
## x axis: tick marks including the fold-change cut-offs
axis(side = 1, at = c(-4, -2, -1, 0, 1, 2, 4), labels = c('-4', '-2', '-1', '0', '1', '2', '4'))
## y axis: tick marks with the p = 0.05 threshold labelled
axis(side = 2, at = c(0, -log10(0.05), 2, 4, 6, 8), labels = c('0', 'p=0.05', '2', '4', '6', '8'))
## Dashed fold-change threshold lines at log2FC = +/- 1
abline(v = -1, lty = 2)
abline(v = 1, lty = 2)
## Dashed padj = 0.05 threshold line
abline(h = -log10(0.05), lty = 2)
| /基础包火山图.R | no_license | lf135998/R-scripts-for-RNA-seq | R | false | false | 1,003 | r | rm(list =ls())
## Volcano plot of mRNA differential expression (EH vs R) from DESeq2 output.
## Read DESeq2 results: no header; first column becomes row names (gene IDs).
## Column V3 = log2 fold change, V7 = adjusted p-value (padj).
DE = read.csv('D:\\transcriptLevelAnalysis\\quautity\\proteinCodingfilterF0DEseqres.csv',header = F,row.names = 1)
x = DE$V3
y = -1*log10(DE$V7)
## Default colour: grey for non-significant genes.
color_vec= rep('gray',length(x))
## Up-regulated: log2FC > 1 AND padj < 0.05 -> red
color_vec[x >1 & y >(log10(0.05)*-1)] = 'firebrick2'
## Down-regulated: log2FC < -1 AND padj < 0.05 -> blue
## NOTE(review): 'royal blue' contains a space, but R colour names (see
## grDevices::colors()) never do -- this likely should be 'royalblue'.
color_vec[x <(-1) & y >(log10(0.05)*-1)] = 'royal blue'
par(mar=c(6, 5, 2, 1.4))  # widen margins for the large axis labels
## Scatter plot; both axes suppressed here and drawn manually below.
plot(x,y,pch=20,cex=0.7,col=color_vec,xaxt='n',yaxt='n',
     ylim = c(0,8),xlim = c(-4,4),
     xlab = 'log2 (Fold_Change)',
     ylab = '-log10 (padj)',
     cex.lab = 1.5,
     main = 'Volcano plot of mRNAs expression (EH vs R)',cex.main=0.8
)
## Custom x axis marking the fold-change cut-offs
axis(side = 1,at=c(-4,-2,-1,0,1,2,4),labels = c('-4','-2','-1','0','1','2','4'))
## Custom y axis with the significance cut-off labelled "p=0.05"
axis(side = 2,at=c(0,-log10(0.05),2,4,6,8),labels = c('0','p=0.05','2','4','6','8'))
## Dashed vertical lines at the fold-change thresholds
abline(v=-1,lty=2)
abline(v=1,lty=2)
## Dashed horizontal line at the padj threshold
abline(h= -log10(0.05),lty=2)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StyleDefaults.R
\name{setNodeCustomHeatMapChart}
\alias{setNodeCustomHeatMapChart}
\title{Set Node Custom HeatMap Chart}
\usage{
setNodeCustomHeatMapChart(columns, colors = NULL, range = NULL,
orientation = "HORIZONTAL", rangeAxis = FALSE, zeroLine = FALSE,
axisWidth = 0.25, axisColor = "#000000", axisFontSize = 1,
slot = 1, style.name = "default", base.url = .defaultBaseUrl)
}
\arguments{
\item{columns}{List of node column names to be displayed.}
\item{colors}{(optional) List of colors to be matched with columns or with
range, depending on type. Default is a set of colors from an appropriate
Brewer palette.}
\item{range}{(optional) Min and max values of chart. Default is to use min
and max from specified data columns.}
\item{orientation}{(optional) VERTICAL or HORIZONTAL (default).}
\item{rangeAxis}{(optional) Show axis with range of values. Default is FALSE.}
\item{zeroLine}{(optional) Show a line at zero. Default is FALSE.}
\item{axisWidth}{(optional) Width of axis lines, if shown. Default is 0.25.}
\item{axisColor}{(optional) Color of axis lines, if shown. Default is black.}
\item{axisFontSize}{(optional) Font size of axis labels, if shown. Default
is 1.}
\item{slot}{(optional) Which custom graphics slot to modify. Slots 1-9 are
available for independent charts, gradients and images. Default is 1.}
\item{style.name}{(optional) Name of style; default is "default" style}
\item{base.url}{(optional) Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://localhost:1234
and the latest version of the CyREST API supported by this version of RCy3.}
}
\value{
None
}
\description{
Makes a heatmap chart per node using specified node table columns by
setting a default custom graphic style.
}
\examples{
\donttest{
setNodeCustomHeatMapChart(c("data1","data2","data3"))
}
}
\seealso{
setNodeCustomPosition, removeNodeCustomGraphics
}
| /man/setNodeCustomHeatMapChart.Rd | permissive | shraddhapai/RCy3 | R | false | true | 2,010 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StyleDefaults.R
\name{setNodeCustomHeatMapChart}
\alias{setNodeCustomHeatMapChart}
\title{Set Node Custom HeatMap Chart}
\usage{
setNodeCustomHeatMapChart(columns, colors = NULL, range = NULL,
orientation = "HORIZONTAL", rangeAxis = FALSE, zeroLine = FALSE,
axisWidth = 0.25, axisColor = "#000000", axisFontSize = 1,
slot = 1, style.name = "default", base.url = .defaultBaseUrl)
}
\arguments{
\item{columns}{List of node column names to be displayed.}
\item{colors}{(optional) List of colors to be matched with columns or with
range, depending on type. Default is a set of colors from an appropriate
Brewer palette.}
\item{range}{(optional) Min and max values of chart. Default is to use min
and max from specified data columns.}
\item{orientation}{(optional) VERTICAL or HORIZONTAL (default).}
\item{rangeAxis}{(optional) Show axis with range of values. Default is FALSE.}
\item{zeroLine}{(optional) Show a line at zero. Default is FALSE.}
\item{axisWidth}{(optional) Width of axis lines, if shown. Default is 0.25.}
\item{axisColor}{(optional) Color of axis lines, if shown. Default is black.}
\item{axisFontSize}{(optional) Font size of axis labels, if shown. Default
is 1.}
\item{slot}{(optional) Which custom graphics slot to modify. Slots 1-9 are
available for independent charts, gradients and images. Default is 1.}
\item{style.name}{(optional) Name of style; default is "default" style}
\item{base.url}{(optional) Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://localhost:1234
and the latest version of the CyREST API supported by this version of RCy3.}
}
\value{
None
}
\description{
Makes a heatmap chart per node using specified node table columns by
setting a default custom graphic style.
}
\examples{
\donttest{
setNodeCustomHeatMapChart(c("data1","data2","data3"))
}
}
\seealso{
setNodeCustomPosition, removeNodeCustomGraphics
}
|
#---------------------------------------------------------------------------------------
# 3 - MATRIX VISUALIZATION
#---------------------------------------------------------------------------------------
cat("\014")
# source(paste0(pathsCodes,'00_init.R'))
# source(paste0(pathsCodes,'01_cleaning_transforming.R'))
# source(paste0(pathsCodes,'02_data_exploration.R'))
set.seed(42)
# heatmaps for warmup: visualize the ratings matrix at three zoom levels
# (full matrix, a small corner, and the densest users/movies).
# Generalized: the matrix to plot and the corner size are now parameters;
# calling getMatrixHeatmap() with no arguments reproduces the original
# behaviour (it falls back to the global real_ratings_matrix_type).
getMatrixHeatmap <- function(ratings = real_ratings_matrix_type, corner = 25){
  # Full-matrix overview.
  g1 <- image(ratings,
              main = "Heatmap of the full rating matrix", xlab='Movies', ylab='Users')
  # Zoom on the top-left corner for a readable view.
  # FIX: the original title string was broken across two source lines,
  # embedding a newline plus indentation spaces in the rendered title.
  g2 <- image(ratings[seq_len(corner), seq_len(corner)],
              main = paste("Heatmap of the first", corner, "rows and columns"),
              xlab='Movies', ylab='Users')
  # Keep only the most active users (top 5% by rated-movie count) and the
  # most-rated movies (top 1% by user count).
  min_movies_per_user <- quantile(rowCounts(ratings), 0.95)
  min_users_per_movie <- quantile(colCounts(ratings), 0.99)
  g3 <- image(ratings[rowCounts(ratings) > min_movies_per_user,
                      colCounts(ratings) > min_users_per_movie],
              main = "Heatmap of the top users and movies",
              xlab='Movies', ylab='Users')
  print(g1)
  print(g2)
  print(g3)
  cat("\014")  # form feed: clears the console in RStudio
}
getMatrixHeatmap()
| /code/03_matrix_visualization.R | no_license | mvuckovic70/recommenderSystems | R | false | false | 1,285 | r |
#---------------------------------------------------------------------------------------
# 3 - MATRIX VISUALIZATION
#---------------------------------------------------------------------------------------
cat("\014")
# source(paste0(pathsCodes,'00_init.R'))
# source(paste0(pathsCodes,'01_cleaning_transforming.R'))
# source(paste0(pathsCodes,'02_data_exploration.R'))
set.seed(42)
# heatmaps for warmup
# Draw three heatmaps of the (global) rating matrix `real_ratings_matrix_type`:
# the full matrix, its first 25x25 corner, and the densest users/movies.
# NOTE(review): the second title string spans two source lines, so the
# rendered title contains an embedded newline plus indentation spaces.
getMatrixHeatmap <- function(){
  g1 <- image(real_ratings_matrix_type,
              main = "Heatmap of the full rating matrix", xlab='Movies', ylab='Users')
  g2 <- image(real_ratings_matrix_type[1:25, 1:25], main = "Heatmap of the first 25 rows and
  columns", xlab='Movies', ylab='Users')
  # Thresholds: users above the 95th percentile of rated-movie counts and
  # movies above the 99th percentile of user counts.
  min_movies_per_user <- quantile(rowCounts(real_ratings_matrix_type), 0.95)
  min_users_per_movie <- quantile(colCounts(real_ratings_matrix_type), 0.99)
  g3 <- image(real_ratings_matrix_type[rowCounts(real_ratings_matrix_type) > min_movies_per_user,
              colCounts(real_ratings_matrix_type) > min_users_per_movie],
              main = "Heatmap of the top users and movies",
              xlab='Movies', ylab='Users')
  print(g1)
  print(g2)
  print(g3)
  cat("\014")  # form feed: clears the console in RStudio
}
getMatrixHeatmap()
|
## ---- echo = FALSE-------------------------------------------------------
## knitr chunk defaults for the vignette figures.
knitr::opts_chunk$set(fig.width = 6, fig.height = 4.5)
## ---- warning = FALSE, message = FALSE-----------------------------------
library("SimMultiCorrData")
library("printr")
## Theoretical standardized cumulants (mean, sd, skew, kurtosis, fifth,
## sixth) for an Exponential distribution with parameter 0.5.
stcums <- calc_theory(Dist = "Exponential", params = 0.5)
## ---- warning = FALSE, message = FALSE-----------------------------------
## Simulate n = 10000 draws of one non-normal variable matching those
## cumulants via the "Polynomial" (fifth-order power) method.
H_exp <- nonnormvar1("Polynomial", means = stcums[1], vars = stcums[2]^2,
                     skews = stcums[3], skurts = stcums[4],
                     fifths = stcums[5], sixths = stcums[6], n = 10000,
                     seed = 1234)
## ------------------------------------------------------------------------
## Display the six polynomial constants found by the simulation.
as.matrix(H_exp$constants, nrow = 1, ncol = 6, byrow = TRUE)
## ------------------------------------------------------------------------
## Target (theoretical) moments of the distribution...
as.matrix(round(H_exp$summary_targetcont[, c("Distribution", "mean", "sd",
                                             "skew", "skurtosis", "fifth",
                                             "sixth")], 5), nrow = 1, ncol = 7,
          byrow = TRUE)
## ------------------------------------------------------------------------
## ...compared with the empirical moments of the simulated variable.
as.matrix(round(H_exp$summary_continuous[, c("Distribution", "mean", "sd",
                                             "skew", "skurtosis", "fifth",
                                             "sixth")], 5), nrow = 1, ncol = 7,
          byrow = TRUE)
## ------------------------------------------------------------------------
## Flag indicating whether the fitted polynomial yields a valid pdf.
H_exp$valid.pdf
## ------------------------------------------------------------------------
## 95th percentile of the target Exponential distribution.
y_star <- qexp(1 - 0.05, rate = 0.5) # note that rate = 1/mean
y_star
## ------------------------------------------------------------------------
# Root function used to invert the fifth-order polynomial transform:
# for standard-normal quantile z, coefficient vector c (constant term
# first) and target value y, returns 2 * p(z) + 2 - y, whose root is the
# quantile mapping to y.
f_exp <- function(z, c, y) {
  # Evaluate the degree-5 polynomial term by term (same order as before,
  # so floating-point results are bit-identical).
  poly_val <- c[1] + c[2] * z + c[3] * z^2 + c[4] * z^3 + c[5] * z^4 +
    c[6] * z^5
  2 * poly_val + 2 - y
}
## Invert the transform: find the standard-normal quantile z_prime that
## the fitted constants map to y_star.
z_prime <- uniroot(f_exp, interval = c(-1e06, 1e06),
                   c = as.numeric(H_exp$constants), y = y_star)$root
z_prime
## ------------------------------------------------------------------------
## Upper-tail probability beyond z_prime (should be close to 0.05, since
## y_star is the 95th percentile of the target distribution).
1 - pnorm(z_prime)
## ---- warning = FALSE, message = FALSE-----------------------------------
## Overlay the simulated density on the theoretical Exponential pdf.
plot_sim_pdf_theory(sim_y = H_exp$continuous_variable[, 1],
                    Dist = "Exponential", params = 0.5)
## ---- warning = FALSE, message = FALSE-----------------------------------
## Empirical cdf with the cumulative probability up to y_star marked.
plot_sim_cdf(sim_y = H_exp$continuous_variable[, 1], calc_cprob = TRUE,
             delta = y_star)
## ---- warning = FALSE, message = FALSE-----------------------------------
## Summary statistics implied by the fitted constants (alpha = 0.025).
as.matrix(t(stats_pdf(c = H_exp$constants[1, ], method = "Polynomial",
                      alpha = 0.025, mu = stcums[1], sigma = stcums[2])))
| /inst/doc/dist_comp.R | no_license | cran/SimMultiCorrData | R | false | false | 2,779 | r | ## ---- echo = FALSE-------------------------------------------------------
knitr::opts_chunk$set(fig.width = 6, fig.height = 4.5)
## ---- warning = FALSE, message = FALSE-----------------------------------
library("SimMultiCorrData")
library("printr")
stcums <- calc_theory(Dist = "Exponential", params = 0.5)
## ---- warning = FALSE, message = FALSE-----------------------------------
H_exp <- nonnormvar1("Polynomial", means = stcums[1], vars = stcums[2]^2,
skews = stcums[3], skurts = stcums[4],
fifths = stcums[5], sixths = stcums[6], n = 10000,
seed = 1234)
## ------------------------------------------------------------------------
as.matrix(H_exp$constants, nrow = 1, ncol = 6, byrow = TRUE)
## ------------------------------------------------------------------------
as.matrix(round(H_exp$summary_targetcont[, c("Distribution", "mean", "sd",
"skew", "skurtosis", "fifth",
"sixth")], 5), nrow = 1, ncol = 7,
byrow = TRUE)
## ------------------------------------------------------------------------
as.matrix(round(H_exp$summary_continuous[, c("Distribution", "mean", "sd",
"skew", "skurtosis", "fifth",
"sixth")], 5), nrow = 1, ncol = 7,
byrow = TRUE)
## ------------------------------------------------------------------------
H_exp$valid.pdf
## ------------------------------------------------------------------------
y_star <- qexp(1 - 0.05, rate = 0.5) # note that rate = 1/mean
y_star
## ------------------------------------------------------------------------
# Root function used to invert the fifth-order polynomial transform:
# returns 2 * p(z) + 2 - y for the degree-5 polynomial p with
# coefficients c (constant term first); its root is the standard-normal
# quantile mapping to y.
# NOTE: the parameter named `c` shadows base::c inside this function.
f_exp <- function(z, c, y) {
  return(2 * (c[1] + c[2] * z + c[3] * z^2 + c[4] * z^3 + c[5] * z^4 +
                c[6] * z^5) + 2 - y)
}
z_prime <- uniroot(f_exp, interval = c(-1e06, 1e06),
c = as.numeric(H_exp$constants), y = y_star)$root
z_prime
## ------------------------------------------------------------------------
1 - pnorm(z_prime)
## ---- warning = FALSE, message = FALSE-----------------------------------
plot_sim_pdf_theory(sim_y = H_exp$continuous_variable[, 1],
Dist = "Exponential", params = 0.5)
## ---- warning = FALSE, message = FALSE-----------------------------------
plot_sim_cdf(sim_y = H_exp$continuous_variable[, 1], calc_cprob = TRUE,
delta = y_star)
## ---- warning = FALSE, message = FALSE-----------------------------------
as.matrix(t(stats_pdf(c = H_exp$constants[1, ], method = "Polynomial",
alpha = 0.025, mu = stcums[1], sigma = stcums[2])))
|
## Row/column binding practice on subsets of data frame `aa`.
aaa = aa[1:3, 2:4]
aaa
bbb = aa[9:11, 2:4]
bbb
## rbind stacks data frames vertically; column counts/names must match.
rbind(aaa, bbb)
rbind(aaa, bbb, aaa, bbb, aaa, bbb)
## cbind binds side by side; row counts must match.
cbind(aaa, bbb)
cbind(aaa, bbb, aaa)
# As noted above, mismatched rows/columns raise an error:
aaa = aa[1:5, 2:4]
bbb = aa[9:11, 2:4]
# cbind(aaa, bbb)   # would error: 5 rows vs 3 rows
aaa = aa[1:3, 1:5]
bbb = aa[9:11, 2:4]
# rbind(aaa, bbb)   # would error: 5 columns vs 3 columns
table(aa$AWS_ID) # frequency table: distinct values and their counts
table(aa$AWS_ID, aa$X.) # cross-tab: AWS_ID values as rows, X. values as columns
## Overwrite a slice of column X. so the cross-tab below changes.
aa[2500:3100, "X."] = '+'
table(aa$AWS_ID, aa$X.)
tail(aa)
prop.table(table(aa$AWS_ID)) # convert table counts to proportions
prop.table(table(aa$AWS_ID, aa$X.))
prop.table(table(aa$AWS_ID, aa$X.))*100
round(prop.table(table(aa$AWS_ID, aa$X.))*100)
## Percentages rounded to 1 decimal and suffixed with '%'.
paste0(round(prop.table(table(aa$AWS_ID, aa$X.))*100, 1), '%')
| /06_데이터병합.R | no_license | Junohera/R_Basic | R | false | false | 857 | r | aaa = aa[1:3, 2:4]
aaa  # print the first subset (rows 1-3, cols 2-4 of aa, created just above)
bbb = aa[9:11, 2:4]
bbb
## rbind stacks data frames vertically; column counts/names must match.
rbind(aaa, bbb)
rbind(aaa, bbb, aaa, bbb, aaa, bbb)
## cbind binds side by side; row counts must match.
cbind(aaa, bbb)
cbind(aaa, bbb, aaa)
# As noted above, mismatched rows/columns raise an error:
aaa = aa[1:5, 2:4]
bbb = aa[9:11, 2:4]
# cbind(aaa, bbb)   # would error: 5 rows vs 3 rows
aaa = aa[1:3, 1:5]
bbb = aa[9:11, 2:4]
# rbind(aaa, bbb)   # would error: 5 columns vs 3 columns
table(aa$AWS_ID) # frequency table: distinct values and their counts
table(aa$AWS_ID, aa$X.) # cross-tab: AWS_ID values as rows, X. values as columns
## Overwrite a slice of column X. so the cross-tab below changes.
aa[2500:3100, "X."] = '+'
table(aa$AWS_ID, aa$X.)
tail(aa)
prop.table(table(aa$AWS_ID)) # convert table counts to proportions
prop.table(table(aa$AWS_ID, aa$X.))
prop.table(table(aa$AWS_ID, aa$X.))*100
round(prop.table(table(aa$AWS_ID, aa$X.))*100)
## Percentages rounded to 1 decimal and suffixed with '%'.
paste0(round(prop.table(table(aa$AWS_ID, aa$X.))*100, 1), '%')
|
\name{BTabilities}
\alias{BTabilities}
\title{ estimated abilities in a Bradley-Terry model }
\description{
Extracts the \code{abilities} component from a model object of class \code{\link{BTm}}.
}
\usage{
BTabilities(model)
}
\arguments{
\item{model}{a model object for which \code{inherits(model, "BTm")} is
\code{TRUE}}
}
\value{
A two-column numeric matrix, with columns named "ability" and "se";
one row for each player.
}
\author{ David Firth }
\references{
Firth, D. (2005) Bradley-Terry models in R. \emph{Journal of Statistical
Software}, to appear.
}
\seealso{ \code{\link{BTm}}, \code{\link{BTresiduals}} }
\examples{
data(citations)
origin <- factor(c("UK", "USA", "USA", "UK"))
citeModel2 <- BTm(citations ~ origin)
BTabilities(citeModel2)
}
\keyword{ models }
| /man/BTabilities.Rd | no_license | cran/BradleyTerry | R | false | false | 789 | rd | \name{BTabilities}
\alias{BTabilities}
\title{ estimated abilities in a Bradley-Terry model }
\description{
Extracts the \code{abilities} component from a model object of class \code{\link{BTm}}.
}
\usage{
BTabilities(model)
}
\arguments{
\item{model}{a model object for which \code{inherits(model, "BTm")} is
\code{TRUE}}
}
\value{
A two-column numeric matrix, with columns named "ability" and "se";
one row for each player.
}
\author{ David Firth }
\references{
Firth, D. (2005) Bradley-Terry models in R. \emph{Journal of Statistical
Software}, to appear.
}
\seealso{ \code{\link{BTm}}, \code{\link{BTresiduals}} }
\examples{
data(citations)
origin <- factor(c("UK", "USA", "USA", "UK"))
citeModel2 <- BTm(citations ~ origin)
BTabilities(citeModel2)
}
\keyword{ models }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/thin_draws.R
\name{thin_draws}
\alias{thin_draws}
\alias{thin}
\alias{thin_draws.draws}
\title{Thin \code{draws} objects}
\usage{
thin_draws(x, thin, ...)
\method{thin_draws}{draws}(x, thin, ...)
}
\arguments{
\item{x}{(draws) A \code{draws} object or another \R object for which the method
is defined.}
\item{thin}{(positive integer) The period for selecting draws.}
\item{...}{Arguments passed to individual methods (if applicable).}
}
\value{
A \code{draws} object of the same class as \code{x}.
}
\description{
Thin \code{\link{draws}} objects to reduce their size and autocorrelation in the chains.
}
\examples{
x <- example_draws()
niterations(x)
x <- thin_draws(x, thin = 5)
niterations(x)
}
| /man/thin_draws.Rd | permissive | brankozajamsek/posterior | R | false | true | 782 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/thin_draws.R
\name{thin_draws}
\alias{thin_draws}
\alias{thin}
\alias{thin_draws.draws}
\title{Thin \code{draws} objects}
\usage{
thin_draws(x, thin, ...)
\method{thin_draws}{draws}(x, thin, ...)
}
\arguments{
\item{x}{(draws) A \code{draws} object or another \R object for which the method
is defined.}
\item{thin}{(positive integer) The period for selecting draws.}
\item{...}{Arguments passed to individual methods (if applicable).}
}
\value{
A \code{draws} object of the same class as \code{x}.
}
\description{
Thin \code{\link{draws}} objects to reduce their size and autocorrelation in the chains.
}
\examples{
x <- example_draws()
niterations(x)
x <- thin_draws(x, thin = 5)
niterations(x)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots_utils.R
\name{plot_common_pairs}
\alias{plot_common_pairs}
\title{Plot scatterplot Gini corrected vs raw permutation for a group}
\usage{
plot_common_pairs(data, g = c("e", "m"), imp_gini = c("g", "c"))
}
\arguments{
\item{data}{dataframe with selected pairs from \link{select_pairs}}
\item{g}{group: expression or mutation}
\item{imp_gini}{gini or corrected gini}
}
\value{
Plot scatterplot Gini corrected vs raw permutation for expression or mutation data
}
\description{
Plot scatterplot Gini corrected vs raw permutation for a group
}
| /man/plot_common_pairs.Rd | permissive | sbenfatto/PARIS | R | false | true | 625 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots_utils.R
\name{plot_common_pairs}
\alias{plot_common_pairs}
\title{Plot scatterplot Gini corrected vs raw permutation for a group}
\usage{
plot_common_pairs(data, g = c("e", "m"), imp_gini = c("g", "c"))
}
\arguments{
\item{data}{dataframe with selected pairs from \link{select_pairs}}
\item{g}{group: expression or mutation}
\item{imp_gini}{gini or corrected gini}
}
\value{
Plot scatterplot Gini corrected vs raw permutation for expression or mutation data
}
\description{
Plot scatterplot Gini corrected vs raw permutation for a group
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gen_vec.R
\name{gen_vec}
\alias{gen_vec}
\title{Vector Error Correction Model Input}
\usage{
gen_vec(
data,
p = 2,
exogen = NULL,
s = 2,
r = NULL,
const = NULL,
trend = NULL,
seasonal = NULL,
structural = FALSE,
tvp = FALSE,
sv = FALSE,
fcst = NULL,
iterations = 50000,
burnin = 5000
)
}
\arguments{
\item{data}{a time-series object of endogenous variables.}
\item{p}{an integer vector of the lag order of the series in the (levels) VAR. Thus, the
resulting model's lag will be \eqn{p - 1}. See 'Details'.}
\item{exogen}{an optional time-series object of external regressors.}
\item{s}{an optional integer vector of the lag order of the exogenous variables of the series
in the (levels) VAR. Thus, the resulting model's lag will be \eqn{s - 1}. See 'Details'.}
\item{r}{an integer vector of the cointegration rank. See 'Details'.}
\item{const}{a character specifying whether a constant term enters the error correction
term (\code{"restricted"}) or the non-cointegration term as an \code{"unrestricted"} variable.
If \code{NULL} (default) no constant term will be added.}
\item{trend}{a character specifying whether a trend term enters the error correction
term (\code{"restricted"}) or the non-cointegration term as an \code{"unrestricted"} variable.
If \code{NULL} (default) no trend term will be added.}
\item{seasonal}{a character specifying whether seasonal dummies should be included in the error
correction term (\code{"restricted"}) or in the non-cointegration term as \code{"unrestricted"}
variables. If \code{NULL} (default) no seasonal terms will be added. The amount of dummy variables
will be automatically detected and depends on the frequency of the time-series object provided
in \code{data}.}
\item{structural}{logical indicating whether data should be prepared for the estimation of a
structural VAR model.}
\item{tvp}{logical indicating whether the model parameters are time varying.}
\item{sv}{logical indicating whether time varying error variances should be estimated by
employing a stochastic volatility algorithm.}
\item{fcst}{integer. Number of observations saved for forecasting evaluation.}
\item{iterations}{an integer of MCMC draws excluding burn-in draws (defaults
to 50000).}
\item{burnin}{an integer of MCMC draws used to initialize the sampler
(defaults to 5000). These draws do not enter the computation of posterior
moments, forecasts etc.}
}
\value{
An object of class \code{'bvecmodel'}, which contains the following elements:
\item{data}{A list of data objects, which can be used for posterior simulation. Element
\code{Y} is a time-series object of dependent variables. Element \code{W} is a timer-series
object of variables in the cointegration term and element \code{X} is a time-series
object of variables that do not enter the cointegration term. Element \code{SUR} contains a
matrix of element \code{X} in its SUR form.}
\item{model}{A list of model specifications.}
}
\description{
\code{gen_vec} produces the input for the estimation of a vector error correction (VEC) model.
}
\details{
The function produces the variable matrices of vector error correction (VEC)
models, which can also include exogenous variables:
\deqn{\Delta y_t = \Pi w_t + \sum_{i=1}^{p-1} \Gamma_{i} \Delta y_{t - i} +
\sum_{i=0}^{s-1} \Upsilon_{i} \Delta x_{t - i} +
C^{UR} d^{UR}_t + u_t,}
where
\eqn{\Delta y_t} is a \eqn{K \times 1} vector of differenced endogenous variables,
\eqn{w_t} is a \eqn{(K + M + N^{R}) \times 1} vector of cointegration variables,
\eqn{\Pi} is a \eqn{K \times (K + M + N^{R})} matrix of cointegration parameters,
\eqn{\Gamma_i} is a \eqn{K \times K} coefficient matrix of endogenous variables,
\eqn{\Delta x_t} is a \eqn{M \times 1} vector of differenced exogenous regressors,
\eqn{\Upsilon_i} is a \eqn{K \times M} coefficient matrix of exogenous regressors,
\eqn{d^{UR}_t} is a \eqn{N \times 1} vector of deterministic terms, and
\eqn{C^{UR}} is a \eqn{K \times N^{UR}} coefficient matrix of deterministic terms
that do not enter the cointegration term.
\eqn{p} is the lag order of endogenous variables and \eqn{s} is the lag
order of exogenous variables of the corresponding VAR model.
\eqn{u_t} is a \eqn{K \times 1} error term.
If an integer vector is provided as argument \code{p}, \code{s} or \code{r}, the function will
produce a distinct model for all possible combinations of those specifications.
If \code{tvp} is \code{TRUE}, the respective coefficients
of the above model are assumed to be time varying. If \code{sv} is \code{TRUE},
the error covariance matrix is assumed to be time varying.
}
\examples{
# Load data
data("e6")
# Generate model data
data <- gen_vec(e6, p = 4, const = "unrestricted", season = "unrestricted")
}
\references{
Lütkepohl, H. (2006). \emph{New introduction to multiple time series analysis} (2nd ed.). Berlin: Springer.
}
| /man/gen_vec.Rd | no_license | franzmohr/bvartools | R | false | true | 4,945 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gen_vec.R
\name{gen_vec}
\alias{gen_vec}
\title{Vector Error Correction Model Input}
\usage{
gen_vec(
data,
p = 2,
exogen = NULL,
s = 2,
r = NULL,
const = NULL,
trend = NULL,
seasonal = NULL,
structural = FALSE,
tvp = FALSE,
sv = FALSE,
fcst = NULL,
iterations = 50000,
burnin = 5000
)
}
\arguments{
\item{data}{a time-series object of endogenous variables.}
\item{p}{an integer vector of the lag order of the series in the (levels) VAR. Thus, the
resulting model's lag will be \eqn{p - 1}. See 'Details'.}
\item{exogen}{an optional time-series object of external regressors.}
\item{s}{an optional integer vector of the lag order of the exogenous variables of the series
in the (levels) VAR. Thus, the resulting model's lag will be \eqn{s - 1}. See 'Details'.}
\item{r}{an integer vector of the cointegration rank. See 'Details'.}
\item{const}{a character specifying whether a constant term enters the error correction
term (\code{"restricted"}) or the non-cointegration term as an \code{"unrestricted"} variable.
If \code{NULL} (default) no constant term will be added.}
\item{trend}{a character specifying whether a trend term enters the error correction
term (\code{"restricted"}) or the non-cointegration term as an \code{"unrestricted"} variable.
If \code{NULL} (default) no trend term will be added.}
\item{seasonal}{a character specifying whether seasonal dummies should be included in the error
correction term (\code{"restricted"}) or in the non-cointegration term as \code{"unrestricted"}
variables. If \code{NULL} (default) no seasonal terms will be added. The amount of dummy variables
will be automatically detected and depends on the frequency of the time-series object provided
in \code{data}.}
\item{structural}{logical indicating whether data should be prepared for the estimation of a
structural VAR model.}
\item{tvp}{logical indicating whether the model parameters are time varying.}
\item{sv}{logical indicating whether time varying error variances should be estimated by
employing a stochastic volatility algorithm.}
\item{fcst}{integer. Number of observations saved for forecasting evaluation.}
\item{iterations}{an integer of MCMC draws excluding burn-in draws (defaults
to 50000).}
\item{burnin}{an integer of MCMC draws used to initialize the sampler
(defaults to 5000). These draws do not enter the computation of posterior
moments, forecasts etc.}
}
\value{
An object of class \code{'bvecmodel'}, which contains the following elements:
\item{data}{A list of data objects, which can be used for posterior simulation. Element
\code{Y} is a time-series object of dependent variables. Element \code{W} is a timer-series
object of variables in the cointegration term and element \code{X} is a time-series
object of variables that do not enter the cointegration term. Element \code{SUR} contains a
matrix of element \code{X} in its SUR form.}
\item{model}{A list of model specifications.}
}
\description{
\code{gen_vec} produces the input for the estimation of a vector error correction (VEC) model.
}
\details{
The function produces the variable matrices of vector error correction (VEC)
models, which can also include exogenous variables:
\deqn{\Delta y_t = \Pi w_t + \sum_{i=1}^{p-1} \Gamma_{i} \Delta y_{t - i} +
\sum_{i=0}^{s-1} \Upsilon_{i} \Delta x_{t - i} +
C^{UR} d^{UR}_t + u_t,}
where
\eqn{\Delta y_t} is a \eqn{K \times 1} vector of differenced endogenous variables,
\eqn{w_t} is a \eqn{(K + M + N^{R}) \times 1} vector of cointegration variables,
\eqn{\Pi} is a \eqn{K \times (K + M + N^{R})} matrix of cointegration parameters,
\eqn{\Gamma_i} is a \eqn{K \times K} coefficient matrix of endogenous variables,
\eqn{\Delta x_t} is a \eqn{M \times 1} vector of differenced exogenous regressors,
\eqn{\Upsilon_i} is a \eqn{K \times M} coefficient matrix of exogenous regressors,
\eqn{d^{UR}_t} is a \eqn{N \times 1} vector of deterministic terms, and
\eqn{C^{UR}} is a \eqn{K \times N^{UR}} coefficient matrix of deterministic terms
that do not enter the cointegration term.
\eqn{p} is the lag order of endogenous variables and \eqn{s} is the lag
order of exogenous variables of the corresponding VAR model.
\eqn{u_t} is a \eqn{K \times 1} error term.
If an integer vector is provided as argument \code{p}, \code{s} or \code{r}, the function will
produce a distinct model for all possible combinations of those specifications.
If \code{tvp} is \code{TRUE}, the respective coefficients
of the above model are assumed to be time varying. If \code{sv} is \code{TRUE},
the error covariance matrix is assumed to be time varying.
}
\examples{
# Load data
data("e6")
# Generate model data
data <- gen_vec(e6, p = 4, const = "unrestricted", season = "unrestricted")
}
\references{
Lütkepohl, H. (2006). \emph{New introduction to multiple time series analysis} (2nd ed.). Berlin: Springer.
}
|
## ----------- set_if_null -----------
### from Altools
## Null-coalescing helper: return `default_value` when `null_arg` is
## NULL, otherwise return `null_arg` unchanged.
set_if_null <- function(null_arg=NULL, default_value) {
  if (is.null(null_arg)) default_value else null_arg
}
## ----------- suffix_colnames -----------
## Append each element's list name to that element's column names for
## integration, e.g. column "p" of element "rna" becomes "p_rna".
## Only the elements at `indices` (the first two by default) are
## modified; the updated list is returned.
suffix_colnames <- function(x, indices=1:2){
  for (idx in indices) {
    colnames(x[[idx]]) <- paste0(colnames(x[[idx]]), "_", names(x)[idx])
  }
  x
}
## ----------- all_identical -----------
## Verify that every element of a list is identical to the next one
## (hence all pairwise identical). Returns TRUE, or stops with an error
## naming the first offending pair of positions.
all_identical <- function(lst) {
  n_pairs <- length(lst) - 1L
  for (k in seq_len(max(n_pairs, 0L))) {
    if (!identical(lst[[k]], lst[[k + 1]]))
      stop(sprintf("not identical elements: %s and %s", k, k + 1), call. = FALSE)
  }
  TRUE
}
## ----------- named_list -----------
## Turn a character vector into a list whose elements are named after
## (and equal to) the corresponding strings.
named_list <- function(char) {
  setNames(as.list(char), char)
}
## ----------- impute_by_mean -----------
## Replace the NAs in each column with that column's mean over the
## non-missing entries. Because the columns are processed via apply(),
## the result is always a matrix regardless of the input class.
impute_by_means <- function(x) {
  fill_column <- function(col) {
    col[is.na(col)] <- mean(col, na.rm = TRUE)
    col
  }
  apply(x, 2, fill_column)
}
## ----------- ggplot color hue -----------
## Emulate ggplot2's default discrete colour scale: n evenly spaced hues
## around the HCL wheel. n + 1 points are generated because the wheel
## wraps (15 and 375 degrees coincide); the duplicate endpoint is
## trimmed by the [1:n] subset.
gg_color_hue <- function(n) {
  hue_angles <- seq(15, 375, length.out = n + 1)
  grDevices::hcl(h = hue_angles, l = 65, c = 100)[1:n]
}
## ----------- histogram of values using ggplot -----------
## Histogram of a numeric vector with 50 bins, filled with one of the 30
## default ggplot hues chosen by `fill_col` (wrapped via %% 30 so any
## integer is valid). Extra arguments in `...` are forwarded to labs().
## NOTE(review): requires ggplot2 to be attached by the caller.
hist_values <- function(values, fill_col = 1, ...) {
  df <- data.frame(values = values)
  ggplot(df) + geom_histogram(aes(values), bins = 50, fill = gg_color_hue(30)[fill_col %% 30 + 1]) + theme_bw() +
    labs(...)
}
## ----------- return sce with sorted top HVGs -----------
## Restrict a SingleCellExperiment-style object to its top highly
## variable genes, ordered by decreasing variance.
get_hvgs_sce <- function(sce = NULL,
                         n_genes = NULL, ## No. of genes, NULL for all genes ordered by variation
                         use_bio=TRUE ## choose based on biological variance or total variance?
){
  ## -- decompose variance into per-gene components (bio/total columns)
  require(scran)
  fit <- trendVar(x = sce, use.spikes=FALSE)
  sce <- decomposeVar(x = sce, fit = fit, use.spikes=FALSE)
  ## -- order genes by decreasing variance.
  ## FIX: the original used ifelse(use_bio, order(...), order(...)).
  ## ifelse() returns a result shaped like its (length-1) condition, so
  ## it silently truncated the ordering to a single index and the
  ## subsequent subsetting was mostly NA; if/else keeps the full
  ## permutation.
  gene_order <- if (use_bio) order(-sce$bio) else order(-sce$total)
  ## -- subset genes (if asked); NULL keeps every gene
  if(is.null(n_genes)) {
    n_genes <- dim(sce)[1]
  }
  keep_genes <- gene_order[seq_len(n_genes)]
  ## -- subset to the selected genes (rows)
  sce <- sce[keep_genes,]
  return(sce)
}
## ----------- create design matrix for MAE -----------
## Build a symmetric design matrix for multi-block integration, with one
## row/column per assay in `mae`. With rna_only = TRUE only the first
## block is connected to the others (weight `off_diag`); otherwise every
## pair of blocks shares the same off-diagonal weight. The diagonal is
## always 0.
create_design <- function(mae, off_diag = 0.5, rna_only = FALSE) {
  n_blocks <- length(mae)
  block_names <- names(mae)
  base_fill <- if (rna_only) 0 else off_diag
  design <- matrix(base_fill, nrow = n_blocks, ncol = n_blocks,
                   dimnames = list(block_names, block_names))
  if (rna_only) {
    # Connect only the first (e.g. RNA) block to all other blocks.
    design[1, ] <- design[, 1] <- off_diag
  }
  diag(design) <- 0
  design
}
## ----------- create repeated keepX for MAE -----------
## Repeat a single keepX specification once per assay and name the
## resulting list after the assays.
create_keepX <- function(mae, keepX) {
  set_names(rep(list(keepX), length(mae)), names(mae))
}
## ----------- subset MAE -----------
## Downsample a MultiAssayExperiment: keep at most `p` features (rows)
## per assay and a random sample of `n` cells (columns).
## NOTE(review): features are taken as the FIRST p rows, not a random
## subset; set.seed() mutates the global RNG state; and passing the
## default n = NULL appears to make sample(size = NULL) error, so n is
## effectively required -- confirm intended usage.
subset_mae <- function(mae, n=NULL, p=NULL, SEED=42) {
  for (i in seq_along(experiments(mae))) {
    feats <- min(dim( mae[[i]])[1], p)  # min(x, NULL) == min(x), so p = NULL keeps all rows
    mae[[i]] <- mae[[i]][seq_len(feats),]
  }
  n_cells <- dim(mae[[1]])[2]
  set.seed(SEED)  # reproducible cell sampling
  mae[,sample(x = seq_len(n_cells), size = n),]
}
## for subsetting the P x N data on local computer
## Take the top-left n x p corner of a matrix/data.frame: at most n rows
## and p columns; NULL (the default) keeps the full dimension.
## NOTE(review): despite the "P x N" naming, `n` indexes rows and `p`
## indexes columns here -- confirm this orientation matches the data.
subset_pn <- function(mat, n=NULL, p=NULL) {
  `%||%` <- purrr::`%||%`  # null-coalescing operator, borrowed from purrr
  p <- p %||% dim(mat)[2]  # default: all columns
  n <- n %||% dim(mat)[1]  # default: all rows
  p <- min(p, dim(mat)[2])
  n <- min(n, dim(mat)[1])
  mat[seq_len(n), seq_len(p)]
}
### from Altools
## NULL-coalescing helper (originally from Altools): return `default_value`
## when `null_arg` is NULL, otherwise return `null_arg` unchanged.
set_if_null <- function(null_arg = NULL, default_value) {
  if (!is.null(null_arg)) {
    return(null_arg)
  }
  default_value
}
## ----------- suffix_colnames -----------
## add suffix to column names for integration
## Append "_<element name>" to the column names of the selected elements of
## `x`, so columns remain distinguishable after the tables are combined.
suffix_colnames <- function(x, indices = 1:2) {
  for (idx in indices) {
    tag <- names(x)[idx]
    colnames(x[[idx]]) <- paste0(colnames(x[[idx]]), "_", tag)
  }
  x
}
## ----------- all_identical -----------
## check if elements in a list are identical
## Verify that every element of `lst` is identical() to the next one.
## Returns TRUE on success; stops with an informative error (no call in the
## message) at the first mismatching pair. An empty or length-1 list passes.
all_identical <- function(lst) {
  n_pairs <- length(lst) - 1L
  j <- 1L
  while (j <= n_pairs) {
    if (!identical(lst[[j]], lst[[j + 1]])) {
      stop(sprintf("not identical elements: %s and %s", j, j + 1), call. = FALSE)
    }
    j <- j + 1L
  }
  TRUE
}
## ----------- named_list -----------
## create a named list
## Turn a character vector into a list whose elements are named by their own
## values (useful with lapply/map so results keep the input names).
named_list <- function(char) {
  structure(as.list(char), names = char)
}
## ----------- impute_by_mean -----------
## impute NAs as mean of columns
## Replace the NAs in each column of `x` with that column's mean (NAs
## excluded from the mean). NOTE: apply() always returns a matrix, so a
## data.frame input comes back as a matrix -- same as the original behaviour.
impute_by_means <- function(x) {
  fill_with_mean <- function(col) {
    col_mean <- mean(col, na.rm = TRUE)
    col[is.na(col)] <- col_mean
    col
  }
  apply(x, 2, fill_with_mean)
}
## ----------- ggplot color hue -----------
## Reproduce ggplot2's default categorical palette: `n` equally spaced hues on
## the HCL colour wheel (15 and 375 degrees coincide, hence n + 1 points with
## the last dropped), at fixed luminance 65 and chroma 100.
## FIXES: `1:n` returned one colour for n = 0 (1:0 is c(1, 0)); seq_len()
## handles that correctly. `length` is spelled out as `length.out` instead of
## relying on partial argument matching, and `<-` replaces `=` assignment.
gg_color_hue <- function(n) {
  hues <- seq(15, 375, length.out = n + 1)
  grDevices::hcl(h = hues, l = 65, c = 100)[seq_len(n)]
}
## ----------- histogram of values using ggplot -----------
## Draw a 50-bin ggplot histogram of `values`, filled with one colour picked
## from a 30-colour hue wheel by `fill_col` (wrapped modulo 30); `...` is
## forwarded to labs() for titles/axis labels.
hist_values <- function(values, fill_col = 1, ...) {
  plot_data <- data.frame(values = values)
  bar_colour <- gg_color_hue(30)[fill_col %% 30 + 1]
  ggplot(plot_data) +
    geom_histogram(aes(values), bins = 50, fill = bar_colour) +
    theme_bw() +
    labs(...)
}
## ----------- return sce with sorted top HVGs -----------
get_hvgs_sce <- function(sce = NULL,
                         n_genes = NULL, ## No. of genes, NULL for all genes ordered by variation
                         use_bio = TRUE ## choose based on biological variance or total variance?
) {
  ## -- decompose variance into fitted (technical) and biological components
  require(scran)
  fit <- trendVar(x = sce, use.spikes = FALSE)
  sce <- decomposeVar(x = sce, fit = fit, use.spikes = FALSE)
  ## -- order genes by decreasing variance.
  ## FIX: the original used ifelse(use_bio, order(...), order(...));
  ## ifelse() is vectorised over its (length-1) condition and therefore
  ## returned only the FIRST index of the order vector, so every gene but
  ## one ended up as NA. A plain if/else keeps the whole permutation.
  gene_order <- if (use_bio) order(-sce$bio) else order(-sce$total)
  ## -- NULL means "keep all genes, just sorted"
  if (is.null(n_genes)) {
    n_genes <- dim(sce)[1]
  }
  keep_genes <- gene_order[seq_len(n_genes)]
  ## -- subset rows (genes) to the selected set, in variance order
  sce <- sce[keep_genes, ]
  return(sce)
}
## ----------- create design matrix for MAE -----------
create_design <- function(mae, off_diag = 0.5, rna_only = FALSE) {
  ## Build a square block-weight ("design") matrix with one row/column per
  ## element of `mae`. With `rna_only = TRUE` only the first block
  ## (presumably RNA, per the argument name) is linked to the others;
  ## otherwise every pair of distinct blocks gets the weight `off_diag`.
  n_blocks <- length(mae)
  block_names <- names(mae)
  if (rna_only) {
    design <- matrix(0, nrow = n_blocks, ncol = n_blocks,
                     dimnames = list(block_names, block_names))
    ## connect block 1 to every other block, in both directions
    design[1, ] <- off_diag
    design[, 1] <- off_diag
  } else {
    design <- matrix(off_diag, nrow = n_blocks, ncol = n_blocks,
                     dimnames = list(block_names, block_names))
  }
  ## a block is never linked to itself
  diag(design) <- 0
  design
}
## ----------- create repeated keepX for MAE -----------
## Repeat the `keepX` specification once per element of `mae`, returned as a
## list named after the assays (per-block variable-selection sizes;
## presumably consumed by a multi-block model -- confirm with callers).
## FIX: uses base setNames() instead of `%>%` + set_names(), so the helper no
## longer silently depends on magrittr/purrr being attached.
create_keepX <- function(mae, keepX) {
  setNames(rep(list(keepX), length(mae)), names(mae))
}
## ----------- subset MAE -----------
## Subset a MultiAssayExperiment-like object (`experiments()` accessor) to at
## most `p` features (rows) per assay and a reproducible random sample of `n`
## cells (columns), seeded with `SEED`.
## NOTE(review): `sample(..., size = NULL)` errors, so the `n = NULL` default
## only works if callers always pass `n` -- confirm.
subset_mae <- function(mae, n=NULL, p=NULL, SEED=42) {
## keep the first `p` rows of each assay; min(x, NULL) returns x, so
## p = NULL keeps every feature
for (i in seq_along(experiments(mae))) {
feats <- min(dim( mae[[i]])[1], p)
mae[[i]] <- mae[[i]][seq_len(feats),]
}
## cell count is taken from the first assay; sample `n` of its columns
n_cells <- dim(mae[[1]])[2]
set.seed(SEED)
mae[,sample(x = seq_len(n_cells), size = n),]
}
## for subsetting the P x N data on local computer
subset_pn <- function(mat, n=NULL, p=NULL) {
`%||%` <- purrr::`%||%`
p <- p %||% dim(mat)[2]
n <- n %||% dim(mat)[1]
p <- min(p, dim(mat)[2])
n <- min(n, dim(mat)[1])
mat[seq_len(n), seq_len(p)]
} |
# ------------------------------------------------------------------------------
# rbundle()
# Construction and event generation: an rbundle combines a list of rschedules
# with extra event dates (rdates) and excluded dates (exdates).
test_that("can create an empty rbundle()", {
x <- rbundle()
expect_s3_class(x, "rbundle")
# a fresh bundle carries no rschedules and empty Date vectors (not NULL)
expect_identical(x$rschedules, list())
expect_identical(x$rdates, new_date())
expect_identical(x$exdates, new_date())
})
test_that("rbundle() generates informative output", {
# snapshot-style check: printed output is compared against the stored file
verify_output(test_path("output", "test-rbundle.txt"), {
"# Empty rbundle"
rbundle()
})
})
test_that("can detect rbundles", {
expect_true(is_rbundle(rbundle()))
expect_false(is_rbundle(1))
})
# The tests below nest one rbundle inside another, so events come from both
# the inner bundle and whatever is added afterwards. new_date() takes
# days-since-epoch offsets: new_date(c(0, 1, 2, 3)) is 1970-01-01..04,
# matching the daily rules' since/until bounds.
test_that("rbundle works with non-rrules in the bundle", {
rrule1 <- daily(since = "1970-01-01", until = "1970-01-02")
rrule2 <- daily(since = "1970-01-03", until = "1970-01-04")
rb <- rbundle() %>%
add_rschedule(rrule1)
rb2 <- rbundle() %>%
add_rschedule(rb) %>%
add_rschedule(rrule2)
expect_identical(alma_events(rb2), new_date(c(0, 1, 2, 3)))
})
test_that("rbundle rdates work with non-rrules in the bundle", {
rrule1 <- daily(since = "1970-01-01", until = "1970-01-02")
rrule2 <- daily(since = "1970-01-03", until = "1970-01-04")
rdate <- "1970-01-05"
rb <- rbundle() %>%
add_rschedule(rrule1)
rb2 <- rbundle() %>%
add_rschedule(rb) %>%
add_rschedule(rrule2) %>%
add_rdates(rdate)
expect_identical(alma_events(rb2), new_date(c(0, 1, 2, 3, 4)))
})
test_that("rbundle exdates work with non-rrules in the bundle", {
rrule1 <- daily(since = "1970-01-01", until = "1970-01-02")
rrule2 <- daily(since = "1970-01-03", until = "1970-01-04")
exdate <- "1970-01-04"
rb <- rbundle() %>%
add_rschedule(rrule1)
rb2 <- rbundle() %>%
add_rschedule(rb) %>%
add_rschedule(rrule2) %>%
add_exdates(exdate)
expect_identical(alma_events(rb2), new_date(c(0, 1, 2)))
})
test_that("rbundle exdates work with all rrules in the bundle", {
rrule1 <- daily(since = "1970-01-03", until = "1970-01-04")
exdate <- "1970-01-04"
rb <- rbundle() %>%
add_rschedule(rrule1) %>%
add_exdates(exdate)
expect_identical(alma_events(rb), new_date(2))
})
# ------------------------------------------------------------------------------
# new_rbundle()
# Low-level constructor: argument validation and the subclassing hook.
test_that("can construct a new rbundle", {
expect_s3_class(new_rbundle(), c("rbundle", "rschedule"))
})
test_that("validates rschedules", {
expect_error(new_rbundle(1), "must be a list")
expect_error(new_rbundle(list(1)), "`rschedules\\[\\[1\\]\\]`")
})
# The almanac_global_* constants below appear to be package-internal sentinel
# dates (infinite / NA / extreme bounds) used to probe the validation paths.
test_that("validates rdates", {
expect_error(new_rbundle(rdates = 1), "must be a Date")
expect_error(new_rbundle(rdates = almanac_global_inf_date), "must be finite")
expect_error(new_rbundle(rdates = almanac_global_neg_inf_date), "must be finite")
expect_error(new_rbundle(rdates = almanac_global_na_date), "must be finite")
})
test_that("validates exdates", {
expect_error(new_rbundle(exdates = 1), "must be a Date")
expect_error(new_rbundle(exdates = almanac_global_inf_date), "must be finite")
expect_error(new_rbundle(exdates = almanac_global_neg_inf_date), "must be finite")
expect_error(new_rbundle(exdates = almanac_global_na_date), "must be finite")
})
test_that("validates date bounds", {
expect_error(new_rbundle(rdates = almanac_global_min_date - 1), class = "almanac_error_date_below_minimum")
expect_error(new_rbundle(rdates = almanac_global_max_date + 1), class = "almanac_error_date_above_maximum")
expect_error(new_rbundle(exdates = almanac_global_min_date - 1), class = "almanac_error_date_below_minimum")
expect_error(new_rbundle(exdates = almanac_global_max_date + 1), class = "almanac_error_date_above_maximum")
})
test_that("can subclass rbundle", {
# extra named arguments become fields on the subclassed object
x <- new_rbundle(foo = 1, class = "rsubclass")
expect_s3_class(x, c("rsubclass", "rbundle", "rschedule"), exact = TRUE)
expect_identical(x$foo, 1)
})
test_that("`...` must be named", {
expect_error(
new_rbundle(rschedules = list(), rdates = new_date(), exdates = new_date(), 1),
"must be named"
)
})
# ------------------------------------------------------------------------------
# rbundle_restore()
# Extension mechanism: rbundle_restore(x, to) lets a subclass rebuild its
# class and extra fields after an operation returns a bare rbundle.
# new_rsubclass()/local_rsubclass() are presumably helpers from the package's
# test setup; local_rsubclass() registers an rbundle_restore() method for the
# duration of the test -- confirm in the helper files.
test_that("rbundle_restore() gives developers a way to restore to `to`", {
x <- new_rbundle()
to <- new_rsubclass()
# By default, no restore
result <- rbundle_restore(x, to)
expect_s3_class(result, c("rbundle", "rschedule"), exact = TRUE)
expect_null(result$foo)
# Register `rbundle_restore()` method
local_rsubclass()
# Now class and attributes are restored
result <- rbundle_restore(x, to)
expect_s3_class(result, c("rsubclass", "rbundle", "rschedule"), exact = TRUE)
expect_identical(result$foo, numeric())
})
# Each add_*() helper below must round-trip the subclass through
# rbundle_restore() while still recording the added rschedule/rdate/exdate.
test_that("add_rschedule() uses rbundle_restore()", {
rschedule <- daily()
x <- new_rsubclass()
local_rsubclass()
result <- add_rschedule(x, rschedule = rschedule)
expect_s3_class(result, class(x), exact = TRUE)
expect_identical(result$foo, numeric())
expect_identical(result$rschedules, list(rschedule))
})
test_that("add_rdates() uses rbundle_restore()", {
rdate <- as.Date("2019-01-01")
x <- new_rsubclass()
local_rsubclass()
result <- add_rdates(x, rdates = rdate)
expect_s3_class(result, class(x), exact = TRUE)
expect_identical(result$foo, numeric())
expect_identical(result$rdates, rdate)
})
test_that("add_exdates() uses rbundle_restore()", {
exdate <- as.Date("2019-01-01")
x <- new_rsubclass()
local_rsubclass()
result <- add_exdates(x, exdates = exdate)
expect_s3_class(result, class(x), exact = TRUE)
expect_identical(result$foo, numeric())
expect_identical(result$exdates, exdate)
})
| /tests/testthat/test-rbundle.R | permissive | jayhesselberth/almanac | R | false | false | 5,577 | r | # ------------------------------------------------------------------------------
# rbundle()
# Construction and event generation: an rbundle combines a list of rschedules
# with extra event dates (rdates) and excluded dates (exdates).
test_that("can create an empty rbundle()", {
x <- rbundle()
expect_s3_class(x, "rbundle")
# a fresh bundle carries no rschedules and empty Date vectors (not NULL)
expect_identical(x$rschedules, list())
expect_identical(x$rdates, new_date())
expect_identical(x$exdates, new_date())
})
test_that("rbundle() generates informative output", {
# snapshot-style check: printed output is compared against the stored file
verify_output(test_path("output", "test-rbundle.txt"), {
"# Empty rbundle"
rbundle()
})
})
test_that("can detect rbundles", {
expect_true(is_rbundle(rbundle()))
expect_false(is_rbundle(1))
})
# The tests below nest one rbundle inside another, so events come from both
# the inner bundle and whatever is added afterwards. new_date() takes
# days-since-epoch offsets: new_date(c(0, 1, 2, 3)) is 1970-01-01..04,
# matching the daily rules' since/until bounds.
test_that("rbundle works with non-rrules in the bundle", {
rrule1 <- daily(since = "1970-01-01", until = "1970-01-02")
rrule2 <- daily(since = "1970-01-03", until = "1970-01-04")
rb <- rbundle() %>%
add_rschedule(rrule1)
rb2 <- rbundle() %>%
add_rschedule(rb) %>%
add_rschedule(rrule2)
expect_identical(alma_events(rb2), new_date(c(0, 1, 2, 3)))
})
test_that("rbundle rdates work with non-rrules in the bundle", {
rrule1 <- daily(since = "1970-01-01", until = "1970-01-02")
rrule2 <- daily(since = "1970-01-03", until = "1970-01-04")
rdate <- "1970-01-05"
rb <- rbundle() %>%
add_rschedule(rrule1)
rb2 <- rbundle() %>%
add_rschedule(rb) %>%
add_rschedule(rrule2) %>%
add_rdates(rdate)
expect_identical(alma_events(rb2), new_date(c(0, 1, 2, 3, 4)))
})
test_that("rbundle exdates work with non-rrules in the bundle", {
rrule1 <- daily(since = "1970-01-01", until = "1970-01-02")
rrule2 <- daily(since = "1970-01-03", until = "1970-01-04")
exdate <- "1970-01-04"
rb <- rbundle() %>%
add_rschedule(rrule1)
rb2 <- rbundle() %>%
add_rschedule(rb) %>%
add_rschedule(rrule2) %>%
add_exdates(exdate)
expect_identical(alma_events(rb2), new_date(c(0, 1, 2)))
})
test_that("rbundle exdates work with all rrules in the bundle", {
rrule1 <- daily(since = "1970-01-03", until = "1970-01-04")
exdate <- "1970-01-04"
rb <- rbundle() %>%
add_rschedule(rrule1) %>%
add_exdates(exdate)
expect_identical(alma_events(rb), new_date(2))
})
# ------------------------------------------------------------------------------
# new_rbundle()
# Low-level constructor: argument validation and the subclassing hook.
test_that("can construct a new rbundle", {
expect_s3_class(new_rbundle(), c("rbundle", "rschedule"))
})
test_that("validates rschedules", {
expect_error(new_rbundle(1), "must be a list")
expect_error(new_rbundle(list(1)), "`rschedules\\[\\[1\\]\\]`")
})
# The almanac_global_* constants below appear to be package-internal sentinel
# dates (infinite / NA / extreme bounds) used to probe the validation paths.
test_that("validates rdates", {
expect_error(new_rbundle(rdates = 1), "must be a Date")
expect_error(new_rbundle(rdates = almanac_global_inf_date), "must be finite")
expect_error(new_rbundle(rdates = almanac_global_neg_inf_date), "must be finite")
expect_error(new_rbundle(rdates = almanac_global_na_date), "must be finite")
})
test_that("validates exdates", {
expect_error(new_rbundle(exdates = 1), "must be a Date")
expect_error(new_rbundle(exdates = almanac_global_inf_date), "must be finite")
expect_error(new_rbundle(exdates = almanac_global_neg_inf_date), "must be finite")
expect_error(new_rbundle(exdates = almanac_global_na_date), "must be finite")
})
test_that("validates date bounds", {
expect_error(new_rbundle(rdates = almanac_global_min_date - 1), class = "almanac_error_date_below_minimum")
expect_error(new_rbundle(rdates = almanac_global_max_date + 1), class = "almanac_error_date_above_maximum")
expect_error(new_rbundle(exdates = almanac_global_min_date - 1), class = "almanac_error_date_below_minimum")
expect_error(new_rbundle(exdates = almanac_global_max_date + 1), class = "almanac_error_date_above_maximum")
})
test_that("can subclass rbundle", {
# extra named arguments become fields on the subclassed object
x <- new_rbundle(foo = 1, class = "rsubclass")
expect_s3_class(x, c("rsubclass", "rbundle", "rschedule"), exact = TRUE)
expect_identical(x$foo, 1)
})
test_that("`...` must be named", {
expect_error(
new_rbundle(rschedules = list(), rdates = new_date(), exdates = new_date(), 1),
"must be named"
)
})
# ------------------------------------------------------------------------------
# rbundle_restore()
# Extension mechanism: rbundle_restore(x, to) lets a subclass rebuild its
# class and extra fields after an operation returns a bare rbundle.
# new_rsubclass()/local_rsubclass() are presumably helpers from the package's
# test setup; local_rsubclass() registers an rbundle_restore() method for the
# duration of the test -- confirm in the helper files.
test_that("rbundle_restore() gives developers a way to restore to `to`", {
x <- new_rbundle()
to <- new_rsubclass()
# By default, no restore
result <- rbundle_restore(x, to)
expect_s3_class(result, c("rbundle", "rschedule"), exact = TRUE)
expect_null(result$foo)
# Register `rbundle_restore()` method
local_rsubclass()
# Now class and attributes are restored
result <- rbundle_restore(x, to)
expect_s3_class(result, c("rsubclass", "rbundle", "rschedule"), exact = TRUE)
expect_identical(result$foo, numeric())
})
# Each add_*() helper below must round-trip the subclass through
# rbundle_restore() while still recording the added rschedule/rdate/exdate.
test_that("add_rschedule() uses rbundle_restore()", {
rschedule <- daily()
x <- new_rsubclass()
local_rsubclass()
result <- add_rschedule(x, rschedule = rschedule)
expect_s3_class(result, class(x), exact = TRUE)
expect_identical(result$foo, numeric())
expect_identical(result$rschedules, list(rschedule))
})
test_that("add_rdates() uses rbundle_restore()", {
rdate <- as.Date("2019-01-01")
x <- new_rsubclass()
local_rsubclass()
result <- add_rdates(x, rdates = rdate)
expect_s3_class(result, class(x), exact = TRUE)
expect_identical(result$foo, numeric())
expect_identical(result$rdates, rdate)
})
test_that("add_exdates() uses rbundle_restore()", {
exdate <- as.Date("2019-01-01")
x <- new_rsubclass()
local_rsubclass()
result <- add_exdates(x, exdates = exdate)
expect_s3_class(result, class(x), exact = TRUE)
expect_identical(result$foo, numeric())
expect_identical(result$exdates, exdate)
})
|
#The reason that I chose this topic is because school shootings have recently become more and
#more frequent. They happen without warning, and many students and faculty members have been
#killed by the shooters. From my perspective, it is an important social issue that everyone
#needs to pay attention to in order to have a stable society in the future.
#Word cloud for real-time data
#Bar plot of the top 10 words in the word cloud
#Sentiment analysis and pie chart for real-time data
#Comparison word cloud for data from 2012 to now
#Comparison between the top 10 words in the word cloud from 2012 to now
#Time series on videos related to school shootings
library(tuber)
library(dplyr)
library(tidytext)
library(stringr)
library(lubridate)
library(dplyr)
library(tm)
library(SnowballC)
library(wordcloud)
library(RColorBrewer)
library(plotrix)
library(reshape)
#clientID <- "979385012952-iive5jqvebq2emfhrnld66iflt1rq1t8.apps.googleusercontent.com"
#client_secrete <- "F5n5Sxt2PXkeeUnnI89mJGo5"
#yt_oauth(clientID, client_secrete, token = '')
#Collecting Real Time Data for comments
#Selecting abc, CNN, NBC, fox and CBS channel's school shootings' news with the most click.
#comments_abc <- get_all_comments(video_id = c('xgIJosk0pnA'))
#comments_cnn <- get_all_comments(video_id = c('yW61tS8H66E'))
#comments_NBC <-get_all_comments(video_id = c('3F_vdCHlDds'))
#comments_fox <-get_all_comments(video_id = c('ZQcaYYHqrUY'))
#comments_cbs <-get_all_comments(video_id = c('npnH0_natfs'))
#all_comments <- rbind(comments_abc, comments_cnn, comments_NBC, comments_fox, comments_cbs)
#real_time_data <- data.frame(all_comments$authorDisplayName,
# all_comments$textOriginal,
# all_comments$likeCount,
# all_comments$publishedAt)
#clean real_time_data
#real_time_data[,4]<-str_split_fixed(real_time_data$all_comments.publishedAt, "T", 2)[,1]
#real_time_data[,4] <- ymd(real_time_data[,4])
#For real time data purpose, only collect data after 2018/04/011
#real <- real_time_data %>%
# filter(all_comments.publishedAt >= as.Date("2018-04-1") )
#real<- data.frame(real$all_comments.textOriginal)
#write.csv(real, file = "real.csv")
#real_time_data <- real_time_data %>%select(all_comments.authorDisplayName,
# all_comments.likeCount,
# all_comments.textOriginal,
# all_comments.publishedAt) %>%
# filter(all_comments.publishedAt >= as.Date("2018-04-1"))
#write.csv(real_time_data, file="real_time_data.csv")
#Word Cloud
## ---- Real-time word cloud --------------------------------------------------
## Read the saved real-time comments, clean them with tm, and draw a word
## cloud of term frequencies.
## FIX: lower-casing now happens BEFORE stopword removal; stopwords("english")
## is all lower-case, so the original order let capitalised stopwords such as
## "The" or "And" survive into the cloud.
real_time_data <- read.csv("real_time_data.csv", header=TRUE, stringsAsFactors = FALSE, fileEncoding = "latin1")
real_time_data_comments <- Corpus(VectorSource(real_time_data$all_comments.textOriginal))
## text cleaning (inspect a document with real_time_data_comments[[1]][1])
real_time_data_comments <- tm_map(real_time_data_comments, removeNumbers)
real_time_data_comments <- tm_map(real_time_data_comments, content_transformer(tolower))
real_time_data_comments <- tm_map(real_time_data_comments, removeWords, stopwords("english"))
real_time_data_comments <- tm_map(real_time_data_comments, removePunctuation)
real_time_data_comments <- tm_map(real_time_data_comments, removeWords, c("this","so", "will", "can",
                                                                          "why","said","get","really",
                                                                          "just", "like","the","one",
                                                                          "even", "you", "know","say",
                                                                          "think", "dont", "people",
                                                                          "school", "guns"))
real_time_data_comments <- tm_map(real_time_data_comments, stripWhitespace)
## term frequencies, sorted decreasing, as a word/freq data.frame
real_time_data_comments <- TermDocumentMatrix(real_time_data_comments)
real_time_data_comments <- as.matrix(real_time_data_comments)
real_time_data_comments <- sort(rowSums(real_time_data_comments), decreasing=TRUE)
real_time_data_comments <- data.frame(word=names(real_time_data_comments),
                                      freq=real_time_data_comments)
wordcloud(real_time_data_comments$word, real_time_data_comments$freq, random.order = FALSE,
          rot.per = 0.3, scale = c(4,.5), max.words=800, colors = rainbow(5),
          vfont= c ( "sans serif" , "plain" ), shape="circle")
#According to this word cloud, people's comments fall into roughly three aspects.
#The main one is an intense emotional reaction to the shooting, with words like "gun", "shooting" and "kill" mentioned a lot.
#Besides that, they also express sympathy for the children with words like "bad" and "sad".
#The last aspect is judgmental feelings towards the shooter and attitudes toward these tragedies, expressed in words such as "judge", "police", and "shooter".
#This word frequency is predictable because, apart from the news report itself, the most-clicked CNN video shows parents and teachers
#pushing for change after a school shooting, which is why the comments mention guns and gun control so often.
# show the top 10 words in the word cloud and their frequency
# real_time_data_comments is the sorted word/freq table built above; keep the
# ten most frequent words
real_high_frequency <- head(real_time_data_comments, 10)
# bar chart of those ten words and their counts
barplot(real_high_frequency$freq, width=1,space= 0.5,
names.arg = real_high_frequency$word,
col =rainbow(20), main ="Most frequent words",
ylab = "Word frequencies",cex.names = 0.8)
#For a clearer understanding of the word cloud, I made a bar plot to directly show the frequency of the top 10 words.
#From this bar plot we can see that, emotional words aside, "gun" has been mentioned the most,
#which indicates that people are beginning to pay attention to gun use.
#Sentiment Analysis
## ---- Sentiment analysis of the real-time comments --------------------------
## Tokenise the comments into single words and tag them with the bing
## (positive / negative) and nrc (anger / anticipation / joy) lexicons.
## FIX: tokens were paired with `line = 1:45256`, a hard-coded count that
## silently breaks when the data change; seq_along() always matches the data.
RTD_analysis <- str_split(real_time_data$all_comments.textOriginal, pattern = "\\s+")
RTD_analysis <- unlist(RTD_analysis)
RTD_analysis <- data_frame(line = seq_along(RTD_analysis), text = RTD_analysis)
RTD_analysis <- RTD_analysis %>%
  unnest_tokens(word, text)
bing_positive <- get_sentiments("bing") %>%
  filter(sentiment == "positive")
bing_negative <- get_sentiments("bing") %>%
  filter(sentiment == "negative")
#positive and negative & anger, anticipation and joy
positive <- RTD_analysis %>%
  inner_join(bing_positive) %>%
  count(word, sentiment, sort = TRUE)
negative <- RTD_analysis %>%
  inner_join(bing_negative) %>%
  count(word, sentiment, sort = TRUE)
anger <- get_sentiments("nrc") %>%
  filter(sentiment == "anger")
anticipation <- get_sentiments("nrc") %>%
  filter(sentiment == "anticipation")
joy <- get_sentiments("nrc") %>%
  filter(sentiment == "joy")
RTD_anger <- RTD_analysis %>%
  inner_join(anger) %>%
  count(word, sentiment, sort = TRUE)
RTD_anticipation <- RTD_analysis %>%
  inner_join(anticipation) %>%
  count(word, sentiment, sort = TRUE)
RTD_joy <- RTD_analysis %>%
  inner_join(joy) %>%
  count(word, sentiment, sort = TRUE)
## Pie chart of the sentiment totals (anticipation / anger / joy).
## FIX: the computed totals were immediately overwritten by the hard-coded
## snapshot `slices <- c(1000, 1881, 706)`, so the chart ignored the data;
## the chart now reflects the current counts.
slices <- c(anticipation_total <- sum(RTD_anticipation$n),
            anger_total <- sum(RTD_anger$n),
            joy_total <- sum(RTD_joy$n))
lbls <- c("anticipation", "anger", "joy")
pct <- round(slices/sum(slices)*100)
lbls <- paste(lbls, pct)
lbls <- paste(lbls,"%",sep="")
pie3D(slices,labels=lbls,explode=0.1,
main="Real Time: Pie Chart of sentiment analysis")
#Although the positive word "like" has been mentioned about 200 times, "like" in this context
#doesn't carry much meaning. We can conclude that, as expected, most comments are
#negative and intense. The pie chart also shows that anger occupies a large proportion of the comments.
#However, one thing worth noticing is that people's anticipations
#or suggestions about the current situation are not mentioned much in the comments,
#though this might be because the word "gun", which implies gun control, is counted in the anger sentiment.
#Historical data: also collect the comments from the ABC, CNN, NBC and CBS channels' school shooting news videos with the most clicks;
#however, since those comments are not enough, I also collect comments from other school shooting videos with high click counts.
#his_comments_abc <- get_all_comments(video_id = c('aRHcbJ9DHEg'))
#his_comments_abc2 <- get_all_comments(video_id = c('6X7cVDxYd6A')), if not enough, add
#his_comments_cnn <- get_all_comments(video_id = c('nPpEpNk519U'))
#his_comments_nbc <- get_all_comments(video_id = c('icVShYaYxEM'))
#his_comments_fox <- get_all_comments(video_id = c('-6RhIU5Dreo'))
#his_comments_cbs <- get_all_comments(video_id = c('-9XGNIKW-uM'))
#his_comments_random <- get_all_comments(video_id = c('UeO5QTAryNE'))
#his_all_comments <- rbind(his_comments_abc, his_comments_cnn, his_comments_nbc,
# his_comments_fox, his_comments_cbs,his_comments_random)
#his_data <- data.frame(his_all_comments$authorDisplayName,
# his_all_comments$textOriginal,
# his_all_comments$likeCount,
# his_all_comments$publishedAt)
#clean historical data
#his_data[,4]<-str_split_fixed(his_data$all_comments.publishedAt, "T", 2)[,1]
#his_data[,4] <- ymd(his_data[,4])
#For historical data purpose, only collect data between 2012/01/1 and 2017/12/31
#his_data <- his_data %>%
# filter(his_all_comments.publishedAt >= as.Date("2012-01-1") &
# his_all_comments.publishedAt<= as.Date("2017-12-31") )
#his_data_2012 <- his_data %>%
# filter(his_all_comments.publishedAt >= as.Date("2012-01-1") &
# his_all_comments.publishedAt<= as.Date("2012-12-31") )
#his_data_2012<- data.frame(his_data_2012$his_all_comments.textOriginal)
#his_data_2013 <- his_data %>%
# filter(his_all_comments.publishedAt >= as.Date("2013-01-1") &
# his_all_comments.publishedAt<= as.Date("2013-12-31") )
#his_data_2013<- data.frame(his_data_2013$his_all_comments.textOriginal)
#his_data_2014 <- his_data %>%
# filter(his_all_comments.publishedAt >= as.Date("2014-01-1") &
# his_all_comments.publishedAt<= as.Date("2014-12-31") )
#his_data_2014<- data.frame(his_data_2014$his_all_comments.textOriginal)
#his_data_2015 <- his_data %>%
# filter(his_all_comments.publishedAt >= as.Date("2015-01-1") &
# his_all_comments.publishedAt<= as.Date("2015-12-31") )
#his_data_2015<- data.frame(his_data_2015$his_all_comments.textOriginal)
#his_data_2016 <- his_data %>%
# filter(his_all_comments.publishedAt >= as.Date("2016-01-1") &
# his_all_comments.publishedAt<= as.Date("2016-12-31") )
#his_data_2016<- data.frame(his_data_2016$his_all_comments.textOriginal)
#his_data_2017 <- his_data %>%
# filter(his_all_comments.publishedAt >= as.Date("2017-01-1") &
# his_all_comments.publishedAt<= as.Date("2017-12-31") )
#his_data_2017<- data.frame(his_data_2017$his_all_comments.textOriginal)
#write.csv(his_data, file="his_data.csv")
#write.csv(his_data_2012, file="his_data_2012.csv")
#write.csv(his_data_2013, file="his_data_2013.csv")
#write.csv(his_data_2014, file="his_data_2014.csv")
#write.csv(his_data_2015, file="his_data_2015.csv")
#write.csv(his_data_2016, file="his_data_2016.csv")
#write.csv(his_data_2017, file="his_data_2017.csv")
## Load the flattened comment dumps (real-time plus one file per year) and
## wrap each in a tm Corpus for cleaning.
## NOTE(review): VectorSource() is given the whole data.frame here, not the
## text column (compare the real_time_data_comments pipeline above) --
## confirm that one-document-per-column is really what is intended.
real_time <- read.csv("real.csv", header=TRUE, stringsAsFactors = FALSE, fileEncoding = "latin1")
real_time_comments <- Corpus(VectorSource(real_time))
his_data_2012 <- read.csv("his_data_2012.csv", header=TRUE, stringsAsFactors = FALSE, fileEncoding = "latin1")
his_data_comments_2012 <-Corpus(VectorSource(his_data_2012))
his_data_2013 <- read.csv("his_data_2013.csv", header=TRUE, stringsAsFactors = FALSE, fileEncoding = "latin1")
his_data_comments_2013 <-Corpus(VectorSource(his_data_2013))
his_data_2014 <- read.csv("his_data_2014.csv", header=TRUE, stringsAsFactors = FALSE, fileEncoding = "latin1")
his_data_comments_2014 <-Corpus(VectorSource(his_data_2014))
his_data_2015 <- read.csv("his_data_2015.csv", header=TRUE, stringsAsFactors = FALSE, fileEncoding = "latin1")
his_data_comments_2015 <-Corpus(VectorSource(his_data_2015))
his_data_2016 <- read.csv("his_data_2016.csv", header=TRUE, stringsAsFactors = FALSE, fileEncoding = "latin1")
his_data_comments_2016 <-Corpus(VectorSource(his_data_2016))
his_data_2017 <- read.csv("his_data_2017.csv", header=TRUE, stringsAsFactors = FALSE, fileEncoding = "latin1")
his_data_comments_2017 <-Corpus(VectorSource(his_data_2017))
#text cleaning
## Clean a tm corpus: drop numbers, lower-case, remove English stopwords and a
## custom list of filler words, strip punctuation and extra whitespace, and
## return the cleaned corpus.
## FIXES: removeNumbers was applied twice, and stopwords were removed before
## lower-casing (so capitalised stopwords such as "The" survived);
## lower-casing now happens first, and the result is returned explicitly.
clean.text = function(x)
{
  x <- tm_map(x, removeNumbers)
  x <- tm_map(x, content_transformer(tolower))
  x <- tm_map(x, removeWords, stopwords("english"))
  x <- tm_map(x, removePunctuation)
  x <- tm_map(x, removeWords, c("this","so", "will", "can", "why","said",
                                "get","really","just", "like","the","one",
                                "even", "you", "know","say",
                                "think", "dont", "people","guns"))
  x <- tm_map(x, stripWhitespace)
  x
}
# clean texts
clean_real = clean.text(real_time_comments)
clean_2012 = clean.text(his_data_comments_2012)
clean_2013 = clean.text(his_data_comments_2013)
clean_2014 = clean.text(his_data_comments_2014)
clean_2015 = clean.text(his_data_comments_2015)
clean_2016 = clean.text(his_data_comments_2016)
clean_2017 = clean.text(his_data_comments_2017)
# collapse each cleaned corpus into one long string (one "document" per period)
comments_real = paste(clean_real, collapse=" ")
comments_2012 = paste(clean_2012, collapse=" ")
comments_2013 = paste(clean_2013, collapse=" ")
comments_2014 = paste(clean_2014, collapse=" ")
comments_2015 = paste(clean_2015, collapse=" ")
comments_2016 = paste(clean_2016, collapse=" ")
comments_2017 = paste(clean_2017, collapse=" ")
# put everything in a single vector
all = c(comments_real, comments_2012, comments_2013,comments_2014,comments_2015,comments_2016,comments_2017)
# NOTE(review): the expression below is evaluated and immediately discarded
# (its result is not assigned or passed anywhere) -- it has no effect and is
# probably a leftover; consider removing it.
c(stopwords("english"),"comments_real", "comments_2012", "comments_2013", "comments_2014", "comments_2015",
"comments_2016", "comments_2017")
# one term-document matrix with one column per period
corpus = Corpus(VectorSource(all))
tdm = TermDocumentMatrix(corpus)
tdm = as.matrix(tdm)
colnames(tdm) = c("comments_real","comments_2012","comments_2013","comments_2014","comments_2015","comments_2016","comments_2017")
#Comparison Word Cloud
comparison.cloud(tdm, random.order=FALSE,
colors = c("pink", "#00B2FF", "red", "#FF0099", "#6600CC","blue","green"),
title.size=1, shape="circle")
#According to the word cloud, 2017 has the most comments, even though we only include part of the
#comments from 2018. We can conjecture that school shootings became a more serious and prominent
#topic around 2017, so more comments start from that year. However, we should admit that 2017
#having the most comments might also be due to other circumstances. Most words in the word cloud
#are negative; although they carry similar emotions, we can detect that the main topic around
#school shootings differs slightly between years. The real-time data focuses more on judgment
#of the shooter, with words like "judge" and "penalty", while 2017's comments are mainly about school.
#Moreover, 2014's comments mention words like "government" and "crime".
# show the top 10 words in the word cloud and their frequency
# frequency function
## Top-10 word frequency table for a cleaned corpus: build a term-document
## matrix, sum the counts per term, sort decreasing, and keep the ten most
## common words as a word/freq data.frame.
frequency.text = function(x){
  tdm <- as.matrix(TermDocumentMatrix(x))
  counts <- sort(rowSums(tdm), decreasing = TRUE)
  freq_table <- data.frame(word = names(counts), freq = counts)
  head(freq_table, 10)
}
# per-period top-10 frequency tables (frequency.text defined above)
frequency_real = frequency.text(clean_real)
frequency_2012 = frequency.text(clean_2012)
frequency_2013 = frequency.text(clean_2013)
frequency_2014 = frequency.text(clean_2014)
frequency_2015 = frequency.text(clean_2015)
frequency_2016 = frequency.text(clean_2016)
frequency_2017 = frequency.text(clean_2017)
# 3x3 grid of bar charts, one panel per period, shared y-axis limit
# NOTE(review): the variable name `structure` masks base::structure(), and
# the saved par() settings are never restored via par(structure) -- the
# device is simply closed with dev.off() below; consider renaming and
# restoring instead.
structure <- par(mfrow=c(3,3))
barplot(frequency_real$freq, width=1,space= 0.5,
names.arg = frequency_real$word,ylim=c(0,3000),
col =rainbow(20), main ="Most frequent words_real",
ylab = "Word frequencies",cex.names = 0.6)
barplot(frequency_2012$freq, width=1,space= 0.5,
names.arg = frequency_2012$word,ylim=c(0,3000),
col =rainbow(20), main ="Most frequent words_2012",
ylab = "Word frequencies",cex.names = 0.6)
barplot(frequency_2013$freq, width=1,space= 0.5,
names.arg = frequency_2013$word,ylim=c(0,3000),
col =rainbow(20), main ="Most frequent words_2013",
ylab = "Word frequencies",cex.names = 0.6)
barplot(frequency_2014$freq, width=1,space= 0.5,
names.arg = frequency_2014$word,ylim=c(0,3000),
col =rainbow(20), main ="Most frequent words_2014",
ylab = "Word frequencies",cex.names = 0.6)
barplot(frequency_2015$freq, width=1,space= 0.5,
names.arg = frequency_2015$word,ylim=c(0,3000),
col =rainbow(20), main ="Most frequent words_2015",
ylab = "Word frequencies",cex.names = 0.6)
barplot(frequency_2016$freq, width=1,space= 0.5,
names.arg = frequency_2016$word,ylim=c(0,3000),
col =rainbow(20), main ="Most frequent words_2016",
ylab = "Word frequencies",cex.names = 0.6)
barplot(frequency_2017$freq, width=1,space= 0.5,
names.arg = frequency_2017$word,ylim=c(0,3000),
col =rainbow(20), main ="Most frequent words_2017",
ylab = "Word frequencies",cex.names = 0.6)
# closes the graphics device (also discards the modified par settings)
dev.off()
#According to these graphs, although different years have different most-frequent words,
#we can clearly see that the word "gun" is mentioned heavily in 2012, 2013, 2014, 2015, and
#2018 (the real-time data). Although in 2016 and 2017 "gun" is not the single most frequent
#word, it still ranks among the top ten. This indicates that when a school shooting happens,
#most comments -- besides expressions of sympathy for the children -- concern eagerness for
#gun control or gun-related issues. It is a continuous topic from 2012 to 2018 that people
#keep discussing and debating.
#The real-time data only covers 04/01 through 04/22, so it has a smaller word count than the rest.
#2012 also has a small word count because it was long ago, and people might not have been as
#active in commenting online back in 2012.
#Time Series on video related with school shootings
#Real-time data on how many videos report about school shootings (for a time series).
#news <- yt_search("school shootings", lang="en", published_after = "2018-04-10T00:00:00Z")
#new <- yt_search("school shooting", lang ="en",published_after = "2018-04-10T00:00:00Z")
#News <- rbind(news,new)
#write.csv(News, "News.csv")
# search results saved earlier by yt_search(); keep publishedAt + title only.
# data.frame(News$publishedAt, News$title) names the columns
# News.publishedAt / News.title, which is why the dedup below uses
# News$News.title (same pattern for each yearly table).
News <- read.csv("News.csv", header=TRUE)
News <- data.frame(News$publishedAt,
News$title)
#Eliminating the same title
News <- News[!duplicated(News$News.title),]
#2017
#news_2017 <- yt_search("school shootings", lang="en", published_after = "2017-01-1T00:00:00Z", published_before = "2017-12-31T00:00:00Z")
#new_2017 <- yt_search("school shooting", lang ="en",published_after = "2017-01-1T00:00:00Z", published_before = "2017-12-31T00:00:00Z")
#News_2017 <- rbind(news_2017,new_2017)
#write.csv(News_2017, "News_2017.csv")
News_2017 <- read.csv("News_2017.csv", header=TRUE)
News_2017 <- data.frame(News_2017$publishedAt,
News_2017$title)
News_2017 <- News_2017[!duplicated(News_2017$News_2017.title),]
#2016
#news_2016 <- yt_search("school shootings", lang="en", published_after = "2016-01-1T00:00:00Z", published_before = "2016-12-31T00:00:00Z")
#new_2016 <- yt_search("school shooting", lang ="en",published_after = "2016-01-1T00:00:00Z", published_before = "2016-12-31T00:00:00Z")
#News_2016 <- rbind(news_2016,new_2016)
#write.csv(News_2016, "News_2016.csv")
# NOTE(review): file name is lower-case "news_2016.csv" here but the write
# above used "News_2016.csv" -- this only works on case-insensitive file
# systems; confirm the actual file name.
News_2016 <- read.csv("news_2016.csv", header=TRUE)
News_2016 <- data.frame(News_2016$publishedAt,
News_2016$title)
News_2016 <- News_2016[!duplicated(News_2016$News_2016.title),]
#2015
#news_2015 <- yt_search("school shootings", lang="en", published_after = "2015-01-1T00:00:00Z", published_before = "2015-12-31T00:00:00Z")
#new_2015 <- yt_search("school shooting", lang ="en",published_after = "2015-01-1T00:00:00Z", published_before = "2015-12-31T00:00:00Z")
#News_2015 <- rbind(news_2015,new_2015)
#write.csv(News_2015, "News_2015.csv")
News_2015 <- read.csv("News_2015.csv", header=TRUE)
News_2015 <- data.frame(News_2015$publishedAt,
News_2015$title)
News_2015 <- News_2015[!duplicated(News_2015$News_2015.title),]
#2014
#news_2014 <- yt_search("school shootings", lang="en", published_after = "2014-01-1T00:00:00Z", published_before = "2014-12-31T00:00:00Z")
#new_2014 <- yt_search("school shooting", lang ="en",published_after = "2014-01-1T00:00:00Z", published_before = "2014-12-31T00:00:00Z")
#News_2014 <- rbind(news_2014,new_2014)
#write.csv(News_2014, "News_2014.csv")
News_2014 <- read.csv("News_2014.csv", header=TRUE)
News_2014 <- data.frame(News_2014$publishedAt,
News_2014$title)
News_2014 <- News_2014[!duplicated(News_2014$News_2014.title),]
#2013
#news_2013 <- yt_search("school shootings", lang="en", published_after = "2013-01-1T00:00:00Z", published_before = "2016-12-31T00:00:00Z")
#new_2013 <- yt_search("school shooting", lang ="en",published_after = "2013-01-1T00:00:00Z", published_before = "2016-12-31T00:00:00Z")
#News_2013 <- rbind(news_2013,new_2013)
#write.csv(News_2013, "News_2013.csv")
News_2013 <- read.csv("News_2013.csv", header=TRUE)
News_2013 <- data.frame(News_2013$publishedAt,
News_2013$title)
News_2013 <- News_2013[!duplicated(News_2013$News_2013.title),]
#2012
#news_2012 <- yt_search("school shootings", lang="en", published_after = "2012-01-1T00:00:00Z", published_before = "2012-12-31T00:00:00Z")
#new_2012 <- yt_search("school shooting", lang ="en",published_after = "2012-01-1T00:00:00Z", published_before = "2012-12-31T00:00:00Z")
#News_2012 <- rbind(news_2012,new_2012)
#write.csv(News_2012, "News_2012.csv")
News_2012 <- read.csv("News_2012.csv", header=TRUE)
News_2012 <- data.frame(News_2012$publishedAt,
News_2012$title)
News_2012 <- News_2012[!duplicated(News_2012$News_2012.title),]
#time series for related school shooting video from 2012 to 2018
ts_shooting <- ts(c(nrow(News_2012), nrow(News_2013), nrow(News_2014),
nrow(News_2015), nrow(News_2016), nrow(News_2017),
nrow(News) ), start=c(2012), end=c(2018))
plot(ts_shooting, main = "Time Series for Related School Shootings' Videos")
namebank <- as.character(c(2012:2018))
text(c(2012:2018), ts_shooting, namebank,cex=0.9)
#According to the plot, there is a sharp increase in 2013 in the number of videos related to
#school shootings. Based on past data, I suspect this is because the number of school shootings
#roughly doubled from 2012 to 2013, which drew a large amount of attention to the issue and so
#increased the number of such videos on YouTube.
#2014 and 2015 have similar numbers of videos about school shootings, which may be because the
#numbers of school shootings in 2014 and 2015 are similar. The number of videos about school
#shootings changes between 2016 and 2017 along with the number of school shootings. However,
#the count for 2018 appears lower only because the 2018 data covers part of the year. Since the
#number of school shootings has already reached 17 cases in 2018, I predict that the number of
#videos about school shootings will also increase compared to previous years.
| /Youtube_data.R | no_license | Jennifer0505/Final-Project | R | false | false | 23,656 | r | #The reason that I wish to do this topic is becuase recently more and more school shootings begin
#to exist. It happened without any preparation and many students and faculties have been killed
#by the shooter. From my perspective, it is an important social issue that everyone need to pay
#attention to in order to have a stable society in the future.
#Word Cloud for real time data
#Bar plot The Top 10 words in the word cloud
#Sentiment Analysis and Pie Chart for real time data
#Comparison Word cloud for data from 2012 to now
#Comparison between top 10 words in teh word cloud from 2012 to now
#Time Series on video related with school shootings
library(tuber)
library(dplyr)
library(tidytext)
library(stringr)
library(lubridate)
library(dplyr)
library(tm)
library(SnowballC)
library(wordcloud)
library(RColorBrewer)
library(plotrix)
library(reshape)
#clientID <- "979385012952-iive5jqvebq2emfhrnld66iflt1rq1t8.apps.googleusercontent.com"
#client_secrete <- "F5n5Sxt2PXkeeUnnI89mJGo5"
#yt_oauth(clientID, client_secrete, token = '')
#Collecting Real Time Data for comments
#Selecting abc, CNN, NBC, fox and CBS channel's school shootings' news with the most click.
#comments_abc <- get_all_comments(video_id = c('xgIJosk0pnA'))
#comments_cnn <- get_all_comments(video_id = c('yW61tS8H66E'))
#comments_NBC <-get_all_comments(video_id = c('3F_vdCHlDds'))
#comments_fox <-get_all_comments(video_id = c('ZQcaYYHqrUY'))
#comments_cbs <-get_all_comments(video_id = c('npnH0_natfs'))
#all_comments <- rbind(comments_abc, comments_cnn, comments_NBC, comments_fox, comments_cbs)
#real_time_data <- data.frame(all_comments$authorDisplayName,
# all_comments$textOriginal,
# all_comments$likeCount,
# all_comments$publishedAt)
#clean real_time_data
#real_time_data[,4]<-str_split_fixed(real_time_data$all_comments.publishedAt, "T", 2)[,1]
#real_time_data[,4] <- ymd(real_time_data[,4])
#For real time data purpose, only collect data after 2018/04/011
#real <- real_time_data %>%
# filter(all_comments.publishedAt >= as.Date("2018-04-1") )
#real<- data.frame(real$all_comments.textOriginal)
#write.csv(real, file = "real.csv")
#real_time_data <- real_time_data %>%select(all_comments.authorDisplayName,
# all_comments.likeCount,
# all_comments.textOriginal,
# all_comments.publishedAt) %>%
# filter(all_comments.publishedAt >= as.Date("2018-04-1"))
#write.csv(real_time_data, file="real_time_data.csv")
#Word Cloud
#Load the cached real-time comments and build a tm corpus
#(one document per comment).
real_time_data <- read.csv("real_time_data.csv", header = TRUE,
                           stringsAsFactors = FALSE, fileEncoding = "latin1")
real_time_data_comments <- Corpus(VectorSource(real_time_data$all_comments.textOriginal))
#Text cleaning. Lower-casing is done FIRST: removeWords() is case-sensitive,
#so the original order (stopwords removed before tolower) silently kept
#capitalised stopwords such as "The" and "This" in the cloud.
real_time_data_comments <- tm_map(real_time_data_comments, content_transformer(tolower))
real_time_data_comments <- tm_map(real_time_data_comments, removeNumbers)
real_time_data_comments <- tm_map(real_time_data_comments, removeWords, stopwords("english"))
real_time_data_comments <- tm_map(real_time_data_comments, removePunctuation)
#Domain-specific filler words; "school" and "guns" are dropped because they
#appear in almost every comment and would dominate the cloud.
real_time_data_comments <- tm_map(real_time_data_comments, removeWords,
                                  c("this", "so", "will", "can", "why", "said",
                                    "get", "really", "just", "like", "the",
                                    "one", "even", "you", "know", "say",
                                    "think", "dont", "people", "school", "guns"))
real_time_data_comments <- tm_map(real_time_data_comments, stripWhitespace)
#Collapse to a term-frequency table sorted by decreasing frequency.
real_time_data_comments <- TermDocumentMatrix(real_time_data_comments)
real_time_data_comments <- as.matrix(real_time_data_comments)
real_time_data_comments <- sort(rowSums(real_time_data_comments), decreasing = TRUE)
real_time_data_comments <- data.frame(word = names(real_time_data_comments),
                                      freq = real_time_data_comments)
wordcloud(real_time_data_comments$word, real_time_data_comments$freq,
          random.order = FALSE, rot.per = 0.3, scale = c(4, .5),
          max.words = 800, colors = rainbow(5),
          vfont = c("sans serif", "plain"), shape = "circle")
#According to this word cloud, people basically commented through three aspects.
#The main perspective is an intense emotion toward the shooting, which they talk about the words like "gun" , "shooting" and " kill" a lot.
# gun. Besides that,they also express the sympathy to the children with words like "bad" and "sad".
# The last aspect is judgemental emotion towards the shooter and attitudes toward these tragedies, expressed in words like "judge", "police", and "shooter".
# This word frequency is predictable because, apart from the news video itself, the most-clicked CNN video is about parents and teachers
#pushing for change after a school shooting, which is why the comments mention guns and gun control so often.
# show the top 10 words in the word cloud and their frequency
# Top 10 most frequent words in the real-time comments; the term table built
# above is already sorted by decreasing frequency, so head() yields the top terms.
real_high_frequency <- head(real_time_data_comments, 10)
barplot(real_high_frequency$freq, width=1,space= 0.5,
        names.arg = real_high_frequency$word,
        col =rainbow(20), main ="Most frequent words",
        ylab = "Word frequencies",cex.names = 0.8)
#For a clearer understanding of the word cloud, I made a bar plot that directly shows the frequency of the top 10 words.
#From this bar plot we can notice that, aside from emotional words, "gun" has been mentioned the most,
#which indicates that people are beginning to pay attention to gun use.
#Sentiment Analysis
#Tokenise every comment into single words for lexicon-based sentiment tagging.
RTD_analysis <- str_split(real_time_data$all_comments.textOriginal, pattern = "\\s+")
RTD_analysis <- unlist(RTD_analysis)
#seq_along() instead of the hard-coded row count 1:45256, so the script keeps
#working when the number of harvested comments changes.
RTD_analysis <- data_frame(line = seq_along(RTD_analysis), text = RTD_analysis)
RTD_analysis <- RTD_analysis %>%
  unnest_tokens(word, text)
#Bing lexicon: positive / negative word lists.
bing_positive <- get_sentiments("bing") %>%
  filter(sentiment == "positive")
bing_negative <- get_sentiments("bing") %>%
  filter(sentiment == "negative")
#Per-word counts of positive and negative terms in the comments.
positive <- RTD_analysis %>%
  inner_join(bing_positive) %>%
  count(word, sentiment, sort = TRUE)
negative <- RTD_analysis %>%
  inner_join(bing_negative) %>%
  count(word, sentiment, sort = TRUE)
#NRC lexicon: anger / anticipation / joy word lists.
anger <- get_sentiments("nrc") %>%
  filter(sentiment == "anger")
anticipation <- get_sentiments("nrc") %>%
  filter(sentiment == "anticipation")
joy <- get_sentiments("nrc") %>%
  filter(sentiment == "joy")
RTD_anger <- RTD_analysis %>%
  inner_join(anger) %>%
  count(word, sentiment, sort = TRUE)
RTD_anticipation <- RTD_analysis %>%
  inner_join(anticipation) %>%
  count(word, sentiment, sort = TRUE)
RTD_joy <- RTD_analysis %>%
  inner_join(joy) %>%
  count(word, sentiment, sort = TRUE)
#Pie chart of the three NRC sentiments. Slice sizes are the computed word
#counts; the original script overwrote them with hard-coded numbers
#(c(1000, 1881, 706)), which would silently go stale as the data changed.
slices <- c(sum(RTD_anticipation$n), sum(RTD_anger$n), sum(RTD_joy$n))
lbls <- c("anticipation", "anger", "joy")
pct <- round(slices / sum(slices) * 100)
lbls <- paste(lbls, pct)
lbls <- paste(lbls, "%", sep = "")
pie3D(slices, labels = lbls, explode = 0.1,
      main = "Real Time: Pie Chart of sentiment analysis")
#although the postive word "like" has been mentioned 200 times, but "like" in this content
#doesn't really have any meaning. we could conclude that, as we expected, most comments are
#negative and intense. We can also conclude from the pie chart that anger occupies a large proportion of the comments.
#However, one thing that needs to notice is that people's anticipations
#or suggestions on the current situation have not been mentioned a lot in the comment
#but this might attribute to that the word "gun", which implies the gun control is include in the anger sentiment.
#Historical Data: also collect the comments from abc, CNN, NBC and CBS channel's school shoting news with the most clike
#however, since the comments are not enough, I also collect comments from other school shooting video with high clike rate.
#his_comments_abc <- get_all_comments(video_id = c('aRHcbJ9DHEg'))
#his_comments_abc2 <- get_all_comments(video_id = c('6X7cVDxYd6A')), if not enough, add
#his_comments_cnn <- get_all_comments(video_id = c('nPpEpNk519U'))
#his_comments_nbc <- get_all_comments(video_id = c('icVShYaYxEM'))
#his_comments_fox <- get_all_comments(video_id = c('-6RhIU5Dreo'))
#his_comments_cbs <- get_all_comments(video_id = c('-9XGNIKW-uM'))
#his_comments_random <- get_all_comments(video_id = c('UeO5QTAryNE'))
#his_all_comments <- rbind(his_comments_abc, his_comments_cnn, his_comments_nbc,
# his_comments_fox, his_comments_cbs,his_comments_random)
#his_data <- data.frame(his_all_comments$authorDisplayName,
# his_all_comments$textOriginal,
# his_all_comments$likeCount,
# his_all_comments$publishedAt)
#clean historical data
#his_data[,4]<-str_split_fixed(his_data$all_comments.publishedAt, "T", 2)[,1]
#his_data[,4] <- ymd(his_data[,4])
#For historical data purpose, only collect data between 2012/01/1 and 2017/12/31
#his_data <- his_data %>%
# filter(his_all_comments.publishedAt >= as.Date("2012-01-1") &
# his_all_comments.publishedAt<= as.Date("2017-12-31") )
#his_data_2012 <- his_data %>%
# filter(his_all_comments.publishedAt >= as.Date("2012-01-1") &
# his_all_comments.publishedAt<= as.Date("2012-12-31") )
#his_data_2012<- data.frame(his_data_2012$his_all_comments.textOriginal)
#his_data_2013 <- his_data %>%
# filter(his_all_comments.publishedAt >= as.Date("2013-01-1") &
# his_all_comments.publishedAt<= as.Date("2013-12-31") )
#his_data_2013<- data.frame(his_data_2013$his_all_comments.textOriginal)
#his_data_2014 <- his_data %>%
# filter(his_all_comments.publishedAt >= as.Date("2014-01-1") &
# his_all_comments.publishedAt<= as.Date("2014-12-31") )
#his_data_2014<- data.frame(his_data_2014$his_all_comments.textOriginal)
#his_data_2015 <- his_data %>%
# filter(his_all_comments.publishedAt >= as.Date("2015-01-1") &
# his_all_comments.publishedAt<= as.Date("2015-12-31") )
#his_data_2015<- data.frame(his_data_2015$his_all_comments.textOriginal)
#his_data_2016 <- his_data %>%
# filter(his_all_comments.publishedAt >= as.Date("2016-01-1") &
# his_all_comments.publishedAt<= as.Date("2016-12-31") )
#his_data_2016<- data.frame(his_data_2016$his_all_comments.textOriginal)
#his_data_2017 <- his_data %>%
# filter(his_all_comments.publishedAt >= as.Date("2017-01-1") &
# his_all_comments.publishedAt<= as.Date("2017-12-31") )
#his_data_2017<- data.frame(his_data_2017$his_all_comments.textOriginal)
#write.csv(his_data, file="his_data.csv")
#write.csv(his_data_2012, file="his_data_2012.csv")
#write.csv(his_data_2013, file="his_data_2013.csv")
#write.csv(his_data_2014, file="his_data_2014.csv")
#write.csv(his_data_2015, file="his_data_2015.csv")
#write.csv(his_data_2016, file="his_data_2016.csv")
#write.csv(his_data_2017, file="his_data_2017.csv")
#Helper: read a cached comments CSV (latin1-encoded) and wrap it in a tm
#corpus. Note that, matching the original script's behaviour, the whole data
#frame (not a single column) is handed to VectorSource().
read_comment_corpus <- function(path) {
  comments <- read.csv(path, header = TRUE, stringsAsFactors = FALSE,
                       fileEncoding = "latin1")
  Corpus(VectorSource(comments))
}
real_time_comments     <- read_comment_corpus("real.csv")
his_data_comments_2012 <- read_comment_corpus("his_data_2012.csv")
his_data_comments_2013 <- read_comment_corpus("his_data_2013.csv")
his_data_comments_2014 <- read_comment_corpus("his_data_2014.csv")
his_data_comments_2015 <- read_comment_corpus("his_data_2015.csv")
his_data_comments_2016 <- read_comment_corpus("his_data_2016.csv")
his_data_comments_2017 <- read_comment_corpus("his_data_2017.csv")
#text cleaning
#Clean a tm corpus: lower-case, strip numbers and punctuation, remove English
#stopwords plus domain-specific filler words, and collapse whitespace.
#Fixes vs the original: the duplicate removeNumbers() call is dropped, and
#lower-casing happens FIRST so the case-sensitive removeWords() calls also
#catch capitalised stopwords ("The", "This", ...). Returns the cleaned corpus.
clean.text = function(x)
{
  x <- tm_map(x, content_transformer(tolower))
  x <- tm_map(x, removeNumbers)
  x <- tm_map(x, removeWords, stopwords("english"))
  x <- tm_map(x, removePunctuation)
  x <- tm_map(x, removeWords, c("this", "so", "will", "can", "why", "said",
                                "get", "really", "just", "like", "the", "one",
                                "even", "you", "know", "say",
                                "think", "dont", "people", "guns"))
  x <- tm_map(x, stripWhitespace)
  x  #explicit return value (the original ended on an assignment)
}
# clean texts
#Clean each year's corpus, then flatten each into a single string so every
#year becomes one "document" in the comparison cloud.
clean_real = clean.text(real_time_comments)
clean_2012 = clean.text(his_data_comments_2012)
clean_2013 = clean.text(his_data_comments_2013)
clean_2014 = clean.text(his_data_comments_2014)
clean_2015 = clean.text(his_data_comments_2015)
clean_2016 = clean.text(his_data_comments_2016)
clean_2017 = clean.text(his_data_comments_2017)
comments_real = paste(clean_real, collapse=" ")
comments_2012 = paste(clean_2012, collapse=" ")
comments_2013 = paste(clean_2013, collapse=" ")
comments_2014 = paste(clean_2014, collapse=" ")
comments_2015 = paste(clean_2015, collapse=" ")
comments_2016 = paste(clean_2016, collapse=" ")
comments_2017 = paste(clean_2017, collapse=" ")
#One element per year; the order here fixes the document labels below.
#(A stray c(stopwords("english"), "comments_real", ...) expression from the
#original was removed: its value was discarded, so it had no effect.)
all = c(comments_real, comments_2012, comments_2013, comments_2014,
        comments_2015, comments_2016, comments_2017)
corpus = Corpus(VectorSource(all))
tdm = TermDocumentMatrix(corpus)
tdm = as.matrix(tdm)
colnames(tdm) = c("comments_real", "comments_2012", "comments_2013",
                  "comments_2014", "comments_2015", "comments_2016",
                  "comments_2017")
#Comparison word cloud: each word is drawn with the year whose comments use
#it most relative to the other years.
comparison.cloud(tdm, random.order=FALSE,
                 colors = c("pink", "#00B2FF", "red", "#FF0099", "#6600CC","blue","green"),
                 title.size=1, shape="circle")
#according to the word cloud we could notice that comments in the 2017 is the most, despite
#that we only include part of the comments in 2018. We could conjecutred that school shootings
#become more and more serious and importnat among people around 2017, so there are more comments
#begin from 2017. However, we still should addmitted that 2017 has the most comments might also
#due to other circumstances. Most words in the word cloud are negative words, although they
#contain the same emotions, we could detect that the main topic toward shool shootings
#among different years are slight different. real time daya is more focus on the judgement
#toward the shooter, with words like judge, penalty. 2017's comments are mainly around school
#Moreover, 2014's comments have mentioned about words like goverment and crime.
# show the top 10 words in the word cloud and their frequency
# frequency function
#Compute a word-frequency table for a cleaned corpus and return the top_n
#most frequent terms as a word/freq data frame. top_n defaults to 10,
#matching the value that was hard-coded in the original.
frequency.text = function(x, top_n = 10){
  x <- TermDocumentMatrix(x)
  x <- as.matrix(x)
  x <- sort(rowSums(x), decreasing = TRUE)
  x <- data.frame(word = names(x), freq = x)
  head(x, top_n)
}
frequency_real = frequency.text(clean_real)
frequency_2012 = frequency.text(clean_2012)
frequency_2013 = frequency.text(clean_2013)
frequency_2014 = frequency.text(clean_2014)
frequency_2015 = frequency.text(clean_2015)
frequency_2016 = frequency.text(clean_2016)
frequency_2017 = frequency.text(clean_2017)
#Helper: one top-10 bar plot; the shared ylim makes the panels comparable.
plot_top_words <- function(freq_df, label) {
  barplot(freq_df$freq, width = 1, space = 0.5,
          names.arg = freq_df$word, ylim = c(0, 3000),
          col = rainbow(20), main = paste0("Most frequent words_", label),
          ylab = "Word frequencies", cex.names = 0.6)
}
#3x3 panel grid. The original saved the previous settings in a variable
#called "structure", which masks base::structure -- renamed to old_par.
old_par <- par(mfrow = c(3, 3))
plot_top_words(frequency_real, "real")
plot_top_words(frequency_2012, "2012")
plot_top_words(frequency_2013, "2013")
plot_top_words(frequency_2014, "2014")
plot_top_words(frequency_2015, "2015")
plot_top_words(frequency_2016, "2016")
plot_top_words(frequency_2017, "2017")
dev.off() #close the device, discarding the panel layout
#According to these graph, although different years are having different most frequent words,
#we could clearly see that the word "gun" has been highly mentioned in the 2012, 2013, 2014, 2015, and
#2018, which is the real time data. Although in 2016 and 2017 the word "gun" has not been mentioned
#as the most frequent word, it is still in the rank of the top ten mentioned words. This indicates
#that as school shootings happen, most comments, besides expressions of sympathy for
#the children, voice an eagerness for gun control or discuss things related to guns. It is a
#continuous topic from 2012 to 2018 that people keep discussing and debating.
#the real time data is only from 04/01 till 04/22, so it has smaller amount of words compare to the rest.
#2012 also has small amount of words because it is long time ago and people might not be that active online
# to comment during 2012
#Time Series on video related with school shootings
#Real Time Data for how many reporting about school shootings/time series something like that
#news <- yt_search("school shootings", lang="en", published_after = "2018-04-10T00:00:00Z")
#new <- yt_search("school shooting", lang ="en",published_after = "2018-04-10T00:00:00Z")
#News <- rbind(news,new)
#write.csv(News, "News.csv")
#Helper: load one year's cached yt_search results, keep only the publish date
#and title columns, and drop rows whose title duplicates an earlier one
#(the same video often appears under both "school shooting" and
#"school shootings" queries).
load_news <- function(path) {
  raw <- read.csv(path, header = TRUE)
  out <- data.frame(publishedAt = raw$publishedAt,
                    title = raw$title)
  out[!duplicated(out$title), ]
}
#The CSVs below were produced once by yt_search() calls of the form
#  news_YYYY <- yt_search("school shootings", lang = "en",
#                         published_after = "YYYY-01-1T00:00:00Z",
#                         published_before = "YYYY-12-31T00:00:00Z")
#(plus the singular "school shooting" query, rbind-ed and written out);
#here we only reload the cached files.
News      <- load_news("News.csv")       #real-time results (2018)
News_2017 <- load_news("News_2017.csv")
News_2016 <- load_news("news_2016.csv")  #NOTE(review): lower-case file name -- confirm it matches the file on disk
News_2015 <- load_news("News_2015.csv")
News_2014 <- load_news("News_2014.csv")
News_2013 <- load_news("News_2013.csv")
News_2012 <- load_news("News_2012.csv")
#Yearly counts of distinct school-shooting-related videos, 2012-2018,
#as an annual time series.
ts_shooting <- ts(c(nrow(News_2012), nrow(News_2013), nrow(News_2014),
                    nrow(News_2015), nrow(News_2016), nrow(News_2017),
                    nrow(News)), start = 2012, end = 2018)
plot(ts_shooting, main = "Time Series for Related School Shootings' Videos")
namebank <- as.character(2012:2018)
text(2012:2018, ts_shooting, namebank, cex = 0.9)
#According to the plot, there is a sharp increase in 2013 in the number of videos related to
#school shootings. Based on past data, I suspect this is because the number of school shootings
#roughly doubled from 2012 to 2013, which drew a large amount of attention to the issue and so
#increased the number of such videos on YouTube.
#2014 and 2015 have similar numbers of videos about school shootings, which may be because the
#numbers of school shootings in 2014 and 2015 are similar. The number of videos about school
#shootings changes between 2016 and 2017 along with the number of school shootings. However,
#the count for 2018 appears lower only because the 2018 data covers part of the year. Since the
#number of school shootings has already reached 17 cases in 2018, I predict that the number of
#videos about school shootings will also increase compared to previous years.
|
#' @title SEIR+V Model
#' @description A model for influenza that uses a SEIR framework with age
#' structure and that allows for vaccination
#' @param population Size of population; defaults to 1
#' @param populationFractions Vector of population fractions (all non-negative,
#' sum to 1); defaults to 1, representing a single population group
#' @param contactMatrix A matrix whose (row i, column j) entry denotes the
#' number of potentially infectious contacts a single individual from group j
#' has with individuals from group i each day; defaults to proportional mixing
#' @param R0 Average number of secondary cases from a single infected individual
#' in a completely susceptible population; must be specified
#' @param latentPeriod Latent period in days; must be specified
#' @param infectiousPeriod Infectious period in days; must be specified
#' @param seedInfections Fraction of the population to seed with infections;
#' single fraction or vector of fractions by population group; defaults to 0
#' @param priorImmunity Fraction of the population with prior immunity; single
#' fraction, or vector of fractions by population group; defaults to 0
#' @param useCommunityMitigation Whether or not to use community mitigation
#' implemented by modulation of the contact matrix; defaults to FALSE
#' @param communityMitigationStartDay If using community mitigation, day of the
#' simulation on which to start mitigation; must be specified if applicable
#' @param communityMitigationDuration If using community mitigation, duration of
#' time during which mitigation is in effect; must be specified if applicable
#' @param communityMitigationMultiplier If using community mitigation, the
#' non-negative matrix of multipliers that will be used to modulate the contact
#' matrix by elementwise multiplication; must be specified if applicable
#' @param vaccineAdministrationRatePerDay Vaccine administration rate each day;
#' defaults to 0
#' @param vaccineAvailabilityByDay Vector that contains the amount of vaccine
#' available each day; defaults to 0
#' @param vaccineUptakeMultiplier Vector of multipliers that determines the
#' relative rate at which vaccine is given to each age group; defaults to
#' vaccine being allotted proportionally by population
#' @param VEs Vaccine efficacy: protection for vaccinated susceptible
#' individuals; single fraction or vector of fractions by population group;
#' defaults to 0
#' @param VEi Vaccine efficacy: prevention of transmission from vaccinated
#' infected individuals; single fraction or vector of fractions by population
#' group; defaults to 0
#' @param VEp Vaccine efficacy: prevention of symptomatic illness in
#' infected indivduals; single fraction or vector of fractions by population
#' group; defaults to 0
#' @param vaccineEfficacyDelay Delay in days between administration of dose and
#' onset of protection; defaults to 7
#' @param simulationLength Number of days to simulate after seeding infections;
#' defaults to 240
#' @param seedStartDay Day on which to seed initial infections; defaults to 0
#' @param tolerance Absolute tolerance for numerical integration; defaults to
#' 1e-8
#' @param method Which integration method to use. Defaults to lsoda
#' @return a SEIRVModel object
#' @export
SEIRVModel2 <- function(population, populationFractions, contactMatrix, R0,
                    latentPeriod, infectiousPeriod, seedInfections, priorImmunity,
                    useCommunityMitigation, communityMitigationStartDay,
                    communityMitigationDuration, communityMitigationMultiplier,
                    vaccineAdministrationRatePerDay, vaccineAvailabilityByDay,
                    vaccineUptakeMultiplier, VEs, VEi, VEp, vaccineEfficacyDelay,
                    simulationLength, seedStartDay, tolerance, method) {
  #Check inputs #TODO: Add checks for all inputs
  #Forward only the arguments the caller actually supplied (by name, as
  #symbols) so checkInputs.SEIRV can apply its own defaults for the rest.
  specifiedArguments <- names(match.call())[-1]
  argumentList <- lapply(specifiedArguments, as.name)
  names(argumentList) <- specifiedArguments
  parameters <- do.call("checkInputs.SEIRV", argumentList) #Get parameters from checked inputs
  #Initial state vector: unvaccinated compartments S/E/I/R, vaccinated
  #compartments Sv/Ev/Iv/Rv, V (cumulative vaccinated fraction per group),
  #and a per-group "vaccinating" flag (1 = still administering vaccine).
  #All fractions are per population group.
  initialState <- with(parameters, {
    c(S = (1 - priorImmunity) * populationFractions,
      E = 0 * populationFractions,
      I = 0 * populationFractions,
      R = priorImmunity * populationFractions,
      Sv = 0 * populationFractions,
      Ev = 0 * populationFractions,
      Iv = 0 * populationFractions,
      Rv = 0 * populationFractions,
      V = 0 * populationFractions,
      vaccinating = rep(1, length(populationFractions)))
  })
  #Root function for the ODE solver: crosses zero when a group that is still
  #vaccinating has (almost) no one left to vaccinate, i.e. V approaches the
  #group's population fraction within tolerance. Groups not vaccinating
  #return the constant 1 so they never trigger a root.
  rootFunction <- function(t, state, parameters) {
    stateList <- reconstructState.SEIRV2(state)
    with(append(stateList, parameters), {
      return(ifelse(vaccinating > 0, populationFractions - V - tolerance, 1))
    })
  }
  #Event function fired at each root: switch off the "vaccinating" flag for
  #every group whose remaining unvaccinated fraction is within tolerance.
  eventFunction <- function(t, state, parameters) {
    stateList <- reconstructState.SEIRV2(state)
    with(append(stateList, parameters), {
      state[getLabels("vaccinating", length(populationFractions))] <-
        ifelse(populationFractions - V > tolerance, 1, 0)
      return(state)
    })
  }
  #Run the numerical integration with the model-specific derivative and
  #seeding functions plus the root/event pair defined above.
  rawOutput <- integrateModel(initialState = initialState,
                              parameters = parameters,
                              derivativeFunction = getDerivative.SEIRV2,
                              seedFunction = doSeed.SEIRV2,
                              rootFunction = rootFunction,
                              eventFunction = eventFunction,
                              method = method)
  #Build the SEIRVModel object to return
  model <- list(parameters = parameters,
                rawOutput = rawOutput)
  class(model) <- c("SEIRVModel", "SEIRModel") #inherits SEIRModel S3 methods
  return(model)
}
#' @title Check SEIR+V inputs
#' @description Checks the input parameters for the SEIR+V model
#' @return List of parameters for the SEIR+V model
#' @keywords internal
#Validate the combined SEIR + vaccination inputs. Runs the core SEIR checks
#first, then forwards the (possibly normalised) core values to the vaccine
#checks, and returns the concatenated parameter list.
checkInputs.SEIRV <- function(population, populationFractions, contactMatrix, R0,
                    latentPeriod, infectiousPeriod, seedInfections, priorImmunity,
                    useCommunityMitigation, communityMitigationStartDay,
                    communityMitigationDuration, communityMitigationMultiplier,
                    vaccineAdministrationRatePerDay, vaccineAvailabilityByDay,
                    vaccineUptakeMultiplier, VEs, VEi, VEp, vaccineEfficacyDelay,
                    simulationLength, seedStartDay, tolerance, method) {
  #Build a named list of symbols for exactly the arguments the caller
  #supplied, so each checker sees the same call the user made and can apply
  #its own defaults for everything omitted.
  suppliedNames <- names(match.call())[-1]
  argumentList <- setNames(lapply(suppliedNames, as.name), suppliedNames)
  #Core SEIR validation.
  SEIRParameters <- do.call("checkInputs.SEIR", argumentList)
  #Overwrite the forwarded values with the checked/normalised SEIR results
  #before running the vaccine-specific validation.
  for (field in c("population", "populationFractions",
                  "seedStartDay", "simulationLength")) {
    argumentList[[field]] <- SEIRParameters[[field]]
  }
  vaccineParameters <- do.call("checkInputs.Vaccine", argumentList)
  #Combined parameter list for the model driver.
  c(SEIRParameters, vaccineParameters)
}
#' @title Check vaccine inputs
#' @description Checks vaccine inputs and computes the vaccine parameters:
#'   a daily vaccination-rate function and a normalized per-group rate multiplier
#' @return List of vaccine parameters
#' @keywords internal
checkInputs.Vaccine <- function(population, populationFractions, seedStartDay, simulationLength,
                                vaccineAdministrationRatePerDay = 0, vaccineAvailabilityByDay = 0,
                                vaccineUptakeMultiplier = 1, VEs = 0, VEi = 0, VEp = 0,
                                vaccineEfficacyDelay = 7, ...) {
  #Validate vaccine parameters (validators are defined elsewhere in the package)
  #vaccineAdministrationRatePerDay
  checkNonNegativeNumber(vaccineAdministrationRatePerDay)
  #vaccineAvailabilityByDay
  checkNonNegative(vaccineAvailabilityByDay)
  #vaccineUptakeMultiplier
  checkNonNegative(vaccineUptakeMultiplier)
  checkDimensionsMatch(vaccineUptakeMultiplier, populationFractions)
  #VEs
  checkBetween0and1(VEs)
  checkDimensionsMatch(VEs, populationFractions)
  #VEi
  checkBetween0and1(VEi)
  checkDimensionsMatch(VEi, populationFractions)
  #VEp
  checkBetween0and1(VEp)
  checkDimensionsMatch(VEp, populationFractions)
  #vaccineEfficacyDelay
  checkNonNegativeNumber(vaccineEfficacyDelay)
  #Compute the daily vaccination rate: unused doses carry over between days and
  #at most vaccineAdministrationRatePerDay doses can be administered per day
  totalSimulationLength <- seedStartDay + simulationLength
  vaccinationRateByDay <- rep(0, totalSimulationLength)
  currentVaccineAvailability <- 0
  for (i in 1:totalSimulationLength) {
    if (i <= length(vaccineAvailabilityByDay)){
      currentVaccineAvailability <- currentVaccineAvailability + vaccineAvailabilityByDay[i]
    }
    vaccinationRateByDay[i] <- min(vaccineAdministrationRatePerDay, currentVaccineAvailability)
    currentVaccineAvailability <- currentVaccineAvailability - vaccinationRateByDay[i]
  }
  vaccinationRateByDay <- vaccinationRateByDay / population #Normalize
  #Define vaccination rate function: the rate at which protection takes effect at
  #time t is the administration rate vaccineEfficacyDelay days earlier (0 outside
  #that window)
  vaccinationRate <- function(t) {
    if ((t < vaccineEfficacyDelay) || (t >= totalSimulationLength + vaccineEfficacyDelay)) {
      return(0)
    } else {
      return(vaccinationRateByDay[floor(t - vaccineEfficacyDelay + 1)])
    }
  }
  #Compute the vaccination rate age multiplier, normalized to sum to 1
  vaccinationRateAgeMultiplier <- vaccineUptakeMultiplier * populationFractions
  totalMultiplier <- sum(vaccinationRateAgeMultiplier)
  if (totalMultiplier > 0) {
    vaccinationRateAgeMultiplier <- vaccinationRateAgeMultiplier / totalMultiplier
  } else {
    warning("vaccineUptakeMultiplier prevents vaccination from occurring.", call. = FALSE)
  }
  #Return the parameters
  return(list(vaccinationRate = vaccinationRate, vaccinationRateAgeMultiplier = vaccinationRateAgeMultiplier,
              VEs = VEs, VEi = VEi, VEp = VEp, vaccineEfficacyDelay = vaccineEfficacyDelay))
}
#Utility that splits the flat ODE state vector back into named compartment
#vectors so the model equations can refer to compartments by name. The
#returned list also carries `state` and `numberOfClasses` themselves, which
#downstream with() blocks may reference.
reconstructState.SEIRV2 <- function(state) {
  numberOfClasses <- length(state) / 10 #Ten equally sized compartment classes
  extract <- function(k) state[((k - 1) * numberOfClasses + 1):(k * numberOfClasses)]
  S <- extract(1)
  E <- extract(2)
  I <- extract(3)
  R <- extract(4)
  Sv <- extract(5)
  Ev <- extract(6)
  Iv <- extract(7)
  Rv <- extract(8)
  V <- extract(9)
  vaccinating <- extract(10)
  rm(extract) #Keep the environment's contents identical to the original layout
  return(as.list(environment()))
}
#This function implements the multivariate derivative of the SEIR+V model
#parameters should define populationFractions, contactMatrix, beta, lambda, gamma,
#VEs, VEi, and the function vaccinationRate(t)
#Note that the total population is normalized to be 1
getDerivative.SEIRV2 <- function(t, state, parameters) {
  stateList <- reconstructState.SEIRV2(state)
  with(append(stateList, parameters), {
    #Scale contacts while community mitigation is in effect
    #(communityMitigationEndDay is expected in parameters -- presumably
    #start day + duration; confirm against checkInputs.SEIR)
    if (useCommunityMitigation) {
      if ((t >= communityMitigationStartDay) && (t < communityMitigationEndDay)) {
        contactMatrix <- communityMitigationMultiplier * contactMatrix
      }
    }
    #Vaccinate only groups still flagged as vaccinating and not yet fully vaccinated
    isVaccinatingByAge <- (vaccinating > 0) & (populationFractions - V > 0)
    effectiveVaccinationMultiplier <- sum(ifelse(isVaccinatingByAge, 1, 0) * vaccinationRateAgeMultiplier)
    #Re-normalize so the still-vaccinating groups absorb the full daily supply;
    #the guard avoids dividing by zero once every group is done
    if (effectiveVaccinationMultiplier > 0) {
      vaccinationRateByAge <- vaccinationRate(t) * vaccinationRateAgeMultiplier /
        effectiveVaccinationMultiplier
    } else {
      vaccinationRateByAge <- 0
    }
    #Flows
    #Per-capita force of infection; vaccinated infecteds transmit reduced by VEi
    forceOfInfection <- beta / populationFractions * (contactMatrix %*% (I + ((1 - VEi) * Iv)))
    S_to_E <- S * forceOfInfection
    E_to_I <- lambda * E
    I_to_R <- gamma * I
    Sv_to_Ev <- Sv * (1 - VEs) * forceOfInfection #VEs shields vaccinated susceptibles
    Ev_to_Iv <- lambda * Ev
    Iv_to_Rv <- gamma * Iv
    #Vaccination moves the susceptible share of the not-yet-vaccinated fraction;
    #the ifelse guards the division by (populationFractions - V)
    S_to_Sv <- ifelse(isVaccinatingByAge, vaccinationRateByAge * S / (populationFractions - V), 0)
    #Derivatives
    #Non-vaccinated compartments
    dS <- -S_to_E - S_to_Sv
    dE <- S_to_E - E_to_I
    dI <- E_to_I - I_to_R
    dR <- I_to_R
    #Vaccinated compartments
    dSv <- -Sv_to_Ev + S_to_Sv
    dEv <- Sv_to_Ev - Ev_to_Iv
    dIv <- Ev_to_Iv - Iv_to_Rv
    dRv <- Iv_to_Rv
    #Auxiliary vaccinated compartment (cumulative vaccinated fraction)
    dV <- ifelse(isVaccinatingByAge, vaccinationRateByAge, 0)
    #Return derivative; the vaccinating flags are piecewise constant between
    #events, hence a zero derivative
    return(list(c(dS, dE, dI, dR, dSv, dEv, dIv, dRv, dV, 0 * populationFractions)))
  })
}
#This function implements seeding infections in the SEIR+V model by moving a
#fraction of S into E and I
#parameters should define seedInfections, population, lambda, and gamma
doSeed.SEIRV2 <- function(state, parameters) {
  stateList <- reconstructState.SEIRV2(state)
  with(append(stateList, parameters), {
    seedInfectionsFractions <- seedInfections / population #Convert counts to fractions
    S <- S - seedInfectionsFractions
    #Split seeds between E and I in proportion to mean residence times 1/lambda and 1/gamma
    E <- E + seedInfectionsFractions / (1 + lambda / gamma)
    I <- I + seedInfectionsFractions / (1 + gamma / lambda)
    #Return the seeded state vector (same layout as the input state, not a derivative)
    return(c(S, E, I, R, Sv, Ev, Iv, Rv, V, vaccinating))
  })
} | /R/SEIRVModel2.R | no_license | cdcepi/ACIP-SARS-CoV-2-Vaccine-Modeling | R | false | false | 13,381 | r | #' @title SEIR+V Model
#' @description A model for influenza that uses a SEIR framework with age
#' structure and that allows for vaccination
#' @param population Size of population; defaults to 1
#' @param populationFractions Vector of population fractions (all non-negative,
#' sum to 1); defaults to 1, representing a single population group
#' @param contactMatrix A matrix whose (row i, column j) entry denotes the
#' number of potentially infectious contacts a single individual from group j
#' has with individuals from group i each day; defaults to proportional mixing
#' @param R0 Average number of secondary cases from a single infected individual
#' in a completely susceptible population; must be specified
#' @param latentPeriod Latent period in days; must be specified
#' @param infectiousPeriod Infectious period in days; must be specified
#' @param seedInfections Fraction of the population to seed with infections;
#' single fraction or vector of fractions by population group; defaults to 0
#' @param priorImmunity Fraction of the population with prior immunity; single
#' fraction, or vector of fractions by population group; defaults to 0
#' @param useCommunityMitigation Whether or not to use community mitigation
#' implemented by modulation of the contact matrix; defaults to FALSE
#' @param communityMitigationStartDay If using community mitigation, day of the
#' simulation on which to start mitigation; must be specified if applicable
#' @param communityMitigationDuration If using community mitigation, duration of
#' time during which mitigation is in effect; must be specified if applicable
#' @param communityMitigationMultiplier If using community mitigation, the
#' non-negative matrix of multipliers that will be used to modulate the contact
#' matrix by elementwise multiplication; must be specified if applicable
#' @param vaccineAdministrationRatePerDay Vaccine administration rate each day;
#' defaults to 0
#' @param vaccineAvailabilityByDay Vector that contains the amount of vaccine
#' available each day; defaults to 0
#' @param vaccineUptakeMultiplier Vector of multipliers that determines the
#' relative rate at which vaccine is given to each age group; defaults to
#' vaccine being allotted proportionally by population
#' @param VEs Vaccine efficacy: protection for vaccinated susceptible
#' individuals; single fraction or vector of fractions by population group;
#' defaults to 0
#' @param VEi Vaccine efficacy: prevention of transmission from vaccinated
#' infected individuals; single fraction or vector of fractions by population
#' group; defaults to 0
#' @param VEp Vaccine efficacy: prevention of symptomatic illness in
#' infected indivduals; single fraction or vector of fractions by population
#' group; defaults to 0
#' @param vaccineEfficacyDelay Delay in days between administration of dose and
#' onset of protection; defaults to 7
#' @param simulationLength Number of days to simulate after seeding infections;
#' defaults to 240
#' @param seedStartDay Day on which to seed initial infections; defaults to 0
#' @param tolerance Absolute tolerance for numerical integration; defaults to
#' 1e-8
#' @param method Which integration method to use. Defaults to lsoda
#' @return a SEIRVModel object
#' @export
SEIRVModel2 <- function(population, populationFractions, contactMatrix, R0,
                        latentPeriod, infectiousPeriod, seedInfections, priorImmunity,
                        useCommunityMitigation, communityMitigationStartDay,
                        communityMitigationDuration, communityMitigationMultiplier,
                        vaccineAdministrationRatePerDay, vaccineAvailabilityByDay,
                        vaccineUptakeMultiplier, VEs, VEi, VEp, vaccineEfficacyDelay,
                        simulationLength, seedStartDay, tolerance, method) {
  #Check inputs #TODO: Add checks for all inputs
  #Forward only the user-supplied arguments (as unevaluated symbols) so that
  #checkInputs.SEIRV sees the same missing/default structure as this call
  specifiedArguments <- names(match.call())[-1]
  argumentList <- lapply(specifiedArguments, as.name)
  names(argumentList) <- specifiedArguments
  parameters <- do.call("checkInputs.SEIRV", argumentList) #Get parameters from checked inputs
  #Initial state: everyone unvaccinated, prior immunity placed in R, and the
  #auxiliary "vaccinating" flags set to 1 (all groups eligible for vaccination)
  initialState <- with(parameters, {
    c(S = (1 - priorImmunity) * populationFractions,
      E = 0 * populationFractions,
      I = 0 * populationFractions,
      R = priorImmunity * populationFractions,
      Sv = 0 * populationFractions,
      Ev = 0 * populationFractions,
      Iv = 0 * populationFractions,
      Rv = 0 * populationFractions,
      V = 0 * populationFractions,
      vaccinating = rep(1, length(populationFractions)))
  })
  #Root function: crosses zero when a vaccinating group's remaining
  #unvaccinated fraction falls to the tolerance (non-vaccinating groups return 1)
  rootFunction <- function(t, state, parameters) {
    stateList <- reconstructState.SEIRV2(state)
    with(append(stateList, parameters), {
      return(ifelse(vaccinating > 0, populationFractions - V - tolerance, 1))
    })
  }
  #Event function: switch off the vaccinating flag for any group that is
  #(nearly) fully vaccinated when the root fires
  eventFunction <- function(t, state, parameters) {
    stateList <- reconstructState.SEIRV2(state)
    with(append(stateList, parameters), {
      state[getLabels("vaccinating", length(populationFractions))] <-
        ifelse(populationFractions - V > tolerance, 1, 0)
      return(state)
    })
  }
  rawOutput <- integrateModel(initialState = initialState,
                              parameters = parameters,
                              derivativeFunction = getDerivative.SEIRV2,
                              seedFunction = doSeed.SEIRV2,
                              rootFunction = rootFunction,
                              eventFunction = eventFunction,
                              method = method)
  #Build the SEIRVModel object to return
  model <- list(parameters = parameters,
                rawOutput = rawOutput)
  class(model) <- c("SEIRVModel", "SEIRModel")
  return(model)
}
#' @title Check SEIR+V inputs
#' @description Checks the input parameters for the SEIR+V model by delegating
#'   first to the base SEIR checks and then to the vaccine-specific checks
#' @return List of parameters for the SEIR+V model (SEIR parameters followed
#'   by vaccine parameters)
#' @keywords internal
checkInputs.SEIRV <- function(population, populationFractions, contactMatrix, R0,
                              latentPeriod, infectiousPeriod, seedInfections, priorImmunity,
                              useCommunityMitigation, communityMitigationStartDay,
                              communityMitigationDuration, communityMitigationMultiplier,
                              vaccineAdministrationRatePerDay, vaccineAvailabilityByDay,
                              vaccineUptakeMultiplier, VEs, VEi, VEp, vaccineEfficacyDelay,
                              simulationLength, seedStartDay, tolerance, method) {
  #Collect only the arguments the caller actually supplied, as unevaluated
  #symbols, so do.call forwards them and omitted arguments keep their defaults
  specifiedArguments <- names(match.call())[-1]
  argumentList <- lapply(specifiedArguments, as.name)
  names(argumentList) <- specifiedArguments
  SEIRParameters <- do.call("checkInputs.SEIR", argumentList)
  #Update arguments passed to checkInputs.Vaccine using SEIRParameters
  #(the SEIR checks may have normalized or defaulted these values)
  argumentList$population <- SEIRParameters$population
  argumentList$populationFractions <- SEIRParameters$populationFractions
  argumentList$seedStartDay <- SEIRParameters$seedStartDay
  argumentList$simulationLength <- SEIRParameters$simulationLength
  vaccineParameters <- do.call("checkInputs.Vaccine", argumentList)
  #Return the combined parameter list
  return(c(SEIRParameters, vaccineParameters))
}
#' @title Check vaccine inputs
#' @description Checks vaccine inputs and computes the vaccine parameters:
#'   a daily vaccination-rate function and a normalized per-group rate multiplier
#' @return List of vaccine parameters
#' @keywords internal
checkInputs.Vaccine <- function(population, populationFractions, seedStartDay, simulationLength,
                                vaccineAdministrationRatePerDay = 0, vaccineAvailabilityByDay = 0,
                                vaccineUptakeMultiplier = 1, VEs = 0, VEi = 0, VEp = 0,
                                vaccineEfficacyDelay = 7, ...) {
  #Validate vaccine parameters (validators are defined elsewhere in the package)
  #vaccineAdministrationRatePerDay
  checkNonNegativeNumber(vaccineAdministrationRatePerDay)
  #vaccineAvailabilityByDay
  checkNonNegative(vaccineAvailabilityByDay)
  #vaccineUptakeMultiplier
  checkNonNegative(vaccineUptakeMultiplier)
  checkDimensionsMatch(vaccineUptakeMultiplier, populationFractions)
  #VEs
  checkBetween0and1(VEs)
  checkDimensionsMatch(VEs, populationFractions)
  #VEi
  checkBetween0and1(VEi)
  checkDimensionsMatch(VEi, populationFractions)
  #VEp
  checkBetween0and1(VEp)
  checkDimensionsMatch(VEp, populationFractions)
  #vaccineEfficacyDelay
  checkNonNegativeNumber(vaccineEfficacyDelay)
  #Compute the daily vaccination rate: unused doses carry over between days and
  #at most vaccineAdministrationRatePerDay doses can be administered per day
  totalSimulationLength <- seedStartDay + simulationLength
  vaccinationRateByDay <- rep(0, totalSimulationLength)
  currentVaccineAvailability <- 0
  for (i in 1:totalSimulationLength) {
    if (i <= length(vaccineAvailabilityByDay)){
      currentVaccineAvailability <- currentVaccineAvailability + vaccineAvailabilityByDay[i]
    }
    vaccinationRateByDay[i] <- min(vaccineAdministrationRatePerDay, currentVaccineAvailability)
    currentVaccineAvailability <- currentVaccineAvailability - vaccinationRateByDay[i]
  }
  vaccinationRateByDay <- vaccinationRateByDay / population #Normalize
  #Define vaccination rate function: the rate at which protection takes effect at
  #time t is the administration rate vaccineEfficacyDelay days earlier (0 outside
  #that window)
  vaccinationRate <- function(t) {
    if ((t < vaccineEfficacyDelay) || (t >= totalSimulationLength + vaccineEfficacyDelay)) {
      return(0)
    } else {
      return(vaccinationRateByDay[floor(t - vaccineEfficacyDelay + 1)])
    }
  }
  #Compute the vaccination rate age multiplier, normalized to sum to 1
  vaccinationRateAgeMultiplier <- vaccineUptakeMultiplier * populationFractions
  totalMultiplier <- sum(vaccinationRateAgeMultiplier)
  if (totalMultiplier > 0) {
    vaccinationRateAgeMultiplier <- vaccinationRateAgeMultiplier / totalMultiplier
  } else {
    warning("vaccineUptakeMultiplier prevents vaccination from occurring.", call. = FALSE)
  }
  #Return the parameters
  return(list(vaccinationRate = vaccinationRate, vaccinationRateAgeMultiplier = vaccinationRateAgeMultiplier,
              VEs = VEs, VEi = VEi, VEp = VEp, vaccineEfficacyDelay = vaccineEfficacyDelay))
}
#This is a utility function that reconstructs the model state as a list so that equations can refer to compartments by name
#The returned list also carries `state` and `numberOfClasses` themselves
reconstructState.SEIRV2 <- function(state) {
  numberOfClasses <- length(state) / 10 #Each of the 10 classes are vectors of the same length
  S <- state[ 1 : numberOfClasses ]
  E <- state[ (numberOfClasses + 1):(2 * numberOfClasses)]
  I <- state[(2 * numberOfClasses + 1):(3 * numberOfClasses)]
  R <- state[(3 * numberOfClasses + 1):(4 * numberOfClasses)]
  Sv <- state[(4 * numberOfClasses + 1):(5 * numberOfClasses)]
  Ev <- state[(5 * numberOfClasses + 1):(6 * numberOfClasses)]
  Iv <- state[(6 * numberOfClasses + 1):(7 * numberOfClasses)]
  Rv <- state[(7 * numberOfClasses + 1):(8 * numberOfClasses)]
  V <- state[(8 * numberOfClasses + 1):(9 * numberOfClasses)]
  vaccinating <- state[(9 * numberOfClasses + 1):(10 * numberOfClasses)]
  return(as.list(environment()))
}
#This function implements the multivariate derivative of the SEIR+V model
#parameters should define populationFractions, contactMatrix, beta, lambda, gamma,
#VEs, VEi, and the function vaccinationRate(t)
#Note that the total population is normalized to be 1
getDerivative.SEIRV2 <- function(t, state, parameters) {
  stateList <- reconstructState.SEIRV2(state)
  with(append(stateList, parameters), {
    #Scale contacts while community mitigation is in effect
    #(communityMitigationEndDay is expected in parameters -- presumably
    #start day + duration; confirm against checkInputs.SEIR)
    if (useCommunityMitigation) {
      if ((t >= communityMitigationStartDay) && (t < communityMitigationEndDay)) {
        contactMatrix <- communityMitigationMultiplier * contactMatrix
      }
    }
    #Vaccinate only groups still flagged as vaccinating and not yet fully vaccinated
    isVaccinatingByAge <- (vaccinating > 0) & (populationFractions - V > 0)
    effectiveVaccinationMultiplier <- sum(ifelse(isVaccinatingByAge, 1, 0) * vaccinationRateAgeMultiplier)
    #Re-normalize so the still-vaccinating groups absorb the full daily supply;
    #the guard avoids dividing by zero once every group is done
    if (effectiveVaccinationMultiplier > 0) {
      vaccinationRateByAge <- vaccinationRate(t) * vaccinationRateAgeMultiplier /
        effectiveVaccinationMultiplier
    } else {
      vaccinationRateByAge <- 0
    }
    #Flows
    #Per-capita force of infection; vaccinated infecteds transmit reduced by VEi
    forceOfInfection <- beta / populationFractions * (contactMatrix %*% (I + ((1 - VEi) * Iv)))
    S_to_E <- S * forceOfInfection
    E_to_I <- lambda * E
    I_to_R <- gamma * I
    Sv_to_Ev <- Sv * (1 - VEs) * forceOfInfection #VEs shields vaccinated susceptibles
    Ev_to_Iv <- lambda * Ev
    Iv_to_Rv <- gamma * Iv
    #Vaccination moves the susceptible share of the not-yet-vaccinated fraction;
    #the ifelse guards the division by (populationFractions - V)
    S_to_Sv <- ifelse(isVaccinatingByAge, vaccinationRateByAge * S / (populationFractions - V), 0)
    #Derivatives
    #Non-vaccinated compartments
    dS <- -S_to_E - S_to_Sv
    dE <- S_to_E - E_to_I
    dI <- E_to_I - I_to_R
    dR <- I_to_R
    #Vaccinated compartments
    dSv <- -Sv_to_Ev + S_to_Sv
    dEv <- Sv_to_Ev - Ev_to_Iv
    dIv <- Ev_to_Iv - Iv_to_Rv
    dRv <- Iv_to_Rv
    #Auxiliary vaccinated compartment (cumulative vaccinated fraction)
    dV <- ifelse(isVaccinatingByAge, vaccinationRateByAge, 0)
    #Return derivative; the vaccinating flags are piecewise constant between
    #events, hence a zero derivative
    return(list(c(dS, dE, dI, dR, dSv, dEv, dIv, dRv, dV, 0 * populationFractions)))
  })
}
#This function implements seeding infections in the SEIR+V model by moving a
#fraction of S into E and I
#parameters should define seedInfections, population, lambda, and gamma
doSeed.SEIRV2 <- function(state, parameters) {
  stateList <- reconstructState.SEIRV2(state)
  with(append(stateList, parameters), {
    seedInfectionsFractions <- seedInfections / population #Convert counts to fractions
    S <- S - seedInfectionsFractions
    #Split seeds between E and I in proportion to mean residence times 1/lambda and 1/gamma
    E <- E + seedInfectionsFractions / (1 + lambda / gamma)
    I <- I + seedInfectionsFractions / (1 + gamma / lambda)
    #Return the seeded state vector (same layout as the input state, not a derivative)
    return(c(S, E, I, R, Sv, Ev, Iv, Rv, V, vaccinating))
  })
} |
library(ggplot2)
#============================================================
#Swapping the x- and y-axes
#============================================================
#Use coord_flip() to flip the axes
ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot()
ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot() + coord_flip()
#If the x variable is a factor, its order can be reversed with
#scale_x_discrete() and limits=rev(levels(...))
ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot() +
  coord_flip() +
  scale_x_discrete(limits=rev(levels(PlantGrowth$group)))
#============================================================
#Setting the range of a continuous axis
#============================================================
#Use xlim() and ylim() to set the minimum and maximum of a continuous axis
p <- ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot()
p
p + ylim(0, max(PlantGrowth$weight))
#ylim() is shorthand for setting limits with scale_y_continuous()
#(the same holds for xlim() and scale_x_continuous())
#ylim(0,10)
#is equivalent to
#scale_y_continuous(limits=c(0,10))
#If ylim() and scale_y_continuous() are both used, only the later one takes effect
p + ylim(0,10) + scale_y_continuous(breaks = c(0,5,10))
p + scale_y_continuous(breaks = c(0,5,10)) + ylim(0,10)
#Drop ylim() and use scale_y_continuous() with both limits and breaks
p + scale_y_continuous(limits = c(0,10), breaks = c(0,5,10))
#ggplot2 has two ways of setting an axis range.
#Method 1: modify the scale
#Data outside the range is not merely hidden - it is removed entirely
#Method 2: apply a coordinate transform
#The data is not clipped; the plot is just zoomed to the requested window
p + scale_y_continuous(limits = c(5,6.5)) #Same as using ylim()
p + coord_cartesian(ylim = c(5,6.5))
p + scale_y_continuous(limits = c(0,6.5))
#Use expand_limits() to expand the range in a single direction
p + expand_limits(y=0)
#============================================================
#Reversing a continuous axis
#============================================================
#Use scale_y_reverse() or scale_x_reverse()
#The direction can also be reversed by giving the limits in reverse order,
#maximum first and minimum second
ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot() + scale_y_reverse()
#Specifying reversed limits produces a similar effect
ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot() + ylim(6.5, 3.5)
ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot() +
  scale_y_reverse(limits=c(8,0))
#=============================================================
#Changing the order of items on a categorical axis
#=============================================================
#A categorical (discrete) axis has a factor mapped to it
#The item order can be changed by setting the limits argument of
#scale_x_discrete() or scale_y_discrete()
p <- ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot()
p + scale_x_discrete(limits=c("trt1","ctrl","trt2"))
p + scale_x_discrete(limits=c("trt1","ctrl"))
p + scale_x_discrete(limits=rev(levels(PlantGrowth$group)))
#=============================================================
#Setting the scaling ratio of the x- and y-axes
#=============================================================
#Use coord_fixed(); the following gives a 1:1 scaling between x and y
library(gcookbook) #For the data set
sp <- ggplot(marathon, aes(x=Half, y=Full)) + geom_point()
sp + coord_fixed()
#Set the same tick spacing on both axes by adjusting breaks in
#scale_y_continuous() and scale_x_continuous()
sp + coord_fixed() +
  scale_y_continuous(breaks = seq(0, 420, 30)) +
  scale_x_continuous(breaks = seq(0, 420, 30))
#Set the ratio argument for a fixed ratio other than 1:1
#Here the x-axis gets twice as many tick marks
sp + coord_fixed(ratio =1/2) +
  scale_y_continuous(breaks = seq(0, 420, 30)) +
  scale_x_continuous(breaks = seq(0, 420, 30))
#================================================================
#Setting the positions of tick marks
#================================================================
#To change where tick marks appear on the axis, set the breaks argument
ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot()
ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot() +
  scale_y_continuous(breaks = c(4, 4.25, 4.5, 5, 6, 8))
#seq() or the : operator can also generate the vector of tick positions
seq(4, 7, by=.5)
#On a discrete axis a tick mark is generated for every item by default
#For a discrete axis, limits can reorder or remove items
#Set limits to reorder/remove items; set breaks to control which get labels
ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot() +
  scale_x_discrete(limits=c("trt2", "ctrl"),breaks = "ctrl")
#================================================================
#Removing tick marks and labels
#================================================================
#To remove y tick labels, use theme(axis.text.y = element_blank())
#To remove x tick labels, use theme(axis.text.x = element_blank())
p <- ggplot(PlantGrowth, aes(x=group, y= weight)) + geom_boxplot()
p + theme(axis.text.y = element_blank())
#To remove the tick marks, use theme(axis.ticks = element_blank())
p + theme(axis.ticks = element_blank(), axis.text.y = element_blank())
#To remove tick marks, labels and gridlines at once, set breaks to NULL
p + scale_y_continuous(breaks = NULL)
#On continuous axes ggplot() places ticks, labels and major gridlines at the breaks
#On discrete axes these elements appear at the limits values
#==============================================================
#Changing the text of tick labels
#==============================================================
#Assign values to breaks and labels in the scale
library(gcookbook) #For the data set
hwp <- ggplot(heightweight,aes(x=ageYear, y=heightIn)) +
  geom_point()
hwp
hwp + scale_y_continuous(breaks = c(50,56,60,66,72),
                         labels = c("Tiny","Really\nshort","short",
                                    "Medium", "Tallish"))
#To display the values in a particular format -----
#this function converts a height in inches to a feet-and-inches label
#Convert numeric heights in inches to "feet'inches\"" labels (vectorized)
footinch_formatter <- function(x) {
  feet <- floor(x / 12)
  inches <- x %% 12
  paste0(feet, "'", inches, "\"")
}
footinch_formatter(56:64)
#Pass the function to the scale via the labels argument
hwp + scale_y_continuous(labels = footinch_formatter)
hwp + scale_y_continuous(breaks = seq(48,72,4),
                         labels = footinch_formatter)
#=================================================================
#Changing the appearance of tick labels
#=================================================================
bp <- ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot() +
  scale_x_discrete(breaks=c("ctrl", "trt1", "trt2"),
                   labels=c("Control", "Treatment 1", "Treatment 2"))
bp
#Rotate the text 90 degrees counterclockwise
bp + theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5))
#Rotate the text 30 degrees counterclockwise
bp + theme(axis.text.x = element_text(angle = 30, hjust = 1, vjust = 0.5))
#hjust sets the horizontal justification
#vjust sets the vertical justification
#Besides rotation, other text properties such as size, face (bold, italic,
#plain) and family (Times or Helvetica) can be set with element_text()
bp + theme(axis.text.x = element_text(family = "Times", face = "italic",
                                      colour = "darkred",
                                      size = rel(0.9))) #0.9 times the current theme's base font size
#==================================================================
#Changing the text of axis labels
#==================================================================
#Use xlab() or ylab() to change the axis label text
library(gcookbook) #For the data set
hwp <- ggplot(heightweight, aes(x=ageYear, y=heightIn, colour=sex)) +
  geom_point()
#With the default axis labels
hwp
#Setting the axis labels
hwp + xlab("Age in years") + ylab("Height in inches")
#labs() can be used instead of xlab() and ylab()
hwp + labs(x="Age in years", y="Height in inches")
#Another way to set an axis label is to specify it in the scale
hwp + scale_x_continuous(name = "Age in years")
#This also works for the other axis scales,
#e.g. scale_y_continuous(), scale_x_discrete(), etc.
#\n can be used to add a line break
hwp + scale_x_continuous(name = "Age\n(Years)")
#=============================================================
#Removing axis labels
#=============================================================
#For the x label, use theme(axis.title.x=element_blank())
#For the y label, use theme(axis.title.y=element_blank())
p <- ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot()
p + theme(axis.title.x = element_blank())
#Another way to remove an axis label is to set it to an empty string
p + xlab("")
#With theme(axis.title.x=element_blank()) the scale name is unchanged;
#the text is simply not drawn and no space is reserved for it
#Setting the label to "" changes the scale name and draws blank text
#===============================================================
#Changing the appearance of axis labels
#===============================================================
#To change the x-axis label appearance, use axis.title.x
library(gcookbook) #For the data set
hwp <- ggplot(heightweight, aes(x=ageYear, y=heightIn)) + geom_point()
hwp + theme(axis.title.x = element_text(face = "italic", colour = "darkred", size = 14))
#\n in the label starts a new line
hwp + ylab("Height\n(inches)") +
  theme(axis.title.y = element_text(angle = 0, face = "italic", size = 14))
#element_text() defaults to angle 0, so setting axis.title.y without an
#angle draws the text with its top pointing up
hwp + ylab("Height\n(inches)") +
  theme(axis.title.y = element_text(angle = 90, face = "italic", size = 14))
#===============================================================
#Showing lines along the axes
#===============================================================
#Use axis.line in the theme settings
library(gcookbook)
p <- ggplot(heightweight, aes(x=ageYear, y=heightIn)) + geom_point()
p + theme(axis.line = element_line(colour = "black"))
#Also reset panel.border when starting from a theme that draws a border
p + theme_bw() +
  theme(panel.border = element_blank(),
        axis.line = element_line(colour = "black"))
#To make the line ends overlap fully, set lineend="square" -----
#With thick lines, the two axis lines only partially overlap
p + theme_bw() +
  theme(panel.border = element_blank(),
        axis.line = element_line(colour = "black",size = 4))
#Fully overlapped
p + theme_bw() +
  theme(panel.border = element_blank(),
        axis.line = element_line(colour = "black", size = 4, lineend = "square"))
#==============================================================
#Using a logarithmic axis
#==============================================================
#Use scale_x_log10() and/or scale_y_log10()
library(MASS) #For the data set
#Basic plot
p <- ggplot(Animals, aes(x=body, y=brain, label=rownames(Animals))) +
  geom_text(size=3)
p
#With log x and log y scales
p + scale_x_log10() + scale_y_log10()
#BUG FIX: breaks must be passed by name. The first positional argument of
#scale_y_log10() is the axis name, so the original scale_y_log10(10^(0:3))
#set the axis title instead of the break positions.
p + scale_x_log10(breaks=10^(-1:5)) + scale_y_log10(breaks=10^(0:3))
#For exponential notation in the tick labels, use trans_format() from scales
library(scales)
p + scale_x_log10(breaks=10^(-1:5),
                  labels=trans_format("log10", math_format(10^.x))) +
  scale_y_log10(breaks=10^(0:3),
                labels=trans_format("log10",math_format(10^.x)))
#Another way to get a log axis is to transform the data before mapping it to
#x and y; technically the axis stays linear - it shows log-transformed values
ggplot(Animals, aes(x=log10(body), y=log10(brain), label=rownames(Animals))) +
  geom_text(size=3)
#Natural-log transform on x, log2 transform on y
p + scale_x_continuous(trans = log_trans(),
                       breaks = trans_breaks("log", function(x) exp(x)),
                       labels = trans_format("log", math_format(e^.x))) +
  scale_y_continuous(trans = log2_trans(),
                     breaks = trans_breaks("log2",function(x) 2^x),
                     labels = trans_format("log2", math_format(2^.x)))
#Apple's stock price with a linear and then a log y-axis
library(gcookbook) #For the data set
ggplot(aapl, aes(x=date,y=adj_price)) + geom_line()
ggplot(aapl, aes(x=date,y=adj_price)) + geom_line() +
  scale_y_log10(breaks=c(2,10,50,250))
#==============================================================
#Adding tick marks to a logarithmic axis
#==============================================================
#Use annotation_logticks()
library(MASS) #For the data set
library(scales) #For the trans_* and format helper functions
ggplot(Animals, aes(x=body, y=brain, label=rownames(Animals))) +
  geom_text(size=3) +
  annotation_logticks() +
  scale_x_log10(breaks=10^(-1:5),
                labels=trans_format("log10", math_format(10^.x))) +
  scale_y_log10(breaks=10^(0:3),
                labels=trans_format("log10",math_format(10^.x)))
#Place the minor gridlines at log10(5) offsets so they line up with the ticks
ggplot(Animals, aes(x=body, y=brain, label=rownames(Animals))) +
  geom_text(size=3) +
  annotation_logticks() +
  scale_x_log10(breaks=trans_breaks("log10", function(x) 10^x),
                labels=trans_format("log10", math_format(10^.x)),
                minor_breaks = log10(5) + -2:5) +
  scale_y_log10(breaks=trans_breaks("log10", function(x) 10^x),
                labels=trans_format("log10",math_format(10^.x)),
                minor_breaks = log10(5) + -1:3) +
  coord_fixed() +
  theme_bw()
#========================================================
#Making a circular (polar) plot
#========================================================
#Use coord_polar()
library(gcookbook) #For the data
head(wind)
#Count observations in each SpeedCat/DirCat category with geom_histogram().
#binwidth is 15 and the histogram origin starts at -7.5 so that each wedge is
#centred on 0, 15, 30, and so on.
#BUG FIX: the argument was misspelled "drgin"; it must be "origin"
#(in ggplot2 >= 2.0 the equivalent argument is "boundary")
ggplot(wind, aes(x=DirCat,fill=SpeedCat)) +
  geom_histogram(binwidth = 15, origin=-7.5) +
  coord_polar() +
  scale_x_continuous(limits = c(0,360))
#Make it a little nicer: reverse the legend, use a different palette, add an
#outline, and place the breaks at more familiar values
ggplot(wind, aes(x=DirCat,fill=SpeedCat)) +
  geom_histogram(binwidth = 15, origin=-7.5,colour="black",size=0.25) +
  coord_polar() +
  scale_x_continuous(limits = c(0,360),breaks = seq(0,360,by=45),
                     minor_breaks = seq(0,360,by=15))+
  scale_fill_brewer()
#With a continuous x, the smallest and largest values coincide, so the limits
#sometimes need to be set explicitly
#Put the mdeaths time series into a data frame
md <- data.frame(deaths=as.numeric(mdeaths),
                 month = as.numeric(cycle(mdeaths)))
#Average number of deaths for each month
library(plyr)
md <- ddply(md, "month",summarise,deaths=mean(deaths))
md
#Basic plot
p <- ggplot(md, aes(x=month,y=deaths)) + geom_line() +
  scale_x_continuous(breaks = 1:12)
#With coord_polar
p + coord_polar()
#With coord_polar and the lower y (r) limit set to 0
p + coord_polar() + ylim(0, max(md$deaths))
p + coord_polar() + ylim(0, max(md$deaths)) + xlim(0,12)
#Close the curve by adding a month-0 row with the same value as month 12
mdx <- md[md$month==12,]
mdx$month <- 0
mdnew <- rbind(mdx,md)
#Use %+% to draw the same figure with the new data
p %+% mdnew + coord_polar() + ylim(0,max(md$deaths))
#================================================================
#Using dates on an axis
#================================================================
#Map a column of class Date to the x- or y-axis
#Look at the data structure
str(economics)
ggplot(economics, aes(x=date, y=psavert)) + geom_line()
#ggplot2 handles two kinds of time-related objects: dates (Date) and
#date-times (POSIXt)
#Date objects represent dates, with a resolution of one day
#POSIXt objects are instants in time, with resolution down to fractions of a second
#Take a subset of economics
econ <- subset(economics, date >= as.Date("1992-05-01") &
                 date < as.Date("1993-06-01"))
#Base plot - without specifying breaks
p <- ggplot(econ, aes(x=date, y=psavert)) + geom_line()
p
#Specify a vector of dates to use as breaks
datebreaks <- seq(as.Date("1992-06-01"), as.Date("1993-06-01"), by="2 month")
#Use the breaks and rotate the text labels
p + scale_x_date(breaks = datebreaks) +
  theme(axis.text.x = element_text(angle = 30,hjust = 1))
#Note: the break (label) format changed; it can be specified with
#date_format() from the scales package
library(scales)
p + scale_x_date(breaks = datebreaks, labels = date_format("%Y %b")) +
  theme(axis.text.x = element_text(angle = 30,hjust = 1))
#=============================================================
#Using relative times on an axis
#=============================================================
#Time values are often stored as plain numbers
#Map the value to the x- or y-axis and use a formatter to generate the labels
#Convert the WWWusage time series object to a data frame
www <- data.frame(minute = as.numeric(time(WWWusage)),
                  users = as.numeric(WWWusage))
#Define a formatter function that converts minutes to an HH:MM string
#Format a numeric count of minutes as an "H:MM" clock label (vectorized)
timeHM_formatter <- function(x) {
  hours <- x %/% 60
  minutes <- floor(x %% 60)
  sprintf("%d:%02d", hours, minutes) #zero-pad minutes to two digits
}
#Default x-axis
ggplot(www, aes(x=minute,y=users)) + geom_line()
#With the formatted times
ggplot(www, aes(x=minute,y=users)) + geom_line() +
  scale_x_continuous(name = "time", breaks = seq(0,100, by=10),
                     labels = timeHM_formatter)
| /codes/chapter8.R | no_license | xiaowei3223/Graphic_data_with_R | R | false | false | 18,099 | r | library(ggplot2)
#============================================================
#Swapping the x- and y-axes
#============================================================
#Use coord_flip() to flip the axes
ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot()
ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot() + coord_flip()
#If the x variable is a factor, its order can be reversed with
#scale_x_discrete() and limits=rev(levels(...))
ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot() +
  coord_flip() +
  scale_x_discrete(limits=rev(levels(PlantGrowth$group)))
#============================================================
#Setting the range of a continuous axis
#============================================================
#Use xlim() and ylim() to set the minimum and maximum of a continuous axis
p <- ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot()
p
p + ylim(0, max(PlantGrowth$weight))
#ylim() is shorthand for setting limits with scale_y_continuous()
#(the same holds for xlim() and scale_x_continuous())
#ylim(0,10)
#is equivalent to
#scale_y_continuous(limits=c(0,10))
#If ylim() and scale_y_continuous() are both used, only the later one takes effect
p + ylim(0,10) + scale_y_continuous(breaks = c(0,5,10))
p + scale_y_continuous(breaks = c(0,5,10)) + ylim(0,10)
#Drop ylim() and use scale_y_continuous() with both limits and breaks
p + scale_y_continuous(limits = c(0,10), breaks = c(0,5,10))
#ggplot2 has two ways of setting an axis range.
#Method 1: modify the scale
#Data outside the range is not merely hidden - it is removed entirely
#Method 2: apply a coordinate transform
#The data is not clipped; the plot is just zoomed to the requested window
p + scale_y_continuous(limits = c(5,6.5)) #Same as using ylim()
p + coord_cartesian(ylim = c(5,6.5))
p + scale_y_continuous(limits = c(0,6.5))
#Use expand_limits() to expand the range in a single direction
p + expand_limits(y=0)
#============================================================
#Reversing a continuous axis
#============================================================
#Use scale_y_reverse() or scale_x_reverse()
#The direction can also be reversed by giving the limits in reverse order,
#maximum first and minimum second
ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot() + scale_y_reverse()
#Specifying reversed limits produces a similar effect
ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot() + ylim(6.5, 3.5)
ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot() +
  scale_y_reverse(limits=c(8,0))
#=============================================================
#Changing the order of items on a categorical axis
#=============================================================
#A categorical (discrete) axis has a factor mapped to it
#The item order can be changed by setting the limits argument of scale_x_discrete() or scale_y_discrete()
p <- ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot()
p + scale_x_discrete(limits=c("trt1","ctrl","trt2"))
p + scale_x_discrete(limits=c("trt1","ctrl"))
p + scale_x_discrete(limits=rev(levels(PlantGrowth$group)))
#=============================================================
#设置x轴和y轴的缩放比例
#=============================================================
#使用coord_fixed(),以下代码将得到x轴和y轴之间1:1的缩放结果
library(gcookbook) #为了使用数据集
sp <- ggplot(marathon, aes(x=Half, y=Full)) + geom_point()
sp + coord_fixed()
#通过在scale_y_continuous()和scale_x_continuous()中调整参数breaks
#将刻度间距设为相同
sp + coord_fixed() +
scale_y_continuous(breaks = seq(0, 420, 30)) +
scale_x_continuous(breaks = seq(0, 420, 30))
#设置参数ratio使两个坐标轴之间指定其他的固定比例而非相同的比例
#在x轴上添加双倍的刻度线
sp + coord_fixed(ratio =1/2) +
scale_y_continuous(breaks = seq(0, 420, 30)) +
scale_x_continuous(breaks = seq(0, 420, 30))
#================================================================
#设置刻度线的位置
#================================================================
#如果需要改变刻度线在坐标轴的位置,设置breaks参数即可
ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot()
ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot() +
scale_y_continuous(breaks = c(4, 4.25, 4.5, 5, 6, 8))
#也可以使用seq()函数或运算符,来生成刻度线的位置向量
seq(4, 7, by=.5)
#如果坐标轴使离散型而不是连续型,默认会为每个项目生成一条刻度线
#对于离散型坐标轴,可以通过指定limits()来修改项目的顺序或移除项目
#设置limits重排序或移除项目,设置breaks控制那些项目拥有标签
ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot() +
scale_x_discrete(limits=c("trt2", "ctrl"),breaks = "ctrl")
#================================================================
#移除刻度线和标签
#================================================================
#移除y轴刻度标签,使用theme(axis.text.y = element_blank())
#移除x轴刻度标签,使用theme(axis.text.x = element_blank())
p <- ggplot(PlantGrowth, aes(x=group, y= weight)) + geom_boxplot()
p + theme(axis.text.y = element_blank())
#移除刻度线,使用theme(axis.ticks = element_blank())
p + theme(axis.ticks = element_blank(), axis.text.y = element_blank())
#要移除刻度线、刻度标签和网格线, 将break设置为NULL即可
p + scale_y_continuous(breaks = NULL)
#对于连续性坐标轴,ggplot()通常会在每个breaks值得位置放置刻度线、刻度标签和主网格线
#对于离散型坐标轴,这些元素则出现在每个limits值的位置上。
#==============================================================
#修改刻度标签的文本
#==============================================================
#在标度中为breaks和labels赋值即可
library(gcookbook) #为了使用数据集
hwp <- ggplot(heightweight,aes(x=ageYear, y=heightIn)) +
geom_point()
hwp
hwp + scale_y_continuous(breaks = c(50,56,60,66,72),
labels = c("Tiny","Really\nshort","short",
"Medium", "Tallish"))
#让数据以某种格式存储-----
#函数将英寸数值转换为英尺加英寸的格式
# Convert a height in inches to a feet-and-inches label, e.g. 56 -> 4'8".
# Vectorized over x; used as a y-axis label formatter.
footinch_formatter <- function(x) {
  feet <- x %/% 12
  inches <- x %% 12
  paste0(feet, "'", inches, "\"")
}
footinch_formatter(56:64)
#参数labels把函数传递给标度
hwp + scale_y_continuous(labels = footinch_formatter)
hwp + scale_y_continuous(breaks = seq(48,72,4),
labels = footinch_formatter)
#=================================================================
#修改刻度标签的外观
#=================================================================
bp <- ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot() +
scale_x_discrete(breaks=c("ctrl", "trt1", "trt2"),
labels=c("Control", "Treatment 1", "Treatment 2"))
bp
#将文本逆时针旋转90度
bp + theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5))
#将文本逆时针旋转30度
bp + theme(axis.text.x = element_text(angle = 30, hjust = 1, vjust = 0.5))
#hjust设置横向对齐、
#vjust设置纵向对齐
#除了旋转意外,其他的文本属性,如大小、样式(粗体、斜体、常规)
#和字体族(Times或Helvetica)可以使用element_text()进行设置
bp + theme(axis.text.x = element_text(family = "Times", face = "italic",
colour = "darkred",
size = rel(0.9))) #当前主题基础字体大小的0.9倍
#==================================================================
#修改坐标轴标签的文本
#==================================================================
#使用xlab()或ylab()来修改坐标轴标签的文本
library(gcookbook) #为了使用数据集
hwp <- ggplot(heightweight, aes(x=ageYear, y=heightIn, colour=sex)) +
geom_point()
#使用默认的坐标轴标签
hwp
#设置坐标轴标签
hwp + xlab("Age in years") + ylab("Height in inches")
#除了使用xlab()和ylab(),也可以使用labs()
hwp + labs(x="Age in years", y="Height in inches")
#另一种设置坐标轴标签方法是在标度中指定
hwp + scale_x_continuous(name = "Age in years")
#这种方法同样适用于其他的坐标轴标度
#如: scale_y_continuous()、scale_x_discrete()等
#还可以使用\n来添加换行
hwp + scale_x_continuous(name = "Age\n(Years)")
#=============================================================
#移除坐标轴标签
#=============================================================
#对于x轴标签,使用theme(axis.title.x=element_blank())
#对于y轴标签,使用theme(axis.title.y=element_blank())
p <- ggplot(PlantGrowth, aes(x=group, y=weight)) + geom_boxplot()
p + theme(axis.title.x = element_blank())
#移除坐标轴标签的另一种方法是将其设为一个空字符串
p + xlab("")
#使用theme(axis.title.x=element_blank()), x、y标度的名称是不会改变的,
#只是这样不会显示文本而且不会为其留出空间
#设置标签为"",标度的名称就改变了,实际上显示了空白的文本
#===============================================================
#修改坐标轴标签的外观
#===============================================================
#要修改x轴标签的外观,使用axis.title.x即可
library(gcookbook) #为了使用数据集
hwp <- ggplot(heightweight, aes(x=ageYear, y=heightIn)) + geom_point()
hwp + theme(axis.title.x = element_text(face = "italic", colour = "darkred", size = 14))
#标签中\n表示另起一行
hwp + ylab("Height\n(inches)") +
theme(axis.title.y = element_text(angle = 0, face = "italic", size = 14))
#当调用element_text()时,默认的角度时0,
#所以如果设置了axis.title.y但没有指定这个角度,它将以文本的顶部指向上方的朝向显示
hwp + ylab("Height\n(inches)") +
theme(axis.title.y = element_text(angle = 90, face = "italic", size = 14))
#===============================================================
#沿坐标轴显示直线
#===============================================================
#使用主题设置中的axis.line
library(gcookbook)
p <- ggplot(heightweight, aes(x=ageYear, y=heightIn)) + geom_point()
p + theme(axis.line = element_line(colour = "black"))
#增加重置参数panel.border
p + theme_bw() +
theme(panel.border = element_blank(),
axis.line = element_line(colour = "black"))
#使末端的边界线完全重叠,设置linneed="square":-----
#对于较粗的线条,只有一半重叠
p + theme_bw() +
theme(panel.border = element_blank(),
axis.line = element_line(colour = "black",size = 4))
#完全重叠
p + theme_bw() +
theme(panel.border = element_blank(),
axis.line = element_line(colour = "black", size = 4, lineend = "square"))
#==============================================================
#使用对数坐标轴
#====================================================================
#使用scale_x_log10()和或scale_y_log10()
library(MASS) #为了使用数据集
#基本图形
p <- ggplot(Animals, aes(x=body, y=brain, label=rownames(Animals))) +
geom_text(size=3)
p
# Use log10 scales on both the x and y axes
p + scale_x_log10() + scale_y_log10()
# Bug fix: the y-axis break positions must be passed as `breaks=`; passed
# positionally they would bind to the scale's first argument (the axis
# name) instead of the break points.
p + scale_x_log10(breaks=10^(-1:5)) + scale_y_log10(breaks=10^(0:3))
#要让刻度标签转而使用指数记数法,只要使用scales包中的函数trans_format()
library(scales)
p + scale_x_log10(breaks=10^(-1:5),
labels=trans_format("log10", math_format(10^.x))) +
scale_y_log10(breaks=10^(0:3),
labels=trans_format("log10",math_format(10^.x)))
#使用对数坐标轴的另一种方法是:
#在将数据映射到x和y坐标之前,先对其进行变换,从技术上,坐标轴仍然是线性的--它表示对数变换后的数值
ggplot(Animals, aes(x=log10(body), y=log10(brain), label=rownames(Animals))) +
geom_text(size=3)
#对x使用自然对数变换,对y使用log2变换
p + scale_x_continuous(trans = log_trans(),
breaks = trans_breaks("log", function(x) exp(x)),
labels = trans_format("log", math_format(e^.x))) +
scale_y_continuous(trans = log2_trans(),
breaks = trans_breaks("log2",function(x) 2^x),
labels = trans_format("log2", math_format(2^.x)))
#分别使用线性和对数的y轴来展示苹果公司的股价变化情况
library(gcookbook) #为了使用数据集
ggplot(aapl, aes(x=date,y=adj_price)) + geom_line()
ggplot(aapl, aes(x=date,y=adj_price)) + geom_line() +
scale_y_log10(breaks=c(2,10,50,250))
#==============================================================
#为对数坐标轴添加刻度
#==============================================================
#使用annotation_logticks()
library(MASS) #为了使用数据集
library(scales) #为了使用trans和format相关函数
ggplot(Animals, aes(x=body, y=brain, label=rownames(Animals))) +
geom_text(size=3) +
annotation_logticks() +
scale_x_log10(breaks=10^(-1:5),
labels=trans_format("log10", math_format(10^.x))) +
scale_y_log10(breaks=10^(0:3),
labels=trans_format("log10",math_format(10^.x)))
ggplot(Animals, aes(x=body, y=brain, label=rownames(Animals))) +
geom_text(size=3) +
annotation_logticks() +
scale_x_log10(breaks=trans_breaks("log10", function(x) 10^x),
labels=trans_format("log10", math_format(10^.x)),
minor_breaks = log10(5) + -2:5) +
scale_y_log10(breaks=trans_breaks("log10", function(x) 10^x),
labels=trans_format("log10",math_format(10^.x)),
minor_breaks = log10(5) + -1:3) +
coord_fixed() +
theme_bw()
#========================================================
#绘制环状图形
#========================================================
#使用coord_polar()
library(gcookbook) #为了使用数据
head(wind)
#使用geom_histogram()对每个SpeedCat和DirCat的类别绘制样本数量的计数值。
#将binwidth设置为15以使直方图的origin开始与-7.5的位置,
#这样每个扇形就会居中于0、15、30等位置。
# Bug fix: the argument was misspelled "drgin", which ggplot2 silently
# ignored; "origin" anchors the first bin at -7.5 so each wedge is centered
# on 0, 15, 30, ... (in ggplot2 >= 2.1 the equivalent is `boundary = -7.5`).
ggplot(wind, aes(x=DirCat,fill=SpeedCat)) +
  geom_histogram(binwidth = 15, origin=-7.5) +
  coord_polar() +
  scale_x_continuous(limits = c(0,360))
# Polish the plot: reverse the legend, use a different palette, add bar
# outlines, and place the breaks at more familiar compass values
ggplot(wind, aes(x=DirCat,fill=SpeedCat)) +
  geom_histogram(binwidth = 15, origin=-7.5,colour="black",size=0.25) +
  coord_polar() +
  scale_x_continuous(limits = c(0,360),breaks = seq(0,360,by=45),
                     minor_breaks = seq(0,360,by=15))+
  scale_fill_brewer()
#在使用一个连续性的x是,数据中的最小值和最大值是重合的,有时需要设置对应的界限
#将mdeaths的时间序列数据放入一个数据框
md <- data.frame(deaths=as.numeric(mdeaths),
month = as.numeric(cycle(mdeaths)))
#计算每个月的平均死亡数量
library(plyr)
md <- ddply(md, "month",summarise,deaths=mean(deaths))
md
#绘制基本图形
p <- ggplot(md, aes(x=month,y=deaths)) + geom_line() +
scale_x_continuous(breaks = 1:12)
#使用coord_polar
p + coord_polar()
#使用coord_polar并将y(r)的下界设置为0
p + coord_polar() + ylim(0, max(md$deaths))
p + coord_polar() + ylim(0, max(md$deaths)) + xlim(0,12)
#通过添加一个值与12的值相同的0来连接曲线
mdx <- md[md$month==12,]
mdx$month <- 0
mdnew <- rbind(mdx,md)
#通过使用%+% 绘制与之前相同的图形,只是使用的数据不同
p %+% mdnew + coord_polar() + ylim(0,max(md$deaths))
#================================================================
#在坐标轴上使用日期
#================================================================
#将一列类为Date的变量映射到x轴或y轴
#观察数据结构
str(economics)
ggplot(economics, aes(x=date, y=psavert)) + geom_line()
#ggplot2可以处理两类时间相关的对象,日期对象(Date)和日期时间对象(POSIXt)
#Date对象表示的是日期,分别率为一天
#POSIXt对象是时刻,拥有精确到秒的小数部分的分辨率
#去economics的一个子集
econ <- subset(economics, date >= as.Date("1992-05-01") &
date < as.Date("1993-06-01"))
#基本图形---不指定分割点
p <- ggplot(econ, aes(x=date, y=psavert)) + geom_line()
p
#指定一个日期向量为分割点
datebreaks <- seq(as.Date("1992-06-01"), as.Date("1993-06-01"), by="2 month")
#使用分割点并旋转文本标签
p + scale_x_date(breaks = datebreaks) +
theme(axis.text.x = element_text(angle = 30,hjust = 1))
#注意:这里分割点(标签)的格式发生了改变,可以通过使用scales包中的date_format()函数来指定格式。
library(scales)
p + scale_x_date(breaks = datebreaks, labels = date_format("%Y %b")) +
theme(axis.text.x = element_text(angle = 30,hjust = 1))
#=============================================================
#在坐标轴上使用相对时间
#=============================================================
#时间值通常以数字的形式存储
#当将一个值映射到x轴或y轴上,并使用一个格式刷来生成合适的坐标轴标签
#转换时间序列对象WWWusage为数据框
www <- data.frame(minute = as.numeric(time(WWWusage)),
users = as.numeric(WWWusage))
#定义一个格式刷函数--可将以分钟表示的时间转换为字符串
# Format a numeric count of minutes as an "H:MM" clock label,
# e.g. 125 -> "2:05". Vectorized over x; used as an axis formatter.
timeHM_formatter <- function(x) {
  hours <- x %/% 60
  minutes <- floor(x %% 60)
  sprintf("%d:%02d", hours, minutes)
}
#默认的x轴
ggplot(www, aes(x=minute,y=users)) + geom_line()
#使用格式化后的时间
ggplot(www, aes(x=minute,y=users)) + geom_line() +
scale_x_continuous(name = "time", breaks = seq(0,100, by=10),
labels = timeHM_formatter)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/na.R
\name{count_na}
\alias{count_na}
\title{count_na}
\usage{
count_na(x)
}
\arguments{
\item{x}{A vector or matrix or array}
}
\value{
An integer
}
\description{
Count the number of NA
}
\examples{
count_na( c(1,NA, 2, NA, 3) )
count_na( array(1:64, dim = c(4,4,4)) )
}
| /man/count_na.Rd | no_license | talegari/sidekicks | R | false | true | 350 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/na.R
\name{count_na}
\alias{count_na}
\title{count_na}
\usage{
count_na(x)
}
\arguments{
\item{x}{A vector or matrix or array}
}
\value{
An integer
}
\description{
Count the number of NA
}
\examples{
count_na( c(1,NA, 2, NA, 3) )
count_na( array(1:64, dim = c(4,4,4)) )
}
|
library(bitops)
library(NLP)
library(RColorBrewer)
library(twitteR)
library(RCurl)
library(RJSONIO)
library(stringr)
library(tm)
library(wordcloud)
db_key <- "9c81bbc72b8891bf8d07275ba2a3ccee"
# Clean a character vector of raw tweet text for text mining:
# strips retweet markers, @mentions, punctuation, digits, URLs and the
# "amp" leftover of HTML "&amp;", normalises whitespace and lower-cases.
# Elements that cannot be lower-cased become NA; empty strings are
# dropped, so the result may be shorter than the input.
clean.text <- function(some_txt)
{
  some_txt = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", some_txt)  # retweet headers
  some_txt = gsub("@\\w+", "", some_txt)                        # @mentions
  some_txt = gsub("[[:punct:]]", "", some_txt)
  some_txt = gsub("[[:digit:]]", "", some_txt)
  some_txt = gsub("http\\w+", "", some_txt)  # URLs (punctuation already stripped)
  # Bug fix: remove the "amp" leftover of "&amp;" as a whole word only --
  # the old gsub("amp", "") mangled words such as "example" -> "exle" --
  # and do it before whitespace is normalised.
  some_txt = gsub("\\bamp\\b", "", some_txt)
  # Bug fix: collapse runs of whitespace to a single space instead of
  # deleting them, which used to glue adjacent words together.
  some_txt = gsub("[ \t]{2,}", " ", some_txt)
  some_txt = gsub("^\\s+|\\s+$", "", some_txt)
  # Lower-case defensively: invalid multibyte text makes tolower() error,
  # in which case the element becomes NA instead of aborting the run.
  try.tolower = function(x)
  {
    y = NA
    try_error = tryCatch(tolower(x), error=function(e) e)
    if (!inherits(try_error, "error"))
      y = tolower(x)
    return(y)
  }
  some_txt = sapply(some_txt, try.tolower)
  some_txt = some_txt[some_txt != ""]
  names(some_txt) = NULL
  return(some_txt)
}
getSentiment <- function (text, key){
text <- URLencode(text);
#save all the spaces, then get rid of the weird characters that break the API, then convert back the URL-encoded spaces.
text <- str_replace_all(text, "%20", " ");
text <- str_replace_all(text, "%\\d\\d", "");
text <- str_replace_all(text, " ", "%20");
if (str_length(text) > 360){
text <- substr(text, 0, 359);
}
##########################################
data <- getURL(paste("http://api.datumbox.com/1.0/TwitterSentimentAnalysis.json?api_key=", key, "&text=",text, sep=""))
js <- fromJSON(data, asText=TRUE);
# get mood probability
sentiment = js$output$result
###################################
return(list(sentiment=sentiment))
}
tweets = searchTwitter("iPhone", 20, lang="en")
tweet_txt = sapply(tweets, function(x) x$getText())
# clean text
tweet_clean = clean.text(tweet_txt)
tweet_num = length(tweet_clean)
tweet_df = data.frame(text=tweet_clean, sentiment=rep("", tweet_num),stringsAsFactors=FALSE)
sentiment = rep(0, tweet_num)
for (i in 1:tweet_num)
{
tmp = getSentiment(tweet_clean[i], db_key)
tweet_df$sentiment[i] = tmp$sentiment
print(paste(i," of ", tweet_num))
}
tweet_df <- tweet_df[tweet_df$sentiment!="",]
sents = levels(factor(tweet_df$sentiment))
labels <- lapply(sents, function(x) paste(x,format(round((length((tweet_df[tweet_df$sentiment ==x,])$text)/length(tweet_df$sentiment)*100),2),nsmall=2),"%"))
nemo = length(sents)
emo.docs = rep("", nemo)
for (i in 1:nemo)
{
tmp = tweet_df[tweet_df$sentiment == sents[i],]$text
emo.docs[i] = paste(tmp,collapse=" ")
}
emo.docs = removeWords(emo.docs, stopwords("german"))
emo.docs = removeWords(emo.docs, stopwords("english"))
corpus = Corpus(VectorSource(emo.docs))
tdm = TermDocumentMatrix(corpus)
tdm = as.matrix(tdm)
colnames(tdm) = labels
comparison.cloud(tdm, colors = brewer.pal(nemo, "Dark2"),scale = c(3,.5), random.order = FALSE, title.size = 1.5)
| /swaps/twitter/rprog2.R | no_license | masymbol/customer_experience | R | false | false | 2,888 | r | library(bitops)
library(NLP)
library(RColorBrewer)
library(twitteR)
library(RCurl)
library(RJSONIO)
library(stringr)
library(tm)
library(wordcloud)
db_key <- "9c81bbc72b8891bf8d07275ba2a3ccee"
# Clean a character vector of raw tweet text for text mining:
# strips retweet markers, @mentions, punctuation, digits, URLs and the
# "amp" leftover of HTML "&amp;", normalises whitespace and lower-cases.
# Elements that cannot be lower-cased become NA; empty strings are
# dropped, so the result may be shorter than the input.
clean.text <- function(some_txt)
{
  some_txt = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", some_txt)  # retweet headers
  some_txt = gsub("@\\w+", "", some_txt)                        # @mentions
  some_txt = gsub("[[:punct:]]", "", some_txt)
  some_txt = gsub("[[:digit:]]", "", some_txt)
  some_txt = gsub("http\\w+", "", some_txt)  # URLs (punctuation already stripped)
  # Bug fix: remove the "amp" leftover of "&amp;" as a whole word only --
  # the old gsub("amp", "") mangled words such as "example" -> "exle" --
  # and do it before whitespace is normalised.
  some_txt = gsub("\\bamp\\b", "", some_txt)
  # Bug fix: collapse runs of whitespace to a single space instead of
  # deleting them, which used to glue adjacent words together.
  some_txt = gsub("[ \t]{2,}", " ", some_txt)
  some_txt = gsub("^\\s+|\\s+$", "", some_txt)
  # Lower-case defensively: invalid multibyte text makes tolower() error,
  # in which case the element becomes NA instead of aborting the run.
  try.tolower = function(x)
  {
    y = NA
    try_error = tryCatch(tolower(x), error=function(e) e)
    if (!inherits(try_error, "error"))
      y = tolower(x)
    return(y)
  }
  some_txt = sapply(some_txt, try.tolower)
  some_txt = some_txt[some_txt != ""]
  names(some_txt) = NULL
  return(some_txt)
}
# Query the Datumbox Twitter sentiment API for a single piece of text.
#
# Args:
#   text: a character scalar with the (already cleaned) tweet text.
#   key:  the Datumbox API key.
#
# Returns:
#   A list with one element, `sentiment`, holding the API's classification
#   as parsed from the JSON reply (js$output$result).
#
# NOTE(review): performs a network request (RCurl::getURL) with no error
# handling, so a failed request or malformed JSON will raise. Relies on
# stringr (str_replace_all/str_length) and RJSONIO (fromJSON) being attached.
getSentiment <- function (text, key){
  text <- URLencode(text);
  #save all the spaces, then get rid of the weird characters that break the API, then convert back the URL-encoded spaces.
  text <- str_replace_all(text, "%20", " ");
  text <- str_replace_all(text, "%\\d\\d", "");
  text <- str_replace_all(text, " ", "%20");
  # The API rejects long inputs; keep at most the first 359 characters.
  if (str_length(text) > 360){
    text <- substr(text, 0, 359);
  }
  ##########################################
  data <- getURL(paste("http://api.datumbox.com/1.0/TwitterSentimentAnalysis.json?api_key=", key, "&text=",text, sep=""))
  js <- fromJSON(data, asText=TRUE);
  # get mood probability
  sentiment = js$output$result
  ###################################
  return(list(sentiment=sentiment))
}
tweets = searchTwitter("iPhone", 20, lang="en")
tweet_txt = sapply(tweets, function(x) x$getText())
# clean text
tweet_clean = clean.text(tweet_txt)
tweet_num = length(tweet_clean)
tweet_df = data.frame(text=tweet_clean, sentiment=rep("", tweet_num),stringsAsFactors=FALSE)
sentiment = rep(0, tweet_num)
for (i in 1:tweet_num)
{
tmp = getSentiment(tweet_clean[i], db_key)
tweet_df$sentiment[i] = tmp$sentiment
print(paste(i," of ", tweet_num))
}
tweet_df <- tweet_df[tweet_df$sentiment!="",]
sents = levels(factor(tweet_df$sentiment))
labels <- lapply(sents, function(x) paste(x,format(round((length((tweet_df[tweet_df$sentiment ==x,])$text)/length(tweet_df$sentiment)*100),2),nsmall=2),"%"))
nemo = length(sents)
emo.docs = rep("", nemo)
for (i in 1:nemo)
{
tmp = tweet_df[tweet_df$sentiment == sents[i],]$text
emo.docs[i] = paste(tmp,collapse=" ")
}
emo.docs = removeWords(emo.docs, stopwords("german"))
emo.docs = removeWords(emo.docs, stopwords("english"))
corpus = Corpus(VectorSource(emo.docs))
tdm = TermDocumentMatrix(corpus)
tdm = as.matrix(tdm)
colnames(tdm) = labels
comparison.cloud(tdm, colors = brewer.pal(nemo, "Dark2"),scale = c(3,.5), random.order = FALSE, title.size = 1.5)
|
## Description: Fetch, Organize, and Export Congressional District Data for KSVD
## Author: Mark White, markhwhiteii@gmail.com
library(acs)
library(tidyverse)
library(stringr)
source("get_proportions.R")
source("get_totals.R")
key <- read_csv("acs_variable_key.csv")
ks_geo <- geo.make(state = "KS", congressional.district = "*")
dat_congress <- acs.fetch(2015, 5, ks_geo, variable = key$variable_code) %>%
estimate() %>%
as.data.frame() %>%
rownames_to_column("congressional") %>%
mutate(congressional = str_sub(congressional, 15, 24)) %>%
gather("variable", "value", -congressional) %>%
full_join(key, ., by = c("variable_code" = "variable")) %>%
group_by(congressional, variable_composite) %>%
summarise(value = sum(value))
acs_congress_totals <- get_totals(dat_congress, TRUE)
acs_congress_proportions <- get_proportions(dat_congress, TRUE)
write.csv(acs_congress_totals,
"../app/Data/acs_congress_totals.csv", row.names = FALSE)
write.csv(acs_congress_proportions,
"../app/Data/acs_congress_proportions.csv", row.names = FALSE)
| /acs/getting_congressional_acs.R | no_license | kansas-voting-data/ksvd_app | R | false | false | 1,084 | r | ## Description: Fetch, Organize, and Export Congressional District Data for KSVD
## Author: Mark White, markhwhiteii@gmail.com
library(acs)
library(tidyverse)
library(stringr)
source("get_proportions.R")
source("get_totals.R")
key <- read_csv("acs_variable_key.csv")
ks_geo <- geo.make(state = "KS", congressional.district = "*")
dat_congress <- acs.fetch(2015, 5, ks_geo, variable = key$variable_code) %>%
estimate() %>%
as.data.frame() %>%
rownames_to_column("congressional") %>%
mutate(congressional = str_sub(congressional, 15, 24)) %>%
gather("variable", "value", -congressional) %>%
full_join(key, ., by = c("variable_code" = "variable")) %>%
group_by(congressional, variable_composite) %>%
summarise(value = sum(value))
acs_congress_totals <- get_totals(dat_congress, TRUE)
acs_congress_proportions <- get_proportions(dat_congress, TRUE)
write.csv(acs_congress_totals,
"../app/Data/acs_congress_totals.csv", row.names = FALSE)
write.csv(acs_congress_proportions,
"../app/Data/acs_congress_proportions.csv", row.names = FALSE)
|
# Exploratory Data Analysis course project: plot Global Active Power over
# time for 2007-02-01 and 2007-02-02 and save it as plot2.png.
# NOTE(review): setwd() to a user-specific path makes the script
# non-portable -- consider running it from the project directory instead.
setwd("C:/Users/matr06581/Desktop/Data Analysis")

# Download and unpack the data set only once
if (!file.exists("data.zip")) {
  download.file(url="https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
                destfile="data.zip")
  unzip("data.zip")
}

# Read just the 2880 minute-rows for 1-2 Feb 2007 (starting at line 66638);
# "?" marks missing values in this file
data = read.csv("household_power_consumption.txt", 
                skip=66637, 
                nrows = 2880,
                na.strings = "?", 
                header = F, 
                sep = ";")
# Recover the column names from the file header (skipped by the read above)
names(data) = names(read.csv("household_power_consumption.txt", nrows = 1, sep = ";"))

# Bug fix: the separator argument was misspelled "sept", so it fell into
# `...` and was pasted into the string as an extra field instead of acting
# as the separator
data$DateTime = as.POSIXct(paste(data$Date, data$Time, sep = " "), 
                           format ="%d/%m/%Y %H:%M:%S")
# Bug fix: the file stores four-digit years, so the format must use %Y;
# "%y" silently mis-parsed the dates
data$Date = as.Date(data$Date, format = "%d/%m/%Y")
data$Time = strptime(data$Time, format = "%H:%M:%S")

# Line plot of global active power against time
plot(data$DateTime, data$Global_active_power, type="l", ylab = "Global Active Power (kilowatts)", xlab = "")

# Copy the screen device to a 480x480 PNG
dev.copy(png, file = "plot2.png", width = 480, height = 480)
dev.off()
dev.off()
| /plot2.r | no_license | somethinggaby/ExData_Plotting1 | R | false | false | 997 | r | #set working directory
setwd("C:/Users/matr06581/Desktop/Data Analysis")
if (!file.exists("data.zip")) {
download.file(url="https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
destfile="data.zip")
unzip("data.zip")
}
#clean data
data = read.csv("household_power_consumption.txt",
skip=66637,
nrows = 2880,
na.strings = "?",
header = F,
sep = ";")
names(data) = names(read.csv("household_power_consumption.txt", nrows = 1, sep = ";"))
data$DateTime = as.POSIXct(paste(data$Date, data$Time, sept= " "),
format ="%d/%m/%Y %H:%M:%S")
data$Date = as.Date(data$Date, format = "%d/%m/%y")
data$Time = strptime(data$Time, format = "%H:%M:%S")
#plot
plot(data$DateTime, data$Global_active_power, type="l", ylab = "Global Active Power (kilowatts)", xlab = "")
#save
dev.copy(png, file = "plot2.png", width = 480, height = 480)
dev.off()
|
# LapierreLab
# Author: Philippe Maisonneuve
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# This script calculates light attenuation rates (Kd)
# from data files generated by the PAR-UV probe (Bertolo Lab, UQTR)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#install.packages("dplyr")
library(dplyr)
#install.packages("zoo")
library(zoo)
#NaRV.omit function
#install.packages("IDPmisc")
library(IDPmisc)
# Calibrated data generated by the PAR-UV probe are stored as .csv
# files in a "CDOM" folder located in a folder "data" which will be
# set as the working drectory
setwd("~/Documents/Labo Lapierre/Maîtrise/Data/2019/PUV/")
# Create a list containing the names of all the files in the CDOM folder
data.files = list.files(path = "./csv/")
# Create a matrix to store Kd values, R2, and maximum depth at each site
output = matrix(0,ncol = 3, nrow = length(data.files))
colnames(output) = c("Kd443", "R2.Kd443", "Max_Depth") #"Kd313", "R2.Kd313" #Kd320", "R2.Kd320","Kd340", "R2.Kd340","Kd443", "R2.Kd443","Kd550", "R2.Kd550", "Max_depth")
sites.names <- sub('.*\\_', '',data.files)
sites.names <- sub("\\..*","",sites.names)
rownames(output) <- sites.names
pdf(file="Kd313.pdf")
for(i in 1:length(data.files))
{
header <- read.table(paste0('./csv/',data.files[i]), nrows = 1, header = FALSE, sep =',', stringsAsFactors = FALSE)
data <- read.table(paste0('./csv/',data.files[i]), skip = 2, header = FALSE, sep =',')
colnames(data) <- unlist(header)
data$Depth <- as.numeric(as.character(data$Depth))
a <- cbind(data$Depth, log(data$EdZ340))
a <- NaRV.omit(a)
colnames(a) <- c("Z", "Iz")
f <- function (d) {
m <- lm(Iz~Z, as.data.frame(d))
return(summary(m)$r.squared)
}
# Apply function to data with a moving window of 10 observations
# and correlation treshold of R^2>0.95
r2 <- rollapply(a, 15, f, by.column=F)
id.points <- which(r2 > 0.95, r2)
RES <- data.frame(a[id.points,])
# Plot conserved (in red) and discarded points
# with linear trend line and graphical parameters
plot(a, main = sites.names[i])
points(RES,pch=15,col="red")
lm.Kd <- lm(Iz~Z,RES)
abline(lm(Iz~Z,RES),col="blue")
legend("topright", legend = paste("R^2 =",
round(summary(lm.Kd)$adj.r.squared, digits = 4),
"\nKd = ",
round(coef(lm.Kd)[2], digits = 4)))
output[i,1] <- round(coef(lm.Kd)[2], digits = 4)
output[i,2] <- round(summary(lm.Kd)$adj.r.squared, digits = 4)
# Z0 is the depth at which we start to see light attenuation (the depth at which the instrument enters the water)
# We need to substact this depth to the maximum depth to obtain the real depth
output[i,3] <- max(data$Depth, na.rm = TRUE) - RES[1,1]
}
dev.off()
write.csv(x = output, file = "Kd313.csv", fileEncoding = "UTF-8") | /Photodegradation/03 - Kd_Attenuation_PUV.R | no_license | p-maisonneuve/lapierre-lab | R | false | false | 2,852 | r | # LapierreLab
# Author: Philippe Maisonneuve
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# This script calculates light attenuation rates (Kd)
# from data files generated by the PAR-UV probe (Bertolo Lab, UQTR)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#install.packages("dplyr")
library(dplyr)
#install.packages("zoo")
library(zoo)
#NaRV.omit function
#install.packages("IDPmisc")
library(IDPmisc)
# Calibrated data generated by the PAR-UV probe are stored as .csv
# files in a "CDOM" folder located in a folder "data" which will be
# set as the working drectory
setwd("~/Documents/Labo Lapierre/Maîtrise/Data/2019/PUV/")
# Create a list containing the names of all the files in the CDOM folder
data.files = list.files(path = "./csv/")
# Create a matrix to store Kd values, R2, and maximum depth at each site
output = matrix(0,ncol = 3, nrow = length(data.files))
colnames(output) = c("Kd443", "R2.Kd443", "Max_Depth") #"Kd313", "R2.Kd313" #Kd320", "R2.Kd320","Kd340", "R2.Kd340","Kd443", "R2.Kd443","Kd550", "R2.Kd550", "Max_depth")
sites.names <- sub('.*\\_', '',data.files)
sites.names <- sub("\\..*","",sites.names)
rownames(output) <- sites.names
pdf(file="Kd313.pdf")
for(i in 1:length(data.files))
{
header <- read.table(paste0('./csv/',data.files[i]), nrows = 1, header = FALSE, sep =',', stringsAsFactors = FALSE)
data <- read.table(paste0('./csv/',data.files[i]), skip = 2, header = FALSE, sep =',')
colnames(data) <- unlist(header)
data$Depth <- as.numeric(as.character(data$Depth))
a <- cbind(data$Depth, log(data$EdZ340))
a <- NaRV.omit(a)
colnames(a) <- c("Z", "Iz")
f <- function (d) {
m <- lm(Iz~Z, as.data.frame(d))
return(summary(m)$r.squared)
}
# Apply function to data with a moving window of 10 observations
# and correlation treshold of R^2>0.95
r2 <- rollapply(a, 15, f, by.column=F)
id.points <- which(r2 > 0.95, r2)
RES <- data.frame(a[id.points,])
# Plot conserved (in red) and discarded points
# with linear trend line and graphical parameters
plot(a, main = sites.names[i])
points(RES,pch=15,col="red")
lm.Kd <- lm(Iz~Z,RES)
abline(lm(Iz~Z,RES),col="blue")
legend("topright", legend = paste("R^2 =",
round(summary(lm.Kd)$adj.r.squared, digits = 4),
"\nKd = ",
round(coef(lm.Kd)[2], digits = 4)))
output[i,1] <- round(coef(lm.Kd)[2], digits = 4)
output[i,2] <- round(summary(lm.Kd)$adj.r.squared, digits = 4)
# Z0 is the depth at which we start to see light attenuation (the depth at which the instrument enters the water)
# We need to substact this depth to the maximum depth to obtain the real depth
output[i,3] <- max(data$Depth, na.rm = TRUE) - RES[1,1]
}
dev.off()
write.csv(x = output, file = "Kd313.csv", fileEncoding = "UTF-8") |
install.packages("rattle")
library(rattle)
rattle()
install.packages("RGtk2")
library(RGtk2)
library(xlsx)
UL_Haircare <- read.csv("C:/Users/shossain/Documents/Projects/NPL/New analysis/Final UL (Hair Care).csv")
saveRDS(epos_UL_Haircare,"C:/Users/shossain/Documents/Projects/NPL/Indo_Darwin/tempo/epos_UL_Haircare.rds")
scatterplotMatrix(UL_Haircare[2:6])
install.packages("rggobi")
library(rggobi)
devtools::session_info("ggobi")
#Cluster analysis
# Let's only grab the numeric columns
UL_Haircare$
str(UL_Haircare)
mydata <- UL_Haircare[,c("HUL_DISC","Line_val","quantity")]
mydata <- na.omit(mydata) # listwise deletion of missing
mydata <- scale(mydata) # standardize variables ibrary(ggplot2)
# How many clusters should we have?
wss <- (nrow(mydata)-1)*sum(apply(mydata,2,var))
for (i in 2:15) wss[i] <- sum(kmeans(mydata,
centers=i)$withinss)
plot(1:15, wss, type="b", xlab="Number of Clusters",
ylab="Within groups sum of squares")
# K-Means Clustering with 5 clusters
fit <- kmeans(mydata, 3)
# Cluster Plot against 1st 2 principal components
# vary parameters for most readable graph
library(cluster)
clusplot(mydata, fit$cluster, color=TRUE, shade=TRUE,
labels=0, lines=0)
# Centroid Plot against 1st 2 discriminant functions
library(fpc)
plotcluster(mydata, fit$cluster)
x <- data.frame(UL_Haircare$Area,fit$cluster)
x$UL_Haircare.Area
y <- x %>% group_by(UL_Haircare.Area,fit.cluster) %>% summarise(n=n())
UL_Haircare <- read.csv("C:/Users/shossain/Documents/Projects/NPL/New analysis/Final UL (Hair Care).csv")
str(UL_Haircare)
x <- UL_Haircare[,c("Line_val","quantity","Area","City","Store.Name")]
install.packages("clustMixType")
library(clustMixType)
wss <- (nrow(x)-1)*sum(apply(x,2,var))
for (i in 2:15) wss[i] <- sum(kproto(x,
i)$withinss)
plot(1:15, wss, type="b", xlab="Number of Clusters",
ylab="Within groups sum of squares")
a <- lambdaest(x)
res <- kproto(x, k= 3, lambda = a)
res
res$centers
# kpres <- kproto(x, 3)
plot(Line_val~Area,x,col = res$cluster)
data <- subset(UL_Haircare,!is.na(UL_Haircare$City))
data <- cbind(data,res$cluster)
table(data$Cluster)
colnames(data)[31] <- "Cluster"
clus_1 <- subset(data,data$Cluster == 1)
clus_2 <- subset(data,data$Cluster == 2)
clus_3 <- subset(data,data$Cluster == 3)
write.csv(clus_1,"C:/Users/shossain/Documents/Projects/NPL/New analysis/clus_1.csv")
write.csv(clus_2,"C:/Users/shossain/Documents/Projects/NPL/New analysis/clus_2.csv")
write.csv(clus_3,"C:/Users/shossain/Documents/Projects/NPL/New analysis/clus_3.csv")
# UL_Haircare(wine, package='rattle')
# head(wine)
#
# str(wine)
#
# mydata <- na.omit(mydata) # listwise deletion of missing
# mydata <- scale(mydata) # standardize variables ibrary(ggplot2)
#
# # How many clusters should we have?
# wss <- (nrow(mydata)-1)*sum(apply(mydata,2,var))
# for (i in 2:15) wss[i] <- sum(kmeans(mydata,
# centers=i)$withinss)
# plot(1:15, wss, type="b", xlab="Number of Clusters",
# ylab="Within groups sum of squares")
#
# # K-Means Clustering with 5 clusters
# fit <- kmeans(mydata, 3)
#
library(cluster)
library(dplyr)
library(ggplot2)
library(readr)
install.packages("Rtsne")
library(Rtsne)
#' Load data
df <- read_csv2("C:\\Users\\shossain\\Documents\\Projects\\Clustering pilot\\bank.csv")
str(df)
#' Compute Gower distance
df <- df %>% mutate_if(is.character,as.factor)
gower_dist <- cluster::daisy(df,metric = "gower")
gower_mat <- as.matrix(gower_dist)
#' Print most similar clients
df[which(gower_mat == min(gower_mat[gower_mat != min(gower_mat)]), arr.ind = TRUE)[1, ], ]
#' Print most dissimilar clients
df[which(gower_mat == max(gower_mat[gower_mat != max(gower_mat)]), arr.ind = TRUE)[1, ], ]
sil_width <- c(NA)
for(i in 2:8){
pam_fit <- pam(gower_dist, diss = TRUE, k = i)
sil_width[i] <- pam_fit$silinfo$avg.width
}
plot(1:8, sil_width,
xlab = "Number of clusters",
ylab = "Silhouette Width")
lines(1:8, sil_width)
k <- 5
pam_fit <- pam(gower_dist, diss = TRUE, k)
pam_results <- df %>%
mutate(cluster = pam_fit$clustering) %>%
group_by(cluster) %>%
do(the_summary = summary(.))
pam_results$the_summary
tsne_obj <- Rtsne(gower_dist, is_distance = TRUE)
df_with_cluster <- cbind(df,pam_fit$clustering)
table(df_with_cluster$`pam_fit$clustering`,useNA = "ifany")
| /Cluster Analysis.R | no_license | safikhossain/DatascienceInnovation | R | false | false | 6,186 | r | install.packages("rattle")
library(rattle)
rattle()
install.packages("RGtk2")
library(RGtk2)
library(xlsx)
UL_Haircare <- read.csv("C:/Users/shossain/Documents/Projects/NPL/New analysis/Final UL (Hair Care).csv")
saveRDS(epos_UL_Haircare,"C:/Users/shossain/Documents/Projects/NPL/Indo_Darwin/tempo/epos_UL_Haircare.rds")
scatterplotMatrix(UL_Haircare[2:6])
install.packages("rggobi")
library(rggobi)
devtools::session_info("ggobi")
#Cluster analysis
# Let's only grab the numeric columns
UL_Haircare$
str(UL_Haircare)
mydata <- UL_Haircare[,c("HUL_DISC","Line_val","quantity")]
mydata <- na.omit(mydata) # listwise deletion of missing
mydata <- scale(mydata) # standardize variables ibrary(ggplot2)
# How many clusters should we have?
wss <- (nrow(mydata)-1)*sum(apply(mydata,2,var))
for (i in 2:15) wss[i] <- sum(kmeans(mydata,
centers=i)$withinss)
plot(1:15, wss, type="b", xlab="Number of Clusters",
ylab="Within groups sum of squares")
# K-Means Clustering with 5 clusters
fit <- kmeans(mydata, 3)
# Cluster Plot against 1st 2 principal components
# vary parameters for most readable graph
library(cluster)
clusplot(mydata, fit$cluster, color=TRUE, shade=TRUE,
labels=0, lines=0)
# Centroid Plot against 1st 2 discriminant functions
library(fpc)
plotcluster(mydata, fit$cluster)
x <- data.frame(UL_Haircare$Area,fit$cluster)
x$UL_Haircare.Area
y <- x %>% group_by(UL_Haircare.Area,fit.cluster) %>% summarise(n=n())
UL_Haircare <- read.csv("C:/Users/shossain/Documents/Projects/NPL/New analysis/Final UL (Hair Care).csv")
str(UL_Haircare)
x <- UL_Haircare[,c("Line_val","quantity","Area","City","Store.Name")]
install.packages("clustMixType")
library(clustMixType)
wss <- (nrow(x)-1)*sum(apply(x,2,var))
for (i in 2:15) wss[i] <- sum(kproto(x,
i)$withinss)
plot(1:15, wss, type="b", xlab="Number of Clusters",
ylab="Within groups sum of squares")
a <- lambdaest(x)
res <- kproto(x, k= 3, lambda = a)
res
res$centers
# kpres <- kproto(x, 3)
plot(Line_val~Area,x,col = res$cluster)
data <- subset(UL_Haircare,!is.na(UL_Haircare$City))
data <- cbind(data,res$cluster)
table(data$Cluster)
colnames(data)[31] <- "Cluster"
clus_1 <- subset(data,data$Cluster == 1)
clus_2 <- subset(data,data$Cluster == 2)
clus_3 <- subset(data,data$Cluster == 3)
write.csv(clus_1,"C:/Users/shossain/Documents/Projects/NPL/New analysis/clus_1.csv")
write.csv(clus_2,"C:/Users/shossain/Documents/Projects/NPL/New analysis/clus_2.csv")
write.csv(clus_3,"C:/Users/shossain/Documents/Projects/NPL/New analysis/clus_3.csv")
# UL_Haircare(wine, package='rattle')
# head(wine)
#
# str(wine)
#
# mydata <- na.omit(mydata) # listwise deletion of missing
# mydata <- scale(mydata) # standardize variables ibrary(ggplot2)
#
# # How many clusters should we have?
# wss <- (nrow(mydata)-1)*sum(apply(mydata,2,var))
# for (i in 2:15) wss[i] <- sum(kmeans(mydata,
# centers=i)$withinss)
# plot(1:15, wss, type="b", xlab="Number of Clusters",
# ylab="Within groups sum of squares")
#
# # K-Means Clustering with 5 clusters
# fit <- kmeans(mydata, 3)
#
library(cluster)
library(dplyr)
library(ggplot2)
library(readr)
install.packages("Rtsne")
library(Rtsne)
#' Load data
df <- read_csv2("C:\\Users\\shossain\\Documents\\Projects\\Clustering pilot\\bank.csv")
str(df)
#' Compute Gower distance
df <- df %>% mutate_if(is.character,as.factor)
gower_dist <- cluster::daisy(df,metric = "gower")
gower_mat <- as.matrix(gower_dist)
#' Print most similar clients
df[which(gower_mat == min(gower_mat[gower_mat != min(gower_mat)]), arr.ind = TRUE)[1, ], ]
#' Print most dissimilar clients
df[which(gower_mat == max(gower_mat[gower_mat != max(gower_mat)]), arr.ind = TRUE)[1, ], ]
sil_width <- c(NA)
for(i in 2:8){
pam_fit <- pam(gower_dist, diss = TRUE, k = i)
sil_width[i] <- pam_fit$silinfo$avg.width
}
plot(1:8, sil_width,
xlab = "Number of clusters",
ylab = "Silhouette Width")
lines(1:8, sil_width)
k <- 5
pam_fit <- pam(gower_dist, diss = TRUE, k)
pam_results <- df %>%
mutate(cluster = pam_fit$clustering) %>%
group_by(cluster) %>%
do(the_summary = summary(.))
pam_results$the_summary
tsne_obj <- Rtsne(gower_dist, is_distance = TRUE)
df_with_cluster <- cbind(df,pam_fit$clustering)
table(df_with_cluster$`pam_fit$clustering`,useNA = "ifany")
|
library(mpt)
### Name: mpt
### Title: Multinomial Processing Tree (MPT) Models
### Aliases: mpt anova.mpt coef.mpt confint.mpt predict.mpt print.mpt
### summary.mpt print.summary.mpt
### Keywords: models
### ** Examples
## Storage-retrieval model for pair clustering (Riefer & Batchelder, 1988)
data(retroact)
spec <- mptspec(
c*r,
(1 - c)*u^2,
2*(1 - c)*u*(1 - u),
c*(1 - r) + (1 - c)*(1 - u)^2,
u,
1 - u
)
mpt1 <- mpt(spec, retroact[retroact$lists == 0, ])
summary(mpt1) # parameter estimates, goodness of fit
plot(mpt1) # residuals versus predicted values
confint(mpt1) # approximate confidence intervals
plot(coef(mpt1), axes=FALSE, ylim=0:1, pch=16, xlab="",
ylab="Parameter estimate (MPT model, 95% CI)")
axis(1, 1:3, names(coef(mpt1))); axis(2)
arrows(1:3, plogis(confint(mpt1))[,1], 1:3, plogis(confint(mpt1))[,2],
.05, 90, 3)
## See ?retroact, ?proact, and ?prospecMemory for further examples.
| /data/genthat_extracted_code/mpt/examples/mpt.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 946 | r | library(mpt)
### Name: mpt
### Title: Multinomial Processing Tree (MPT) Models
### Aliases: mpt anova.mpt coef.mpt confint.mpt predict.mpt print.mpt
### summary.mpt print.summary.mpt
### Keywords: models
### ** Examples
## Storage-retrieval model for pair clustering (Riefer & Batchelder, 1988)
data(retroact)
spec <- mptspec(
c*r,
(1 - c)*u^2,
2*(1 - c)*u*(1 - u),
c*(1 - r) + (1 - c)*(1 - u)^2,
u,
1 - u
)
mpt1 <- mpt(spec, retroact[retroact$lists == 0, ])
summary(mpt1) # parameter estimates, goodness of fit
plot(mpt1) # residuals versus predicted values
confint(mpt1) # approximate confidence intervals
plot(coef(mpt1), axes=FALSE, ylim=0:1, pch=16, xlab="",
ylab="Parameter estimate (MPT model, 95% CI)")
axis(1, 1:3, names(coef(mpt1))); axis(2)
arrows(1:3, plogis(confint(mpt1))[,1], 1:3, plogis(confint(mpt1))[,2],
.05, 90, 3)
## See ?retroact, ?proact, and ?prospecMemory for further examples.
|
# SVM
# Importing the dataset
dataset = read.csv('Social_Network_Ads.csv')
dataset = dataset[3:5]
# Encoding the target feature as factor
dataset$Purchased = factor(dataset$Purchased, levels = c(0, 1))
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature Scaling
training_set[-3] = scale(training_set[-3])
test_set[-3] = scale(test_set[-3])
# Fitting classifier to the Training set
# install.packages('e1071')
library(e1071)
classifier = svm(formula = Purchased ~ ., data = training_set,kernel='linear',type = 'C-classification')
# Predicting the Test set results
y_pred = predict(classifier, newdata = test_set[-3])
# Making the Confusion Matrix
cm = table(test_set[, 3], y_pred)
# Visualising the Training set results
library(ElemStatLearn)
set = training_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = predict(classifier, newdata = grid_set)
plot(set[, -3],
main = 'Classifier (Training set)',
xlab = 'Age', ylab = 'Estimated Salary',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
# Visualising the Test set results
library(ElemStatLearn)
set = test_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = predict(classifier, newdata = grid_set)
plot(set[, -3], main = 'Classifier (Test set)',
xlab = 'Age', ylab = 'Estimated Salary',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3')) | /Part 3 - Classification/Section 16 - Support Vector Machine (SVM)/svm_working.R | no_license | nareshr8/Machine-Learning-A-Z-Templates | R | false | false | 2,295 | r | # SVM
# Importing the dataset
dataset = read.csv('Social_Network_Ads.csv')
dataset = dataset[3:5]
# Encoding the target feature as factor
dataset$Purchased = factor(dataset$Purchased, levels = c(0, 1))
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature Scaling
training_set[-3] = scale(training_set[-3])
test_set[-3] = scale(test_set[-3])
# Fitting classifier to the Training set
# install.packages('e1071')
library(e1071)
classifier = svm(formula = Purchased ~ ., data = training_set,kernel='linear',type = 'C-classification')
# Predicting the Test set results
y_pred = predict(classifier, newdata = test_set[-3])
# Making the Confusion Matrix
cm = table(test_set[, 3], y_pred)
# Visualising the Training set results
library(ElemStatLearn)
set = training_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = predict(classifier, newdata = grid_set)
plot(set[, -3],
main = 'Classifier (Training set)',
xlab = 'Age', ylab = 'Estimated Salary',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
# Visualising the Test set results
library(ElemStatLearn)
set = test_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = predict(classifier, newdata = grid_set)
plot(set[, -3], main = 'Classifier (Test set)',
xlab = 'Age', ylab = 'Estimated Salary',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3')) |
ARI.F <- function(VC,U, t_norm = c("minimum","product"))
{
if (missing(VC))
stop("The hard partitions VC must be given")
if (missing(U))
stop("The fuzzy partitions U must be given")
if (is.null(VC))
stop("The hard partitions VC is empty")
if (is.null(U))
stop("The fuzzy partitions U is empty")
VC <- as.numeric(VC)
U <- as.matrix(U)
partHard=matrix(0,nrow=length(VC),ncol=length(unique(VC)))
for (i in 1:length(VC))
{
partHard[i, VC[i]]=1
}
#partHard = as.matrix(partHard)
U = as.matrix(U)
# if(any(dim(partHard) != dim(partFuzzy)))
# stop("partHard and partFuzzy must be matrix with the same dimension")
t_norm <- match.arg(t_norm, choices = eval(formals(ARI.F)$t_norm))
out = partition_comp(HardClust = partHard,Fuzzy = U, t_norm = t_norm)
return(out$adjRand.F)
}
| /R/ARI.F.R | no_license | cran/fclust | R | false | false | 832 | r | ARI.F <- function(VC,U, t_norm = c("minimum","product"))
{
if (missing(VC))
stop("The hard partitions VC must be given")
if (missing(U))
stop("The fuzzy partitions U must be given")
if (is.null(VC))
stop("The hard partitions VC is empty")
if (is.null(U))
stop("The fuzzy partitions U is empty")
VC <- as.numeric(VC)
U <- as.matrix(U)
partHard=matrix(0,nrow=length(VC),ncol=length(unique(VC)))
for (i in 1:length(VC))
{
partHard[i, VC[i]]=1
}
#partHard = as.matrix(partHard)
U = as.matrix(U)
# if(any(dim(partHard) != dim(partFuzzy)))
# stop("partHard and partFuzzy must be matrix with the same dimension")
t_norm <- match.arg(t_norm, choices = eval(formals(ARI.F)$t_norm))
out = partition_comp(HardClust = partHard,Fuzzy = U, t_norm = t_norm)
return(out$adjRand.F)
}
|
# Input: number of features, input_data,
# Output: first_hash_table
source('get_pattern_table.R')
source('get_contingency_table.R')
stratification <- function(n_features, input_data) {
# first hash table
first_hash_table = get_pattern_table(n_features)
first_hash_table['n11'] = 0
first_hash_table['n12'] = 0
first_hash_table['n21'] = 0
first_hash_table['n22'] = 0
first_hash_table['pi_1'] = 0
first_hash_table['pi_2'] = 0
first_hash_table['phi'] = 0
# add pattern column in input_data data frame
input_data$pattern = apply(input_data[,1:n_features], 1, function(x) {return(paste(x, collapse = ""))})
# loop over first hash table to find counts
for (i in 1:nrow(first_hash_table)) {
current_row = first_hash_table[i,1:n_features]
current_row_p = paste(current_row, collapse = "")
# get correct subset
input_data_subset = subset(input_data, input_data$pattern == current_row_p)
# calculate contingency table
n11_n12_n21_n22 = get_contingency_table(input_data_subset, treatment = 'W', outcome = 'Y')
first_hash_table[i,c('n11','n12','n21','n22')] = n11_n12_n21_n22
}
# key step: remove unused rows
if (length(which(first_hash_table$n11+first_hash_table$n12+first_hash_table$n21+first_hash_table$n22==0))) {
first_hash_table = first_hash_table[-which(first_hash_table$n11+first_hash_table$n12+first_hash_table$n21+first_hash_table$n22==0),]
}
return(first_hash_table)
}
| /stratification.R | no_license | JuliusZSS/DEEP_SZ | R | false | false | 1,465 | r | # Input: number of features, input_data,
# Output: first_hash_table
source('get_pattern_table.R')
source('get_contingency_table.R')
stratification <- function(n_features, input_data) {
# first hash table
first_hash_table = get_pattern_table(n_features)
first_hash_table['n11'] = 0
first_hash_table['n12'] = 0
first_hash_table['n21'] = 0
first_hash_table['n22'] = 0
first_hash_table['pi_1'] = 0
first_hash_table['pi_2'] = 0
first_hash_table['phi'] = 0
# add pattern column in input_data data frame
input_data$pattern = apply(input_data[,1:n_features], 1, function(x) {return(paste(x, collapse = ""))})
# loop over first hash table to find counts
for (i in 1:nrow(first_hash_table)) {
current_row = first_hash_table[i,1:n_features]
current_row_p = paste(current_row, collapse = "")
# get correct subset
input_data_subset = subset(input_data, input_data$pattern == current_row_p)
# calculate contingency table
n11_n12_n21_n22 = get_contingency_table(input_data_subset, treatment = 'W', outcome = 'Y')
first_hash_table[i,c('n11','n12','n21','n22')] = n11_n12_n21_n22
}
# key step: remove unused rows
if (length(which(first_hash_table$n11+first_hash_table$n12+first_hash_table$n21+first_hash_table$n22==0))) {
first_hash_table = first_hash_table[-which(first_hash_table$n11+first_hash_table$n12+first_hash_table$n21+first_hash_table$n22==0),]
}
return(first_hash_table)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cpa.count.R
\name{cpa.count}
\alias{cpa.count}
\title{Analytic power calculations for parallel arm cluster-randomized trials with count outcomes}
\usage{
cpa.count(
alpha = 0.05,
power = 0.8,
nclusters = NA,
nsubjects = NA,
r1 = NA,
r2 = NA,
CVB = NA,
r1inc = TRUE,
tol = .Machine$double.eps^0.25
)
}
\arguments{
\item{alpha}{The level of significance of the test, the probability of a
Type I error.}
\item{power}{The power of the test, 1 minus the probability of a Type II
error.}
\item{nclusters}{The number of clusters per condition. It must be greater than 1}
\item{nsubjects}{The number of units of person-time of observation per cluster}
\item{r1}{The mean event rate per unit time in one of the conditions}
\item{r2}{The mean event rate per unit time in the other condition}
\item{CVB}{The between-cluster coefficient of variation}
\item{r1inc}{Logical indicating if r1 is expected to be greater than r2. This is
only important to specify if one of r1 or r2 is NA.}
\item{tol}{Numerical tolerance used in root finding. The default provides
at least four significant digits.}
}
\value{
The computed value of the NA parameter (among \code{alpha}, \code{power}, \code{nclusters},
\code{nsubjects},
\code{r1}, \code{r2} and \code{CVB}) needed to satisfy the power and
sample size equation.
}
\description{
\loadmathjax
Compute the power, number of clusters needed, number of subjects per cluster
needed, or other key parameters for a simple parallel cluster randomized
trial with a count outcome.
Exactly one of \code{alpha}, \code{power}, \code{nclusters}, \code{nsubjects},
\code{r1}, \code{r2}, and \code{CVB} must be passed as \code{NA}.
Note that \code{alpha} and \code{power} have non-\code{NA}
defaults, so if those are the parameters of interest they must be
explicitly passed as \code{NA}.
}
\section{Authors}{
Jonathan Moyer (\email{jon.moyer@gmail.com}), Ken Kleinman (\email{ken.kleinman@gmail.com})
}
\section{Note}{
This function implements the approach of Hayes and Bennet (1999). An estimate for the
intracluster correlation coefficient (ICC) is used to calculate a design effect that
accounts for variance inflation due to clustering.
The coefficient of variation \code{CVB} is the variance of the cluster rates divided by the
mean of the cluster rates.
The CVB refers neither to
any natural parameter of a data generating model nor to any function of its parameters.
For this reason we do not offer the user a option to input
the variance between the cluster means. If you prefer to use that input, we suggest using the
cps.count function.
This function was inspired by work from Stephane Champely (pwr.t.test) and
Peter Dalgaard (power.t.test). As with those functions, 'uniroot' is used to
solve power equation for unknowns, so you may see
errors from it, notably about inability to bracket the root when
invalid arguments are given. This generally means that no solution exists for which the
omitted parameter and the supplied parameters fulfill the equation. In particular, the desired
power may not be achievable with any number of subjects or clusters.
}
\section{Testing details}{
This function has been verified against reference values from
\code{CRTsize::n4incidence}, and \code{clusterPower::cps.count}.
}
\examples{
# Find the number of clusters per condition needed for a trial with alpha = 0.05,
# power = 0.80, 10 person-years per cluster, rate in condition 1 of 0.10
# and condition 2 of 0.20, and CVB = 0.10.
cpa.count(nsubjects=10, r1=0.10, r2=0.20, CVB=0.10)
# The result, showimg nclusters of greater than 24, suggests 25 clusters per
# condition should be used.
# Find the largest CVB compatible with 80\% power when there are 25 clusters, 10
# subject-units of time per cluster, and a rate of 0.1 and 0.2 in each condition.
cpa.count(nsubjects=10, nclusters= 25,r1=0.10, r2=0.20, CVB=NA)
# Results show that CVB as high as 0.107 can still yield power this high.
}
\references{
Donner A, Klar N. Design and Analysis of Cluster Randomization Trials in Health Research. Chichester, UK; 2009.
Hayes JR, Bennett S. Simple sample size calculation for cluster-randomized trials. International Journal of Epidemiology 1999; 28:319-326
Hayes JR, Moulton LH. Cluster Randomized Trials. Boca Raton, FL: CRC Press; 2009.
}
| /man/cpa.count.Rd | no_license | Kenkleinman/clusterPower | R | false | true | 4,476 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cpa.count.R
\name{cpa.count}
\alias{cpa.count}
\title{Analytic power calculations for parallel arm cluster-randomized trials with count outcomes}
\usage{
cpa.count(
alpha = 0.05,
power = 0.8,
nclusters = NA,
nsubjects = NA,
r1 = NA,
r2 = NA,
CVB = NA,
r1inc = TRUE,
tol = .Machine$double.eps^0.25
)
}
\arguments{
\item{alpha}{The level of significance of the test, the probability of a
Type I error.}
\item{power}{The power of the test, 1 minus the probability of a Type II
error.}
\item{nclusters}{The number of clusters per condition. It must be greater than 1}
\item{nsubjects}{The number of units of person-time of observation per cluster}
\item{r1}{The mean event rate per unit time in one of the conditions}
\item{r2}{The mean event rate per unit time in the other condition}
\item{CVB}{The between-cluster coefficient of variation}
\item{r1inc}{Logical indicating if r1 is expected to be greater than r2. This is
only important to specify if one of r1 or r2 is NA.}
\item{tol}{Numerical tolerance used in root finding. The default provides
at least four significant digits.}
}
\value{
The computed value of the NA parameter (among \code{alpha}, \code{power}, \code{nclusters},
\code{nsubjects},
\code{r1}, \code{r2} and \code{CVB}) needed to satisfy the power and
sample size equation.
}
\description{
\loadmathjax
Compute the power, number of clusters needed, number of subjects per cluster
needed, or other key parameters for a simple parallel cluster randomized
trial with a count outcome.
Exactly one of \code{alpha}, \code{power}, \code{nclusters}, \code{nsubjects},
\code{r1}, \code{r2}, and \code{CVB} must be passed as \code{NA}.
Note that \code{alpha} and \code{power} have non-\code{NA}
defaults, so if those are the parameters of interest they must be
explicitly passed as \code{NA}.
}
\section{Authors}{
Jonathan Moyer (\email{jon.moyer@gmail.com}), Ken Kleinman (\email{ken.kleinman@gmail.com})
}
\section{Note}{
This function implements the approach of Hayes and Bennet (1999). An estimate for the
intracluster correlation coefficient (ICC) is used to calculate a design effect that
accounts for variance inflation due to clustering.
The coefficient of variation \code{CVB} is the variance of the cluster rates divided by the
mean of the cluster rates.
The CVB refers neither to
any natural parameter of a data generating model nor to any function of its parameters.
For this reason we do not offer the user a option to input
the variance between the cluster means. If you prefer to use that input, we suggest using the
cps.count function.
This function was inspired by work from Stephane Champely (pwr.t.test) and
Peter Dalgaard (power.t.test). As with those functions, 'uniroot' is used to
solve power equation for unknowns, so you may see
errors from it, notably about inability to bracket the root when
invalid arguments are given. This generally means that no solution exists for which the
omitted parameter and the supplied parameters fulfill the equation. In particular, the desired
power may not be achievable with any number of subjects or clusters.
}
\section{Testing details}{
This function has been verified against reference values from
\code{CRTsize::n4incidence}, and \code{clusterPower::cps.count}.
}
\examples{
# Find the number of clusters per condition needed for a trial with alpha = 0.05,
# power = 0.80, 10 person-years per cluster, rate in condition 1 of 0.10
# and condition 2 of 0.20, and CVB = 0.10.
cpa.count(nsubjects=10, r1=0.10, r2=0.20, CVB=0.10)
# The result, showimg nclusters of greater than 24, suggests 25 clusters per
# condition should be used.
# Find the largest CVB compatible with 80\% power when there are 25 clusters, 10
# subject-units of time per cluster, and a rate of 0.1 and 0.2 in each condition.
cpa.count(nsubjects=10, nclusters= 25,r1=0.10, r2=0.20, CVB=NA)
# Results show that CVB as high as 0.107 can still yield power this high.
}
\references{
Donner A, Klar N. Design and Analysis of Cluster Randomization Trials in Health Research. Chichester, UK; 2009.
Hayes JR, Bennett S. Simple sample size calculation for cluster-randomized trials. International Journal of Epidemiology 1999; 28:319-326
Hayes JR, Moulton LH. Cluster Randomized Trials. Boca Raton, FL: CRC Press; 2009.
}
|
\name{sol}
\alias{sol}
\alias{eta}
\alias{minkowski}
\alias{lightspeed}
\alias{celerity}
\alias{ptm}
\title{Speed of light and Minkowski metric}
\description{Getting and setting the speed of light}
\usage{
sol(c)
eta(downstairs=TRUE)
ptm(to_natural=TRUE, change_time=TRUE)
}
\arguments{
\item{c}{Scalar, speed of light. If missing, return the speed of
light}
\item{downstairs}{Boolean, with default \code{TRUE} meaning to return
the covariant metric tensor \eqn{g_{ij}}{g_ij} with two downstairs
indices, and \code{FALSE} meaning to return the contravariant
version \eqn{g^{ij}}{g^ij} with two upstairs indices}
\item{to_natural,change_time}{Boolean, specifying the
nature of the passive transform matrix}
}
\details{
In the context of an R package, the symbol \dQuote{c} presents
particular problems. In the \pkg{gyrogroup} package, the speed of
light is denoted \dQuote{sol}, for \sQuote{speed of light}.
The speed of light is a global variable, governed by
\code{options("c")}. If \code{NULL}, define \code{c=1}.
Function \code{eta()} returns the Minkowski flat-space metric
\deqn{\left(\begin{array}{cccc}
-c^2& 0 & 0 & 0\\
0 & 1 & 0 & 0\\
0 & 0 & 1 & 0\\
0 & 0 & 0 & 1
\end{array}
\right)
}{
omitted}
Note that the top-left element of \code{eta()} is \eqn{-c^2}, not
\eqn{-1}.
Function \code{ptm()} returns a passive transformation matrix that
converts displacement vectors to natural units (\code{to_natural=TRUE})
or from natural units (\code{to_natural=FALSE}). Argument
\code{change_time} specifies whether to change the unit of time (if
\code{TRUE}) or the unit of length (if \code{FALSE}).
}
\author{Robin K. S. Hankin}
\note{
Typing \dQuote{\code{sol(299792458)}} is a lot easier than typing
\dQuote{\code{options("c"=299792458)}}, which is why the package uses
the idiom that it does.
}
\examples{
sol() # returns current speed of light
sol(299792458) # use SI units
sol() # speed of light now SI value
eta() # note [t,t] term
u <- as.3vel(c(100,200,300)) # fast terrestrial speed, but not relativistic
boost(u) # boost matrix practically Galilean
is.consistent.boost(boost(u)) # should be TRUE
sol(1) # revert to relativisitic units
}
| /man/sol.Rd | no_license | muizzk/lorentz | R | false | false | 2,396 | rd | \name{sol}
\alias{sol}
\alias{eta}
\alias{minkowski}
\alias{lightspeed}
\alias{celerity}
\alias{ptm}
\title{Speed of light and Minkowski metric}
\description{Getting and setting the speed of light}
\usage{
sol(c)
eta(downstairs=TRUE)
ptm(to_natural=TRUE, change_time=TRUE)
}
\arguments{
\item{c}{Scalar, speed of light. If missing, return the speed of
light}
\item{downstairs}{Boolean, with default \code{TRUE} meaning to return
the covariant metric tensor \eqn{g_{ij}}{g_ij} with two downstairs
indices, and \code{FALSE} meaning to return the contravariant
version \eqn{g^{ij}}{g^ij} with two upstairs indices}
\item{to_natural,change_time}{Boolean, specifying the
nature of the passive transform matrix}
}
\details{
In the context of an R package, the symbol \dQuote{c} presents
particular problems. In the \pkg{gyrogroup} package, the speed of
light is denoted \dQuote{sol}, for \sQuote{speed of light}.
The speed of light is a global variable, governed by
\code{options("c")}. If \code{NULL}, define \code{c=1}.
Function \code{eta()} returns the Minkowski flat-space metric
\deqn{\left(\begin{array}{cccc}
-c^2& 0 & 0 & 0\\
0 & 1 & 0 & 0\\
0 & 0 & 1 & 0\\
0 & 0 & 0 & 1
\end{array}
\right)
}{
omitted}
Note that the top-left element of \code{eta()} is \eqn{-c^2}, not
\eqn{-1}.
Function \code{ptm()} returns a passive transformation matrix that
converts displacement vectors to natural units (\code{to_natural=TRUE})
or from natural units (\code{to_natural=FALSE}). Argument
\code{change_time} specifies whether to change the unit of time (if
\code{TRUE}) or the unit of length (if \code{FALSE}).
}
\author{Robin K. S. Hankin}
\note{
Typing \dQuote{\code{sol(299792458)}} is a lot easier than typing
\dQuote{\code{options("c"=299792458)}}, which is why the package uses
the idiom that it does.
}
\examples{
sol() # returns current speed of light
sol(299792458) # use SI units
sol() # speed of light now SI value
eta() # note [t,t] term
u <- as.3vel(c(100,200,300)) # fast terrestrial speed, but not relativistic
boost(u) # boost matrix practically Galilean
is.consistent.boost(boost(u)) # should be TRUE
sol(1) # revert to relativisitic units
}
|
context("test-get_nltt_error_function")
test_that("use", {
phylogeny <- ape::read.tree(text = "((A:1.5, B:1.5):1.5, C:3.0);")
tree_1 <- ape::read.tree(text = "((A:1.0, B:1.0):2.0, C:3.0);")
tree_2 <- ape::read.tree(text = "((A:2.0, B:2.0):1.0, C:3.0);")
lowest_error <- get_nltt_error_function()(phylogeny, c(phylogeny))
error_1 <- get_nltt_error_function()(phylogeny, c(tree_1))
error_2 <- get_nltt_error_function()(phylogeny, c(tree_2))
expect_true(lowest_error < error_1)
expect_true(lowest_error < error_2)
})
| /tests/testthat/test-get_nltt_error_function.R | no_license | thijsjanzen/pirouette | R | false | false | 534 | r | context("test-get_nltt_error_function")
test_that("use", {
phylogeny <- ape::read.tree(text = "((A:1.5, B:1.5):1.5, C:3.0);")
tree_1 <- ape::read.tree(text = "((A:1.0, B:1.0):2.0, C:3.0);")
tree_2 <- ape::read.tree(text = "((A:2.0, B:2.0):1.0, C:3.0);")
lowest_error <- get_nltt_error_function()(phylogeny, c(phylogeny))
error_1 <- get_nltt_error_function()(phylogeny, c(tree_1))
error_2 <- get_nltt_error_function()(phylogeny, c(tree_2))
expect_true(lowest_error < error_1)
expect_true(lowest_error < error_2)
})
|
library(dictionary) # for the "vn_admin1_year"
context("data provinces name")
test_that("correct names of provinces", {
province_name_ye <- function(df, ye) {
df <- get(df)
vect <- df[which(df$year == ye), "province", drop = TRUE]
sort(unique(vect))
}
expect_equal(
province_name_ye("chickenpox", "1980"),
dictionary::vn_admin1_year$`1979-1990`)
expect_equal(
province_name_ye("chickenpox", "1990"),
dictionary::vn_admin1_year$`1990-1991`)
expect_equal(
province_name_ye("chickenpox", "1991"),
dictionary::vn_admin1_year$`1991-1992`)
expect_equal(
province_name_ye("chickenpox", "1992"),
dictionary::vn_admin1_year$`1992-1997`)
expect_equal(
province_name_ye("chickenpox", "1997"),
dictionary::vn_admin1_year$`1997-2004`)
expect_equal(
province_name_ye("chickenpox", "2004"),
dictionary::vn_admin1_year$`2004-2008`)
expect_equal(
province_name_ye("chickenpox", "2008"),
dictionary::vn_admin1_year$`2008-2020`)
})
| /tests/testthat/test_province_name.R | no_license | FelipeJColon/gdpm | R | false | false | 1,008 | r | library(dictionary) # for the "vn_admin1_year"
context("data provinces name")
test_that("correct names of provinces", {
province_name_ye <- function(df, ye) {
df <- get(df)
vect <- df[which(df$year == ye), "province", drop = TRUE]
sort(unique(vect))
}
expect_equal(
province_name_ye("chickenpox", "1980"),
dictionary::vn_admin1_year$`1979-1990`)
expect_equal(
province_name_ye("chickenpox", "1990"),
dictionary::vn_admin1_year$`1990-1991`)
expect_equal(
province_name_ye("chickenpox", "1991"),
dictionary::vn_admin1_year$`1991-1992`)
expect_equal(
province_name_ye("chickenpox", "1992"),
dictionary::vn_admin1_year$`1992-1997`)
expect_equal(
province_name_ye("chickenpox", "1997"),
dictionary::vn_admin1_year$`1997-2004`)
expect_equal(
province_name_ye("chickenpox", "2004"),
dictionary::vn_admin1_year$`2004-2008`)
expect_equal(
province_name_ye("chickenpox", "2008"),
dictionary::vn_admin1_year$`2008-2020`)
})
|
###########Read in libraries###########
library(neonstore)
library(dplyr)
library(tidyr)
library(rgbif)
library(ggplot2)
###########Download data###########
download_loc <- paste0(neon_dir(), "/")
cover_product_id <- "DP1.10058.001"
if(!dir.exists(paste0(download_loc, cover_product_id))){
neon_download(product = cover_product_id, type = "expanded")
}
###########Clean up data###########
all_sites_df <- neon_read(table = "div_1m2Data-basic",
product = cover_product_id,
start_date = "2013-03-01", end_date = "2020-01-30")
curated_df <- all_sites_df %>%
select(uid, siteID, decimalLatitude, decimalLongitude,
plotID, subplotID, endDate, scientificName, taxonRank,
percentCover) %>%
filter(!is.na(scientificName),
taxonRank == "species",
decimalLongitude > -140,
decimalLatitude > 25 & decimalLatitude < 50) %>%
select(-taxonRank) %>%
unite(sitename, c("siteID", "plotID", "subplotID"), sep = "_") %>%
rename(lat = decimalLatitude,
lon = decimalLongitude,
date = endDate,
species = scientificName,
canopy_cover = percentCover) %>%
relocate(species, lat, lon, sitename, date, canopy_cover)
###########QA/QC data###########
# individual percent cover records
range(curated_df$canopy_cover, na.rm = TRUE)
# percent cover records per subplot
curated_df %>%
group_by(sitename, date) %>%
summarise(total_percent_cover = sum(canopy_cover))
# species distributions compared against GBIF source
species_ordered <- curated_df %>%
group_by(species) %>%
summarize(count = n()) %>%
arrange(desc(count))
# Cross-check against GBIF: pull up to 1000 US occurrence records for each
# of the 10 most frequently recorded species.
gbif_occs <- c()
for (species in species_ordered$species[1:10]) {
  species_occs <- occ_search(scientificName = species,
                             country = "US",
                             limit = 1000)$data
  # keep only the columns needed for the comparison map below
  species_occs <- species_occs %>%
    select(scientificName, decimalLatitude, decimalLongitude)
  # NOTE(review): growing a data frame with rbind() in a loop is O(n^2);
  # fine for 10 species, but collect into a list + bind_rows() if this grows
  gbif_occs <- rbind(gbif_occs, species_occs)
}
gbif_occs_clean <- gbif_occs %>%
rename(species = scientificName,
lat = decimalLatitude,
lon = decimalLongitude) %>%
filter(species %in% species_ordered$species[1:10]) %>%
mutate(source = "gbif")
neon_occs <- curated_df %>%
select(species, lon, lat) %>%
filter(species %in% unique(gbif_occs_clean$species)) %>%
mutate(source = "neon")
occs <- rbind(gbif_occs_clean, neon_occs)
ggplot() +
geom_polygon(data = map_data("state"), aes(x = long, y = lat, group = group),
fill = "white", color = "black") +
geom_point(data = occs, aes(x = lon, y = lat, color = source)) +
scale_color_manual(values = c("grey", "red")) +
facet_wrap(~species)
# check locations
locations <- c(paste(curated_df$lon, curated_df$lat))
head(sort(table(locations)))
###########Save data###########
write.csv(curated_df, file = "plant_cover/plant_cover.csv", row.names = FALSE)
| /plant_cover/curate_data.R | no_license | KristinaRiemer/neon-datasets | R | false | false | 2,967 | r | ###########Read in libraries###########
library(neonstore)
library(dplyr)
library(tidyr)
library(rgbif)
library(ggplot2)
###########Download data###########
download_loc <- paste0(neon_dir(), "/")
cover_product_id <- "DP1.10058.001"
if(!dir.exists(paste0(download_loc, cover_product_id))){
neon_download(product = cover_product_id, type = "expanded")
}
###########Clean up data###########
all_sites_df <- neon_read(table = "div_1m2Data-basic",
product = cover_product_id,
start_date = "2013-03-01", end_date = "2020-01-30")
curated_df <- all_sites_df %>%
select(uid, siteID, decimalLatitude, decimalLongitude,
plotID, subplotID, endDate, scientificName, taxonRank,
percentCover) %>%
filter(!is.na(scientificName),
taxonRank == "species",
decimalLongitude > -140,
decimalLatitude > 25 & decimalLatitude < 50) %>%
select(-taxonRank) %>%
unite(sitename, c("siteID", "plotID", "subplotID"), sep = "_") %>%
rename(lat = decimalLatitude,
lon = decimalLongitude,
date = endDate,
species = scientificName,
canopy_cover = percentCover) %>%
relocate(species, lat, lon, sitename, date, canopy_cover)
###########QA/QC data###########
# individual percent cover records
range(curated_df$canopy_cover, na.rm = TRUE)
# percent cover records per subplot
curated_df %>%
group_by(sitename, date) %>%
summarise(total_percent_cover = sum(canopy_cover))
# species distributions compared against GBIF source
species_ordered <- curated_df %>%
group_by(species) %>%
summarize(count = n()) %>%
arrange(desc(count))
gbif_occs <- c()
for (species in species_ordered$species[1:10]) {
species_occs <- occ_search(scientificName = species,
country = "US",
limit = 1000)$data
species_occs <- species_occs %>%
select(scientificName, decimalLatitude, decimalLongitude)
gbif_occs <- rbind(gbif_occs, species_occs)
}
gbif_occs_clean <- gbif_occs %>%
rename(species = scientificName,
lat = decimalLatitude,
lon = decimalLongitude) %>%
filter(species %in% species_ordered$species[1:10]) %>%
mutate(source = "gbif")
neon_occs <- curated_df %>%
select(species, lon, lat) %>%
filter(species %in% unique(gbif_occs_clean$species)) %>%
mutate(source = "neon")
occs <- rbind(gbif_occs_clean, neon_occs)
ggplot() +
geom_polygon(data = map_data("state"), aes(x = long, y = lat, group = group),
fill = "white", color = "black") +
geom_point(data = occs, aes(x = lon, y = lat, color = source)) +
scale_color_manual(values = c("grey", "red")) +
facet_wrap(~species)
# check locations
locations <- c(paste(curated_df$lon, curated_df$lat))
head(sort(table(locations)))
###########Save data###########
write.csv(curated_df, file = "plant_cover/plant_cover.csv", row.names = FALSE)
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## -----------------------------------------------------------------------------
library(rhub)
## ----platforms-info-----------------------------------------------------------
knitr::kable(platforms(), row.names = FALSE)
| /inst/doc/rhub.R | no_license | cran/rhub | R | false | false | 366 | r | ## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## -----------------------------------------------------------------------------
library(rhub)
## ----platforms-info-----------------------------------------------------------
knitr::kable(platforms(), row.names = FALSE)
|
#' Initiate a simulation run
#'
#' This is intended to be a drop-in replacement for [swmmr::run_swmm()].
#'
#' @param inp Path to an input file.
#' @param rpt Path to a report file (that does not yet exist or will be overwritten).
#' @param out Path to an output file (that does not yet exist or will be overwritten).
#' @param overwrite Use `overwrite = TRUE` overwrite `rpt` and `out`.
#' @param quiet Silence default SWWM output.
#'
#' @export
#'
#' @examples
#' swmm_run(swmm_example_file("Example1-Pre.inp"))
#'
#' @importFrom assertthat is.scalar assert_that
#'
swmm_run <- function(inp, rpt = NULL, out = NULL, overwrite = FALSE, quiet = TRUE) {
  # coerce anything non-TRUE (NULL, NA, non-logical) to FALSE
  overwrite <- isTRUE(overwrite)
  # inp must be a single existing file; rpt/out may be NULL (filled in below)
  assert_that(is.character(inp), is.scalar(inp), file.exists(inp))
  assert_that(is.null(rpt) || (is.character(rpt) && is.scalar(rpt)))
  assert_that(is.null(out) || (is.character(out) && is.scalar(out)))
  if(is.null(rpt)) {
    rpt <- tempfile(fileext = ".rpt")
  }
  if(is.null(out)) {
    out <- tempfile(fileext = ".out")
  }
  # refuse to clobber pre-existing files unless the caller opted in
  if(!overwrite && file.exists(rpt)) {
    stop("File \"", rpt, "\" already exists. Use overwrite = TRUE to overwrite.")
  }
  if(!overwrite && file.exists(out)) {
    stop("File \"", out, "\" already exists. Use overwrite = TRUE to overwrite.")
  }
  # create paths here that both R and C can understand
  inp <- sanitize_c_path(inp)
  rpt <- sanitize_c_path(rpt)
  out <- sanitize_c_path(out)
  # on Windows, files must exist before SWMM is called;
  # on other platforms pre-creating them is harmless
  if(!file.exists(rpt)) {
    if(!file.create(rpt, showWarnings = FALSE)) stop("Could not create report file '", rpt, "'")
  }
  if(!file.exists(out)) {
    if(!file.create(out, showWarnings = FALSE)) stop("Could not create output file '", out, "'")
  }
  if(quiet) {
    # redirect console output from the SWMM engine into a throwaway file;
    # on.exit() restores the sink even if swmmRun() errors
    output_file <- tempfile()
    sink(output_file)
    on.exit({sink(); unlink(output_file)})
  }
  # run SWMM (compiled entry point, defined elsewhere in this package)
  output <- swmmRun(inp, rpt, out)
  # convert paths back to R-friendly (forward-slash) paths
  file_items <- c("inp", "rpt", "out")
  output[file_items] <- lapply(output[file_items], sanitize_path)
  # make sure there's a newline before returning to R
  if(!quiet) cat("\n")
  # surface a non-zero engine error code as an R error with its message
  if(output$last_error != 0) {
    stop("SWMM: ", extract_error_message(output), call. = FALSE)
  }
  output
}
# on Windows, the C functions need path to have
# backslashes instead of forward slashses
# Convert `path` to the platform-native form (backslashes on Windows) so the
# C side of SWMM can open it. The path need not exist yet.
sanitize_c_path <- function(path) {
  native_path <- normalizePath(path, mustWork = FALSE)
  native_path
}
# Convert `path` back to an R-friendly form: normalize it, then replace any
# backslashes with forward slashes. The path need not exist.
sanitize_path <- function(path) {
  normalized <- normalizePath(path, mustWork = FALSE)
  gsub("\\\\", "/", normalized)
}
# An input path is valid when it is a well-formed scalar path AND the file
# already exists on disk.
is_valid_input_file <- function(path) {
  if (!is_valid_file(path)) {
    return(FALSE)
  }
  file.exists(path)
}
# An output path is valid when it is NULL (auto-generated later), an existing
# file the caller agreed to overwrite, or any well-formed scalar path.
is_valid_output_file <- function(path, overwrite = FALSE) {
  if (is.null(path)) {
    return(TRUE)
  }
  (overwrite && is_valid_input_file(path)) || is_valid_file(path)
}
# A well-formed file path: a single, non-NA character value.
is_valid_file <- function(path) {
  is_scalar_chr <- is.character(path) && length(path) == 1
  is_scalar_chr && !is.na(path)
}
# Build a human-readable error message for a failed run.
# Preferred source is the report file: collect every line containing
# "ERROR", strip the "ERROR nnn:" prefix, and join the remainders.
extract_error_message <- function(output) {
  if(file.exists(output$rpt)) {
    rpt_lines <- readr::read_lines(output$rpt)
    error_lines <- rpt_lines[grepl("ERROR", rpt_lines)]
    return(paste(trimws(gsub("ERROR [0-9]*:?", "", error_lines)), collapse = "\n"))
  }
  # this line is probably never reached but is a good defensive fallback:
  # look the numeric error code up in the package-level error_messages table
  error_messages$message[match(output$last_error, error_messages$code)]
}
| /R/swmm_run.R | no_license | paleolimbot/swmm | R | false | false | 3,340 | r |
#' Initiate a simulation run
#'
#' This is intended to be a drop-in replacement for [swmmr::run_swmm()].
#'
#' @param inp Path to an input file.
#' @param rpt Path to a report file (that does not yet exist or will be overwritten).
#' @param out Path to an output file (that does not yet exist or will be overwritten).
#' @param overwrite Use `overwrite = TRUE` overwrite `rpt` and `out`.
#' @param quiet Silence default SWWM output.
#'
#' @export
#'
#' @examples
#' swmm_run(swmm_example_file("Example1-Pre.inp"))
#'
#' @importFrom assertthat is.scalar assert_that
#'
swmm_run <- function(inp, rpt = NULL, out = NULL, overwrite = FALSE, quiet = TRUE) {
overwrite <- isTRUE(overwrite)
assert_that(is.character(inp), is.scalar(inp), file.exists(inp))
assert_that(is.null(rpt) || (is.character(rpt) && is.scalar(rpt)))
assert_that(is.null(out) || (is.character(out) && is.scalar(out)))
if(is.null(rpt)) {
rpt <- tempfile(fileext = ".rpt")
}
if(is.null(out)) {
out <- tempfile(fileext = ".out")
}
if(!overwrite && file.exists(rpt)) {
stop("File \"", rpt, "\" already exists. Use overwrite = TRUE to overwrite.")
}
if(!overwrite && file.exists(out)) {
stop("File \"", out, "\" already exists. Use overwrite = TRUE to overwrite.")
}
# create paths here that both R and C can understand
inp <- sanitize_c_path(inp)
rpt <- sanitize_c_path(rpt)
out <- sanitize_c_path(out)
# on Windows, files must exist before SWMM is called
# on not Windows, this doesn't cause a failure
if(!file.exists(rpt)) {
if(!file.create(rpt, showWarnings = FALSE)) stop("Could not create report file '", rpt, "'")
}
if(!file.exists(out)) {
if(!file.create(out, showWarnings = FALSE)) stop("Could not create output file '", out, "'")
}
if(quiet) {
output_file <- tempfile()
sink(output_file)
on.exit({sink(); unlink(output_file)})
}
# runn SWMM
output <- swmmRun(inp, rpt, out)
# convert paths back to R-friendly paths
file_items <- c("inp", "rpt", "out")
output[file_items] <- lapply(output[file_items], sanitize_path)
# make sure there's a newline before returning to R
if(!quiet) cat("\n")
# if there's an error, stop() with the message
if(output$last_error != 0) {
stop("SWMM: ", extract_error_message(output), call. = FALSE)
}
output
}
# on Windows, the C functions need path to have
# backslashes instead of forward slashses
sanitize_c_path <- function(path) {
normalizePath(path, mustWork = FALSE)
}
sanitize_path <- function(path) {
gsub("\\\\", "/", normalizePath(path, mustWork = FALSE))
}
is_valid_input_file <- function(path) {
is_valid_file(path) && file.exists(path)
}
is_valid_output_file <- function(path, overwrite = FALSE) {
is.null(path) || (overwrite && is_valid_input_file(path)) || is_valid_file(path)
}
is_valid_file <- function(path) {
is.character(path) && (length(path) == 1) && !is.na(path)
}
extract_error_message <- function(output) {
if(file.exists(output$rpt)) {
rpt_lines <- readr::read_lines(output$rpt)
error_lines <- rpt_lines[grepl("ERROR", rpt_lines)]
return(paste(trimws(gsub("ERROR [0-9]*:?", "", error_lines)), collapse = "\n"))
}
# this line is probably never reached but is a good defensive fallback
error_messages$message[match(output$last_error, error_messages$code)]
}
|
#ANALYZING PHOTOS AND SAVING LATEST 5 PHOTOS
library(Rfacebook)
library(RCurl)
library(magrittr)
library(httr)
library(RMySQL)
library(jsonlite)
load("my_db_connection")
load("FB_ID_FILE")
load("fb_oauth_auto")
load("fb_oauth_manual")
options(stringsAsFactors = FALSE)
#FORMING QUERY FOR TOP 7 FRIENDS INCLUDING THE USER
a1 <- "SELECT count(Friend_id) ,Friend_name FROM "
a1 <- paste(a1,fb_user_id_fetch,"_photos ",sep = "")
a1 <- paste(a1,"group by(Friend_id) order by count(Friend_id) DESC",sep = "")
#FORMING QUERY FOR TOTAL NO OF PHOTOS
a2 <- "SELECT count(Friend_id) FROM "
a2 <- paste(a2,fb_user_id_fetch,"_photos",sep = "")
#FETCHING TOP 7 FRIEND IF EXISTS
r1 <- dbSendQuery(mydb,a1)
r1 <- fetch(r1,n=-1)
#VECTOR FOR NAME(name) AND COUNT(count)
# Take the top (at most) 7 rows of r1 directly, instead of growing both
# vectors one element at a time in a while loop and then stripping the ""
# placeholder off the front of name with name[-1].
n_top <- min(nrow(r1), 7)
name <- r1$Friend_name[seq_len(n_top)]
count <- r1$`count(Friend_id)`[seq_len(n_top)]
#TOTAL No of Photos (total_count)
r2 <- dbSendQuery(mydb,a2)
total_count <- fetch(r2,n=-1)
#PLOTTING PIE CHART
#Converting friend count into percentage and rounding off
percent <- round(100*count/sum(count),1)
#plotting the chart
str3 <- "C:\\xampp\\htdocs\\example\\photos.png"
png(filename=str3)
pie(count, labels = percent, main = "PIE CHART OF PHOTOS",col = rainbow(length(count)),radius = 1.0)
legend("top", name, cex = 0.7,fill = rainbow(length(count)))
dev.off()
#FETCHING LAST 5 PHotos
a3 <- "SELECT photo_link FROM "
a3 <- paste(a3,fb_user_id_fetch,"_photos",sep = "")
r3 <- dbSendQuery(mydb,a3)
r3 <- fetch(r3,n=-1)
#Saving the latest five photos one by one
# BUG FIX: the original incremented i only inside the if-branch, so a
# photo_link equal to the string "NA" made the while loop spin forever.
# Also stop early when fewer than 5 rows were fetched -- indexing past the
# end of r3 previously yielded NA and an `if (NA)` error.
n_save <- min(5, nrow(r3))
i <- 1
while (i <= n_save) {
  if (r3[i, "photo_link"] != "NA") {
    # build the destination from the loop index instead of repeating one
    # hard-coded download.file() call per photo
    dest <- paste0("C:\\xampp\\htdocs\\example\\", i, ".jpg")
    download.file(url = r3[i, "photo_link"], destfile = dest, mode = "wb")
  }
  i <- i + 1
}
| /ANALYZING_PHOTOS_PIECHART.R | no_license | raunaqrameshporwal/Facebook-Influencer-Identification | R | false | false | 2,329 | r | #ANALYZING PHOTOS AND SAVING LATEST 5 PHOTOS
library(Rfacebook)
library(RCurl)
library(magrittr)
library(httr)
library(RMySQL)
library(jsonlite)
load("my_db_connection")
load("FB_ID_FILE")
load("fb_oauth_auto")
load("fb_oauth_manual")
options(stringsAsFactors = FALSE)
#FORMING QUERY FOR TOP 7 FRIENDS INCLUDING THE USER
a1 <- "SELECT count(Friend_id) ,Friend_name FROM "
a1 <- paste(a1,fb_user_id_fetch,"_photos ",sep = "")
a1 <- paste(a1,"group by(Friend_id) order by count(Friend_id) DESC",sep = "")
#FORMING QUERY FOR TOTAL NO OF PHOTOS
a2 <- "SELECT count(Friend_id) FROM "
a2 <- paste(a2,fb_user_id_fetch,"_photos",sep = "")
#FETCHING TOP 7 FRIEND IF EXISTS
r1 <- dbSendQuery(mydb,a1)
r1 <- fetch(r1,n=-1)
#VECTOR FOR NAME(name)
name <- ""
#VECTOR FOR COUNT(count)
count <- NULL
i <- 1
while(i<=nrow(r1)&&i<=7)
{
name <- c(name,r1$Friend_name[i])
count <- c(count,r1$`count(Friend_id)`[i])
i <- i+1
}
#TOTAL No of Photos (total_count)
r2 <- dbSendQuery(mydb,a2)
total_count <- fetch(r2,n=-1)
#REMOVING FIRST "" IN NAME
name <- name[-1]
#PLOTTING PIE CHART
#Converting friend count into percentage and rounding off
percent <- round(100*count/sum(count),1)
#plotting the chart
str3 <- "C:\\xampp\\htdocs\\example\\photos.png"
png(filename=str3)
pie(count, labels = percent, main = "PIE CHART OF PHOTOS",col = rainbow(length(count)),radius = 1.0)
legend("top", name, cex = 0.7,fill = rainbow(length(count)))
dev.off()
#FETCHING LAST 5 PHotos
a3 <- "SELECT photo_link FROM "
a3 <- paste(a3,fb_user_id_fetch,"_photos",sep = "")
r3 <- dbSendQuery(mydb,a3)
r3 <- fetch(r3,n=-1)
#Saving the latest five photos one by one
# BUG FIX: the original incremented i only inside the if-branch, so a
# photo_link equal to the string "NA" made the while loop spin forever.
# Also stop early when fewer than 5 rows were fetched -- indexing past the
# end of r3 previously yielded NA and an `if (NA)` error.
n_save <- min(5, nrow(r3))
i <- 1
while (i <= n_save) {
  if (r3[i, "photo_link"] != "NA") {
    # build the destination from the loop index instead of repeating one
    # hard-coded download.file() call per photo
    dest <- paste0("C:\\xampp\\htdocs\\example\\", i, ".jpg")
    download.file(url = r3[i, "photo_link"], destfile = dest, mode = "wb")
  }
  i <- i + 1
}
|
context("sim_ped")
# NOTE(review): these tests exercise stochastic simulation output, so they
# check structural invariants rather than exact values.
test_that("returns a single ped file dataframe", {
  expect_true(is.data.frame(sim_ped(hazard_rates = hazard(AgeSpecific_Hazards),
                                    GRR = 10, carrier_prob = 0.002,
                                    RVfounder = TRUE,
                                    FamID = 1,
                                    stop_year = 2015,
                                    founder_byears = c(1900, 1905))))
})
test_that("pedigree always contains at least 1 person", {
  expect_true(nrow(sim_ped(hazard_rates = hazard(AgeSpecific_Hazards),
                           GRR = 10, carrier_prob = 0.002,
                           RVfounder = TRUE,
                           FamID = 1, stop_year = 2015,
                           founder_byears = c(1900, 1905))) >= 1)
})
# NOTE(review): the description says RVfounder = 'first' but the call passes
# RVfounder = TRUE -- confirm the intended value / wording.
test_that("Effects of RVfounder = 'first' ", {
  exPed <- sim_ped(hazard_rates = hazard(AgeSpecific_Hazards),
                   GRR = 10, carrier_prob = 0.002,
                   RVfounder = TRUE,
                   FamID = 1, stop_year = 2015,
                   founder_byears = c(1900, 1905))
  # columns 7 and 8 appear to be the disease-locus allele indicators
  # (DA1, DA2), matching the $DA1/$DA2 accessors used below
  #expect that first founder introduces causal variant
  expect_true(1 %in% exPed[1, c(7,8)])
  #expect that only one founder introduces causal variant
  expect_true(sum(exPed[which(is.na(exPed$dadID)), c(7, 8)]) == 1)
  #expect that no individual is homozygous at the disease locus
  #(DA1 + DA2 == 2 means two copies of the variant; the old comment said
  #"heterozygous", which does not match the condition being tested)
  expect_true(!any(exPed$DA1 + exPed$DA2 == 2))
})
test_that("If GRR = 1", {
  exPed <- sim_ped(hazard_rates = hazard(AgeSpecific_Hazards),
                   GRR = 1, carrier_prob = 0.002,
                   FamID = 1, stop_year = 2015,
                   founder_byears = c(1900, 1905))
  #expect that no founder introduces the causal variant
  expect_true(!any(exPed[, c(7,8)] == 1))
})
test_that("issues error when hazard_rates is not a hazard object", {
  expect_error(sim_ped(hazard_rates = AgeSpecific_Hazards,
                       GRR = 1, carrier_prob = 0.002,
                       FamID = 1, stop_year = 2015,
                       founder_byears = c(1900, 1905),
                       RVfounder = TRUE))
})
| /data/genthat_extracted_code/SimRVPedigree/tests/test_sim_ped.R | no_license | surayaaramli/typeRrh | R | false | false | 2,160 | r | context("sim_ped")
test_that("returns a single ped file dataframe", {
expect_true(is.data.frame(sim_ped(hazard_rates = hazard(AgeSpecific_Hazards),
GRR = 10, carrier_prob = 0.002,
RVfounder = TRUE,
FamID = 1,
stop_year = 2015,
founder_byears = c(1900, 1905))))
})
test_that("pedigree always contains at least 1 person", {
expect_true(nrow(sim_ped(hazard_rates = hazard(AgeSpecific_Hazards),
GRR = 10, carrier_prob = 0.002,
RVfounder = TRUE,
FamID = 1, stop_year = 2015,
founder_byears = c(1900, 1905))) >= 1)
})
test_that("Effects of RVfounder = 'first' ", {
exPed <- sim_ped(hazard_rates = hazard(AgeSpecific_Hazards),
GRR = 10, carrier_prob = 0.002,
RVfounder = TRUE,
FamID = 1, stop_year = 2015,
founder_byears = c(1900, 1905))
#expect that first founder introduces causal variant
expect_true(1 %in% exPed[1, c(7,8)])
#expect that only one founder introduces causal variant
expect_true(sum(exPed[which(is.na(exPed$dadID)), c(7, 8)]) == 1)
#expect that no children are heterozygous at the disease locus
expect_true(!any(exPed$DA1 + exPed$DA2 == 2))
})
test_that("If GRR = 1", {
exPed <- sim_ped(hazard_rates = hazard(AgeSpecific_Hazards),
GRR = 1, carrier_prob = 0.002,
FamID = 1, stop_year = 2015,
founder_byears = c(1900, 1905))
#expect that no founder introduce causal variant
expect_true(!any(exPed[, c(7,8)] == 1))
})
test_that("issues error when hazard_rates is not a hazard object", {
expect_error(sim_ped(hazard_rates = AgeSpecific_Hazards,
GRR = 1, carrier_prob = 0.002,
FamID = 1, stop_year = 2015,
founder_byears = c(1900, 1905),
RVfounder = TRUE))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/survival-cch-tidiers.R
\name{glance.cch}
\alias{glance.cch}
\title{Glance at a(n) cch object}
\usage{
\method{glance}{cch}(x, ...)
}
\arguments{
\item{x}{An \code{cch} object returned from \code{\link[survival:cch]{survival::cch()}}.}
\item{...}{Additional arguments. Not used. Needed to match generic
signature only. \strong{Cautionary note:} Misspelled arguments will be
absorbed in \code{...}, where they will be ignored. If the misspelled
argument has a default value, the default value will be used.
For example, if you pass \code{conf.lvel = 0.9}, all computation will
proceed using \code{conf.level = 0.95}. Two exceptions here are:
\itemize{
\item \code{tidy()} methods will warn when supplied an \code{exponentiate} argument if
it will be ignored.
\item \code{augment()} methods will warn when supplied a \code{newdata} argument if it
will be ignored.
}}
}
\description{
Glance accepts a model object and returns a \code{\link[tibble:tibble]{tibble::tibble()}}
with exactly one row of model summaries. The summaries are typically
goodness of fit measures, p-values for hypothesis tests on residuals,
or model convergence information.
Glance never returns information from the original call to the modeling
function. This includes the name of the modeling function or any
arguments passed to the modeling function.
Glance does not calculate summary measures. Rather, it farms out these
computations to appropriate methods and gathers the results together.
Sometimes a goodness of fit measure will be undefined. In these cases
the measure will be reported as \code{NA}.
Glance returns the same number of columns regardless of whether the
model matrix is rank-deficient or not. If so, entries in columns
that no longer have a well-defined value are filled in with an \code{NA}
of the appropriate type.
}
\examples{
\dontshow{if (rlang::is_installed(c("survival", "ggplot2"))) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
# load libraries for models and data
library(survival)
# examples come from cch documentation
subcoh <- nwtco$in.subcohort
selccoh <- with(nwtco, rel == 1 | subcoh == 1)
ccoh.data <- nwtco[selccoh, ]
ccoh.data$subcohort <- subcoh[selccoh]
# central-lab histology
ccoh.data$histol <- factor(ccoh.data$histol, labels = c("FH", "UH"))
# tumour stage
ccoh.data$stage <- factor(ccoh.data$stage, labels = c("I", "II", "III", "IV"))
ccoh.data$age <- ccoh.data$age / 12 # age in years
# fit model
fit.ccP <- cch(Surv(edrel, rel) ~ stage + histol + age,
data = ccoh.data,
subcoh = ~subcohort, id = ~seqno, cohort.size = 4028
)
# summarize model fit with tidiers + visualization
tidy(fit.ccP)
# coefficient plot
library(ggplot2)
ggplot(tidy(fit.ccP), aes(x = estimate, y = term)) +
geom_point() +
geom_errorbarh(aes(xmin = conf.low, xmax = conf.high), height = 0) +
geom_vline(xintercept = 0)
\dontshow{\}) # examplesIf}
}
\seealso{
\code{\link[=glance]{glance()}}, \code{\link[survival:cch]{survival::cch()}}
Other cch tidiers:
\code{\link{glance.survfit}()},
\code{\link{tidy.cch}()}
Other survival tidiers:
\code{\link{augment.coxph}()},
\code{\link{augment.survreg}()},
\code{\link{glance.aareg}()},
\code{\link{glance.coxph}()},
\code{\link{glance.pyears}()},
\code{\link{glance.survdiff}()},
\code{\link{glance.survexp}()},
\code{\link{glance.survfit}()},
\code{\link{glance.survreg}()},
\code{\link{tidy.aareg}()},
\code{\link{tidy.cch}()},
\code{\link{tidy.coxph}()},
\code{\link{tidy.pyears}()},
\code{\link{tidy.survdiff}()},
\code{\link{tidy.survexp}()},
\code{\link{tidy.survfit}()},
\code{\link{tidy.survreg}()}
}
\concept{cch tidiers}
\concept{survival tidiers}
\value{
A \code{\link[tibble:tibble]{tibble::tibble()}} with exactly one row and columns:
\item{iter}{Iterations of algorithm/fitting procedure completed.}
\item{p.value}{P-value corresponding to the test statistic.}
\item{rscore}{Robust log-rank statistic}
\item{score}{Score.}
\item{n}{number of predictions}
\item{nevent}{number of events}
}
| /man/glance.cch.Rd | permissive | tidymodels/broom | R | false | true | 4,082 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/survival-cch-tidiers.R
\name{glance.cch}
\alias{glance.cch}
\title{Glance at a(n) cch object}
\usage{
\method{glance}{cch}(x, ...)
}
\arguments{
\item{x}{An \code{cch} object returned from \code{\link[survival:cch]{survival::cch()}}.}
\item{...}{Additional arguments. Not used. Needed to match generic
signature only. \strong{Cautionary note:} Misspelled arguments will be
absorbed in \code{...}, where they will be ignored. If the misspelled
argument has a default value, the default value will be used.
For example, if you pass \code{conf.lvel = 0.9}, all computation will
proceed using \code{conf.level = 0.95}. Two exceptions here are:
\itemize{
\item \code{tidy()} methods will warn when supplied an \code{exponentiate} argument if
it will be ignored.
\item \code{augment()} methods will warn when supplied a \code{newdata} argument if it
will be ignored.
}}
}
\description{
Glance accepts a model object and returns a \code{\link[tibble:tibble]{tibble::tibble()}}
with exactly one row of model summaries. The summaries are typically
goodness of fit measures, p-values for hypothesis tests on residuals,
or model convergence information.
Glance never returns information from the original call to the modeling
function. This includes the name of the modeling function or any
arguments passed to the modeling function.
Glance does not calculate summary measures. Rather, it farms out these
computations to appropriate methods and gathers the results together.
Sometimes a goodness of fit measure will be undefined. In these cases
the measure will be reported as \code{NA}.
Glance returns the same number of columns regardless of whether the
model matrix is rank-deficient or not. If so, entries in columns
that no longer have a well-defined value are filled in with an \code{NA}
of the appropriate type.
}
\examples{
\dontshow{if (rlang::is_installed(c("survival", "ggplot2"))) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
# load libraries for models and data
library(survival)
# examples come from cch documentation
subcoh <- nwtco$in.subcohort
selccoh <- with(nwtco, rel == 1 | subcoh == 1)
ccoh.data <- nwtco[selccoh, ]
ccoh.data$subcohort <- subcoh[selccoh]
# central-lab histology
ccoh.data$histol <- factor(ccoh.data$histol, labels = c("FH", "UH"))
# tumour stage
ccoh.data$stage <- factor(ccoh.data$stage, labels = c("I", "II", "III", "IV"))
ccoh.data$age <- ccoh.data$age / 12 # age in years
# fit model
fit.ccP <- cch(Surv(edrel, rel) ~ stage + histol + age,
data = ccoh.data,
subcoh = ~subcohort, id = ~seqno, cohort.size = 4028
)
# summarize model fit with tidiers + visualization
tidy(fit.ccP)
# coefficient plot
library(ggplot2)
ggplot(tidy(fit.ccP), aes(x = estimate, y = term)) +
geom_point() +
geom_errorbarh(aes(xmin = conf.low, xmax = conf.high), height = 0) +
geom_vline(xintercept = 0)
\dontshow{\}) # examplesIf}
}
\seealso{
\code{\link[=glance]{glance()}}, \code{\link[survival:cch]{survival::cch()}}
Other cch tidiers:
\code{\link{glance.survfit}()},
\code{\link{tidy.cch}()}
Other survival tidiers:
\code{\link{augment.coxph}()},
\code{\link{augment.survreg}()},
\code{\link{glance.aareg}()},
\code{\link{glance.coxph}()},
\code{\link{glance.pyears}()},
\code{\link{glance.survdiff}()},
\code{\link{glance.survexp}()},
\code{\link{glance.survfit}()},
\code{\link{glance.survreg}()},
\code{\link{tidy.aareg}()},
\code{\link{tidy.cch}()},
\code{\link{tidy.coxph}()},
\code{\link{tidy.pyears}()},
\code{\link{tidy.survdiff}()},
\code{\link{tidy.survexp}()},
\code{\link{tidy.survfit}()},
\code{\link{tidy.survreg}()}
}
\concept{cch tidiers}
\concept{survival tidiers}
\value{
A \code{\link[tibble:tibble]{tibble::tibble()}} with exactly one row and columns:
\item{iter}{Iterations of algorithm/fitting procedure completed.}
\item{p.value}{P-value corresponding to the test statistic.}
\item{rscore}{Robust log-rank statistic}
\item{score}{Score.}
\item{n}{number of predictions}
\item{nevent}{number of events}
}
|
#' ACAPS secondary effects of COVID-19
#'
#' The objective of the dataset is to provide information for decision makers to improve their efforts in addressing the wider effects of the COVID-19 pandemic.
#'
#' @details
#'
#' The ACAPS COVID-19 Analytical Framework, together with the data collected for the Government Measures dataset, provided the foundation for the Secondary Impacts Analytical Framework and dataset. The dataset will track secondary impacts across a wide range of relevant themes such as economy, health, migration and education.
#'
#' Around 80 impact indicators, anticipated to be impacted by COVID-19, have been identified and organised across 4 pillars and 13 thematic blocks.
#'
#' The data collection is ongoing and will be conducted on a country-level. Eventually, data will identify the secondary impacts of the COVID-19 pandemic in more than 190 countries. Data comes from a range of available sources including international organisations, research centres and media analysis.
#'
#' The data collection is supported by a group of student volunteers from various universities worldwide. Volunteers receive training on the analytical framework and indicators to ensure coherent data. Additional guidance on analytical and data collection techniques are also provided by an ACAPS analyst team which supervises the data entered. This model has already been successfully implemented in the ACAPS Government Measures project.
#'
#' @author Sean Davis <seandavi@gmail.com>
#'
#' @return a data.frame
#'
#' @references
#' - https://www.acaps.org/secondary-impacts-covid-19
#'
#' @family data-import
#' @family economics
#' @family social
#'
#' @examples
#'
#' res = acaps_secondary_impact_data()
#'
#' head(res)
#' dplyr::glimpse(res)
#' dim(res)
#'
#' @export
acaps_secondary_impact_data = function() {
    url = 'https://www.acaps.org/sites/acaps/files/resources/files/acaps_covid19_secondary_impacts_beta.xlsx'
    # fetch the workbook via the package's download helper
    # (s2p_cached_url is defined elsewhere in this package; presumably it
    # caches the download -- confirm against its definition)
    rpath = s2p_cached_url(url)
    dat = readxl::read_excel(rpath) %>%
        # normalise column headers: lower-case, spaces -> underscores
        dplyr::rename_with(function(z) gsub(" ", "_", tolower(z))) %>%
        # parse day-month-year source dates; unparseable values become NA
        # silently (quiet = TRUE)
        dplyr::mutate(date=lubridate::dmy(.data$source_date,quiet = TRUE))
    dat
}
| /R/acaps_secondary_impact_data.R | permissive | RichardMN/sars2pack | R | false | false | 2,165 | r | #' ACAPS secondary effects of COVID-19
#'
#' The objective of the dataset is to provide information for decision makers to improve their efforts in addressing the wider effects of the COVID-19 pandemic.
#'
#' @details
#'
#' The ACAPS COVID-19 Analytical Framework, together with the data collected for the Government Measures dataset, provided the foundation for the Secondary Impacts Analytical Framework and dataset. The dataset will track secondary impacts across a wide range of relevant themes such as economy, health, migration and education.
#'
#' Around 80 impact indicators, anticipated to be impacted by COVID-19, have been identified and organised across 4 pillars and 13 thematic blocks.
#'
#' The data collection is ongoing and will be conducted on a country-level. Eventually, data will identify the secondary impacts of the COVID-19 pandemic in more than 190 countries. Data comes from a range of available sources including international organisations, research centres and media analysis.
#'
#' The data collection is supported by a group of student volunteers from various universities worldwide. Volunteers receive training on the analytical framework and indicators to ensure coherent data. Additional guidance on analytical and data collection techniques are also provided by an ACAPS analyst team which supervises the data entered. This model has already been successfully implemented in the ACAPS Government Measures project.
#'
#' @author Sean Davis <seandavi@gmail.com>
#'
#' @return a data.frame
#'
#' @references
#' - https://www.acaps.org/secondary-impacts-covid-19
#'
#' @family data-import
#' @family economics
#' @family social
#'
#' @examples
#'
#' res = acaps_secondary_impact_data()
#'
#' head(res)
#' dplyr::glimpse(res)
#' dim(res)
#'
#' @export
acaps_secondary_impact_data = function() {
url = 'https://www.acaps.org/sites/acaps/files/resources/files/acaps_covid19_secondary_impacts_beta.xlsx'
rpath = s2p_cached_url(url)
dat = readxl::read_excel(rpath) %>%
dplyr::rename_with(function(z) gsub(" ", "_", tolower(z))) %>%
dplyr::mutate(date=lubridate::dmy(.data$source_date,quiet = TRUE))
dat
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iot_operations.R
\name{iot_update_ca_certificate}
\alias{iot_update_ca_certificate}
\title{Updates a registered CA certificate}
\usage{
iot_update_ca_certificate(certificateId, newStatus,
newAutoRegistrationStatus, registrationConfig, removeAutoRegistration)
}
\arguments{
\item{certificateId}{[required] The CA certificate identifier.}
\item{newStatus}{The updated status of the CA certificate.
\strong{Note:} The status value REGISTER_INACTIVE is deprecated and should
not be used.}
\item{newAutoRegistrationStatus}{The new value for the auto registration status. Valid values are:
"ENABLE" or "DISABLE".}
\item{registrationConfig}{Information about the registration configuration.}
\item{removeAutoRegistration}{If true, removes auto registration.}
}
\value{
An empty list.
}
\description{
Updates a registered CA certificate.
}
\section{Request syntax}{
\preformatted{svc$update_ca_certificate(
certificateId = "string",
newStatus = "ACTIVE"|"INACTIVE",
newAutoRegistrationStatus = "ENABLE"|"DISABLE",
registrationConfig = list(
templateBody = "string",
roleArn = "string"
),
removeAutoRegistration = TRUE|FALSE
)
}
}
\keyword{internal}
| /cran/paws.internet.of.things/man/iot_update_ca_certificate.Rd | permissive | paws-r/paws | R | false | true | 1,247 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iot_operations.R
\name{iot_update_ca_certificate}
\alias{iot_update_ca_certificate}
\title{Updates a registered CA certificate}
\usage{
iot_update_ca_certificate(certificateId, newStatus,
newAutoRegistrationStatus, registrationConfig, removeAutoRegistration)
}
\arguments{
\item{certificateId}{[required] The CA certificate identifier.}
\item{newStatus}{The updated status of the CA certificate.
\strong{Note:} The status value REGISTER_INACTIVE is deprecated and should
not be used.}
\item{newAutoRegistrationStatus}{The new value for the auto registration status. Valid values are:
"ENABLE" or "DISABLE".}
\item{registrationConfig}{Information about the registration configuration.}
\item{removeAutoRegistration}{If true, removes auto registration.}
}
\value{
An empty list.
}
\description{
Updates a registered CA certificate.
}
\section{Request syntax}{
\preformatted{svc$update_ca_certificate(
certificateId = "string",
newStatus = "ACTIVE"|"INACTIVE",
newAutoRegistrationStatus = "ENABLE"|"DISABLE",
registrationConfig = list(
templateBody = "string",
roleArn = "string"
),
removeAutoRegistration = TRUE|FALSE
)
}
}
\keyword{internal}
|
#' @title Two-Sample T-Test for Equality of Variability Measures
#'
#' @description Two-sample t-test for comparing variability measures.
#' @export
#' @param x A vector specifying the first sample.
#' @param y A vector specifying the second sample.
#' @param alternative A character string specifying the alternative hypothesis; "two.sided" for two-tailed, "less" for left-tailed, and "greater" for right-tailed alternative hypothesis. The default option is "two.sided".
#' @param scale.option A character string specifying the transformation on each observation; "Levene.Med" for Levene's transformation (absolute difference from the sample median), "Levene.Med.0" for Levene's transformation with zero removal for odd sample(s), "Levene.Med.00" for Levene's transformation with zero removal for both odd and even sample(s), and "Variance" for squared difference from the sample mean. The default option is "Levene.Med.0".
#' @param scale.adj A boolean specifying whether or not any constant should be multiplied to each transformed observation. This is useful to obtain unbiased estimates. The default option is TRUE.
#' @param scale.00 A scale to be applied for an even sample when "Levene.med.00" is chosen. The default option is 2, but the square root of 2 is another viable option.
#' @param paired A boolean specifying whether or not the samples are paired or independent. The default option is FALSE.
#' @param alpha A numeric specifying the significance level. The default option is 0.05.
#' @param plot.ci A boolean specifying whether or not the deviation between the two samples should be plotted. The default option is TRUE.
#' @param plot.ici A boolean specifying whether or not the inferential confidence intervals for the two samples should be plotted. The default option is TRUE.
#' @param ici.interval A vector with two elements specifying the range of values for the plot of inferential confidence intervals. The default option is NULL, in which case, an appropriate range is automatically specified.
#' @param add.individual.ci A boolean specifying whether or not the confidence intervals with the confidence level implied by \code{alpha} should also be plotted. The default option is TRUE.
#' @param by.ici A numeric specifying the scales in the plot for ici.interval. The default option is 0.5.
#' @param xlab.ici A character string specifying the x axis labels used for the inferential confidence intervals and confidence intervals. The default option is "".
#' @param rounds A numeric specifying the number of decimal places to be rounded. The default option is 3.
#' @param rounds.plot A numeric specifying the number of decimal places to be rounded. The default option is NULL, in which case it is set equal to \code{rounds}.
#' @param ci.interval A vector with two elements specifying the range of values for the plot of confidence intervals. The default option is NULL, in which case, an appropriate range is automatically specified.
#' @param by.ci A numeric specifying the scales in the plot for ci.interval. The default option is 0.5.
#' @param name.x A character string specifying the label for the x variable. The default option is NULL, in which case, it is set to "x".
#' @param name.y A character string specifying the label for the y variable. The default option is NULL, in which case, it is set to "y".
#' @param pool.mean A boolean specifying whether or not the sample means should be pooled for the degrees of freedom. The default option is FALSE.
#' @param logit.interval A vector with two elements specifying the lower and upper bound if logit transformation is used. The default option is NULL.
#' @param tol A numeric specifying the cut-off value for a numerical zero. The default option is 1e-7.
#'
#' @return
#' \item{CI}{A data frame displaying the effect size estimate, lower and upper bound of the effect size confidence interval, and the degrees of freedom.}
#' \item{ICI}{A data frame displaying the sample mean for both x and y, lower and upper bounds of the inferential confidence intervals, the degrees of freedom of the inferential confidence intervals, the inferential significance level, and the significance level.}
#' \item{Statistic}{A data frame displaying the test statistic and p-value of the generalized two-sample t-test.}
#' \item{Ind.CI}{A data frame displaying the sample mean for both x and y, lower and upper bounds of the individual confidence intervals, the degrees of freedom of the individual confidence intervals, the sample standard deviations, and the significance level.}
#' \item{Effect.Sizes}{A data frame displaying Cohen's d and log ratio for the mean and variability measure comparisons.}
#'
#' @examples
#' set.seed(123)
#' x<-rexp(10)
#' y<-rexp(15)
#' two.sample.var(x,y,alternative="two.sided",scale.option="Levene.Med.0",scale.adj=TRUE,paired=FALSE)
#'
#' @importFrom stats na.omit var
#' Two-sample t-test for equality of variability measures.
#'
#' Each observation is first transformed into a variability measure (a
#' Levene-type absolute deviation or a squared deviation, per `scale.option`);
#' the generalized two-sample mean test is then run on the log of those
#' transformed values.  Cohen's d and log-ratio effect sizes are reported for
#' both the raw means and the variability measures.
two.sample.var <- function(x, y, alternative = c("two.sided", "less", "greater"),
                           scale.option = c("Levene.Med", "Levene.Med.0", "Levene.Med.00", "Variance"),
                           scale.adj = TRUE, scale.00 = 2,
                           paired = FALSE, alpha = 0.05, plot.ci = TRUE, plot.ici = TRUE,
                           ici.interval = NULL, add.individual.ci = TRUE, by.ici = 0.5,
                           xlab.ici = "", rounds = 3, rounds.plot = NULL,
                           ci.interval = NULL, by.ci = 0.5, name.x = NULL, name.y = NULL,
                           pool.mean = FALSE, logit.interval = NULL, tol = 1e-7)
{
  # Pooled-standard-deviation Cohen's d for two NA-free samples.
  pooled.d <- function(a, b) {
    n.a <- length(a)
    n.b <- length(b)
    sp <- sqrt((var(a) * (n.a - 1) + var(b) * (n.b - 1)) / (n.a + n.b - 2))
    (mean(a) - mean(b)) / sp
  }
  # Transform each observation into its variability measure; `rho` is carried
  # along for the paired case (see data.scale.xy for its exact definition).
  scaled <- data.scale.xy(x, y, scale.option = scale.option, scale.adj = scale.adj,
                          scale.00 = scale.00, paired = paired)
  # Effect sizes on the raw scale (comparison of means).
  xc <- na.omit(x)
  yc <- na.omit(y)
  cohen.m <- pooled.d(xc, yc)
  # The log ratio of means is defined only for strictly positive samples.
  logratio.m <- if (min(xc) > 0 && min(yc) > 0) log(mean(xc) / mean(yc)) else NA
  # Effect sizes on the transformed (variability) scale.
  xv <- na.omit(scaled$trans.x)
  yv <- na.omit(scaled$trans.y)
  cohen.v <- pooled.d(xv, yv)
  logratio.v <- log(mean(xv) / mean(yv))
  # Delegate the test/CI machinery to the generalized two-sample mean test,
  # applied on the log scale of the variability measures.
  results <- two.sample.mean(scaled$trans.x, scaled$trans.y, transformation = "log",
                             alternative = alternative, paired = paired, alpha = alpha,
                             plot.ci = plot.ci, plot.ici = plot.ici,
                             ici.interval = ici.interval,
                             add.individual.ci = add.individual.ci, by.ici = by.ici,
                             xlab.ici = xlab.ici, rounds = rounds,
                             rounds.plot = rounds.plot, ci.interval = ci.interval,
                             by.ci = by.ci, name.x = name.x, name.y = name.y,
                             pool.mean = pool.mean, logit.interval = logit.interval,
                             rho = scaled$rho, tol = tol)
  ess <- c(Cohen.d.Mean = cohen.m, Log.Ratio.Mean = logratio.m,
           Cohen.d.Var = cohen.v, Log.Ratio.Var = logratio.v)
  list(CI = results$CI, ICI = results$ICI, Statistic = results$Statistic,
       Ind.CI = results$Ind.CI, Effect.Sizes = ess)
}
| /R/two.sample.var.R | no_license | cran/intervcomp | R | false | false | 6,688 | r | #' @title Two-Sample T-Test for Equality of Variability Measures
#'
#' @description Two-sample t-test for comparing variability measures.
#' @export
#' @param x A vector specifying the first sample.
#' @param y A vector specifying the second sample.
#' @param alternative A character string specifying the alternative hypothesis; "two.sided" for two-tailed, "less" for left-tailed, and "greater" for right-tailed alternative hypothesis. The default option is "two.sided".
#' @param scale.option A character string specifying the transformation on each observation; "Levene.Med" for Levene's transformation (absolute difference from the sample median), "Levene.Med.0" for Levene's transformation with zero removal for odd sample(s), "Levene.Med.00" for Levene's transformation with zero removal for both odd and even sample(s), and "Variance" for squared difference from the sample mean. The default option is "Levene.Med.0".
#' @param scale.adj A boolean specifying whether or not any constant should be multiplied to each transformed observation. This is useful to obtain unbiased estimates. The default option is TRUE.
#' @param scale.00 A scale to be applied for an even sample when "Levene.med.00" is chosen. The default option is 2, but the square root of 2 is another viable option.
#' @param paired A boolean specifying whether or not the samples are paired or independent. The default option is FALSE.
#' @param alpha A numeric specifying the significance level. The default option is 0.05.
#' @param plot.ci A boolean specifying whether or not the deviation between the two samples should be plotted. The default option is TRUE.
#' @param plot.ici A boolean specifying whether or not the inferential confidence intervals for the two samples should be plotted. The default option is TRUE.
#' @param ici.interval A vector with two elements specifying the range of values for the plot of inferential confidence intervals. The default option is NULL, in which case, an appropriate range is automatically specified.
#' @param add.individual.ci A boolean specifying whether or not the confidence intervals with the confidence level implied by \code{alpha} should also be plotted. The default option is TRUE.
#' @param by.ici A numeric specifying the scales in the plot for ici.interval. The default option is 0.5.
#' @param xlab.ici A character string specifying the x axis labels used for the inferential confidence intervals and confidence intervals. The default option is "".
#' @param rounds A numeric specifying the number of decimal places to be rounded. The default option is 3.
#' @param rounds.plot A numeric specifying the number of decimal places to be rounded. The default option is NULL, in which case it is set equal to \code{rounds}.
#' @param ci.interval A vector with two elements specifying the range of values for the plot of confidence intervals. The default option is NULL, in which case, an appropriate range is automatically specified.
#' @param by.ci A numeric specifying the scales in the plot for ci.interval. The default option is 0.5.
#' @param name.x A character string specifying the label for the x variable. The default option is NULL, in which case, it is set to "x".
#' @param name.y A character string specifying the label for the y variable. The default option is NULL, in which case, it is set to "y".
#' @param pool.mean A boolean specifying whether or not the sample means should be pooled for the degrees of freedom. The default option is FALSE.
#' @param logit.interval A vector with two elements specifying the lower and upper bound if logit transformation is used. The default option is NULL.
#' @param tol A numeric specifying the cut-off value for a numerical zero. The default option is 1e-7.
#'
#' @return
#' \item{CI}{A data frame displaying the effect size estimate, lower and upper bound of the effect size confidence interval, and the degrees of freedom.}
#' \item{ICI}{A data frame displaying the sample mean for both x and y, lower and upper bounds of the inferential confidence intervals, the degrees of freedom of the inferential confidence intervals, the inferential significance level, and the significance level.}
#' \item{Statistic}{A data frame displaying the test statistic and p-value of the generalized two-sample t-test.}
#' \item{Ind.CI}{A data frame displaying the sample mean for both x and y, lower and upper bounds of the individual confidence intervals, the degrees of freedom of the individual confidence intervals, the sample standard deviations, and the significance level.}
#' \item{Effect.Sizes}{A data frame displaying Cohen's d and log ratio for the mean and variability measure comparisons.}
#'
#' @examples
#' set.seed(123)
#' x<-rexp(10)
#' y<-rexp(15)
#' two.sample.var(x,y,alternative="two.sided",scale.option="Levene.Med.0",scale.adj=TRUE,paired=FALSE)
#'
#' @importFrom stats na.omit var
# Two-sample t-test for equality of variability measures: each observation is
# first transformed into a variability measure (Levene-type absolute deviation
# or squared deviation, per scale.option), and the generalized two-sample mean
# test is then applied on the log scale of those transformed values.
two.sample.var<-function(x,y,alternative=c("two.sided", "less", "greater"),
                     scale.option=c("Levene.Med","Levene.Med.0","Levene.Med.00","Variance"), scale.adj=TRUE, scale.00=2,
                     paired=FALSE,alpha=0.05,plot.ci=TRUE,plot.ici=TRUE,ici.interval=NULL,add.individual.ci=TRUE, by.ici=0.5,
                     xlab.ici="", rounds=3, rounds.plot=NULL, ci.interval=NULL, by.ci=0.5, name.x=NULL, name.y=NULL,
                     pool.mean=FALSE,logit.interval=NULL, tol=1e-7)
{
  # Transform both samples into variability measures; rho is carried along for
  # the paired case (see data.scale.xy for its exact definition).
  scale.data.xy<-data.scale.xy(x,y,scale.option=scale.option,scale.adj=scale.adj,scale.00=scale.00,paired=paired)
  new.x<-scale.data.xy$trans.x
  new.y<-scale.data.xy$trans.y
  orig.rho<-scale.data.xy$rho
  # Effect sizes on the raw scale (comparison of means), NAs dropped first.
  xn <- na.omit(x)
  yn <- na.omit(y)
  # Cohen's d using the pooled standard deviation.
  cohen.m <- (mean(xn) - mean(yn))/sqrt((var(xn)*(length(xn)-1)+var(yn)*(length(yn)-1))/(length(xn)+length(yn)-2))
  logratio.m <- NA
  # The log ratio of means is defined only for strictly positive samples.
  if(min(xn) > 0 && min(yn) > 0)
  {
    logratio.m <- log(mean(xn)/mean(yn))
  }
  # Effect sizes on the transformed (variability) scale.
  newx <- na.omit(new.x)
  newy <- na.omit(new.y)
  cohen.v <- (mean(newx) - mean(newy))/sqrt((var(newx)*(length(newx)-1)+var(newy)*(length(newy)-1))/(length(newx)+length(newy)-2))
  logratio.v <- log(mean(newx)/mean(newy))
  # Delegate the test/CI machinery to the generalized two-sample mean test,
  # applied on the log scale of the variability measures.
  results<-two.sample.mean(new.x,new.y,transformation="log",alternative=alternative,
           paired=paired,alpha=alpha,plot.ci=plot.ci,plot.ici=plot.ici,ici.interval=ici.interval,add.individual.ci=add.individual.ci,
           by.ici=by.ici, xlab.ici=xlab.ici, rounds=rounds, rounds.plot=rounds.plot, ci.interval=ci.interval, by.ci=by.ci,
           name.x=name.x, name.y=name.y, pool.mean=pool.mean,logit.interval=logit.interval, rho=orig.rho, tol=tol)
  # Named vector of the four effect sizes reported alongside the test output.
  ess <- c(cohen.m,logratio.m,cohen.v,logratio.v)
  names(ess) <- c("Cohen.d.Mean","Log.Ratio.Mean","Cohen.d.Var","Log.Ratio.Var")
  Analysis <- list(CI = results$CI, ICI = results$ICI, Statistic = results$Statistic, Ind.CI = results$Ind.CI,
                   Effect.Sizes = ess)
  return(Analysis)
}
|
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")
# Negative-path grid-search test for DRF (randomForest) on the cars dataset.
# The configuration is randomized on every run: a random split seed, a random
# validation scheme and a random response type.  It verifies two behaviors of
# h2o.grid():
#   1. invalid hyper-parameter values (negative ntrees / min_rows) are dropped
#      when do_hyper_params_check=FALSE but raise an error when TRUE;
#   2. passing a non-gridable parameter as a grid dimension raises an error.
check.drf.grid.cars.negative <- function() {
  cars <- h2o.uploadFile(locate("smalldata/junit/cars_20mpg.csv"))
  # Randomized split seed, logged so failures can be reproduced.
  seed <- sample(1:1000000, 1)
  Log.info(paste0("runif seed: ",seed))
  r <- h2o.runif(cars,seed=seed)
  train <- cars[r > 0.2,]
  validation_scheme = sample(1:3,1) # 1:none, 2:cross-validation, 3:validation set
  Log.info(paste0("Validation scheme (1:none, 2:cross-validation, 3:validation set): ", validation_scheme))
  if ( validation_scheme == 3 ) { valid <- cars[r <= 0.2,] }
  if ( validation_scheme == 2 ) {
    nfolds = 2
    Log.info(paste0("N-folds: ", nfolds))
  }
  # Randomly pick the modeling task; the response column (and whether it is
  # converted to a factor) follows from this choice.
  problem = sample(0:2,1)
  Log.info(paste0("Type model-building exercise (0:regression, 1:binomial, 2:multinomial): ", problem))
  predictors <- c("displacement","power","weight","acceleration","year")
  if ( problem == 1 ) { response_col <- "economy_20mpg"
  } else if ( problem == 2 ) { response_col <- "cylinders"
  } else { response_col <- "economy" }
  if ( problem == 1 || problem == 2 ) {
    Log.info("Converting the response column to a factor...")
    train[,response_col] <- as.factor(train[,response_col])
    if ( validation_scheme == 3 ) { valid[,response_col] <- as.factor(valid[,response_col]) } }
  Log.info(paste0("Predictors: ", paste(predictors, collapse=',')))
  Log.info(paste0("Response: ", response_col))
  ## Invalid drf parameters
  # Negative ntrees/min_rows values are invalid on purpose; the grid is
  # expected to silently drop them when do_hyper_params_check=FALSE.
  grid_space <- list()
  grid_space$ntrees <- c(5,10,-5)
  grid_space$max_depth <- c(2,5)
  grid_space$min_rows <- c(1,10,-7)
  Log.info(lapply(names(grid_space), function(n) paste0("The provided ",n," search space: ", grid_space[n])))
  # The search space that should survive after invalid values are filtered out.
  expected_grid_space <- list()
  expected_grid_space$ntrees <- c(5,10)
  expected_grid_space$max_depth <- c(2,5)
  expected_grid_space$min_rows <- c(1,10)
  Log.info(lapply(names(grid_space), function(n) paste0("The expected ",n," search space: ", expected_grid_space[n])))
  Log.info("Constructing the grid of drf models with some invalid drf parameters...")
  # With do_hyper_params_check=FALSE the grid builds (bad values dropped);
  # the identical call with TRUE must raise an error.
  if ( validation_scheme == 1 ) {
    cars_drf_grid <- h2o.grid("randomForest", grid_id="drf_grid_cars_test", x=predictors, y=response_col, training_frame=train, hyper_params=grid_space, do_hyper_params_check=FALSE)
    expect_error(h2o.grid("randomForest", grid_id="drf_grid_cars_test", x=predictors, y=response_col, training_frame=train, hyper_params=grid_space, do_hyper_params_check=TRUE))
  } else if ( validation_scheme == 2 ) {
    cars_drf_grid <- h2o.grid("randomForest", grid_id="drf_grid_cars_test", x=predictors, y=response_col, training_frame=train, nfolds=nfolds, hyper_params=grid_space, do_hyper_params_check=FALSE)
    expect_error(h2o.grid("randomForest", grid_id="drf_grid_cars_test", x=predictors, y=response_col, training_frame=train, nfolds=nfolds, hyper_params=grid_space, do_hyper_params_check=TRUE))
  } else {
    cars_drf_grid <- h2o.grid("randomForest", grid_id="drf_grid_cars_test", x=predictors, y=response_col, training_frame=train, validation_frame=valid, hyper_params=grid_space, do_hyper_params_check=FALSE)
    expect_error(h2o.grid("randomForest", grid_id="drf_grid_cars_test", x=predictors, y=response_col, training_frame=train, validation_frame=valid, hyper_params=grid_space, do_hyper_params_check=TRUE)) }
  Log.info("Performing various checks of the constructed grid...")
  Log.info("Check cardinality of grid, that is, the correct number of models have been created...")
  # 2 valid ntrees x 2 max_depth x 2 valid min_rows = 8 models expected.
  expect_equal(length(cars_drf_grid@model_ids), 8)
  Log.info("Check that the hyper_params that were passed to grid, were used to construct the models...")
  # Get models
  grid_models <- lapply(cars_drf_grid@model_ids, function(mid) { model = h2o.getModel(mid) })
  # Check expected number of models
  expect_equal(length(grid_models), 8)
  # Check parameters coverage
  for ( name in names(grid_space) ) { expect_model_param(grid_models, name, expected_grid_space[[name]]) }
  # TODO: Check error messages for cases with invalid drf parameters
  ## Non-gridable parameter passed as grid parameter
  # build_tree_one_node / binomial_double_trees cannot be gridded over, so the
  # strict grid construction below must fail under every validation scheme.
  non_gridable_parameter <- sample(1:2, 1)
  if ( non_gridable_parameter == 1 ) { grid_space$build_tree_one_node <- c(TRUE, FALSE) }
  if ( non_gridable_parameter == 2 ) { grid_space$binomial_double_trees <- c(TRUE, FALSE) }
  Log.info(paste0("Constructing the grid of drf models with non-gridable parameter: ", non_gridable_parameter ,
                  " (1:build_tree_one_node, 2:binomial_double_trees). Expecting failure..."))
  if ( validation_scheme == 1 ) {
    expect_error(cars_drf_grid <- h2o.grid("randomForest", grid_id="drf_grid_cars_test", x=predictors, y=response_col,
                                           training_frame=train, hyper_params=grid_space, do_hyper_params_check=TRUE))
  } else if ( validation_scheme == 2 ) {
    expect_error(cars_drf_grid <- h2o.grid("randomForest", grid_id="drf_grid_cars_test", x=predictors, y=response_col,
                                           training_frame=train, nfolds=nfolds, hyper_params=grid_space, do_hyper_params_check=TRUE))
  } else {
    expect_error(cars_drf_grid <- h2o.grid("randomForest", grid_id="drf_grid_cars_test", x=predictors, y=response_col,
                                           training_frame=train, validation_frame=valid, hyper_params=grid_space, do_hyper_params_check=TRUE)) }
}
# Registers and runs the test through the shared h2o-r test harness.
doTest("Random Forest Grid Search using bad parameters", check.drf.grid.cars.negative)
| /h2o-r/tests/testdir_algos/randomforest/runit_RF_grid_cars_negative.R | permissive | h2oai/h2o-3 | R | false | false | 5,483 | r | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")
# Negative-path grid-search test for DRF (randomForest) on the cars dataset.
# The configuration is randomized on every run: a random split seed, a random
# validation scheme and a random response type.  It verifies two behaviors of
# h2o.grid():
#   1. invalid hyper-parameter values (negative ntrees / min_rows) are dropped
#      when do_hyper_params_check=FALSE but raise an error when TRUE;
#   2. passing a non-gridable parameter as a grid dimension raises an error.
check.drf.grid.cars.negative <- function() {
  cars <- h2o.uploadFile(locate("smalldata/junit/cars_20mpg.csv"))
  # Randomized split seed, logged so failures can be reproduced.
  seed <- sample(1:1000000, 1)
  Log.info(paste0("runif seed: ",seed))
  r <- h2o.runif(cars,seed=seed)
  train <- cars[r > 0.2,]
  # Use `<-` for assignment (tidyverse/Google R style); `=` was used before.
  validation_scheme <- sample(1:3,1) # 1:none, 2:cross-validation, 3:validation set
  Log.info(paste0("Validation scheme (1:none, 2:cross-validation, 3:validation set): ", validation_scheme))
  if ( validation_scheme == 3 ) { valid <- cars[r <= 0.2,] }
  if ( validation_scheme == 2 ) {
    nfolds <- 2
    Log.info(paste0("N-folds: ", nfolds))
  }
  # Randomly pick the modeling task; the response column (and whether it is
  # converted to a factor) follows from this choice.
  problem <- sample(0:2,1)
  Log.info(paste0("Type model-building exercise (0:regression, 1:binomial, 2:multinomial): ", problem))
  predictors <- c("displacement","power","weight","acceleration","year")
  if ( problem == 1 ) { response_col <- "economy_20mpg"
  } else if ( problem == 2 ) { response_col <- "cylinders"
  } else { response_col <- "economy" }
  if ( problem == 1 || problem == 2 ) {
    Log.info("Converting the response column to a factor...")
    train[,response_col] <- as.factor(train[,response_col])
    if ( validation_scheme == 3 ) { valid[,response_col] <- as.factor(valid[,response_col]) } }
  Log.info(paste0("Predictors: ", paste(predictors, collapse=',')))
  Log.info(paste0("Response: ", response_col))
  ## Invalid drf parameters
  # Negative ntrees/min_rows values are invalid on purpose; the grid is
  # expected to silently drop them when do_hyper_params_check=FALSE.
  grid_space <- list()
  grid_space$ntrees <- c(5,10,-5)
  grid_space$max_depth <- c(2,5)
  grid_space$min_rows <- c(1,10,-7)
  Log.info(lapply(names(grid_space), function(n) paste0("The provided ",n," search space: ", grid_space[n])))
  # The search space that should survive after invalid values are filtered out.
  expected_grid_space <- list()
  expected_grid_space$ntrees <- c(5,10)
  expected_grid_space$max_depth <- c(2,5)
  expected_grid_space$min_rows <- c(1,10)
  Log.info(lapply(names(grid_space), function(n) paste0("The expected ",n," search space: ", expected_grid_space[n])))
  Log.info("Constructing the grid of drf models with some invalid drf parameters...")
  # With do_hyper_params_check=FALSE the grid builds (bad values dropped);
  # the identical call with TRUE must raise an error.
  if ( validation_scheme == 1 ) {
    cars_drf_grid <- h2o.grid("randomForest", grid_id="drf_grid_cars_test", x=predictors, y=response_col, training_frame=train, hyper_params=grid_space, do_hyper_params_check=FALSE)
    expect_error(h2o.grid("randomForest", grid_id="drf_grid_cars_test", x=predictors, y=response_col, training_frame=train, hyper_params=grid_space, do_hyper_params_check=TRUE))
  } else if ( validation_scheme == 2 ) {
    cars_drf_grid <- h2o.grid("randomForest", grid_id="drf_grid_cars_test", x=predictors, y=response_col, training_frame=train, nfolds=nfolds, hyper_params=grid_space, do_hyper_params_check=FALSE)
    expect_error(h2o.grid("randomForest", grid_id="drf_grid_cars_test", x=predictors, y=response_col, training_frame=train, nfolds=nfolds, hyper_params=grid_space, do_hyper_params_check=TRUE))
  } else {
    cars_drf_grid <- h2o.grid("randomForest", grid_id="drf_grid_cars_test", x=predictors, y=response_col, training_frame=train, validation_frame=valid, hyper_params=grid_space, do_hyper_params_check=FALSE)
    expect_error(h2o.grid("randomForest", grid_id="drf_grid_cars_test", x=predictors, y=response_col, training_frame=train, validation_frame=valid, hyper_params=grid_space, do_hyper_params_check=TRUE)) }
  Log.info("Performing various checks of the constructed grid...")
  Log.info("Check cardinality of grid, that is, the correct number of models have been created...")
  # 2 valid ntrees x 2 max_depth x 2 valid min_rows = 8 models expected.
  expect_equal(length(cars_drf_grid@model_ids), 8)
  Log.info("Check that the hyper_params that were passed to grid, were used to construct the models...")
  # Fetch the built models; lapply with the function reference replaces the
  # previous no-op wrapper `function(mid) { model = h2o.getModel(mid) }`.
  grid_models <- lapply(cars_drf_grid@model_ids, h2o.getModel)
  # Check expected number of models
  expect_equal(length(grid_models), 8)
  # Check parameters coverage
  for ( name in names(grid_space) ) { expect_model_param(grid_models, name, expected_grid_space[[name]]) }
  # TODO: Check error messages for cases with invalid drf parameters
  ## Non-gridable parameter passed as grid parameter
  # build_tree_one_node / binomial_double_trees cannot be gridded over, so the
  # strict grid construction below must fail under every validation scheme.
  non_gridable_parameter <- sample(1:2, 1)
  if ( non_gridable_parameter == 1 ) { grid_space$build_tree_one_node <- c(TRUE, FALSE) }
  if ( non_gridable_parameter == 2 ) { grid_space$binomial_double_trees <- c(TRUE, FALSE) }
  Log.info(paste0("Constructing the grid of drf models with non-gridable parameter: ", non_gridable_parameter ,
                  " (1:build_tree_one_node, 2:binomial_double_trees). Expecting failure..."))
  if ( validation_scheme == 1 ) {
    expect_error(cars_drf_grid <- h2o.grid("randomForest", grid_id="drf_grid_cars_test", x=predictors, y=response_col,
                                           training_frame=train, hyper_params=grid_space, do_hyper_params_check=TRUE))
  } else if ( validation_scheme == 2 ) {
    expect_error(cars_drf_grid <- h2o.grid("randomForest", grid_id="drf_grid_cars_test", x=predictors, y=response_col,
                                           training_frame=train, nfolds=nfolds, hyper_params=grid_space, do_hyper_params_check=TRUE))
  } else {
    expect_error(cars_drf_grid <- h2o.grid("randomForest", grid_id="drf_grid_cars_test", x=predictors, y=response_col,
                                           training_frame=train, validation_frame=valid, hyper_params=grid_space, do_hyper_params_check=TRUE)) }
}
# Registers and runs the test through the shared h2o-r test harness.
doTest("Random Forest Grid Search using bad parameters", check.drf.grid.cars.negative)
|
### R code from vignette source 'embedding.Rnw'
###################################################
### code chunk number 1: embedding.Rnw:11-12
###################################################
options(width=80, digits=4, useFancyQuotes=FALSE, prompt=" ", continue=" ")
###################################################
### code chunk number 2: embedding.Rnw:28-31
###################################################
library(car)
m1 <- lm(time ~ t1 + t2, Transact)
deltaMethod(m1, "t1/(t2 + 2)")
###################################################
### code chunk number 3: embedding.Rnw:34-39
###################################################
ans <- NULL
for (z in 1:4) {
ans <- rbind(ans, deltaMethod(m1, "t1/(t2 + z)",
func = gsub("z", z, "t1/(t1+z)"))) }
ans
###################################################
### code chunk number 4: embedding.Rnw:44-51
###################################################
# Vignette demo of a scoping pitfall: the symbol `x` inside the estimated
# expression "t1/(t2 + x)" is not resolved from this function's locals --
# chunk 5 below sets a global `x <- 10` before calling f1(m1), and chunk 6's
# f2 passes x explicitly via constants=list(x=x), which indicates deltaMethod
# cannot see the loop variable here (TODO confirm against car::deltaMethod).
# NOTE(review): the `func` label "t1/(t1+x)" does not match the estimated
# expression "t1/(t2 + x)"; this looks like a typo inherited from the vignette.
f1 <- function(mod) {
  ans <- NULL
  for (x in 1:4) {
    ans <- rbind(ans, deltaMethod(mod, "t1/(t2 + x)",
        func = gsub("x", x, "t1/(t1+x)")) )}
  ans
}
###################################################
### code chunk number 5: embedding.Rnw:63-65
###################################################
x <- 10
f1(m1)
###################################################
### code chunk number 6: embedding.Rnw:71-79
###################################################
# Working variant of f1: the current value of the loop variable is handed to
# deltaMethod explicitly through constants=list(x=x), so evaluating the
# expression "t1/(t2 + x)" no longer depends on a global `x`.
f2 <- function(mod) {
  ans <- NULL
  for (x in 1:4) {
    ans <- rbind(ans, deltaMethod(mod, "t1/(t2 + x)",
        func = gsub("x", x, "t1/(t1+x)"), constants=list(x=x)) )}
  ans
}
f2(m1)
###################################################
### code chunk number 7: embedding.Rnw:85-87
###################################################
m2 <- lm(prestige ~ education, Prestige)
ncvTest(m2, ~ income)
###################################################
### code chunk number 8: embedding.Rnw:90-95 (eval = FALSE)
###################################################
## f3 <- function(meanmod, dta, varmod) {
## m3 <- lm(meanmod, dta)
## ncvTest(m3, varmod)
## }
## f3(prestige ~ education, Prestige, ~ income)
###################################################
### code chunk number 9: embedding.Rnw:103-114
###################################################
# Vignette workaround for lm()/ncvTest() scoping: the arguments are copied
# into the global environment under reserved dotted names, the model is fit
# against those globals, and the temporaries are removed before returning --
# so repeated calls work (demonstrated by the two back-to-back calls below).
f4 <- function(meanmod, dta, varmod) {
  assign(".dta", dta, envir=.GlobalEnv)
  assign(".meanmod", meanmod, envir=.GlobalEnv)
  m1 <- lm(.meanmod, .dta)
  ans <- ncvTest(m1, varmod)
  remove(".dta", envir=.GlobalEnv)
  remove(".meanmod", envir=.GlobalEnv)
  ans
}
f4(prestige ~ education, Prestige, ~income)
f4(prestige ~ education, Prestige, ~income)
###################################################
### code chunk number 10: embedding.Rnw:119-127 (eval = FALSE)
###################################################
## library(effects)
## fc <- function(dta, formula, terms) {
## print(m1 <- lm(formula, .dta))
## Effect(terms, m1)
## }
## form <- prestige ~ income*type + education
## terms <- c("income", "type")
## fc(Duncan, form, terms)
###################################################
### code chunk number 11: embedding.Rnw:130-138 (eval = FALSE)
###################################################
## library(effects)
## fc.working <- function(dta, formula, terms) {
## assign(".dta", dta, env=.GlobalEnv)
## print(m1 <- lm(formula, .dta))
## Effect(terms, m1)
## remove(".dta", envir=.GlobalEnv)
## }
## fc.working(Duncan, form, terms)
###################################################
### code chunk number 12: embedding.Rnw:144-147
###################################################
m1 <- lm(time ~ t1 + t2, Transact)
b1 <- Boot(m1, R=999)
summary(b1)
###################################################
### code chunk number 13: embedding.Rnw:150-151
###################################################
confint(b1)
###################################################
### code chunk number 14: embedding.Rnw:155-156 (eval = FALSE)
###################################################
## .carEnv <- new.env(parent=emptyenv())
###################################################
### code chunk number 15: embedding.Rnw:160-203 (eval = FALSE)
###################################################
## Boot.default <- function(object, f=coef, labels=names(coef(object)),
## R=999, method=c("case", "residual")) {
## if(!(require(boot))) stop("The 'boot' package is missing")
## f0 <- f(object)
## if(length(labels) != length(f0)) labels <- paste("V", seq(length(f0)), sep="")
## method <- match.arg(method)
## if(method=="case") {
## boot.f <- function(data, indices, .fn) {
## assign(".boot.indices", indices, envir=car:::.carEnv)
## mod <- update(object, subset=get(".boot.indices", envir=car:::.carEnv))
## if(mod$qr$rank != object$qr$rank){
## out <- .fn(object)
## out <- rep(NA, length(out)) } else {out <- .fn(mod)}
## out
## }
## } else {
## boot.f <- function(data, indices, .fn) {
## first <- all(indices == seq(length(indices)))
## res <- if(first) object$residuals else
## residuals(object, type="pearson")/sqrt(1 - hatvalues(object))
## res <- if(!first) (res - mean(res)) else res
## val <- fitted(object) + res[indices]
## if (!is.null(object$na.action)){
## pad <- object$na.action
## attr(pad, "class") <- "exclude"
## val <- naresid(pad, val)
## }
## assign(".y.boot", val, envir=car:::.carEnv)
## mod <- update(object, get(".y.boot", envir=car:::.carEnv) ~ .)
## if(mod$qr$rank != object$qr$rank){
## out <- .fn(object)
## out <- rep(NA, length(out)) } else {out <- .fn(mod)}
## out
## }
## }
## b <- boot(data.frame(update(object, model=TRUE)$model), boot.f, R, .fn=f)
## colnames(b$t) <- labels
## if(exists(".y.boot", envir=car:::.carEnv))
## remove(".y.boot", envir=car:::.carEnv)
## if(exists(".boot.indices", envir=car:::.carEnv))
## remove(".boot.indices", envir=car:::.carEnv)
## b
## }
| /R/lib/car/doc/embedding.R | no_license | BRICOMATA/Bricomata_ | R | false | false | 6,295 | r | ### R code from vignette source 'embedding.Rnw'
###################################################
### code chunk number 1: embedding.Rnw:11-12
###################################################
options(width=80, digits=4, useFancyQuotes=FALSE, prompt=" ", continue=" ")
###################################################
### code chunk number 2: embedding.Rnw:28-31
###################################################
library(car)
m1 <- lm(time ~ t1 + t2, Transact)
deltaMethod(m1, "t1/(t2 + 2)")
###################################################
### code chunk number 3: embedding.Rnw:34-39
###################################################
ans <- NULL
for (z in 1:4) {
ans <- rbind(ans, deltaMethod(m1, "t1/(t2 + z)",
func = gsub("z", z, "t1/(t1+z)"))) }
ans
###################################################
### code chunk number 4: embedding.Rnw:44-51
###################################################
# Vignette demo of a scoping pitfall: the symbol `x` inside the estimated
# expression "t1/(t2 + x)" is not resolved from this function's locals --
# chunk 5 sets a global `x <- 10` before calling f1(m1), and chunk 6's f2
# passes x explicitly via constants=list(x=x), which indicates deltaMethod
# cannot see the loop variable here (TODO confirm against car::deltaMethod).
# NOTE(review): the `func` label "t1/(t1+x)" does not match the estimated
# expression "t1/(t2 + x)"; this looks like a typo inherited from the vignette.
f1 <- function(mod) {
  ans <- NULL
  for (x in 1:4) {
    ans <- rbind(ans, deltaMethod(mod, "t1/(t2 + x)",
        func = gsub("x", x, "t1/(t1+x)")) )}
  ans
}
###################################################
### code chunk number 5: embedding.Rnw:63-65
###################################################
x <- 10
f1(m1)
###################################################
### code chunk number 6: embedding.Rnw:71-79
###################################################
# Corrected version of f1: the loop constant is handed to deltaMethod()
# explicitly via `constants = list(x = x)`, so deltaMethod() no longer
# needs to find `x` by searching enclosing environments.
f2 <- function(mod) {
  ans <- NULL
  for (x in 1:4) {
    # constants= supplies the value used when "t1/(t2 + x)" is evaluated;
    # gsub() again only builds the row label.
    ans <- rbind(ans, deltaMethod(mod, "t1/(t2 + x)",
        func = gsub("x", x, "t1/(t1+x)"), constants=list(x=x)) )}
  ans
}
f2(m1)
###################################################
### code chunk number 7: embedding.Rnw:85-87
###################################################
m2 <- lm(prestige ~ education, Prestige)
ncvTest(m2, ~ income)
###################################################
### code chunk number 8: embedding.Rnw:90-95 (eval = FALSE)
###################################################
## f3 <- function(meanmod, dta, varmod) {
## m3 <- lm(meanmod, dta)
## ncvTest(m3, varmod)
## }
## f3(prestige ~ education, Prestige, ~ income)
###################################################
### code chunk number 9: embedding.Rnw:103-114
###################################################
# Workaround for ncvTest()'s non-standard evaluation: lm() records the
# *names* of its formula/data arguments, so the arguments are stashed in
# the global environment under dot-prefixed names, the model is fitted
# against those globals, and they are removed again before returning.
# NOTE(review): temporarily mutates .GlobalEnv (.dta, .meanmod); if
# ncvTest() errors, the cleanup below is skipped.
f4 <- function(meanmod, dta, varmod) {
  assign(".dta", dta, envir=.GlobalEnv)
  assign(".meanmod", meanmod, envir=.GlobalEnv)
  # fit using the globally visible copies so later NSE lookups succeed
  m1 <- lm(.meanmod, .dta)
  ans <- ncvTest(m1, varmod)
  # tidy up the temporary globals
  remove(".dta", envir=.GlobalEnv)
  remove(".meanmod", envir=.GlobalEnv)
  ans
}
f4(prestige ~ education, Prestige, ~income)
f4(prestige ~ education, Prestige, ~income)
###################################################
### code chunk number 10: embedding.Rnw:119-127 (eval = FALSE)
###################################################
## library(effects)
## fc <- function(dta, formula, terms) {
## print(m1 <- lm(formula, .dta))
## Effect(terms, m1)
## }
## form <- prestige ~ income*type + education
## terms <- c("income", "type")
## fc(Duncan, form, terms)
###################################################
### code chunk number 11: embedding.Rnw:130-138 (eval = FALSE)
###################################################
## library(effects)
## fc.working <- function(dta, formula, terms) {
## assign(".dta", dta, env=.GlobalEnv)
## print(m1 <- lm(formula, .dta))
## Effect(terms, m1)
## remove(".dta", envir=.GlobalEnv)
## }
## fc.working(Duncan, form, terms)
###################################################
### code chunk number 12: embedding.Rnw:144-147
###################################################
m1 <- lm(time ~ t1 + t2, Transact)
b1 <- Boot(m1, R=999)
summary(b1)
###################################################
### code chunk number 13: embedding.Rnw:150-151
###################################################
confint(b1)
###################################################
### code chunk number 14: embedding.Rnw:155-156 (eval = FALSE)
###################################################
## .carEnv <- new.env(parent=emptyenv())
###################################################
### code chunk number 15: embedding.Rnw:160-203 (eval = FALSE)
###################################################
## Boot.default <- function(object, f=coef, labels=names(coef(object)),
## R=999, method=c("case", "residual")) {
## if(!(require(boot))) stop("The 'boot' package is missing")
## f0 <- f(object)
## if(length(labels) != length(f0)) labels <- paste("V", seq(length(f0)), sep="")
## method <- match.arg(method)
## if(method=="case") {
## boot.f <- function(data, indices, .fn) {
## assign(".boot.indices", indices, envir=car:::.carEnv)
## mod <- update(object, subset=get(".boot.indices", envir=car:::.carEnv))
## if(mod$qr$rank != object$qr$rank){
## out <- .fn(object)
## out <- rep(NA, length(out)) } else {out <- .fn(mod)}
## out
## }
## } else {
## boot.f <- function(data, indices, .fn) {
## first <- all(indices == seq(length(indices)))
## res <- if(first) object$residuals else
## residuals(object, type="pearson")/sqrt(1 - hatvalues(object))
## res <- if(!first) (res - mean(res)) else res
## val <- fitted(object) + res[indices]
## if (!is.null(object$na.action)){
## pad <- object$na.action
## attr(pad, "class") <- "exclude"
## val <- naresid(pad, val)
## }
## assign(".y.boot", val, envir=car:::.carEnv)
## mod <- update(object, get(".y.boot", envir=car:::.carEnv) ~ .)
## if(mod$qr$rank != object$qr$rank){
## out <- .fn(object)
## out <- rep(NA, length(out)) } else {out <- .fn(mod)}
## out
## }
## }
## b <- boot(data.frame(update(object, model=TRUE)$model), boot.f, R, .fn=f)
## colnames(b$t) <- labels
## if(exists(".y.boot", envir=car:::.carEnv))
## remove(".y.boot", envir=car:::.carEnv)
## if(exists(".boot.indices", envir=car:::.carEnv))
## remove(".boot.indices", envir=car:::.carEnv)
## b
## }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zwater_xml_water_demand_primary.R
\name{module_water_water_demand_primary_xml}
\alias{module_water_water_demand_primary_xml}
\title{module_water_water_demand_primary_xml}
\usage{
module_water_water_demand_primary_xml(command, ...)
}
\arguments{
\item{command}{API command to execute}
\item{...}{other optional parameters, depending on command}
}
\value{
Depends on \code{command}: either a vector of required inputs,
a vector of output names, or (if \code{command} is "MAKE") all
the generated outputs: \code{water_demand_primary.xml}. The corresponding file in the
original data system was \code{batch_water_demand_primary.xml.R} (water XML).
}
\description{
Construct XML data structure for \code{water_demand_primary.xml}.
}
| /input/gcamdata/man/module_water_water_demand_primary_xml.Rd | permissive | JGCRI/gcam-core | R | false | true | 807 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zwater_xml_water_demand_primary.R
\name{module_water_water_demand_primary_xml}
\alias{module_water_water_demand_primary_xml}
\title{module_water_water_demand_primary_xml}
\usage{
module_water_water_demand_primary_xml(command, ...)
}
\arguments{
\item{command}{API command to execute}
\item{...}{other optional parameters, depending on command}
}
\value{
Depends on \code{command}: either a vector of required inputs,
a vector of output names, or (if \code{command} is "MAKE") all
the generated outputs: \code{water_demand_primary.xml}. The corresponding file in the
original data system was \code{batch_water_demand_primary.xml.R} (water XML).
}
\description{
Construct XML data structure for \code{water_demand_primary.xml}.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SCESet-methods.R
\name{newSCESet}
\alias{newSCESet}
\title{Create a new SCESet object.}
\usage{
newSCESet(exprsData = NULL, countData = NULL, tpmData = NULL,
fpkmData = NULL, cpmData = NULL, phenoData = NULL, featureData = NULL,
experimentData = NULL, is_exprsData = NULL,
cellPairwiseDistances = dist(vector()),
featurePairwiseDistances = dist(vector()), lowerDetectionLimit = NULL,
logExprsOffset = NULL)
}
\arguments{
\item{exprsData}{expression data matrix for an experiment (features x cells)}
\item{countData}{data matrix containing raw count expression values}
\item{tpmData}{matrix of class \code{"numeric"} containing
transcripts-per-million (TPM) expression values}
\item{fpkmData}{matrix of class \code{"numeric"} containing fragments per
kilobase of exon per million reads mapped (FPKM) expression values}
\item{cpmData}{matrix of class \code{"numeric"} containing counts per
million (CPM) expression values (optional)}
\item{phenoData}{data frame containing attributes of individual cells}
\item{featureData}{data frame containing attributes of features (e.g. genes)}
\item{experimentData}{MIAME class object containing metadata and details
about the experiment and dataset.}
\item{is_exprsData}{matrix of class \code{"logical"}, indicating whether
or not each observation is above the \code{lowerDetectionLimit}.}
\item{cellPairwiseDistances}{object of class \code{"dist"} (or a class that
extends "dist") containing cell-cell distance or dissimilarity values.}
\item{featurePairwiseDistances}{object of class \code{"dist"} (or a class that
extends "dist") containing feature-feature distance or dissimilarity values.}
\item{lowerDetectionLimit}{the minimum expression level that constitutes true
expression (defaults to zero and uses count data to determine if an
observation is expressed or not).}
\item{logExprsOffset}{numeric scalar, providing the offset used when doing
log2-transformations of expression data to avoid trying to take logs of zero.
Default offset value is \code{1}.}
}
\value{
a new SCESet object
}
\description{
Create a new SCESet object (the basic data container class in scater) from a
supplied matrix of expression values, plus cell and feature metadata. The
expression matrix has rows representing features (usually genes) and columns
representing cells.
}
\details{
Scater requires that all data be housed in SCESet objects. SCESet extends
Bioconductor's ExpressionSet class, and the same basic interface is
supported. newSCESet() expects a single matrix of expression values of a
nominated type to be provided, for example a matrix of counts or a matrix of
transcripts-per-million values. There is a hierarchy applied to the
expression data: counts > transcripts-per-million (tpm) > counts-per-million
(cpm) > fragments-per-kilobase-per-million-mapped (fpkm) > generic expression
values on the log2 scale (exprs). Data types higher in the hierarchy are
preferred. Data types lower in the hierarchy will be computed from values
higher in the hierarchy - e.g. counts-per-million and expression values
(as log2(cpm + offset)) will be computed from counts. Data types higher in
the hierarchy will never be computed from types lower in the hierarchy (e.g.
counts will never be computed from exprs values). At a
minimum, an SCESet object will contain exprs values; these will be computed
as log2(*pm + offset) values if a data type higher in the hierarchy is
supplied as the expression matrix.
Per-feature and per-cell metadata can be supplied with the featureData and
phenoData arguments, respectively. Use of these optional arguments is strongly encouraged.
Many methods are provided in the package that operate on SCESet objects.
Aside from the hierarchy of data types described above, scater is relatively
agnostic with respect to the nature of the expression values. Most
frequently used values are feature counts or transcripts-per-million (tpm),
but any valid output from a program that calculates expression values from
RNA-Seq reads is supported. For example, expression values could also be
values from a single cell qPCR run or some other type of assay.
In some cases it may be desirable to have both tpm and counts in an SCESet
object. In such cases, expression matrices can be added to an SCESet object
after it has been produced by using the \code{\link{set_exprs}} function to
add the expression matrix to the SCESet object.
In many downstream functions it is most convenient if the
\code{'exprs'} values are on the log2-scale, so this is done by default.
}
\examples{
data("sc_example_counts")
data("sc_example_cell_info")
pd <- new("AnnotatedDataFrame", data = sc_example_cell_info)
example_sceset <- newSCESet(countData = sc_example_counts, phenoData = pd)
example_sceset
}
| /man/newSCESet.Rd | permissive | dynverse/scaterlegacy | R | false | true | 4,842 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SCESet-methods.R
\name{newSCESet}
\alias{newSCESet}
\title{Create a new SCESet object.}
\usage{
newSCESet(exprsData = NULL, countData = NULL, tpmData = NULL,
fpkmData = NULL, cpmData = NULL, phenoData = NULL, featureData = NULL,
experimentData = NULL, is_exprsData = NULL,
cellPairwiseDistances = dist(vector()),
featurePairwiseDistances = dist(vector()), lowerDetectionLimit = NULL,
logExprsOffset = NULL)
}
\arguments{
\item{exprsData}{expression data matrix for an experiment (features x cells)}
\item{countData}{data matrix containing raw count expression values}
\item{tpmData}{matrix of class \code{"numeric"} containing
transcripts-per-million (TPM) expression values}
\item{fpkmData}{matrix of class \code{"numeric"} containing fragments per
kilobase of exon per million reads mapped (FPKM) expression values}
\item{cpmData}{matrix of class \code{"numeric"} containing counts per
million (CPM) expression values (optional)}
\item{phenoData}{data frame containing attributes of individual cells}
\item{featureData}{data frame containing attributes of features (e.g. genes)}
\item{experimentData}{MIAME class object containing metadata and details
about the experiment and dataset.}
\item{is_exprsData}{matrix of class \code{"logical"}, indicating whether
or not each observation is above the \code{lowerDetectionLimit}.}
\item{cellPairwiseDistances}{object of class \code{"dist"} (or a class that
extends "dist") containing cell-cell distance or dissimilarity values.}
\item{featurePairwiseDistances}{object of class \code{"dist"} (or a class that
extends "dist") containing feature-feature distance or dissimilarity values.}
\item{lowerDetectionLimit}{the minimum expression level that constitutes true
expression (defaults to zero and uses count data to determine if an
observation is expressed or not).}
\item{logExprsOffset}{numeric scalar, providing the offset used when doing
log2-transformations of expression data to avoid trying to take logs of zero.
Default offset value is \code{1}.}
}
\value{
a new SCESet object
}
\description{
Create a new SCESet object (the basic data container class in scater) from a
supplied matrix of expression values, plus cell and feature metadata. The
expression matrix has rows representing features (usually genes) and columns
representing cells.
}
\details{
Scater requires that all data be housed in SCESet objects. SCESet extends
Bioconductor's ExpressionSet class, and the same basic interface is
supported. newSCESet() expects a single matrix of expression values of a
nominated type to be provided, for example a matrix of counts or a matrix of
transcripts-per-million values. There is a hierarchy applied to the
expression data: counts > transcripts-per-million (tpm) > counts-per-million
(cpm) > fragments-per-kilobase-per-million-mapped (fpkm) > generic expression
values on the log2 scale (exprs). Data types higher in the hierarchy are
preferred. Data types lower in the hierarchy will be computed from values
higher in the hierarchy - e.g. counts-per-million and expression values
(as log2(cpm + offset)) will be computed from counts. Data types higher in
the hierarchy will never be computed from types lower in the hierarchy (e.g.
counts will never be computed from exprs values). At a
minimum, an SCESet object will contain exprs values; these will be computed
as log2(*pm + offset) values if a data type higher in the hierarchy is
supplied as the expression matrix.
Per-feature and per-cell metadata can be supplied with the featureData and
phenoData arguments, respectively. Use of these optional arguments is strongly encouraged.
Many methods are provided in the package that operate on SCESet objects.
Aside from the hierarchy of data types described above, scater is relatively
agnostic with respect to data the nature of the expression values. Most
frequently used values are feature counts or transcripts-per-million (tpm),
but any valid output from a program that calculates expression values from
RNA-Seq reads is supported. For example, expression values could also be
values from a single cell qPCR run or some other type of assay.
In some cases it may be desirable to have both tpm and counts in an SCESet
object. In such cases, expression matrices can be added to an SCESet object
after it has been produced by using the \code{\link{set_exprs}} function to
add the expression matrix to the SCESet object.
In many downstream functions it is most convenient if the
\code{'exprs'} values are on the log2-scale, so this is done by default.
}
\examples{
data("sc_example_counts")
data("sc_example_cell_info")
pd <- new("AnnotatedDataFrame", data = sc_example_cell_info)
example_sceset <- newSCESet(countData = sc_example_counts, phenoData = pd)
example_sceset
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.comparison.R
\name{sensitivity.table}
\alias{sensitivity.table}
\title{Compute sensitivity/specificity/etc.}
\usage{
sensitivity.table(object)
}
\arguments{
\item{object}{an object that can be predicted (e.g., glm). Note the thing to be predicted must have only two outcomes}
}
\description{
Compute sensitivity/specificity/etc.
}
\details{
This function computes sensitivity, specificity, positive/negative predictive value and accuracy and reports
them as a list.
}
\author{
Dustin Fife
}
| /man/sensitivity.table.Rd | no_license | daniloap/flexplot | R | false | true | 575 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.comparison.R
\name{sensitivity.table}
\alias{sensitivity.table}
\title{Compute sensitivity/specificity/etc.}
\usage{
sensitivity.table(object)
}
\arguments{
\item{object}{an object that can be predicted (e.g., glm). Note the thing to be predicted must have only two outcomes}
}
\description{
Compute sensitivity/specificity/etc.
}
\details{
This function computes sensitivity, specificity, positive/negative predictive value and accuracy and reports
them as a list.
}
\author{
Dustin Fife
}
|
context("Testing the trawl autocorrelation functions in TrawlCorrelationFunctions.R")
test_that("Function acf_Exp works",{
  # Exponential trawl acf: acf(x) = exp(-lambda * x); lag 0 must give 1.
  expect_equal(acf_Exp(0,0.1),1)
  expect_equal(acf_Exp(1,0.1),base::exp(-1*0.1))
  expect_equal(acf_Exp(10,0.1),base::exp(-10*0.1))
  # repeat with a second decay rate
  expect_equal(acf_Exp(0,0.01),1)
  expect_equal(acf_Exp(1,0.01),base::exp(-1*0.01))
  expect_equal(acf_Exp(10,0.01),base::exp(-10*0.01))
})
test_that("Function acf_DExp works",{
  # Weighted double-exponential trawl acf:
  # acf(x) = (w*exp(-x*l1)/l1 + (1-w)*exp(-x*l2)/l2) / (w/l1 + (1-w)/l2),
  # so acf(0) = 1 for any parameterisation.
  expect_equal(acf_DExp(0,0.1,0.1,1),1)
  expect_equal(acf_DExp(1,0.1,0.1,1),(0.1*base::exp(-1*0.1)/0.1+0.9*base::exp(-1*1)/1)/(0.1/0.1+0.9/1))
  expect_equal(acf_DExp(10,0.1,0.1,1),(0.1*base::exp(-10*0.1)/0.1+0.9*base::exp(-10*1)/1)/(0.1/0.1+0.9/1))
  # Was a duplicate of the first expectation; test lag 0 for the w = 0
  # parameterisation exercised by the next two lines instead.
  expect_equal(acf_DExp(0,0,0.1,0.01),1)
  # with w = 0 the acf collapses to a single exponential exp(-x*l2)
  expect_equal(acf_DExp(1,0,0.1,0.01),base::exp(-1*0.01))
  expect_equal(acf_DExp(10,0,0.1,0.01),base::exp(-10*0.01))
})
test_that("Function acf_supIG works",{
  # Sup-IG trawl acf: acf(x) = exp(delta*gamma*(1 - sqrt(1 + 2x/gamma^2))).
  expect_equal(acf_supIG(0,0.1,0.5),1)
  expect_equal(acf_supIG(1,0.1,0.5),base::exp(0.1*0.5*(1-base::sqrt(1+2*1/0.5^2))))
  expect_equal(acf_supIG(10,0.1,0.5),base::exp(0.1*0.5*(1-base::sqrt(1+2*10/0.5^2))))
})
test_that("Function acf_LM works",{
  # Long-memory trawl acf: acf(x) = (1 + x/alpha)^(1 - H); lag 0 gives 1.
  expect_equal(acf_LM(0,0.1,1.1),1)
  expect_equal(acf_LM(1,0.1,1.1),(1+1/0.1)^(-0.1))
  expect_equal(acf_LM(10,0.1,1.1),(1+10/0.1)^(-0.1))
})
| /tests/testthat/test_TrawlCorrelationFunctions.R | no_license | cran/trawl | R | false | false | 1,312 | r | context("Testing the trawl autocorrelation functions in TrawlCorrelationFunctions.R")
test_that("Function acf_Exp works",{
  # Exponential trawl acf: acf(x) = exp(-lambda * x); lag 0 must give 1.
  expect_equal(acf_Exp(0,0.1),1)
  expect_equal(acf_Exp(1,0.1),base::exp(-1*0.1))
  expect_equal(acf_Exp(10,0.1),base::exp(-10*0.1))
  # repeat with a second decay rate
  expect_equal(acf_Exp(0,0.01),1)
  expect_equal(acf_Exp(1,0.01),base::exp(-1*0.01))
  expect_equal(acf_Exp(10,0.01),base::exp(-10*0.01))
})
test_that("Function acf_DExp works",{
  # Weighted double-exponential trawl acf:
  # acf(x) = (w*exp(-x*l1)/l1 + (1-w)*exp(-x*l2)/l2) / (w/l1 + (1-w)/l2),
  # so acf(0) = 1 for any parameterisation.
  expect_equal(acf_DExp(0,0.1,0.1,1),1)
  expect_equal(acf_DExp(1,0.1,0.1,1),(0.1*base::exp(-1*0.1)/0.1+0.9*base::exp(-1*1)/1)/(0.1/0.1+0.9/1))
  expect_equal(acf_DExp(10,0.1,0.1,1),(0.1*base::exp(-10*0.1)/0.1+0.9*base::exp(-10*1)/1)/(0.1/0.1+0.9/1))
  # Was a duplicate of the first expectation; test lag 0 for the w = 0
  # parameterisation exercised by the next two lines instead.
  expect_equal(acf_DExp(0,0,0.1,0.01),1)
  # with w = 0 the acf collapses to a single exponential exp(-x*l2)
  expect_equal(acf_DExp(1,0,0.1,0.01),base::exp(-1*0.01))
  expect_equal(acf_DExp(10,0,0.1,0.01),base::exp(-10*0.01))
})
test_that("Function acf_supIG works",{
  # Sup-IG trawl acf: acf(x) = exp(delta*gamma*(1 - sqrt(1 + 2x/gamma^2))).
  expect_equal(acf_supIG(0,0.1,0.5),1)
  expect_equal(acf_supIG(1,0.1,0.5),base::exp(0.1*0.5*(1-base::sqrt(1+2*1/0.5^2))))
  expect_equal(acf_supIG(10,0.1,0.5),base::exp(0.1*0.5*(1-base::sqrt(1+2*10/0.5^2))))
})
test_that("Function acf_LM works",{
  # Long-memory trawl acf: acf(x) = (1 + x/alpha)^(1 - H); lag 0 gives 1.
  expect_equal(acf_LM(0,0.1,1.1),1)
  expect_equal(acf_LM(1,0.1,1.1),(1+1/0.1)^(-0.1))
  expect_equal(acf_LM(10,0.1,1.1),(1+10/0.1)^(-0.1))
})
|
#automation webscrape of the greenwich new buildings site
#Run in 64 bit to do arcgis binding with armap pto
#clear all variables
rm(list=ls())
library(purrr, lib = "C:/Program Files/R/R-3.4.1/library")
library(httr, lib = "C:/Program Files/R/R-3.4.1/library")
library(rvest,lib = "C:/Program Files/R/R-3.4.1/library")
library(XML, lib = "C:/Program Files/R/R-3.4.1/library")
library(magrittr,lib = "C:/Program Files/R/R-3.4.1/library")
library(multiplex,lib = "C:/Program Files/R/R-3.4.1/library")
library(rgdal,lib = "C:/Program Files/R/R-3.4.1/library")
library(data.table,lib = "C:/Program Files/R/R-3.4.1/library")
library(dplyr,lib = "C:/Program Files/R/R-3.4.1/library")
library(tidyr,lib = "C:/Program Files/R/R-3.4.1/library")
library(RCurl,lib = "C:/Program Files/R/R-3.4.1/library")
library(curl,lib = "C:/Program Files/R/R-3.4.1/library")
#library(RPostgreSQL, lib = "C:/Program Files/R/R-3.4.1/library")
#library(rpostgis, lib = "C:/Program Files/R/R-3.4.1/library")
#library(arcgisbinding, lib = "C:/Program Files/R/R-3.4.1/library")
#arc.check_product()
#setwd
setwd("D:/FME Scheduled_tasks/R_tasks/")
#rvest to locates form
url<-"https://planning.royalgreenwich.gov.uk/online-applications/search.do?action=monthlyList" ## page to spider
session <-html_session(url)
## create session
greenwhichForm <-html_form(session)[[1]]
#which month to query
month <- "Nov 17"
parish <- "GRN"
greenwhichForm <- set_values(greenwhichForm, searchCriteria.parish = parish, month = month, dateType = "DC_Decided")
session1 <- submit_form(session,greenwhichForm)
#Row1 of table
link <- session1 %>% html_nodes(xpath = paste0('//*[@id="searchresults"]/li[',1,']/a')) %>% html_text()
table <- session1 %>%
follow_link(link) %>%
read_html() %>%
html_table() %>%
as.data.frame()
wideTable <- table %>%
spread(X1,X2)
linkCount <- length(session1 %>% html_nodes(xpath = '//*[@id="searchresults"]/li/a') %>% html_text())
#page
# Scrape one search result: follow the `hyperlink`-th result link on the
# current results page of `session`, read both the summary table and the
# "Further Information" table, reshape the key/value pairs into a single
# wide row and append it to the global accumulator `wideTable` via `<<-`.
# NOTE(review): assumes `wideTable` already exists in the calling
# environment and performs live HTTP requests.
pageScrape <- function(hyperlink, session) {
  # relative href of the requested result in the list
  link <- session %>% html_nodes(xpath = paste0('//*[@id="searchresults"]/li[',hyperlink,']/a')) %>% html_attr('href')
  # summary table on the application's landing page
  table1 <- session %>%
    jump_to(link) %>%
    read_html() %>%
    ####remove non unicode
    html_table() %>%
    as.data.frame()
  # additional key/value pairs from the "Further Information" tab
  table2 <- session %>%
    jump_to(link) %>%
    follow_link("Further Information") %>%
    read_html() %>%
    ####remove non unicode
    html_table() %>%
    as.data.frame()
  # stack both two-column (X1 = key, X2 = value) tables, drop duplicates
  table3 <- bind_rows(table1, table2) %>%
    distinct()
  # long key/value format -> one wide row per application
  wideTableUpdate <- table3 %>%
    spread(X1,X2)
  # append to the global result table (side effect)
  wideTable <<- bind_rows(wideTable, wideTableUpdate)
}
#function navigates to the next page using the button, x = 3 would take you to page 4
#new method just presses the next button each time if there is one
# Advance the global `session1` to the next page of search results.
# The pagination panel's text contains "Next" only while more pages
# remain; in that case follow the "Next" link and rebind `session1`
# in the enclosing environment via `<<-`.
pager <- function() {
  has_next <- session1 %>%
    html_nodes(xpath = paste0('///*[@id="searchResultsContainer"]/p[1]')) %>%
    html_text() %>%
    stringr::str_detect("Next")
  # any() collapses the per-node matches to a single scalar and is FALSE
  # when no node matched, so the final page no longer triggers a
  # zero-length / multi-element condition error inside if().
  if (any(has_next)) {
    # Follow the link directly instead of building the call as a string
    # and running it through eval(parse(text = ...)).
    session1 <<- session1 %>% follow_link("Next")
    print(session1)
  } else {
    print("no more next buttons, end of scrape")
  }
}
Pages <- session1 %>%
html_nodes(xpath = paste0('//*[@id="searchResultsContainer"]/p[1]/span[1]/text()')) %>%
html_text() %>%
stringr::str_extract_all("\\d+") %>%
as.numeric()/10
pageLimit <- ceiling(Pages)
for(i in 0:pageLimit) {
linkCount <- session1 %>%
html_nodes(xpath = paste0('//*[@id="searchresults"]/li/a')) %>%
html_text() %>%
as.data.frame() %>%
nrow() %>%
seq(from = 1)
lapply(linkCount, pageScrape, session = session1)
pager()
}
uniqueRefWideTable <- distinct(wideTable, Reference, .keep_all = TRUE)
write.csv(uniqueRefWideTable, "Q:/Projects/Data Science/Data Cakes/WebScrape Greenwich/november_decisions.csv")
#write.csv("N:/LDD/Web_Scrape/Planning_Decision_List/Greenwich/november_decisions.csv")
#git hub test
| /greenwich_webscrape_DataCakesExample.R | no_license | seanolondon/WebScrape-Greenwich | R | false | false | 4,087 | r | #automation webscrape of the greenwich new buildings site
#Run in 64 bit to do arcgis binding with armap pto
#clear all variables
rm(list=ls())
library(purrr, lib = "C:/Program Files/R/R-3.4.1/library")
library(httr, lib = "C:/Program Files/R/R-3.4.1/library")
library(rvest,lib = "C:/Program Files/R/R-3.4.1/library")
library(XML, lib = "C:/Program Files/R/R-3.4.1/library")
library(magrittr,lib = "C:/Program Files/R/R-3.4.1/library")
library(multiplex,lib = "C:/Program Files/R/R-3.4.1/library")
library(rgdal,lib = "C:/Program Files/R/R-3.4.1/library")
library(data.table,lib = "C:/Program Files/R/R-3.4.1/library")
library(dplyr,lib = "C:/Program Files/R/R-3.4.1/library")
library(tidyr,lib = "C:/Program Files/R/R-3.4.1/library")
library(RCurl,lib = "C:/Program Files/R/R-3.4.1/library")
library(curl,lib = "C:/Program Files/R/R-3.4.1/library")
#library(RPostgreSQL, lib = "C:/Program Files/R/R-3.4.1/library")
#library(rpostgis, lib = "C:/Program Files/R/R-3.4.1/library")
#library(arcgisbinding, lib = "C:/Program Files/R/R-3.4.1/library")
#arc.check_product()
#setwd
setwd("D:/FME Scheduled_tasks/R_tasks/")
#rvest to locates form
url<-"https://planning.royalgreenwich.gov.uk/online-applications/search.do?action=monthlyList" ## page to spider
session <-html_session(url)
## create session
greenwhichForm <-html_form(session)[[1]]
#which month to query
month <- "Nov 17"
parish <- "GRN"
greenwhichForm <- set_values(greenwhichForm, searchCriteria.parish = parish, month = month, dateType = "DC_Decided")
session1 <- submit_form(session,greenwhichForm)
#Row1 of table
link <- session1 %>% html_nodes(xpath = paste0('//*[@id="searchresults"]/li[',1,']/a')) %>% html_text()
table <- session1 %>%
follow_link(link) %>%
read_html() %>%
html_table() %>%
as.data.frame()
wideTable <- table %>%
spread(X1,X2)
linkCount <- length(session1 %>% html_nodes(xpath = '//*[@id="searchresults"]/li/a') %>% html_text())
#page
# Scrape one search result: follow the `hyperlink`-th result link on the
# current results page of `session`, read both the summary table and the
# "Further Information" table, reshape the key/value pairs into a single
# wide row and append it to the global accumulator `wideTable` via `<<-`.
# NOTE(review): assumes `wideTable` already exists in the calling
# environment and performs live HTTP requests.
pageScrape <- function(hyperlink, session) {
  # relative href of the requested result in the list
  link <- session %>% html_nodes(xpath = paste0('//*[@id="searchresults"]/li[',hyperlink,']/a')) %>% html_attr('href')
  # summary table on the application's landing page
  table1 <- session %>%
    jump_to(link) %>%
    read_html() %>%
    ####remove non unicode
    html_table() %>%
    as.data.frame()
  # additional key/value pairs from the "Further Information" tab
  table2 <- session %>%
    jump_to(link) %>%
    follow_link("Further Information") %>%
    read_html() %>%
    ####remove non unicode
    html_table() %>%
    as.data.frame()
  # stack both two-column (X1 = key, X2 = value) tables, drop duplicates
  table3 <- bind_rows(table1, table2) %>%
    distinct()
  # long key/value format -> one wide row per application
  wideTableUpdate <- table3 %>%
    spread(X1,X2)
  # append to the global result table (side effect)
  wideTable <<- bind_rows(wideTable, wideTableUpdate)
}
#function navigates to the next page using the button, x = 3 would take you to page 4
#new method just presses the next button each time if there is one
# Advance the global `session1` to the next page of search results.
# The pagination panel's text contains "Next" only while more pages
# remain; in that case follow the "Next" link and rebind `session1`
# in the enclosing environment via `<<-`.
pager <- function() {
  has_next <- session1 %>%
    html_nodes(xpath = paste0('///*[@id="searchResultsContainer"]/p[1]')) %>%
    html_text() %>%
    stringr::str_detect("Next")
  # any() collapses the per-node matches to a single scalar and is FALSE
  # when no node matched, so the final page no longer triggers a
  # zero-length / multi-element condition error inside if().
  if (any(has_next)) {
    # Follow the link directly instead of building the call as a string
    # and running it through eval(parse(text = ...)).
    session1 <<- session1 %>% follow_link("Next")
    print(session1)
  } else {
    print("no more next buttons, end of scrape")
  }
}
Pages <- session1 %>%
html_nodes(xpath = paste0('//*[@id="searchResultsContainer"]/p[1]/span[1]/text()')) %>%
html_text() %>%
stringr::str_extract_all("\\d+") %>%
as.numeric()/10
pageLimit <- ceiling(Pages)
for(i in 0:pageLimit) {
linkCount <- session1 %>%
html_nodes(xpath = paste0('//*[@id="searchresults"]/li/a')) %>%
html_text() %>%
as.data.frame() %>%
nrow() %>%
seq(from = 1)
lapply(linkCount, pageScrape, session = session1)
pager()
}
# De-duplicate the accumulated rows on the application Reference column
# (keeping all other columns) and write the result to the shared drive.
uniqueRefWideTable <- distinct(wideTable, Reference, .keep_all = TRUE)
write.csv(uniqueRefWideTable, "Q:/Projects/Data Science/Data Cakes/WebScrape Greenwich/november_decisions.csv")
# Alternative output location, kept for reference:
#write.csv("N:/LDD/Web_Scrape/Planning_Decision_List/Greenwich/november_decisions.csv")
#git hub test
|
# Driver script: hold-out estimation simulation, "WR" variant, case 'c5'.
# A fixed seed makes the four runs below reproducible.
set.seed(100)
source('metalsimuestimationholdout2WR.R')
# Earlier call forms, kept for reference:
# metalsimuestimation('c4')
# metalsimuestimation('c5')
# Run case 'c5' under four scenario tags (SA150..SA240).
# NOTE(review): 'filenamess' appears to be the scenario/output file tag --
# confirm against metalsimuestimationholdout2WR.R.
metalsimuestimationholdout2WR('c5',filenamess='SA150'); metalsimuestimationholdout2WR('c5',filenamess='SA180'); metalsimuestimationholdout2WR('c5',filenamess='SA210');metalsimuestimationholdout2WR('c5',filenamess='SA240')
# Older metalsimuestimation2 variants for every case, kept for reference:
# metalsimuestimation2('c5',filenamess='SA150'); metalsimuestimation2('c5',filenamess='SA180'); metalsimuestimation2('c5',filenamess='SA210');metalsimuestimation2('c5',filenamess='SA240')
# metalsimuestimation2('c4',filenamess='SA150'); metalsimuestimation2('c4',filenamess='SA180'); metalsimuestimation2('c4',filenamess='SA210');metalsimuestimation2('c4',filenamess='SA240')
# metalsimuestimation2('c3',filenamess='SA150'); metalsimuestimation2('c3',filenamess='SA180'); metalsimuestimation2('c3',filenamess='SA210');metalsimuestimation2('c3',filenamess='SA240')
# metalsimuestimation2('c2',filenamess='SA150'); metalsimuestimation2('c2',filenamess='SA180'); metalsimuestimation2('c2',filenamess='SA210');metalsimuestimation2('c2',filenamess='SA240')
# metalsimuestimation2('c1',filenamess='SA150'); metalsimuestimation2('c1',filenamess='SA180'); metalsimuestimation2('c1',filenamess='SA210');metalsimuestimation2('c1',filenamess='SA240')
| /runestimationSIMUholdoutWRcase3.R | no_license | blue6896/ggm3 | R | false | false | 1,301 | r |
# Driver script: hold-out estimation simulation, "WR" variant, case 'c5'.
# A fixed seed makes the four runs below reproducible.
set.seed(100)
source('metalsimuestimationholdout2WR.R')
# Earlier call forms, kept for reference:
# metalsimuestimation('c4')
# metalsimuestimation('c5')
# Run case 'c5' under four scenario tags (SA150..SA240).
# NOTE(review): 'filenamess' appears to be the scenario/output file tag --
# confirm against metalsimuestimationholdout2WR.R.
metalsimuestimationholdout2WR('c5',filenamess='SA150'); metalsimuestimationholdout2WR('c5',filenamess='SA180'); metalsimuestimationholdout2WR('c5',filenamess='SA210');metalsimuestimationholdout2WR('c5',filenamess='SA240')
# Older metalsimuestimation2 variants for every case, kept for reference:
# metalsimuestimation2('c5',filenamess='SA150'); metalsimuestimation2('c5',filenamess='SA180'); metalsimuestimation2('c5',filenamess='SA210');metalsimuestimation2('c5',filenamess='SA240')
# metalsimuestimation2('c4',filenamess='SA150'); metalsimuestimation2('c4',filenamess='SA180'); metalsimuestimation2('c4',filenamess='SA210');metalsimuestimation2('c4',filenamess='SA240')
# metalsimuestimation2('c3',filenamess='SA150'); metalsimuestimation2('c3',filenamess='SA180'); metalsimuestimation2('c3',filenamess='SA210');metalsimuestimation2('c3',filenamess='SA240')
# metalsimuestimation2('c2',filenamess='SA150'); metalsimuestimation2('c2',filenamess='SA180'); metalsimuestimation2('c2',filenamess='SA210');metalsimuestimation2('c2',filenamess='SA240')
# metalsimuestimation2('c1',filenamess='SA150'); metalsimuestimation2('c1',filenamess='SA180'); metalsimuestimation2('c1',filenamess='SA210');metalsimuestimation2('c1',filenamess='SA240')
|
#
# code: Koon Technical Report Datasets
#
# github: WWF-ConsEvidence/MPAMystery/2_Social/TechnicalReports/SBS
# --- Duplicate all code from "2_Social" onward, to maintain file structure for sourced code
#
# author: Kelly Claborn, clabornkelly@gmail.com
# created: November 2016
# modified: November 2017
#
#
# ---- inputs ----
# 1) Source Koon_TechReport_SigTests.R
# - Dependencies: SBS_MPA_Mystery.R
#
# ---- code sections ----
# 1) Data Sourcing, Configuration, and Subsetting
# 2) Define Datasets for Status, Trend, and Annex Plots for Export
# 3) Synthesize other social data for interpretation/context
# 4) Export Data to Excel
#
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- SECTION 1: Data Sourcing, Configuration, and Subsetting ----
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- 1.1 Source or run statistical test results ----
source("C:/Users/denardo/Dropbox (Personal)/MPA_R_Scripts_for_Kelly/SBS/Scripts/Koon_TechReport_SigTests.R")
# ---- 1.2 Subset Days Unwell variable by settlement and MPA ----
# Koon is MPAID 18 throughout this script. Keep only the mean and error
# columns of the days-unwell summaries at three levels:
# per settlement (with monitoring year), per MPA, and for the control group.
Days.unwell.Koon.BySett <-
  Days.unwell.BySett[Days.unwell.BySett$MPAID==18 &
                       !is.na(Days.unwell.BySett$SettlementID), c("SettlementID", "MonitoringYear", "UnwellMean", "UnwellErr")]
Days.unwell.Koon.ByMPA <-
  Days.unwell.ByMPA[Days.unwell.ByMPA$MPAID==18 &
                      !is.na(Days.unwell.ByMPA$MPAID),c("MonitoringYear", "UnwellMean", "UnwellErr")]
Days.unwell.Koon.control <-
  Days.unwell.control[Days.unwell.control$MPAID==18 &
                        !is.na(Days.unwell.control$MPAID),c("UnwellMean", "UnwellErr")]
# ---- 1.3 Subset Proportional Data of Age/Gender for Koon----
# Build the age/gender pyramid input: 20 five-year age bins (ordered
# factor), with the baseline male series taken from the odd columns
# seq(3,41,2) and the female series from the even columns seq(4,42,2) of
# the MPA-level demographics table (transposed to one value per bin).
# NOTE(review): the hard-coded column positions assume the layout of
# AgeGenderDemos.ByMPA is stable -- confirm against the upstream script.
Koon.AgeGender <-
  data.frame(AgeCat=factor(c("0-4","5-9","10-14","15-19","20-24","25-29","30-34","35-39","40-44","45-49",
                             "50-54","55-59","60-64","65-69","70-74","75-79","80-84","85-89","90-94","95-99"),
                           levels=c("0-4","5-9","10-14","15-19","20-24","25-29","30-34","35-39","40-44","45-49",
                                    "50-54","55-59","60-64","65-69","70-74","75-79","80-84","85-89","90-94","95-99"),
                           ordered=T),
             Male.Baseline=t(AgeGenderDemos.ByMPA[AgeGenderDemos.ByMPA$MPAID==18 &
                                                    AgeGenderDemos.ByMPA$MonitoringYear=="Baseline",
                                                  seq(3,41,by=2)]),
             Female.Baseline=t(AgeGenderDemos.ByMPA[AgeGenderDemos.ByMPA$MPAID==18 &
                                                      AgeGenderDemos.ByMPA$MonitoringYear=="Baseline",
                                                    seq(4,42,by=2)]),
             row.names=NULL)
# ---- 1.4 MPA-level Proportional data (row to be added to bottom of status and annex plots in tech report) ----
Koon.level.PropData.status <-
rbind.data.frame(data.frame(MonitoringYear="Baseline",
SettlementID=0,
SettlementName="Control\nSettlements",
Techreport.ByMPA.control[Techreport.ByMPA.control$MPAID==18,c("HHH.female", "HHH.male", "Percent.Rel.Christian", "Percent.Rel.Muslim",
"Percent.Rel.Other", "Percent.PrimaryOcc.Fish", "Percent.PrimaryOcc.Farm",
"Percent.PrimaryOcc.WageLabor", "Percent.PrimaryOcc.HarvestForest",
"Percent.PrimaryOcc.Tourism", "Percent.PrimaryOcc.Aquaculture", "Percent.PrimaryOcc.Extraction",
"Percent.PrimaryOcc.Other", "Prop.Fish.AlmostNever",
"Prop.Fish.FewTimesPer6Mo", "Prop.Fish.FewTimesPerMo", "Prop.Fish.FewTimesPerWk",
"Prop.Fish.MoreFewTimesWk", "Prop.SellFish.AlmostNever", "Prop.SellFish.FewTimesPer6Mo",
"Prop.SellFish.FewTimesPerMo", "Prop.SellFish.FewTimesPerWk",
"Prop.SellFish.MoreFewTimesWk", "Prop.IncFish.None", "Prop.IncFish.Some",
"Prop.IncFish.Half", "Prop.IncFish.Most", "Prop.IncFish.All",
"Prop.FishTech.ByHand", "Prop.FishTech.StatNet", "Prop.FishTech.MobileNet",
"Prop.FishTech.StatLine", "Prop.FishTech.MobileLine", "Child.FS.no",
"Child.FS.yes")],
FishProtein.ByMPA.control[FishProtein.ByMPA.control$MPAID==18,c("ProteinFish.None", "ProteinFish.Some", "ProteinFish.Half",
"ProteinFish.Most", "ProteinFish.All")]),
data.frame(MonitoringYear="Baseline",
SettlementID=0,
SettlementName="KoonMPA",
Techreport.ByMPA[Techreport.ByMPA$MPAID==18,c("HHH.female", "HHH.male", "Percent.Rel.Christian", "Percent.Rel.Muslim",
"Percent.Rel.Other", "Percent.PrimaryOcc.Fish", "Percent.PrimaryOcc.Farm",
"Percent.PrimaryOcc.WageLabor", "Percent.PrimaryOcc.HarvestForest",
"Percent.PrimaryOcc.Tourism", "Percent.PrimaryOcc.Aquaculture", "Percent.PrimaryOcc.Extraction",
"Percent.PrimaryOcc.Other", "Prop.Fish.AlmostNever",
"Prop.Fish.FewTimesPer6Mo", "Prop.Fish.FewTimesPerMo", "Prop.Fish.FewTimesPerWk",
"Prop.Fish.MoreFewTimesWk", "Prop.SellFish.AlmostNever", "Prop.SellFish.FewTimesPer6Mo",
"Prop.SellFish.FewTimesPerMo", "Prop.SellFish.FewTimesPerWk",
"Prop.SellFish.MoreFewTimesWk", "Prop.IncFish.None", "Prop.IncFish.Some",
"Prop.IncFish.Half", "Prop.IncFish.Most", "Prop.IncFish.All",
"Prop.FishTech.ByHand", "Prop.FishTech.StatNet", "Prop.FishTech.MobileNet",
"Prop.FishTech.StatLine", "Prop.FishTech.MobileLine", "Child.FS.no",
"Child.FS.yes")],
FishProtein.ByMPA[FishProtein.ByMPA$MPAID==18,c("ProteinFish.None", "ProteinFish.Some", "ProteinFish.Half",
"ProteinFish.Most", "ProteinFish.All")]))
null.row.PropData <-
matrix(rep(NA,43),ncol=43,dimnames=list(NULL,colnames(Koon.level.PropData.status)))
# ---- 1.5 MPA-level Continuous data (row to be added to bottom of status and annex plots in tech report) ----
Koon.level.ContData.status <-
rbind.data.frame(cbind.data.frame(MonitoringYear="Baseline",SettlementID=0,SettlementName="Control\nSettlements",
BigFive.ControlGroup[BigFive.ControlGroup$MPAID==18,c("FSMean", "FSErr", "MAMean", "MAErr", "PAMean", "PAErr", "MTMean",
"MTErr", "SEMean", "SEErr")],
Techreport.ByMPA.control[Techreport.ByMPA.control$MPAID==18,c("TimeMarketMean","TimeMarketErr")],
Days.unwell.Koon.control[,c("UnwellMean","UnwellErr")]),
cbind.data.frame(MonitoringYear="Baseline",SettlementID=0,SettlementName="KoonMPA",
BigFive.MPAGroup[BigFive.MPAGroup$MPAID==18,c("FSMean", "FSErr", "MAMean", "MAErr", "PAMean", "PAErr", "MTMean",
"MTErr", "SEMean", "SEErr")],
Techreport.ByMPA[Techreport.ByMPA$MPAID==18,c("TimeMarketMean","TimeMarketErr")],
Days.unwell.Koon.ByMPA[,c("UnwellMean","UnwellErr")]))
null.row.ContData <-
cbind.data.frame(matrix(rep(NA,17),ncol=17,dimnames=list(NULL,colnames(Koon.level.ContData.status))))
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- SECTION 2: Define Datasets for Status, Trend, and Annex Plots for Export ----
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- 2.1 Status dataset for Koon, proportional data ----
Koon.PropData.Techreport.status <-
left_join(Techreport.BySett[Techreport.BySett$MPAID==18,c("SettlementID", "SettlementName", "HHH.female", "HHH.male",
"Percent.Rel.Christian", "Percent.Rel.Muslim", "Percent.Rel.Other",
"Percent.PrimaryOcc.Fish", "Percent.PrimaryOcc.Farm", "Percent.PrimaryOcc.WageLabor",
"Percent.PrimaryOcc.HarvestForest", "Percent.PrimaryOcc.Tourism",
"Percent.PrimaryOcc.Aquaculture", "Percent.PrimaryOcc.Extraction",
"Percent.PrimaryOcc.Other", "Prop.Fish.AlmostNever", "Prop.Fish.FewTimesPer6Mo",
"Prop.Fish.FewTimesPerMo", "Prop.Fish.FewTimesPerWk", "Prop.Fish.MoreFewTimesWk",
"Prop.SellFish.AlmostNever", "Prop.SellFish.FewTimesPer6Mo",
"Prop.SellFish.FewTimesPerMo", "Prop.SellFish.FewTimesPerWk",
"Prop.SellFish.MoreFewTimesWk", "Prop.IncFish.None", "Prop.IncFish.Some",
"Prop.IncFish.Half", "Prop.IncFish.Most", "Prop.IncFish.All",
"Prop.FishTech.ByHand", "Prop.FishTech.StatNet", "Prop.FishTech.MobileNet",
"Prop.FishTech.StatLine", "Prop.FishTech.MobileLine", "Child.FS.no",
"Child.FS.yes")],
FishProtein.BySett[FishProtein.BySett$MPAID==18,c("SettlementID", "SettlementName", "ProteinFish.None", "ProteinFish.Some",
"ProteinFish.Half", "ProteinFish.Most", "ProteinFish.All")],
by=c("SettlementID","SettlementName"))
Koon.PropData.Techreport.status <-
Koon.PropData.Techreport.status[rev(order(Koon.PropData.Techreport.status$SettlementName)),]
Koon.PropData.Techreport.status.PLOTFORMAT <-
rbind.data.frame(Koon.level.PropData.status[c("SettlementID", "SettlementName", "HHH.female", "HHH.male",
"Percent.Rel.Christian", "Percent.Rel.Muslim", "Percent.Rel.Other",
"Percent.PrimaryOcc.Fish", "Percent.PrimaryOcc.Farm", "Percent.PrimaryOcc.WageLabor",
"Percent.PrimaryOcc.HarvestForest", "Percent.PrimaryOcc.Tourism",
"Percent.PrimaryOcc.Aquaculture", "Percent.PrimaryOcc.Extraction",
"Percent.PrimaryOcc.Other", "Prop.Fish.AlmostNever", "Prop.Fish.FewTimesPer6Mo",
"Prop.Fish.FewTimesPerMo", "Prop.Fish.FewTimesPerWk", "Prop.Fish.MoreFewTimesWk",
"Prop.SellFish.AlmostNever", "Prop.SellFish.FewTimesPer6Mo",
"Prop.SellFish.FewTimesPerMo", "Prop.SellFish.FewTimesPerWk",
"Prop.SellFish.MoreFewTimesWk", "Prop.IncFish.None", "Prop.IncFish.Some",
"Prop.IncFish.Half", "Prop.IncFish.Most", "Prop.IncFish.All",
"Prop.FishTech.ByHand", "Prop.FishTech.StatNet", "Prop.FishTech.MobileNet",
"Prop.FishTech.StatLine", "Prop.FishTech.MobileLine", "Child.FS.no",
"Child.FS.yes", "ProteinFish.None", "ProteinFish.Some", "ProteinFish.Half",
"ProteinFish.Most", "ProteinFish.All")],
null.row.PropData[c("SettlementID", "SettlementName", "HHH.female", "HHH.male",
"Percent.Rel.Christian", "Percent.Rel.Muslim", "Percent.Rel.Other",
"Percent.PrimaryOcc.Fish", "Percent.PrimaryOcc.Farm", "Percent.PrimaryOcc.WageLabor",
"Percent.PrimaryOcc.HarvestForest", "Percent.PrimaryOcc.Tourism", "Percent.PrimaryOcc.Aquaculture",
"Percent.PrimaryOcc.Extraction","Percent.PrimaryOcc.Other", "Prop.Fish.AlmostNever", "Prop.Fish.FewTimesPer6Mo",
"Prop.Fish.FewTimesPerMo", "Prop.Fish.FewTimesPerWk", "Prop.Fish.MoreFewTimesWk",
"Prop.SellFish.AlmostNever", "Prop.SellFish.FewTimesPer6Mo",
"Prop.SellFish.FewTimesPerMo", "Prop.SellFish.FewTimesPerWk",
"Prop.SellFish.MoreFewTimesWk", "Prop.IncFish.None", "Prop.IncFish.Some",
"Prop.IncFish.Half", "Prop.IncFish.Most", "Prop.IncFish.All",
"Prop.FishTech.ByHand", "Prop.FishTech.StatNet", "Prop.FishTech.MobileNet",
"Prop.FishTech.StatLine", "Prop.FishTech.MobileLine", "Child.FS.no",
"Child.FS.yes", "ProteinFish.None", "ProteinFish.Some", "ProteinFish.Half",
"ProteinFish.Most", "ProteinFish.All")],
Koon.PropData.Techreport.status)
# - make SettlementName an ordered factor for plotting
# (the all-NA spacer row from null.row.PropData becomes "" so it draws as
# an unlabeled gap; ordered=T freezes the current row order on the axis)
Koon.PropData.Techreport.status.PLOTFORMAT$SettlementName <-
  ifelse(is.na(Koon.PropData.Techreport.status.PLOTFORMAT$SettlementName),"",
         as.character(Koon.PropData.Techreport.status.PLOTFORMAT$SettlementName))
Koon.PropData.Techreport.status.PLOTFORMAT$SettlementName <-
  factor(Koon.PropData.Techreport.status.PLOTFORMAT$SettlementName,
         levels=unique(Koon.PropData.Techreport.status.PLOTFORMAT$SettlementName),
         ordered=T)
# - add row for plot fill colour formatting
# ("Dummy" flags the blank spacer row so plots can style it invisibly)
Koon.PropData.Techreport.status.PLOTFORMAT$Dummy <-
  ifelse(Koon.PropData.Techreport.status.PLOTFORMAT$SettlementName=="","Dummy","NotDummy")
# ---- 2.2 Status dataset for Koon, continuous data (with p values) ----
Koon.ContData.Techreport.status <-
left_join(BigFive.SettleGroup[BigFive.SettleGroup$Treatment==1 &
BigFive.SettleGroup$MPAID==18,
c("SettlementID", "SettlementName", "FSMean", "FSErr", "MAMean",
"MAErr", "PAMean", "PAErr", "MTMean", "MTErr", "SEMean", "SEErr"
)],
Techreport.BySett[Techreport.BySett$MPAID==18,c("SettlementID","TimeMarketMean","TimeMarketErr")],
by="SettlementID")
Koon.ContData.Techreport.status <-
left_join(Koon.ContData.Techreport.status,
Days.unwell.Koon.BySett[,c("SettlementID", "UnwellMean", "UnwellErr")],
by="SettlementID")
Koon.ContData.Techreport.status <-
Koon.ContData.Techreport.status[rev(order(Koon.ContData.Techreport.status$SettlementName)),]
Koon.ContData.Techreport.status.withMPA <-
rbind.data.frame(Koon.level.ContData.status[c("SettlementID", "SettlementName", "FSMean", "FSErr", "MAMean",
"MAErr", "PAMean", "PAErr", "MTMean", "MTErr", "SEMean", "SEErr",
"TimeMarketMean", "TimeMarketErr", "UnwellMean", "UnwellErr")],
null.row.ContData[c("SettlementID", "SettlementName", "FSMean", "FSErr", "MAMean",
"MAErr", "PAMean", "PAErr", "MTMean", "MTErr", "SEMean", "SEErr",
"TimeMarketMean", "TimeMarketErr", "UnwellMean", "UnwellErr")],
Koon.ContData.Techreport.status)
# - plot-formatted dataset
# Join the per-settlement significance results (sigvals.Koon, produced by
# the sourced SigTests script) onto the continuous-data table.
Koon.ContData.Techreport.status.PLOTFORMAT <-
  left_join(Koon.ContData.Techreport.status.withMPA,
            sigvals.Koon,by="SettlementName")
# - make SettlementName an ordered factor for plotting
# NOTE(review): unlike the PropData version of this step, the kept branch
# here omits as.character() -- fine if SettlementName is already character
# at this point, but verify it is not a factor.
Koon.ContData.Techreport.status.PLOTFORMAT$SettlementName <-
  ifelse(is.na(Koon.ContData.Techreport.status.PLOTFORMAT$SettlementName),"",
         Koon.ContData.Techreport.status.PLOTFORMAT$SettlementName)
Koon.ContData.Techreport.status.PLOTFORMAT$SettlementName <-
  factor(Koon.ContData.Techreport.status.PLOTFORMAT$SettlementName,
         levels=unique(Koon.ContData.Techreport.status.PLOTFORMAT$SettlementName),
         ordered=T)
# - add column for plot fill colour formatting
# ("Dummy" flags the blank spacer row so plots can style it invisibly)
Koon.ContData.Techreport.status.PLOTFORMAT$SettLevel <-
  ifelse(Koon.ContData.Techreport.status.PLOTFORMAT$SettlementName=="","Dummy","NotDummy")
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- SECTION 3: Synthesize other social data for interpretation/context ----
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- 3.1 Tech report data synthesis aid ----
# years resident, categorical food security, changes in social conflict,
# material assets gini coefficient, mean material assets, % fishers,
# % wage labor, marine tenure manage and harvest components
Koon.level.synth <- rbind.data.frame(cbind.data.frame(SettlementID=NA,
Synth.techreport.byMPA[Synth.techreport.byMPA$MPAID==18,c("MPAID","MonitoringYear")],
SettlementName="MPA",
Synth.techreport.byMPA[Synth.techreport.byMPA$MPAID==18,3:length(Synth.techreport.byMPA)],
AgeGender.AvgAge.byMPA[AgeGender.AvgAge.byMPA$MPAID==18,3]),
cbind.data.frame(SettlementID=NA,
Synth.techreport.byMPA.control[Synth.techreport.byMPA.control$MPAID==18,c("MPAID","MonitoringYear")],
SettlementName="Control",
Synth.techreport.byMPA.control[Synth.techreport.byMPA.control$MPAID==18,3:length(Synth.techreport.byMPA.control)],
AgeGender.AvgAge.control[AgeGender.AvgAge.control$MPAID==18,3]))
null.row.synth <- matrix(NA,ncol=length(colnames(Koon.level.synth)),
dimnames=list(NULL,colnames(Koon.level.synth)))
Koon.setts.synth <-
Synth.techreport.bySett[Synth.techreport.bySett$MPAID==18,] %>%
left_join(AgeGender.AvgAge.bySett[,c("SettlementName","MonitoringYear","AvgAge")])
Koon.setts.synth <-
Koon.setts.synth[rev(order(Koon.setts.synth$SettlementName)),]
# ---- 3.2 Output for data synthesis/interpretation ----
# Stack the MPA/control summary rows, an all-NA spacer row, then the
# settlement-level rows into one plot-ready table.
Koon.synth.techreport.extra.PLOTFORMAT <- rbind.data.frame(Koon.level.synth,
                                                           null.row.synth,
                                                           Koon.setts.synth)
# - make SettlementName an ordered factor for plotting
# (the NA spacer row becomes "" so it draws as an unlabeled gap)
Koon.synth.techreport.extra.PLOTFORMAT$SettlementName <-
  ifelse(is.na(Koon.synth.techreport.extra.PLOTFORMAT$SettlementName),"",
         as.character(Koon.synth.techreport.extra.PLOTFORMAT$SettlementName))
Koon.synth.techreport.extra.PLOTFORMAT$SettlementName <-
  factor(Koon.synth.techreport.extra.PLOTFORMAT$SettlementName,
         levels=unique(Koon.synth.techreport.extra.PLOTFORMAT$SettlementName),
         ordered=T)
# - add row for plot fill colour formatting
# ("Dummy" flags the blank spacer row so plots can style it invisibly)
Koon.synth.techreport.extra.PLOTFORMAT$Dummy <-
  ifelse(Koon.synth.techreport.extra.PLOTFORMAT$SettlementName=="","Dummy","NotDummy")
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- SECTION 4: Export Data to Excel ----
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# ---- 4.1 Define filename for Excel spreadsheet ----
# If necessary, change file name to match the desired directory in your machine.
# The output name is date-stamped, e.g. Koon_TechReportData--produced_2017_11_30.xlsx.
FileName <- paste(paste("C:/Users/denardo/Dropbox (Personal)/MPA_R_Scripts_for_Kelly/SBS/TechReportOutput/Koon/Koon_TechReportData--produced",
                        format(Sys.Date(),format="%Y_%m_%d"),sep="_"),
                  "xlsx",sep=".")
# ---- 4.2 Write to Excel, each data frame as a new sheet ----
# The first call creates/overwrites the workbook; append=T on the rest adds
# sheets to it.
# NOTE(review): the sheetName/row.names/append signature matches the `xlsx`
# package's write.xlsx -- confirm that package (not openxlsx) is loaded
# upstream.
write.xlsx(Koon.PropData.Techreport.status.PLOTFORMAT,FileName,sheetName='PropData_StatusPlots',row.names=F)
write.xlsx(Koon.ContData.Techreport.status.PLOTFORMAT,FileName,sheetName='ContData_StatusPlots_withpvals',row.names=F,append=T)
write.xlsx(Koon.AgeGender,FileName,sheetName='AgeGender',row.names=F,append=T)
write.xlsx(Koon.synth.techreport.extra.PLOTFORMAT,FileName,sheetName='Extra_data',row.names=F,append=T)
# ---- Remove all unneeded dataframes from environment, to reduce clutter ----
# NOTE: several names in the original cleanup list (the *.annex objects,
# Koon.AnnexPropData.Techreport, Koon.AnnexContData.Techreport and
# Koon.synth.techreport) are never created by this script -- they look like
# leftovers from the template it was copied from, and rm() emits a warning
# for each missing object. Intersecting with ls() removes everything that
# does exist while staying silent about the leftovers.
cleanup.objects <- c("Koon.level.PropData.status",
                     "Koon.level.ContData.status",
                     "Koon.level.PropData.annex",
                     "Koon.level.ContData.annex",
                     "Days.unwell.Koon.ByMPA",
                     "Days.unwell.Koon.BySett",
                     "null.row.PropData",
                     "null.row.ContData",
                     "Koon.PropData.Techreport.status",
                     "Koon.ContData.Techreport.status",
                     "Koon.AnnexPropData.Techreport",
                     "Koon.AnnexContData.Techreport",
                     "Koon.ContData.Techreport.status.withMPA",
                     "Koon.level.synth",
                     "null.row.synth",
                     "Koon.setts.synth",
                     "Koon.synth.techreport",
                     "FileName")
rm(list = intersect(cleanup.objects, ls()))
rm(cleanup.objects)
| /xx_Archive/3_Products/Status_trends/SBS_Koon/Koon_TechReport_Datasets_Updated.R | no_license | WWF-ConsEvidence/MPAMystery | R | false | false | 23,232 | r | #
# code: YamdenaTechnical Report Datasets
#
# github: WWF-ConsEvidence/MPAMystery/2_Social/TechnicalReports/SBS
# --- Duplicate all code from "2_Social" onward, to maintain file structure for sourced code
#
# author: Kelly Claborn, clabornkelly@gmail.com
# created: November 2016
# modified: November 2017
#
#
# ---- inputs ----
# 1) Source Yamdena.TechReport.SigTests.R
# - Dependencies: SBS_MPA_Mystery.R
#
# ---- code sections ----
# 1) Data Sourcing, Configuration, and Subsetting
# 2) Define Datasets for Status, Trend, and Annex Plots for Export
# 3) Export Data to Excel
# 4) Synthesize other social data for interpretation/context
#
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- SECTION 1: Data Sourcing, Configuration, and Subsetting ----
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- 1.1 Source or run statistical test results ----
source("C:/Users/denardo/Dropbox (Personal)/MPA_R_Scripts_for_Kelly/SBS/Scripts/Koon_TechReport_SigTests.R")
# ---- 1.2 Subset Days Unwell variable by settlement and MPA ----
Days.unwell.Koon.BySett <-
Days.unwell.BySett[Days.unwell.BySett$MPAID==18 &
!is.na(Days.unwell.BySett$SettlementID), c("SettlementID", "MonitoringYear", "UnwellMean", "UnwellErr")]
Days.unwell.Koon.ByMPA <-
Days.unwell.ByMPA[Days.unwell.ByMPA$MPAID==18 &
!is.na(Days.unwell.ByMPA$MPAID),c("MonitoringYear", "UnwellMean", "UnwellErr")]
Days.unwell.Koon.control <-
Days.unwell.control[Days.unwell.control$MPAID==18 &
!is.na(Days.unwell.control$MPAID),c("UnwellMean", "UnwellErr")]
# ---- 1.3 Subset Proportional Data of Age/Gender for Koon----
Koon.AgeGender <-
data.frame(AgeCat=factor(c("0-4","5-9","10-14","15-19","20-24","25-29","30-34","35-39","40-44","45-49",
"50-54","55-59","60-64","65-69","70-74","75-79","80-84","85-89","90-94","95-99"),
levels=c("0-4","5-9","10-14","15-19","20-24","25-29","30-34","35-39","40-44","45-49",
"50-54","55-59","60-64","65-69","70-74","75-79","80-84","85-89","90-94","95-99"),
ordered=T),
Male.Baseline=t(AgeGenderDemos.ByMPA[AgeGenderDemos.ByMPA$MPAID==18 &
AgeGenderDemos.ByMPA$MonitoringYear=="Baseline",
seq(3,41,by=2)]),
Female.Baseline=t(AgeGenderDemos.ByMPA[AgeGenderDemos.ByMPA$MPAID==18 &
AgeGenderDemos.ByMPA$MonitoringYear=="Baseline",
seq(4,42,by=2)]),
row.names=NULL)
# ---- 1.4 MPA-level Proportional data (row to be added to bottom of status and annex plots in tech report) ----
Koon.level.PropData.status <-
rbind.data.frame(data.frame(MonitoringYear="Baseline",
SettlementID=0,
SettlementName="Control\nSettlements",
Techreport.ByMPA.control[Techreport.ByMPA.control$MPAID==18,c("HHH.female", "HHH.male", "Percent.Rel.Christian", "Percent.Rel.Muslim",
"Percent.Rel.Other", "Percent.PrimaryOcc.Fish", "Percent.PrimaryOcc.Farm",
"Percent.PrimaryOcc.WageLabor", "Percent.PrimaryOcc.HarvestForest",
"Percent.PrimaryOcc.Tourism", "Percent.PrimaryOcc.Aquaculture", "Percent.PrimaryOcc.Extraction",
"Percent.PrimaryOcc.Other", "Prop.Fish.AlmostNever",
"Prop.Fish.FewTimesPer6Mo", "Prop.Fish.FewTimesPerMo", "Prop.Fish.FewTimesPerWk",
"Prop.Fish.MoreFewTimesWk", "Prop.SellFish.AlmostNever", "Prop.SellFish.FewTimesPer6Mo",
"Prop.SellFish.FewTimesPerMo", "Prop.SellFish.FewTimesPerWk",
"Prop.SellFish.MoreFewTimesWk", "Prop.IncFish.None", "Prop.IncFish.Some",
"Prop.IncFish.Half", "Prop.IncFish.Most", "Prop.IncFish.All",
"Prop.FishTech.ByHand", "Prop.FishTech.StatNet", "Prop.FishTech.MobileNet",
"Prop.FishTech.StatLine", "Prop.FishTech.MobileLine", "Child.FS.no",
"Child.FS.yes")],
FishProtein.ByMPA.control[FishProtein.ByMPA.control$MPAID==18,c("ProteinFish.None", "ProteinFish.Some", "ProteinFish.Half",
"ProteinFish.Most", "ProteinFish.All")]),
data.frame(MonitoringYear="Baseline",
SettlementID=0,
SettlementName="KoonMPA",
Techreport.ByMPA[Techreport.ByMPA$MPAID==18,c("HHH.female", "HHH.male", "Percent.Rel.Christian", "Percent.Rel.Muslim",
"Percent.Rel.Other", "Percent.PrimaryOcc.Fish", "Percent.PrimaryOcc.Farm",
"Percent.PrimaryOcc.WageLabor", "Percent.PrimaryOcc.HarvestForest",
"Percent.PrimaryOcc.Tourism", "Percent.PrimaryOcc.Aquaculture", "Percent.PrimaryOcc.Extraction",
"Percent.PrimaryOcc.Other", "Prop.Fish.AlmostNever",
"Prop.Fish.FewTimesPer6Mo", "Prop.Fish.FewTimesPerMo", "Prop.Fish.FewTimesPerWk",
"Prop.Fish.MoreFewTimesWk", "Prop.SellFish.AlmostNever", "Prop.SellFish.FewTimesPer6Mo",
"Prop.SellFish.FewTimesPerMo", "Prop.SellFish.FewTimesPerWk",
"Prop.SellFish.MoreFewTimesWk", "Prop.IncFish.None", "Prop.IncFish.Some",
"Prop.IncFish.Half", "Prop.IncFish.Most", "Prop.IncFish.All",
"Prop.FishTech.ByHand", "Prop.FishTech.StatNet", "Prop.FishTech.MobileNet",
"Prop.FishTech.StatLine", "Prop.FishTech.MobileLine", "Child.FS.no",
"Child.FS.yes")],
FishProtein.ByMPA[FishProtein.ByMPA$MPAID==18,c("ProteinFish.None", "ProteinFish.Some", "ProteinFish.Half",
"ProteinFish.Most", "ProteinFish.All")]))
null.row.PropData <-
matrix(rep(NA,43),ncol=43,dimnames=list(NULL,colnames(Koon.level.PropData.status)))
# ---- 1.5 MPA-level Continuous data (row to be added to bottom of status and annex plots in tech report) ----
Koon.level.ContData.status <-
rbind.data.frame(cbind.data.frame(MonitoringYear="Baseline",SettlementID=0,SettlementName="Control\nSettlements",
BigFive.ControlGroup[BigFive.ControlGroup$MPAID==18,c("FSMean", "FSErr", "MAMean", "MAErr", "PAMean", "PAErr", "MTMean",
"MTErr", "SEMean", "SEErr")],
Techreport.ByMPA.control[Techreport.ByMPA.control$MPAID==18,c("TimeMarketMean","TimeMarketErr")],
Days.unwell.Koon.control[,c("UnwellMean","UnwellErr")]),
cbind.data.frame(MonitoringYear="Baseline",SettlementID=0,SettlementName="KoonMPA",
BigFive.MPAGroup[BigFive.MPAGroup$MPAID==18,c("FSMean", "FSErr", "MAMean", "MAErr", "PAMean", "PAErr", "MTMean",
"MTErr", "SEMean", "SEErr")],
Techreport.ByMPA[Techreport.ByMPA$MPAID==18,c("TimeMarketMean","TimeMarketErr")],
Days.unwell.Koon.ByMPA[,c("UnwellMean","UnwellErr")]))
null.row.ContData <-
cbind.data.frame(matrix(rep(NA,17),ncol=17,dimnames=list(NULL,colnames(Koon.level.ContData.status))))
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- SECTION 2: Define Datasets for Status, Trend, and Annex Plots for Export ----
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- 2.1 Status dataset for Koon, proportional data ----
Koon.PropData.Techreport.status <-
left_join(Techreport.BySett[Techreport.BySett$MPAID==18,c("SettlementID", "SettlementName", "HHH.female", "HHH.male",
"Percent.Rel.Christian", "Percent.Rel.Muslim", "Percent.Rel.Other",
"Percent.PrimaryOcc.Fish", "Percent.PrimaryOcc.Farm", "Percent.PrimaryOcc.WageLabor",
"Percent.PrimaryOcc.HarvestForest", "Percent.PrimaryOcc.Tourism",
"Percent.PrimaryOcc.Aquaculture", "Percent.PrimaryOcc.Extraction",
"Percent.PrimaryOcc.Other", "Prop.Fish.AlmostNever", "Prop.Fish.FewTimesPer6Mo",
"Prop.Fish.FewTimesPerMo", "Prop.Fish.FewTimesPerWk", "Prop.Fish.MoreFewTimesWk",
"Prop.SellFish.AlmostNever", "Prop.SellFish.FewTimesPer6Mo",
"Prop.SellFish.FewTimesPerMo", "Prop.SellFish.FewTimesPerWk",
"Prop.SellFish.MoreFewTimesWk", "Prop.IncFish.None", "Prop.IncFish.Some",
"Prop.IncFish.Half", "Prop.IncFish.Most", "Prop.IncFish.All",
"Prop.FishTech.ByHand", "Prop.FishTech.StatNet", "Prop.FishTech.MobileNet",
"Prop.FishTech.StatLine", "Prop.FishTech.MobileLine", "Child.FS.no",
"Child.FS.yes")],
FishProtein.BySett[FishProtein.BySett$MPAID==18,c("SettlementID", "SettlementName", "ProteinFish.None", "ProteinFish.Some",
"ProteinFish.Half", "ProteinFish.Most", "ProteinFish.All")],
by=c("SettlementID","SettlementName"))
Koon.PropData.Techreport.status <-
Koon.PropData.Techreport.status[rev(order(Koon.PropData.Techreport.status$SettlementName)),]
Koon.PropData.Techreport.status.PLOTFORMAT <-
rbind.data.frame(Koon.level.PropData.status[c("SettlementID", "SettlementName", "HHH.female", "HHH.male",
"Percent.Rel.Christian", "Percent.Rel.Muslim", "Percent.Rel.Other",
"Percent.PrimaryOcc.Fish", "Percent.PrimaryOcc.Farm", "Percent.PrimaryOcc.WageLabor",
"Percent.PrimaryOcc.HarvestForest", "Percent.PrimaryOcc.Tourism",
"Percent.PrimaryOcc.Aquaculture", "Percent.PrimaryOcc.Extraction",
"Percent.PrimaryOcc.Other", "Prop.Fish.AlmostNever", "Prop.Fish.FewTimesPer6Mo",
"Prop.Fish.FewTimesPerMo", "Prop.Fish.FewTimesPerWk", "Prop.Fish.MoreFewTimesWk",
"Prop.SellFish.AlmostNever", "Prop.SellFish.FewTimesPer6Mo",
"Prop.SellFish.FewTimesPerMo", "Prop.SellFish.FewTimesPerWk",
"Prop.SellFish.MoreFewTimesWk", "Prop.IncFish.None", "Prop.IncFish.Some",
"Prop.IncFish.Half", "Prop.IncFish.Most", "Prop.IncFish.All",
"Prop.FishTech.ByHand", "Prop.FishTech.StatNet", "Prop.FishTech.MobileNet",
"Prop.FishTech.StatLine", "Prop.FishTech.MobileLine", "Child.FS.no",
"Child.FS.yes", "ProteinFish.None", "ProteinFish.Some", "ProteinFish.Half",
"ProteinFish.Most", "ProteinFish.All")],
null.row.PropData[c("SettlementID", "SettlementName", "HHH.female", "HHH.male",
"Percent.Rel.Christian", "Percent.Rel.Muslim", "Percent.Rel.Other",
"Percent.PrimaryOcc.Fish", "Percent.PrimaryOcc.Farm", "Percent.PrimaryOcc.WageLabor",
"Percent.PrimaryOcc.HarvestForest", "Percent.PrimaryOcc.Tourism", "Percent.PrimaryOcc.Aquaculture",
"Percent.PrimaryOcc.Extraction","Percent.PrimaryOcc.Other", "Prop.Fish.AlmostNever", "Prop.Fish.FewTimesPer6Mo",
"Prop.Fish.FewTimesPerMo", "Prop.Fish.FewTimesPerWk", "Prop.Fish.MoreFewTimesWk",
"Prop.SellFish.AlmostNever", "Prop.SellFish.FewTimesPer6Mo",
"Prop.SellFish.FewTimesPerMo", "Prop.SellFish.FewTimesPerWk",
"Prop.SellFish.MoreFewTimesWk", "Prop.IncFish.None", "Prop.IncFish.Some",
"Prop.IncFish.Half", "Prop.IncFish.Most", "Prop.IncFish.All",
"Prop.FishTech.ByHand", "Prop.FishTech.StatNet", "Prop.FishTech.MobileNet",
"Prop.FishTech.StatLine", "Prop.FishTech.MobileLine", "Child.FS.no",
"Child.FS.yes", "ProteinFish.None", "ProteinFish.Some", "ProteinFish.Half",
"ProteinFish.Most", "ProteinFish.All")],
Koon.PropData.Techreport.status)
# - make SettlementName an ordered factor for plotting
Koon.PropData.Techreport.status.PLOTFORMAT$SettlementName <-
ifelse(is.na(Koon.PropData.Techreport.status.PLOTFORMAT$SettlementName),"",
as.character(Koon.PropData.Techreport.status.PLOTFORMAT$SettlementName))
Koon.PropData.Techreport.status.PLOTFORMAT$SettlementName <-
factor(Koon.PropData.Techreport.status.PLOTFORMAT$SettlementName,
levels=unique(Koon.PropData.Techreport.status.PLOTFORMAT$SettlementName),
ordered=T)
# - add row for plot fill colour formatting
Koon.PropData.Techreport.status.PLOTFORMAT$Dummy <-
ifelse(Koon.PropData.Techreport.status.PLOTFORMAT$SettlementName=="","Dummy","NotDummy")
# ---- 2.2 Status dataset for Koon, continuous data (with p values) ----
Koon.ContData.Techreport.status <-
left_join(BigFive.SettleGroup[BigFive.SettleGroup$Treatment==1 &
BigFive.SettleGroup$MPAID==18,
c("SettlementID", "SettlementName", "FSMean", "FSErr", "MAMean",
"MAErr", "PAMean", "PAErr", "MTMean", "MTErr", "SEMean", "SEErr"
)],
Techreport.BySett[Techreport.BySett$MPAID==18,c("SettlementID","TimeMarketMean","TimeMarketErr")],
by="SettlementID")
Koon.ContData.Techreport.status <-
left_join(Koon.ContData.Techreport.status,
Days.unwell.Koon.BySett[,c("SettlementID", "UnwellMean", "UnwellErr")],
by="SettlementID")
Koon.ContData.Techreport.status <-
Koon.ContData.Techreport.status[rev(order(Koon.ContData.Techreport.status$SettlementName)),]
Koon.ContData.Techreport.status.withMPA <-
rbind.data.frame(Koon.level.ContData.status[c("SettlementID", "SettlementName", "FSMean", "FSErr", "MAMean",
"MAErr", "PAMean", "PAErr", "MTMean", "MTErr", "SEMean", "SEErr",
"TimeMarketMean", "TimeMarketErr", "UnwellMean", "UnwellErr")],
null.row.ContData[c("SettlementID", "SettlementName", "FSMean", "FSErr", "MAMean",
"MAErr", "PAMean", "PAErr", "MTMean", "MTErr", "SEMean", "SEErr",
"TimeMarketMean", "TimeMarketErr", "UnwellMean", "UnwellErr")],
Koon.ContData.Techreport.status)
# - plot-formatted dataset
Koon.ContData.Techreport.status.PLOTFORMAT <-
left_join(Koon.ContData.Techreport.status.withMPA,
sigvals.Koon,by="SettlementName")
# - make SettlementName an ordered factor for plotting
Koon.ContData.Techreport.status.PLOTFORMAT$SettlementName <-
ifelse(is.na(Koon.ContData.Techreport.status.PLOTFORMAT$SettlementName),"",
Koon.ContData.Techreport.status.PLOTFORMAT$SettlementName)
Koon.ContData.Techreport.status.PLOTFORMAT$SettlementName <-
factor(Koon.ContData.Techreport.status.PLOTFORMAT$SettlementName,
levels=unique(Koon.ContData.Techreport.status.PLOTFORMAT$SettlementName),
ordered=T)
# - add column for plot fill colour formatting
Koon.ContData.Techreport.status.PLOTFORMAT$SettLevel <-
ifelse(Koon.ContData.Techreport.status.PLOTFORMAT$SettlementName=="","Dummy","NotDummy")
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- SECTION 3: Synthesize other social data for interpretation/context ----
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- 3.1 Tech report data synthesis aid ----
# years resident, categorical food security, changes in social conflict,
# material assets gini coefficient, mean material assets, % fishers,
# % wage labor, marine tenure manage and harvest components
# - MPA-level (MPAID 18 = Koon) and Control-level summary rows, each with a
#   dummy SettlementID of NA and a label in SettlementName; column 3 of the
#   AgeGender tables is assumed to be the average-age column -- TODO confirm
Koon.level.synth <- rbind.data.frame(cbind.data.frame(SettlementID=NA,
Synth.techreport.byMPA[Synth.techreport.byMPA$MPAID==18,c("MPAID","MonitoringYear")],
SettlementName="MPA",
Synth.techreport.byMPA[Synth.techreport.byMPA$MPAID==18,3:length(Synth.techreport.byMPA)],
AgeGender.AvgAge.byMPA[AgeGender.AvgAge.byMPA$MPAID==18,3]),
cbind.data.frame(SettlementID=NA,
Synth.techreport.byMPA.control[Synth.techreport.byMPA.control$MPAID==18,c("MPAID","MonitoringYear")],
SettlementName="Control",
Synth.techreport.byMPA.control[Synth.techreport.byMPA.control$MPAID==18,3:length(Synth.techreport.byMPA.control)],
AgeGender.AvgAge.control[AgeGender.AvgAge.control$MPAID==18,3]))
# - all-NA spacer row with the same column names, used between the MPA-level
#   and settlement-level rows when the plot dataset is assembled
null.row.synth <- matrix(NA,ncol=length(colnames(Koon.level.synth)),
dimnames=list(NULL,colnames(Koon.level.synth)))
# - settlement-level rows for MPAID 18, joined to average age by settlement;
#   the left_join key is implicit (shared column names)
Koon.setts.synth <-
Synth.techreport.bySett[Synth.techreport.bySett$MPAID==18,] %>%
left_join(AgeGender.AvgAge.bySett[,c("SettlementName","MonitoringYear","AvgAge")])
# - reverse alphabetical order by settlement name, matching Section 2 tables
Koon.setts.synth <-
Koon.setts.synth[rev(order(Koon.setts.synth$SettlementName)),]
# ---- 3.2 Output for data synthesis/interpretation ----
# Stack MPA/Control rows, the blank spacer row, then settlement rows.
Koon.synth.techreport.extra.PLOTFORMAT <- rbind.data.frame(Koon.level.synth,
                                                           null.row.synth,
                                                           Koon.setts.synth)
# - make SettlementName an ordered factor for plotting; blank the NA spacer
#   row first so it plots with an empty axis label
Koon.synth.techreport.extra.PLOTFORMAT$SettlementName <-
  ifelse(is.na(Koon.synth.techreport.extra.PLOTFORMAT$SettlementName), "",
         as.character(Koon.synth.techreport.extra.PLOTFORMAT$SettlementName))
Koon.synth.techreport.extra.PLOTFORMAT$SettlementName <-
  factor(Koon.synth.techreport.extra.PLOTFORMAT$SettlementName,
         levels = unique(Koon.synth.techreport.extra.PLOTFORMAT$SettlementName),
         ordered = TRUE)  # TRUE, not T: T is a reassignable symbol
# - add row for plot fill colour formatting (spacer row flagged "Dummy")
Koon.synth.techreport.extra.PLOTFORMAT$Dummy <-
  ifelse(Koon.synth.techreport.extra.PLOTFORMAT$SettlementName == "", "Dummy", "NotDummy")
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- SECTION 4: Export Data to Excel ----
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

# ---- 4.1 Define filename for Excel spreadsheet ----
# NOTE(review): hard-coded, user-specific absolute path -- change to match the
# desired directory on your machine.  The run date is appended so each run
# produces a distinct, dated workbook.
FileName <- paste(paste("C:/Users/denardo/Dropbox (Personal)/MPA_R_Scripts_for_Kelly/SBS/TechReportOutput/Koon/Koon_TechReportData--produced",
                        format(Sys.Date(), format = "%Y_%m_%d"), sep = "_"),
                  "xlsx", sep = ".")

# ---- 4.2 Write to Excel, each data frame as a new sheet ----
# The first call creates the workbook; later calls append additional sheets.
write.xlsx(Koon.PropData.Techreport.status.PLOTFORMAT, FileName,
           sheetName = 'PropData_StatusPlots', row.names = FALSE)
write.xlsx(Koon.ContData.Techreport.status.PLOTFORMAT, FileName,
           sheetName = 'ContData_StatusPlots_withpvals', row.names = FALSE, append = TRUE)
write.xlsx(Koon.AgeGender, FileName,
           sheetName = 'AgeGender', row.names = FALSE, append = TRUE)
write.xlsx(Koon.synth.techreport.extra.PLOTFORMAT, FileName,
           sheetName = 'Extra_data', row.names = FALSE, append = TRUE)

# ---- Remove all unneeded dataframes from environment, to reduce clutter ----
# Consolidated into one rm(list = ...) call; a missing object only warns,
# matching the behaviour of the former one-rm-per-object calls.
rm(list = c("Koon.level.PropData.status",
            "Koon.level.ContData.status",
            "Koon.level.PropData.annex",
            "Koon.level.ContData.annex",
            "Days.unwell.Koon.ByMPA",
            "Days.unwell.Koon.BySett",
            "null.row.PropData",
            "null.row.ContData",
            "Koon.PropData.Techreport.status",
            "Koon.ContData.Techreport.status",
            "Koon.AnnexPropData.Techreport",
            "Koon.AnnexContData.Techreport",
            "Koon.ContData.Techreport.status.withMPA",
            "Koon.level.synth",
            "null.row.synth",
            "Koon.setts.synth",
            "Koon.synth.techreport",
            "FileName"))
|
#!/usr/bin/env Rscript
# Generates a random vector following a normal distribution.
# Values for 'n', 'mean', 'sd' are expected (in that order).

# reading and validating arguments ('n', 'mean', 'sd'); previously a missing
# or non-numeric argument surfaced only as an obscure rnorm() error
args <- commandArgs(trailingOnly = TRUE)
if (length(args) < 3) {
  stop("three arguments are required: n, mean, sd", call. = FALSE)
}
params <- as.numeric(args[1:3])
if (anyNA(params)) {
  stop("'n', 'mean' and 'sd' must all be numeric", call. = FALSE)
}
n <- params[1]
mean <- params[2]
sd <- params[3]

# draw the sample and print it to stdout
x <- rnorm(n, mean, sd)
cat(x, '\n')
| /scripts/normal-vector.txt | permissive | gastonstat/tutorial-R-noninteractive | R | false | false | 346 | txt | #!/usr/bin/env Rscript
# Generates a random vector following a normal distribution.
# Values for 'n', 'mean', 'sd' are expected (in that order).
# reading arguments ('n', 'mean', 'sd')
args <- commandArgs(trailingOnly = TRUE)
n <- as.numeric(args[1])
mean <- as.numeric(args[2])
sd <- as.numeric(args[3])
x <- rnorm(n, mean, sd)
cat(x, '\n')
|
# Blocked Gibbs sampler for a Dirichlet process mixture of normal linear
# regressions, using a truncated stick-breaking representation with L
# components.
#
# Arguments:
#   y      response vector.
#   X      design matrix (n x k, including intercept column if desired).
#   alpha  DP concentration parameter (default 1).
#   m, S   mean vector and covariance of the normal prior on the component
#          coefficient means mu.
#   nu,psi degrees of freedom and scale for the Wishart prior on the shared
#          precision matrix Sigma^{-1}.
#   a, b   shape and rate of the gamma prior on component precisions tau.
#   nsim   number of Gibbs iterations (includes the initial state at i = 1).
#   L      truncation level (maximum number of mixture components).
#   scale  if TRUE, y is standardised by sd(y) before sampling; Beta1 and
#          Sigma2 are transformed back to the original scale on output.
#
# Returns list(P, Beta1, Sigma2):
#   P      nsim x L matrix of stick-breaking mixture weights.
#   Beta1  nsim x L x k array of component regression coefficients
#          (original scale of y when scale = TRUE).
#   Sigma2 nsim x L matrix of component variances (likewise rescaled).
#
# NOTE(review): mvrnorm and rMultinom are assumed to come from MASS and
# Hmisc respectively -- confirm against the package imports.
bddp <-
function(y, X, alpha = 1, m, S, nu, psi, a, b, nsim, L, scale = TRUE) {
# optionally work with standardised response yt
yt <- y
if (scale==TRUE) {yt <- y/sd(y)}
n <- length(y)
k <- ncol(X)
# p: mixture weights; ns: component occupancy counts; v: stick-breaking fractions
p <- ns <- rep(0,L)
v <- rep(1/L,L)
# last stick consumes all remaining mass under truncation
v[L] <- 1
# initialise every component's coefficients at the OLS solution (if X'X is
# invertible; otherwise they stay at zero)
beta <- matrix(0, nrow = L, ncol = k)
aux <- try(solve(t(X)%*%X)%*%t(X)%*%yt, silent = TRUE)
if(!inherits(aux, "try-error")) {
for(l in 1:L) {
beta[l,] <- aux
}
}
tau <- rep(1/var(yt),L)
# storage for per-iteration draws
prop <- prob <- matrix(0, nrow = n, ncol = L)
P <- Tau <- Sigma2 <- matrix(0, nrow = nsim, ncol = L)
Beta <- Beta1 <- array(0,c(nsim,L,k))
Beta[1,,] <- beta
Tau[1,] <- tau
mu <- matrix(0, nrow = nsim, ncol = k)
Sigmainv <- array(0, c(nsim,k,k))
# draw initial hyperparameter state from the priors
mu[1,] <- mvrnorm(1, mu = m, Sigma = S)
Sigmainv[1,,] <- rWishart(1, df = nu, solve(nu*psi))
for(i in 2:nsim) {
# --- update mixture weights from stick-breaking fractions v ---
cumv <- cumprod(1-v)
p[1] <- v[1]
for(l in 2:L) {
p[l] <- v[l]*cumv[l-1]
}
# --- sample component memberships z from the posterior probabilities ---
for(l in 1:L) {
prop[,l] <- p[l]*dnorm(yt,mean=X%*%beta[l,],sd = sqrt(1/tau[l]))
}
prob <- prop/apply(prop,1,sum)
z <- rMultinom(prob,1)
P[i,] <- p
# occupancy count of each component
for(l in 1:L) {
ns[l] <- length(which(z == l))
}
# --- update stick-breaking fractions (conjugate beta full conditionals) ---
for(l in 1:(L-1)) {
v[l] <- rbeta(1, 1 + ns[l],alpha+sum(ns[(l+1):L]))
}
# --- update each component's (beta, tau) given its assigned observations ---
for(l in 1:L) {
# tX is k x ns[l] even when the component is empty or has one member
tX <- matrix(t(X[z == l, ]),nrow = k, ncol = ns[l])
V <- solve(Sigmainv[i-1,,]+tau[l]*tX%*%X[z == l,])
mu1 <- V%*%(Sigmainv[i-1,,]%*%mu[i-1,]+tau[l]*tX%*%yt[z == l])
Beta1[i,l,] <- Beta[i,l,] <- beta[l,] <- mvrnorm(1, mu = mu1, Sigma = V)
if (scale == TRUE) {
# back-transform coefficients to the original scale of y
Beta1[i,l,] <- sd(y)*Beta[i,l,]
}
# precision: gamma full conditional with residual sum of squares in the rate
Tau[i,l] <- tau[l] <- rgamma(1, shape = a + (ns[l]/2), rate = b + 0.5*(t(yt[z==l]-X[z==l,]%*%t(beta[l,,drop=FALSE]))%*%(yt[z==l]-X[z==l,]%*%t(beta[l,,drop=FALSE]))))
Sigma2[i,l] <- 1/Tau[i,l]
if (scale == TRUE){
# back-transform variance to the original scale of y
Sigma2[i,l] <- var(y)*(1/Tau[i,l])
}
}
# --- update the hyper-mean mu of the component coefficients ---
Vaux <- solve(solve(S)+L*Sigmainv[i-1,,])
if(k == 1) {
meanmu <- Vaux%*%(solve(S)%*%m+Sigmainv[i-1,,]%*%sum(Beta[i,,]))
} else {
meanmu <- Vaux%*%(solve(S)%*%m+Sigmainv[i-1,,]%*%t(t(apply(Beta[i,,],2,sum))))
}
mu[i,] <- mvrnorm(1, mu = meanmu, Sigma = Vaux)
# --- update the shared precision matrix Sigma^{-1} (Wishart) ---
Vaux1 <- 0
for(l in 1:L) {
Vaux1 <- Vaux1+(Beta[i,l,]-mu[i,])%*%t((Beta[i,l,]-mu[i,]))
}
Sigmainv[i,,] <- rWishart(1,nu+L,solve(nu*psi+Vaux1))
}
return(list(P,Beta1,Sigma2))
}
| /R/bddp.R | no_license | cran/AROC | R | false | false | 2,383 | r | bddp <-
function(y, X, alpha = 1, m, S, nu, psi, a, b, nsim, L, scale = TRUE) {
yt <- y
if (scale==TRUE) {yt <- y/sd(y)}
n <- length(y)
k <- ncol(X)
p <- ns <- rep(0,L)
v <- rep(1/L,L)
v[L] <- 1
beta <- matrix(0, nrow = L, ncol = k)
aux <- try(solve(t(X)%*%X)%*%t(X)%*%yt, silent = TRUE)
if(!inherits(aux, "try-error")) {
for(l in 1:L) {
beta[l,] <- aux
}
}
tau <- rep(1/var(yt),L)
prop <- prob <- matrix(0, nrow = n, ncol = L)
P <- Tau <- Sigma2 <- matrix(0, nrow = nsim, ncol = L)
Beta <- Beta1 <- array(0,c(nsim,L,k))
Beta[1,,] <- beta
Tau[1,] <- tau
mu <- matrix(0, nrow = nsim, ncol = k)
Sigmainv <- array(0, c(nsim,k,k))
mu[1,] <- mvrnorm(1, mu = m, Sigma = S)
Sigmainv[1,,] <- rWishart(1, df = nu, solve(nu*psi))
for(i in 2:nsim) {
cumv <- cumprod(1-v)
p[1] <- v[1]
for(l in 2:L) {
p[l] <- v[l]*cumv[l-1]
}
for(l in 1:L) {
prop[,l] <- p[l]*dnorm(yt,mean=X%*%beta[l,],sd = sqrt(1/tau[l]))
}
prob <- prop/apply(prop,1,sum)
z <- rMultinom(prob,1)
P[i,] <- p
for(l in 1:L) {
ns[l] <- length(which(z == l))
}
for(l in 1:(L-1)) {
v[l] <- rbeta(1, 1 + ns[l],alpha+sum(ns[(l+1):L]))
}
for(l in 1:L) {
tX <- matrix(t(X[z == l, ]),nrow = k, ncol = ns[l])
V <- solve(Sigmainv[i-1,,]+tau[l]*tX%*%X[z == l,])
mu1 <- V%*%(Sigmainv[i-1,,]%*%mu[i-1,]+tau[l]*tX%*%yt[z == l])
Beta1[i,l,] <- Beta[i,l,] <- beta[l,] <- mvrnorm(1, mu = mu1, Sigma = V)
if (scale == TRUE) {
Beta1[i,l,] <- sd(y)*Beta[i,l,]
}
Tau[i,l] <- tau[l] <- rgamma(1, shape = a + (ns[l]/2), rate = b + 0.5*(t(yt[z==l]-X[z==l,]%*%t(beta[l,,drop=FALSE]))%*%(yt[z==l]-X[z==l,]%*%t(beta[l,,drop=FALSE]))))
Sigma2[i,l] <- 1/Tau[i,l]
if (scale == TRUE){
Sigma2[i,l] <- var(y)*(1/Tau[i,l])
}
}
Vaux <- solve(solve(S)+L*Sigmainv[i-1,,])
if(k == 1) {
meanmu <- Vaux%*%(solve(S)%*%m+Sigmainv[i-1,,]%*%sum(Beta[i,,]))
} else {
meanmu <- Vaux%*%(solve(S)%*%m+Sigmainv[i-1,,]%*%t(t(apply(Beta[i,,],2,sum))))
}
mu[i,] <- mvrnorm(1, mu = meanmu, Sigma = Vaux)
Vaux1 <- 0
for(l in 1:L) {
Vaux1 <- Vaux1+(Beta[i,l,]-mu[i,])%*%t((Beta[i,l,]-mu[i,]))
}
Sigmainv[i,,] <- rWishart(1,nu+L,solve(nu*psi+Vaux1))
}
return(list(P,Beta1,Sigma2))
}
|
# Auto-extracted runnable example for sCrit() from the IAPWS95 package docs.
library(IAPWS95)
### Name: sCrit
### Title: Water Critical Entropy
### Aliases: sCrit
### ** Examples
# sCrit() takes no arguments; it returns water's critical-point entropy
sC <- sCrit()
sC
| /data/genthat_extracted_code/IAPWS95/examples/sCrit.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 127 | r | library(IAPWS95)
### Name: sCrit
### Title: Water Critical Entropy
### Aliases: sCrit
### ** Examples
sC <- sCrit()
sC
|
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857604021e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result) | /myTAI/inst/testfiles/cpp_bootMatrix/AFL_cpp_bootMatrix/cpp_bootMatrix_valgrind_files/1615764739-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 1,803 | r | testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857604021e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result) |
# mergeUSandCanadaPolygons.R
# This script gets US and canada shapefiles and merges them for easy access to
# all political borders in North America.
library(stringr)
library(maps)
library(sp)
library(rgdal)
library(maptools)
library(rgeos)
library(raster)
# getData('GADM', ..., level=1) downloads province/state boundaries from the
# GADM database; requires an internet connection on first run (cached after).
Canada <- getData('GADM', country='CAN', level=1)
Canada_df <- Canada@data
# simplify geometry (tol = 0.1, presumably degrees -- confirm CRS) to shrink
# the polygons; topologyPreserve keeps shared borders consistent
Canada_simple <- rgeos::gSimplify(Canada, tol = 0.1, topologyPreserve = TRUE)
# gSimplify returns bare polygons, so re-attach the original attribute table
Canada_simple_spdf <- SpatialPolygonsDataFrame(Canada_simple, Canada_df)
USA <- getData('GADM', country='USA', level=1)
USA_df <- USA@data
USA_simple <- rgeos::gSimplify(USA, tol = 0.1, topologyPreserve = TRUE)
USA_simple_spdf <- SpatialPolygonsDataFrame(USA_simple, USA_df)
# Merge the U.S. and Canada
#north_america <- raster::union(USA_simple_spdf, Canada_simple_spdf)
north_america <- rbind(USA_simple_spdf, Canada_simple_spdf)
# NOTE(review): quartz() opens a graphics window on macOS only; dev.new()
# would be the portable choice -- confirm target platform before changing.
quartz()
plot(north_america)
# (dead experiment kept for reference: incremental union of Canadian provinces)
# nCanada <- length(Canada_simple_spdf)
# north_america <- USA_simple_spdf
# for (i in 1:nCanada){
# print(i)
# print(paste("Merging:", Canada_df$NAME_1[i]))
# north_america <- raster::union(north_america, Canada_simple_spdf[i,])
#
# }
save(north_america, file="Data/GIS/north_america.RData") | /R/mergeUSandCanadaPolygons.R | permissive | stevenjoelbrey/PMFutures | R | false | false | 1,182 | r | # mergeUSandCanadaPolygons.R
# This script gets US and canada shapefiles and merges them for easy access to
# all political borders in North America.
library(stringr)
library(maps)
library(sp)
library(rgdal)
library(maptools)
library(rgeos)
library(raster)
Canada <- getData('GADM', country='CAN', level=1)
Canada_df <- Canada@data
Canada_simple <- rgeos::gSimplify(Canada, tol = 0.1, topologyPreserve = TRUE)
Canada_simple_spdf <- SpatialPolygonsDataFrame(Canada_simple, Canada_df)
USA <- getData('GADM', country='USA', level=1)
USA_df <- USA@data
USA_simple <- rgeos::gSimplify(USA, tol = 0.1, topologyPreserve = TRUE)
USA_simple_spdf <- SpatialPolygonsDataFrame(USA_simple, USA_df)
# Merge the U.S. and Canada
#north_america <- raster::union(USA_simple_spdf, Canada_simple_spdf)
north_america <- rbind(USA_simple_spdf, Canada_simple_spdf)
quartz()
plot(north_america)
# nCanada <- length(Canada_simple_spdf)
# north_america <- USA_simple_spdf
# for (i in 1:nCanada){
# print(i)
# print(paste("Merging:", Canada_df$NAME_1[i]))
# north_america <- raster::union(north_america, Canada_simple_spdf[i,])
#
# }
save(north_america, file="Data/GIS/north_america.RData") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RenderSql.R
\name{renderSql}
\alias{renderSql}
\title{renderSql}
\usage{
renderSql(sql = "", ...)
}
\arguments{
\item{sql}{The parameterized SQL}
\item{...}{Parameter values}
}
\value{
A list containing the following elements: \describe{ \item{parameterizedSql}{The original
parameterized SQL code} \item{sql}{The rendered sql} }
}
\description{
\code{renderSql} Renders SQL code based on parameterized SQL and parameter values.
}
\details{
This function takes parameterized SQL and a list of parameter values and renders the SQL that can
be send to the server. Parameterization syntax: \describe{ \item{@parameterName}{Parameters are
indicated using a @ prefix, and are replaced with the actual values provided in the renderSql
call.} \item{\{DEFAULT @parameterName = parameterValue\}}{Default values for parameters can be
defined using curly and the DEFAULT keyword.} \item{\{if\}?\{then\}:\{else\}}{The if-then-else
pattern is used to turn on or off blocks of SQL code.} }
}
\examples{
renderSql("SELECT * FROM @a;", a = "myTable")
renderSql("SELECT * FROM @a {@b}?{WHERE x = 1};", a = "myTable", b = "true")
renderSql("SELECT * FROM @a {@b == ''}?{WHERE x = 1}:{ORDER BY x};", a = "myTable", b = "true")
renderSql("SELECT * FROM @a {@b != ''}?{WHERE @b = 1};", a = "myTable", b = "y")
renderSql("SELECT * FROM @a {1 IN (@c)}?{WHERE @b = 1};",
a = "myTable",
b = "y",
c = c(1, 2, 3, 4))
renderSql("{DEFAULT @b = \\"someField\\"}SELECT * FROM @a {@b != ''}?{WHERE @b = 1};",
a = "myTable")
renderSql("SELECT * FROM @a {@a == 'myTable' & @b != 'x'}?{WHERE @b = 1};",
a = "myTable",
b = "y")
}
| /man/renderSql.Rd | permissive | hajarhomayouni/SqlRender | R | false | true | 1,778 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RenderSql.R
\name{renderSql}
\alias{renderSql}
\title{renderSql}
\usage{
renderSql(sql = "", ...)
}
\arguments{
\item{sql}{The parameterized SQL}
\item{...}{Parameter values}
}
\value{
A list containing the following elements: \describe{ \item{parameterizedSql}{The original
parameterized SQL code} \item{sql}{The rendered sql} }
}
\description{
\code{renderSql} Renders SQL code based on parameterized SQL and parameter values.
}
\details{
This function takes parameterized SQL and a list of parameter values and renders the SQL that can
be send to the server. Parameterization syntax: \describe{ \item{@parameterName}{Parameters are
indicated using a @ prefix, and are replaced with the actual values provided in the renderSql
call.} \item{\{DEFAULT @parameterName = parameterValue\}}{Default values for parameters can be
defined using curly and the DEFAULT keyword.} \item{\{if\}?\{then\}:\{else\}}{The if-then-else
pattern is used to turn on or off blocks of SQL code.} }
}
\examples{
renderSql("SELECT * FROM @a;", a = "myTable")
renderSql("SELECT * FROM @a {@b}?{WHERE x = 1};", a = "myTable", b = "true")
renderSql("SELECT * FROM @a {@b == ''}?{WHERE x = 1}:{ORDER BY x};", a = "myTable", b = "true")
renderSql("SELECT * FROM @a {@b != ''}?{WHERE @b = 1};", a = "myTable", b = "y")
renderSql("SELECT * FROM @a {1 IN (@c)}?{WHERE @b = 1};",
a = "myTable",
b = "y",
c = c(1, 2, 3, 4))
renderSql("{DEFAULT @b = \\"someField\\"}SELECT * FROM @a {@b != ''}?{WHERE @b = 1};",
a = "myTable")
renderSql("SELECT * FROM @a {@a == 'myTable' & @b != 'x'}?{WHERE @b = 1};",
a = "myTable",
b = "y")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/progressList.R
\name{progressList}
\alias{progressList}
\title{Create a new progressList}
\usage{
progressList(inputId, label, status)
}
\arguments{
\item{inputId}{The input id of the widget}
\item{label}{A list of labels - one for each step in the process}
\item{status}{A list of status flags - one for each step in the process}
}
\description{
Create a new progressList
}
| /man/progressList.Rd | no_license | harveyl888/progressList | R | false | true | 456 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/progressList.R
\name{progressList}
\alias{progressList}
\title{Create a new progressList}
\usage{
progressList(inputId, label, status)
}
\arguments{
\item{inputId}{The input id of the widget}
\item{label}{A list of labels - one for each step in the process}
\item{status}{A list of status flags - one for each step in the process}
}
\description{
Create a new progressList
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.