content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cba.R
\name{predict.CBARuleModel}
\alias{predict.CBARuleModel}
\title{Apply Rule Model}
\usage{
\method{predict}{CBARuleModel}(
object,
data,
discretize = TRUE,
outputFiringRuleIDs = FALSE,
outputConfidenceScores = FALSE,
confScoreType = "ordered",
positiveClass = NULL,
...
)
}
\arguments{
\item{object}{a \link{CBARuleModel} class instance}
\item{data}{a data frame with data}
\item{discretize}{logical value indicating whether the passed data should be discretized
using the information in the \code{cutp} slot of the \code{object} argument.}
\item{outputFiringRuleIDs}{if set to TRUE, instead of predictions, the function will return one-based IDs of rules used to classify each instance (one rule per instance).}
\item{outputConfidenceScores}{if set to TRUE, instead of predictions, the function will return confidences of the firing rule}
\item{confScoreType}{applicable only if `outputConfidenceScores=TRUE`, possible values `ordered` for confidence computed only for training instances reaching this rule, or `global` for standard rule confidence computed from the complete training data}
\item{positiveClass}{This setting is only used if `outputConfidenceScores=TRUE`. It should be used only for binary problems. In this
case, the confidence values are recalculated so that these are not confidence values of the predicted class (default behaviour of `outputConfidenceScores=TRUE`)
but rather confidence values associated with the class designated as positive}
\item{...}{other arguments (currently not used)}
}
\value{
A vector with predictions.
}
\description{
Method that matches rule model against test data.
}
\examples{
set.seed(101)
allData <- datasets::iris[sample(nrow(datasets::iris)),]
trainFold <- allData[1:100,]
testFold <- allData[101:nrow(allData),]
#increase this value for more accurate results at the cost of a longer run time
target_rule_count <- 1000
classAtt <- "Species"
rm <- cba(trainFold, classAtt, list(target_rule_count = target_rule_count))
prediction <- predict(rm, testFold)
acc <- CBARuleModelAccuracy(prediction, testFold[[classAtt]])
message(acc)
# get rules responsible for each prediction
firingRuleIDs <- predict(rm, testFold, outputFiringRuleIDs=TRUE)
# show rule responsible for prediction of test instance no. 28
inspect(rm@rules[firingRuleIDs[28]])
# get prediction confidence (three different versions)
rm@rules[firingRuleIDs[28]]@quality$confidence
rm@rules[firingRuleIDs[28]]@quality$orderedConf
rm@rules[firingRuleIDs[28]]@quality$cumulativeConf
}
\seealso{
\link{cbaIris}
}
| /man/predict.CBARuleModel.Rd | no_license | kliegr/arc | R | false | true | 2,627 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cba.R
\name{predict.CBARuleModel}
\alias{predict.CBARuleModel}
\title{Apply Rule Model}
\usage{
\method{predict}{CBARuleModel}(
object,
data,
discretize = TRUE,
outputFiringRuleIDs = FALSE,
outputConfidenceScores = FALSE,
confScoreType = "ordered",
positiveClass = NULL,
...
)
}
\arguments{
\item{object}{a \link{CBARuleModel} class instance}
\item{data}{a data frame with data}
\item{discretize}{logical value indicating whether the passed data should be discretized
using the information in the \code{cutp} slot of the \code{object} argument.}
\item{outputFiringRuleIDs}{if set to TRUE, instead of predictions, the function will return one-based IDs of rules used to classify each instance (one rule per instance).}
\item{outputConfidenceScores}{if set to TRUE, instead of predictions, the function will return confidences of the firing rule}
\item{confScoreType}{applicable only if `outputConfidenceScores=TRUE`, possible values `ordered` for confidence computed only for training instances reaching this rule, or `global` for standard rule confidence computed from the complete training data}
\item{positiveClass}{This setting is only used if `outputConfidenceScores=TRUE`. It should be used only for binary problems. In this
case, the confidence values are recalculated so that these are not confidence values of the predicted class (default behaviour of `outputConfidenceScores=TRUE`)
but rather confidence values associated with the class designated as positive}
\item{...}{other arguments (currently not used)}
}
\value{
A vector with predictions.
}
\description{
Method that matches rule model against test data.
}
\examples{
set.seed(101)
allData <- datasets::iris[sample(nrow(datasets::iris)),]
trainFold <- allData[1:100,]
testFold <- allData[101:nrow(allData),]
#increase this value for more accurate results at the cost of a longer run time
target_rule_count <- 1000
classAtt <- "Species"
rm <- cba(trainFold, classAtt, list(target_rule_count = target_rule_count))
prediction <- predict(rm, testFold)
acc <- CBARuleModelAccuracy(prediction, testFold[[classAtt]])
message(acc)
# get rules responsible for each prediction
firingRuleIDs <- predict(rm, testFold, outputFiringRuleIDs=TRUE)
# show rule responsible for prediction of test instance no. 28
inspect(rm@rules[firingRuleIDs[28]])
# get prediction confidence (three different versions)
rm@rules[firingRuleIDs[28]]@quality$confidence
rm@rules[firingRuleIDs[28]]@quality$orderedConf
rm@rules[firingRuleIDs[28]]@quality$cumulativeConf
}
\seealso{
\link{cbaIris}
}
|
# Unit tests for bd_create_gauss_mix(): every call should return an S4
# object of class "AbscontDistribution".

test_that("non-truncated works", {
  # Two-component Gaussian mixture, no truncation bounds supplied.
  mix_spec <- tibble::tibble(
    pi = c(0.2, 0.8),
    mu = c(775, 1000),
    sig = c(35, 45)
  )
  testthat::expect_s4_class(
    bd_create_gauss_mix(x = mix_spec),
    "AbscontDistribution"
  )
})

test_that("truncated works", {
  # Same two-component mixture, truncated to the interval [600, 1280].
  mix_spec <- tibble::tibble(
    pi = c(0.2, 0.8),
    mu = c(775, 1000),
    sig = c(35, 45)
  )
  testthat::expect_s4_class(
    bd_create_gauss_mix(x = mix_spec, taumin = 600, taumax = 1280),
    "AbscontDistribution"
  )
})

test_that("big mixtures work", {
  # A 100-component mixture with random parameters should also work.
  n_components <- 100
  mix_spec <- tibble::tibble(
    pi = runif(n_components) / n_components,
    mu = runif(n_components, 1, 1000),
    sig = runif(n_components, 20, 120)
  )
  testthat::expect_s4_class(
    bd_create_gauss_mix(x = mix_spec),
    "AbscontDistribution"
  )
})
| /tests/testthat/test-bd_create_gauss_mix.R | permissive | ercrema/baydem | R | false | false | 742 | r | test_that("non-truncated works", {
testthat::expect_s4_class(
bd_create_gauss_mix(x = tibble::tibble(
pi = c(0.2, 0.8),
mu = c(775, 1000),
sig = c(35, 45)
)),
"AbscontDistribution"
)
})
# Truncated case: the same two-component mixture, restricted to the
# interval [600, 1280] via taumin/taumax.
test_that("truncated works", {
testthat::expect_s4_class(
bd_create_gauss_mix(
x = tibble::tibble(
pi = c(0.2, 0.8),
mu = c(775, 1000),
sig = c(35, 45)
),
taumin = 600,
taumax = 1280
),
"AbscontDistribution"
)
})
# Stress case: a 100-component mixture with random weights, means and
# standard deviations should still yield an "AbscontDistribution" object.
test_that("big mixtures work", {
n <- 100
testthat::expect_s4_class(
bd_create_gauss_mix(x = tibble::tibble(
pi = runif(n) / n,
mu = runif(n, 1, 1000),
sig = runif(n, 20, 120)
)),
"AbscontDistribution"
)
})
|
\name{ocCurves}
\alias{ocCurves}
\alias{print.ocCurves}
\alias{plot.ocCurves}
\alias{ocCurves.xbar}
\alias{ocCurves.R}
\alias{ocCurves.S}
\alias{ocCurves.p}
\alias{ocCurves.c}
\title{Operating Characteristic Function}
\description{Draws the operating characteristic curves for a \code{'qcc'} object.}
\usage{
ocCurves(object, \dots)
ocCurves.xbar(object,
size = c(1, 5, 10, 15, 20),
shift = seq(0, 5, by = 0.1),
nsigmas = object$nsigmas, \dots)
ocCurves.R(object,
size = c(2, 5, 10, 15, 20),
multiplier = seq(1, 6, by = 0.1),
nsigmas = object$nsigmas, \dots)
ocCurves.S(object,
size = c(2, 5, 10, 15, 20),
multiplier = seq(1, 6, by = 0.1),
nsigmas = object$nsigmas, \dots)
ocCurves.p(object, \dots)
ocCurves.c(object, \dots)
\method{print}{ocCurves}(x, digits = getOption("digits"), \dots)
\method{plot}{ocCurves}(x, what = c("beta", "ARL"),
title, xlab, ylab, lty, lwd, col, \dots)
}
\arguments{
\item{object}{an object of class \code{'qcc'}.}
\item{size}{a vector of values specifying the sample sizes for which to draw the OC curves.}
\item{shift, multiplier}{a vector of values specifying the shift or multiplier values (in units of sigma).}
\item{nsigmas}{a numeric value specifying the number of sigmas to use for computing control limits; if \code{nsigmas} is \code{NULL}, \code{object$conf} is used to set up probability limits.}
\item{x}{an object of class \code{'ocCurves'}.}
\item{digits}{the number of significant digits to use.}
\item{what}{a string specifying the quantity to plot on the y-axis. Possible values are \code{"beta"} for the probability of not detecting a shift, and \code{"ARL"} for the average run length.}
\item{title}{a character string specifying the main title. Set \code{title = NULL} to remove the title.}
\item{xlab, ylab}{a string giving the label for the x-axis and the y-axis.}
\item{lty, lwd, col}{values or vector of values controlling the line type, line width and colour of curves.}
\item{\dots}{catches further ignored arguments.}
}
\details{An operating characteristic curve graphically provides information about the probability of not detecting a shift in the process. \code{ocCurves} is a generic function which calls the proper function depending on the type of \code{'qcc'} object. Further arguments provided through \code{\dots} are passed to the specific function depending on the type of chart.
The probabilities are based on the conventional assumptions about process distributions: the normal distribution for \code{"xbar"}, \code{"R"}, and \code{"S"}, the binomial distribution for \code{"p"} and \code{"np"}, and the Poisson distribution for \code{"c"} and \code{"u"}. They are all sensitive to departures from those assumptions, but to varying degrees. The performance of the \code{"S"} chart, and especially the \code{"R"} chart, are likely to be seriously affected by longer tails.}
\value{The function returns an object of class \code{'ocCurves'} which contains a matrix or a vector of beta values (the probability of type II error) and ARL (average run length).}
\references{
Mason, R.L. and Young, J.C. (2002) \emph{Multivariate Statistical Process Control with Industrial Applications}, SIAM.
Montgomery, D.C. (2013) \emph{Introduction to Statistical Quality Control}, 7th ed. New York: John Wiley & Sons.
Ryan, T. P. (2011), \emph{Statistical Methods for Quality Improvement}, 3rd ed. New York: John Wiley & Sons, Inc.
Scrucca, L. (2004). qcc: an R package for quality control charting and statistical process control. \emph{R News} 4/1, 11-17.
Wetherill, G.B. and Brown, D.W. (1991) \emph{Statistical Process Control}. New York: Chapman & Hall.
}
\author{Luca Scrucca}
%\note{ ~~further notes~~ }
\seealso{\code{\link{qcc}}}
\examples{
data(pistonrings)
diameter <- qccGroups(diameter, sample, data = pistonrings)
oc <- ocCurves.xbar(qcc(diameter, type="xbar", nsigmas=3))
oc
plot(oc)
data(orangejuice)
oc <- with(orangejuice,
ocCurves(qcc(D[trial], sizes=size[trial], type="p")))
oc
plot(oc)
data(circuit)
oc <- with(circuit,
ocCurves(qcc(x[trial], sizes=size[trial], type="c")))
oc
plot(oc)
}
\keyword{htest}
\keyword{hplot}
| /man/oc.curves.Rd | no_license | luca-scr/qcc | R | false | false | 4,268 | rd | \name{ocCurves}
\alias{ocCurves}
\alias{print.ocCurves}
\alias{plot.ocCurves}
\alias{ocCurves.xbar}
\alias{ocCurves.R}
\alias{ocCurves.S}
\alias{ocCurves.p}
\alias{ocCurves.c}
\title{Operating Characteristic Function}
\description{Draws the operating characteristic curves for a \code{'qcc'} object.}
\usage{
ocCurves(object, \dots)
ocCurves.xbar(object,
size = c(1, 5, 10, 15, 20),
shift = seq(0, 5, by = 0.1),
nsigmas = object$nsigmas, \dots)
ocCurves.R(object,
size = c(2, 5, 10, 15, 20),
multiplier = seq(1, 6, by = 0.1),
nsigmas = object$nsigmas, \dots)
ocCurves.S(object,
size = c(2, 5, 10, 15, 20),
multiplier = seq(1, 6, by = 0.1),
nsigmas = object$nsigmas, \dots)
ocCurves.p(object, \dots)
ocCurves.c(object, \dots)
\method{print}{ocCurves}(x, digits = getOption("digits"), \dots)
\method{plot}{ocCurves}(x, what = c("beta", "ARL"),
title, xlab, ylab, lty, lwd, col, \dots)
}
\arguments{
\item{object}{an object of class \code{'qcc'}.}
\item{size}{a vector of values specifying the sample sizes for which to draw the OC curves.}
\item{shift, multiplier}{a vector of values specifying the shift or multiplier values (in units of sigma).}
\item{nsigmas}{a numeric value specifying the number of sigmas to use for computing control limits; if \code{nsigmas} is \code{NULL}, \code{object$conf} is used to set up probability limits.}
\item{x}{an object of class \code{'ocCurves'}.}
\item{digits}{the number of significant digits to use.}
\item{what}{a string specifying the quantity to plot on the y-axis. Possible values are \code{"beta"} for the probability of not detecting a shift, and \code{"ARL"} for the average run length.}
\item{title}{a character string specifying the main title. Set \code{title = NULL} to remove the title.}
\item{xlab, ylab}{a string giving the label for the x-axis and the y-axis.}
\item{lty, lwd, col}{values or vector of values controlling the line type, line width and colour of curves.}
\item{\dots}{catches further ignored arguments.}
}
\details{An operating characteristic curve graphically provides information about the probability of not detecting a shift in the process. \code{ocCurves} is a generic function which calls the proper function depending on the type of \code{'qcc'} object. Further arguments provided through \code{\dots} are passed to the specific function depending on the type of chart.
The probabilities are based on the conventional assumptions about process distributions: the normal distribution for \code{"xbar"}, \code{"R"}, and \code{"S"}, the binomial distribution for \code{"p"} and \code{"np"}, and the Poisson distribution for \code{"c"} and \code{"u"}. They are all sensitive to departures from those assumptions, but to varying degrees. The performance of the \code{"S"} chart, and especially the \code{"R"} chart, are likely to be seriously affected by longer tails.}
\value{The function returns an object of class \code{'ocCurves'} which contains a matrix or a vector of beta values (the probability of type II error) and ARL (average run length).}
\references{
Mason, R.L. and Young, J.C. (2002) \emph{Multivariate Statistical Process Control with Industrial Applications}, SIAM.
Montgomery, D.C. (2013) \emph{Introduction to Statistical Quality Control}, 7th ed. New York: John Wiley & Sons.
Ryan, T. P. (2011), \emph{Statistical Methods for Quality Improvement}, 3rd ed. New York: John Wiley & Sons, Inc.
Scrucca, L. (2004). qcc: an R package for quality control charting and statistical process control. \emph{R News} 4/1, 11-17.
Wetherill, G.B. and Brown, D.W. (1991) \emph{Statistical Process Control}. New York: Chapman & Hall.
}
\author{Luca Scrucca}
%\note{ ~~further notes~~ }
\seealso{\code{\link{qcc}}}
\examples{
data(pistonrings)
diameter <- qccGroups(diameter, sample, data = pistonrings)
oc <- ocCurves.xbar(qcc(diameter, type="xbar", nsigmas=3))
oc
plot(oc)
data(orangejuice)
oc <- with(orangejuice,
ocCurves(qcc(D[trial], sizes=size[trial], type="p")))
oc
plot(oc)
data(circuit)
oc <- with(circuit,
ocCurves(qcc(x[trial], sizes=size[trial], type="c")))
oc
plot(oc)
}
\keyword{htest}
\keyword{hplot}
|
# Demo script: progressively reveal a random line chart with gganimate.
# NOTE(review): clearing the workspace and hard-coding the working directory
# are kept as-is to preserve the script's original behaviour.
rm(list = ls())
setwd("~/R/EDA/EDA")

# Random demo data: 50 draws each for the x (A) and y (B) variables.
df <- data.frame(
  A = sample(1:75, 50, replace = TRUE),
  B = sample(1:100, 50, replace = TRUE),
  stringsAsFactors = FALSE
)

library(ggplot2)
library(tidyverse)
library(gganimate)
library(directlabels)
library(png)
library(transformr)
library(grid)
library(gifski)

# Line plot revealed along A; the title displays the current frame value.
p <- ggplot(df, aes(A, B)) +
  geom_line() +
  transition_reveal(A) +
  labs(title = 'A: {frame_along}')

# Grouped variant kept for reference:
# p = ggplot(df, aes(A, B, group = C)) +
#   geom_line() +
#   transition_reveal(A) +
#   labs(title = 'A: {frame_along}')

animate(p, nframes = 40)
anim_save("basic_animation.gif", p)

# Same animation, slower playback.
animate(p, nframes = 40, fps = 2)

# how to stop loop in the animation?
animate(p, renderer = gifski_renderer(loop = FALSE))

# How to change layout of plot?
animate(p, fps = 10, duration = 14, width = 800, height = 400)
| /animate01.R | no_license | Joshuariver/EDA | R | false | false | 880 | r | rm(list=ls())
# Demo script: progressively reveal a random line chart with gganimate.
setwd("~/R/EDA/EDA")
# Random demo data: 50 draws each for the x (A) and y (B) variables.
df = data.frame(A=sample(1:75, 50, replace=TRUE),
B=sample(1:100, 50, replace=TRUE),
stringsAsFactors = FALSE)
library(ggplot2)
library(tidyverse)
library(gganimate)
library(directlabels)
library(png)
library(transformr)
library(grid)
library(gifski)
# Line plot revealed along A; the title displays the current frame value.
p = ggplot(df, aes(A, B)) +
geom_line() +
transition_reveal(A) +
labs(title = 'A: {frame_along}')
# p = ggplot(df, aes(A, B, group = C)) +
# geom_line() +
# transition_reveal(A) +
# labs(title = 'A: {frame_along}')
animate(p, nframes=40)
# NOTE(review): anim_save() is given the plot object rather than an
# animation; presumably it re-renders p here -- confirm against the
# gganimate documentation for the `animation` argument.
anim_save("basic_animation.gif", p)
# Same animation, slower playback.
animate(p, nframes=40, fps = 2)
# how to stop loop in the animation?
animate(p, renderer = gifski_renderer(loop = FALSE))
# How to change layout of plot?
animate(p, fps = 10, duration = 14, width = 800, height = 400)
|
# Generated programmatically at 2013-07-02 13:50:20
# Create a new CUDA event.
#
# `Flags` selects the event-creation flags and is coerced to numeric before
# the native call. Returns whatever the compiled stub produces; a non-zero
# CUresult is converted into an R error via raiseError().
cuEventCreate <- function(Flags) {
  status <- .Call('R_auto_cuEventCreate', as(Flags, 'numeric'))
  if (is(status, 'CUresult') && status != 0)
    raiseError(status, 'R_auto_cuEventCreate')
  status
}
# Record a CUDA event on a stream.
#
# `hEvent` (a CUevent handle) and `hStream` (a CUstream handle) are coerced
# to their S4 classes before the native call. A non-zero CUresult from the
# compiled stub is converted into an R error via raiseError().
cuEventRecord <- function(hEvent, hStream) {
  status <- .Call('R_auto_cuEventRecord', as(hEvent, 'CUevent'), as(hStream, 'CUstream'))
  if (is(status, 'CUresult') && status != 0)
    raiseError(status, 'R_auto_cuEventRecord')
  status
}
# Query the completion status of a CUDA event.
# Thin wrapper around the compiled stub R_auto_cuEventQuery; `hEvent` is
# coerced to a CUevent handle first. Raises an R error (via raiseError)
# when the stub returns a non-zero CUresult, otherwise returns that result.
cuEventQuery <-
function( hEvent )
{
ans = .Call('R_auto_cuEventQuery', as(hEvent, 'CUevent'))
# a non-zero CUresult signals failure in the underlying CUDA driver call
if(is(ans, 'CUresult') && ans != 0)
raiseError(ans, 'R_auto_cuEventQuery')
ans
}
# Wait on a CUDA event.
# Wraps the compiled stub R_auto_cuEventSynchronize; presumably blocks until
# the event has completed (see the CUDA driver API docs) -- the R-level
# wrapper itself only coerces the handle and error-checks the result.
cuEventSynchronize <-
function( hEvent )
{
ans = .Call('R_auto_cuEventSynchronize', as(hEvent, 'CUevent'))
# a non-zero CUresult signals failure in the underlying CUDA driver call
if(is(ans, 'CUresult') && ans != 0)
raiseError(ans, 'R_auto_cuEventSynchronize')
ans
}
# Destroy a CUDA event.
#
# `hEvent` is coerced to a CUevent handle before being passed to the
# compiled stub. A non-zero CUresult is converted into an R error via
# raiseError(); otherwise the stub's result is returned.
cuEventDestroy <- function(hEvent) {
  status <- .Call('R_auto_cuEventDestroy', as(hEvent, 'CUevent'))
  if (is(status, 'CUresult') && status != 0)
    raiseError(status, 'R_auto_cuEventDestroy')
  status
}
# Elapsed time between two CUDA events.
# `hStart` and `hEnd` are coerced to CUevent handles; the compiled stub
# R_auto_cuEventElapsedTime does the actual work (per the CUDA driver API
# the elapsed time is in milliseconds -- confirm against the native binding).
cuEventElapsedTime <-
function( hStart, hEnd )
{
ans = .Call('R_auto_cuEventElapsedTime', as(hStart, 'CUevent'), as(hEnd, 'CUevent'))
# a non-zero CUresult signals failure in the underlying CUDA driver call
if(is(ans, 'CUresult') && ans != 0)
raiseError(ans, 'R_auto_cuEventElapsedTime')
ans
}
| /R/autoEvent.R | no_license | xfbingshan/RCUDA | R | false | false | 1,348 | r | # Generated programmatically at 2013-07-02 13:50:20
# Auto-generated wrappers around the CUDA driver event API. Each function
# forwards to a compiled stub via .Call(), coercing its handle arguments to
# their S4 classes, and converts any non-zero CUresult into an R error via
# raiseError().

# Create a CUDA event with the given flags.
cuEventCreate <-
function( Flags )
{
ans = .Call('R_auto_cuEventCreate', as(Flags, 'numeric'))
if(is(ans, 'CUresult') && ans != 0)
raiseError(ans, 'R_auto_cuEventCreate')
ans
}
# Record an event on a stream.
cuEventRecord <-
function( hEvent, hStream )
{
ans = .Call('R_auto_cuEventRecord', as(hEvent, 'CUevent'), as(hStream, 'CUstream'))
if(is(ans, 'CUresult') && ans != 0)
raiseError(ans, 'R_auto_cuEventRecord')
ans
}
# Query an event's completion status.
cuEventQuery <-
function( hEvent )
{
ans = .Call('R_auto_cuEventQuery', as(hEvent, 'CUevent'))
if(is(ans, 'CUresult') && ans != 0)
raiseError(ans, 'R_auto_cuEventQuery')
ans
}
# Wait on an event (presumably blocking -- see the CUDA driver API docs).
cuEventSynchronize <-
function( hEvent )
{
ans = .Call('R_auto_cuEventSynchronize', as(hEvent, 'CUevent'))
if(is(ans, 'CUresult') && ans != 0)
raiseError(ans, 'R_auto_cuEventSynchronize')
ans
}
# Destroy an event.
cuEventDestroy <-
function( hEvent )
{
ans = .Call('R_auto_cuEventDestroy', as(hEvent, 'CUevent'))
if(is(ans, 'CUresult') && ans != 0)
raiseError(ans, 'R_auto_cuEventDestroy')
ans
}
# Elapsed time between two events.
cuEventElapsedTime <-
function( hStart, hEnd )
{
ans = .Call('R_auto_cuEventElapsedTime', as(hStart, 'CUevent'), as(hEnd, 'CUevent'))
if(is(ans, 'CUresult') && ans != 0)
raiseError(ans, 'R_auto_cuEventElapsedTime')
ans
}
|
setwd(dir="C:/Users/Francois/Documents/pheno abeilles belges/scripts finaux/article/data")
library(ggplot2)
library(mgcv)
library(MASS)
require(doBy)
require(gridExtra)
require(lubridate)
library(chron)
library(dplyr)
library(rgbif)
library(reshape2)
library(car)
library(data.table)
library(lme4)
library(RColorBrewer)
library(phia)
library(ggsignif)
library(blme)
library(glmmTMB)
# Sensitivity analysis: re-fit the occupancy-change and phenology-change
# models for a range of loess smoothing parameters (span = 0.1 ... 0.9)
# applied to the environmental drivers, accumulating the standardised
# effect sizes for each span in `bf`.
for(j in seq(0.1,0.9,0.1)){
# current smoothing parameter for the loess fits below
spani=j
# yearly posterior estimates of occupancy and mean flight date (MFD) per species
liste2=read.table("yearly_estimates_of_occupancy_and_mfd_only_for_years_withdata.txt",sep="\t",header=T)
# per-species linear MFD-shift estimates (year effects and p-values)
liste=read.table("linear_mfd_shifts.txt",sep="\t",header=T,na.strings=c("","NA"))
liste2$species=as.character(liste2$species)
liste$species=as.character(liste$species)
# nudge occupancy bounds away from exactly 0/1 so the logit stays finite
liste2$quant_025[liste2$quant_025==0]=1e-16
liste2$quant_975[liste2$quant_975==1]=1-1e-16
# duplicate the estimates, then blank out years whose MCMC chains did not
# converge (Rhat > 1.1)
liste2[,c("mean2","quant_0252","quant_9752")]=liste2[,c("mean","quant_025","quant_975")]
liste2[which(liste2$rhat>1.1),c("mean2","quant_0252","quant_9752")]=NA
# error propagation for a first difference: sqrt(se_t^2 + se_{t-1}^2).
# NOTE(review): for i = 1, x[0] is zero-length so the RHS is numeric(0);
# confirm this leaves vec[1] as NA rather than erroring.
err=function(x){
vec=c()
for(i in 1:length(x)){
vec[i]=sqrt(x[i]^2+x[i-1]^2)
}
return(vec)}
# logit transform for occupancy proportions
logit_func=function(x){log(x/(1-x))}
# Per-species loop: build year-to-year derivatives of occupancy (logit
# scale) and phenology (MFD), and classify each species' long-term trend
# and phenological shift.
for(i in 1:nrow(liste)){
bidon2=subset(liste2,species==liste$species[i])
# width of the 95% credible interval on the logit scale (uncertainty proxy)
wci2=logit_func(bidon2$quant_9752)-logit_func(bidon2$quant_0252)
bidon2$occ_derivs=c(NA,diff(logit_func(bidon2$mean2)))
bidon2$occ_derivs_er=err(wci2)
bidon2$pheno_derivs=c(NA,diff(bidon2$fit))
bidon2$pheno_derivs_er=err(bidon2$se.fit)
# weighted linear regression of occupancy on year (weights = 1 / CI width)
wci=bidon2$quant_975-bidon2$quant_025
model3=lm(mean~Annee,data=bidon2,weights=1/wci)
liste$trend_effect[i]=model3$coeff[2]
liste$trend_pval[i]=Anova(model3)[1,4]
# classify occupancy trend / MFD shift from the slope sign and p-value
liste$stat_trend[i]=if(liste$trend_pval[i]>0.05){"stable"}else{if(liste$trend_effect[i]>0){"increase"}else{"decline"}}
liste$stat_year[i]=if(liste$year_pval[i]>0.05){"unaffected"}else{if(liste$year_effect[i]>0){"delay"}else{"advance"}}
if(i==1){res=bidon2}else{res=rbind(res,bidon2)}
}
# environmental covariates per year (temperature, urbanisation, agriculture ratio)
tabvar=as.data.frame(fread("belgium_variables_SIG.txt",sep="\t",header=T))
newdat=data.frame(Annee=1902:2016)
tabvar=subset(tabvar,Annee>=1900)
# yearly changes in the loess-smoothed agriculture "ratio" variable
model=loess(value~Annee,data=subset(tabvar,variable=="ratio" & !is.na(value)),span=spani)
tabvarb=cbind(newdat,c(NA,diff(predict(model,newdata=newdat))),"ratio")
names(tabvarb)=c("Annee","value","variable")
# yearly changes in the loess-smoothed temperature = long-term temperature trend
model=loess(value~Annee,data=subset(tabvar,variable=="temp" & !is.na(value) & Annee>=1902),span=spani)
tabvarc=cbind(newdat,c(NA,diff(predict(model,newdata=newdat))),"temp_trend")
names(tabvarc)=c("Annee","value","variable")
# inter-annual temperature changes; note the smoothed values computed here
# are overwritten two lines below by the raw first differences
model=loess(value~Annee,data=subset(tabvar,variable=="temp" & !is.na(value) & Annee>=1902),span=spani)
tabvare=cbind(newdat,c(NA,diff(predict(model,newdata=newdat))),"temp")
names(tabvare)=c("Annee","value","variable")
tabvare$value=c(NA,diff(subset(tabvar,variable=="temp" & !is.na(value) & Annee>=1902)$value))
# urbanisation uses a fixed span of 0.2 (not varied in this sensitivity analysis)
model=loess(value~Annee,data=subset(tabvar,variable=="urban" & !is.na(value)),span=0.2)
tabvarf=cbind(newdat,c(NA,diff(predict(model,newdata=newdat))),"urban")
names(tabvarf)=c("Annee","value","variable")
tabvar=rbind(tabvarb,tabvarc,tabvare,tabvarf)
tabvar=subset(tabvar,Annee>=1950)
# standardise each driver (scale only, no centering) so effects are comparable
tabvar <- tabvar %>% dplyr::group_by(variable) %>% dplyr::mutate(value=scale(value,center=F,scale=T))
tabvar2=dcast(tabvar,Annee~variable,value.var="value")
# combine species derivatives, drivers and trend/shift classifications
final=merge(res,tabvar2,by="Annee")
final=merge(final,liste[,c("species","stat_trend","stat_year")],by="species")
# count usable derivative estimates per species (outliers excluded)
final=as.data.frame(final %>% dplyr::group_by(species) %>%
dplyr::mutate(ndelta=length(which(!is.na(pheno_derivs) & abs(pheno_derivs)<50)),
ndelta2=length(which(!is.na(occ_derivs) & occ_derivs_er<30))))
final$stat_trend=as.factor(final$stat_trend)
final$stat_year=as.factor(final$stat_year)
# keep only species with at least 25 usable derivative estimates
bidonb=subset(final,!is.na(pheno_derivs) & ndelta>=25 & abs(pheno_derivs)<50)
bidono=subset(final,!is.na(occ_derivs) & ndelta2>=25 & occ_derivs_er<30)
# occupancy model: drivers x trend status, with an ou() temporal
# autocorrelation structure per species (glmmTMB)
bidono$Annee2=numFactor(bidono$Annee-1950)
model=glmmTMB(occ_derivs~(ratio+temp+temp_trend+urban)*stat_trend+
ou(Annee2+0 | species),data=bidono,weights=(1/occ_derivs_er)^0.2,
control=glmmTMBControl(optCtrl = list(iter.max=10000000, eval.max=10000000)))
# standard errors of each driver effect within each trend group
# ("decline" is the reference level; "stable"/"increase" are obtained by
# combining the main effect and interaction variances and covariance)
sde=c(sqrt(vcov(model)$cond["ratio","ratio"]),
sqrt(vcov(model)$cond["ratio","ratio"]+2*vcov(model)$cond["ratio","ratio:stat_trendstable"]+vcov(model)$cond["ratio:stat_trendstable","ratio:stat_trendstable"]),
sqrt(vcov(model)$cond["ratio","ratio"]+2*vcov(model)$cond["ratio","ratio:stat_trendincrease"]+vcov(model)$cond["ratio:stat_trendincrease","ratio:stat_trendincrease"]),
sqrt(vcov(model)$cond["urban","urban"]),
sqrt(vcov(model)$cond["urban","urban"]+2*vcov(model)$cond["urban","urban:stat_trendstable"]+vcov(model)$cond["urban:stat_trendstable","urban:stat_trendstable"]),
sqrt(vcov(model)$cond["urban","urban"]+2*vcov(model)$cond["urban","urban:stat_trendincrease"]+vcov(model)$cond["urban:stat_trendincrease","urban:stat_trendincrease"]),
sqrt(vcov(model)$cond["temp","temp"]),
sqrt(vcov(model)$cond["temp","temp"]+2*vcov(model)$cond["temp","temp:stat_trendstable"]+vcov(model)$cond["temp:stat_trendstable","temp:stat_trendstable"]),
sqrt(vcov(model)$cond["temp","temp"]+2*vcov(model)$cond["temp","temp:stat_trendincrease"]+vcov(model)$cond["temp:stat_trendincrease","temp:stat_trendincrease"]),
sqrt(vcov(model)$cond["temp_trend","temp_trend"]),
sqrt(vcov(model)$cond["temp_trend","temp_trend"]+2*vcov(model)$cond["temp_trend","temp_trend:stat_trendstable"]+vcov(model)$cond["temp_trend:stat_trendstable","temp_trend:stat_trendstable"]),
sqrt(vcov(model)$cond["temp_trend","temp_trend"]+2*vcov(model)$cond["temp_trend","temp_trend:stat_trendincrease"]+vcov(model)$cond["temp_trend:stat_trendincrease","temp_trend:stat_trendincrease"]))
# point estimates per driver and trend group (reference + interaction)
est=c(summary(model)$coeff$cond["ratio",1],
summary(model)$coeff$cond["ratio",1]+summary(model)$coeff$cond["ratio:stat_trendstable",1],
summary(model)$coeff$cond["ratio",1]+summary(model)$coeff$cond["ratio:stat_trendincrease",1],
summary(model)$coeff$cond["urban",1],
summary(model)$coeff$cond["urban",1]+summary(model)$coeff$cond["urban:stat_trendstable",1],
summary(model)$coeff$cond["urban",1]+summary(model)$coeff$cond["urban:stat_trendincrease",1],
summary(model)$coeff$cond["temp",1],
summary(model)$coeff$cond["temp",1]+summary(model)$coeff$cond["temp:stat_trendstable",1],
summary(model)$coeff$cond["temp",1]+summary(model)$coeff$cond["temp:stat_trendincrease",1],
summary(model)$coeff$cond["temp_trend",1],
summary(model)$coeff$cond["temp_trend",1]+summary(model)$coeff$cond["temp_trend:stat_trendstable",1],
summary(model)$coeff$cond["temp_trend",1]+summary(model)$coeff$cond["temp_trend:stat_trendincrease",1])
# collect occupancy-model effects, with 95% Wald CIs and a significance flag
dat1=data.frame(est=est,sde=sde,group=rep(c("decline","stable","increase"),4),
varia=rep(c("Agriculture intensification","Urbanization","Inter-annual temp. changes","Temperature trend"),each=3),model="lmer")
dat1$cate="occupancy"
dat1$lwr=dat1$est-1.96*dat1$sde
dat1$upr=dat1$est+1.96*dat1$sde
dat1$signi=">0.05"
dat1$signi[which(dat1$upr<0)]="<0.05"
dat1$signi[which(dat1$lwr>0)]="<0.05"
rm(model)
# phenology model: same structure, by phenological-shift status
bidonb$Annee2=numFactor(bidonb$Annee-1950)
model=glmmTMB(pheno_derivs~(ratio+temp+temp_trend+urban)*stat_year+ou(Annee2+0|species),data=bidonb,weights=1/pheno_derivs_er,
control=glmmTMBControl(optCtrl = list(iter.max=100000000, eval.max=100000000)))
# standard errors per driver and shift group ("advance" = reference level)
sde=c(sqrt(vcov(model)$cond["ratio","ratio"]),
sqrt(vcov(model)$cond["ratio","ratio"]+2*vcov(model)$cond["ratio","ratio:stat_yearunaffected"]+vcov(model)$cond["ratio:stat_yearunaffected","ratio:stat_yearunaffected"]),
sqrt(vcov(model)$cond["ratio","ratio"]+2*vcov(model)$cond["ratio","ratio:stat_yeardelay"]+vcov(model)$cond["ratio:stat_yeardelay","ratio:stat_yeardelay"]),
sqrt(vcov(model)$cond["urban","urban"]),
sqrt(vcov(model)$cond["urban","urban"]+2*vcov(model)$cond["urban","urban:stat_yearunaffected"]+vcov(model)$cond["urban:stat_yearunaffected","urban:stat_yearunaffected"]),
sqrt(vcov(model)$cond["urban","urban"]+2*vcov(model)$cond["urban","urban:stat_yeardelay"]+vcov(model)$cond["urban:stat_yeardelay","urban:stat_yeardelay"]),
sqrt(vcov(model)$cond["temp","temp"]),
sqrt(vcov(model)$cond["temp","temp"]+2*vcov(model)$cond["temp","temp:stat_yearunaffected"]+vcov(model)$cond["temp:stat_yearunaffected","temp:stat_yearunaffected"]),
sqrt(vcov(model)$cond["temp","temp"]+2*vcov(model)$cond["temp","temp:stat_yeardelay"]+vcov(model)$cond["temp:stat_yeardelay","temp:stat_yeardelay"]),
sqrt(vcov(model)$cond["temp_trend","temp_trend"]),
sqrt(vcov(model)$cond["temp_trend","temp_trend"]+2*vcov(model)$cond["temp_trend","temp_trend:stat_yearunaffected"]+vcov(model)$cond["temp_trend:stat_yearunaffected","temp_trend:stat_yearunaffected"]),
sqrt(vcov(model)$cond["temp_trend","temp_trend"]+2*vcov(model)$cond["temp_trend","temp_trend:stat_yeardelay"]+vcov(model)$cond["temp_trend:stat_yeardelay","temp_trend:stat_yeardelay"]))
# point estimates per driver and shift group (reference + interaction)
est=c(summary(model)$coeff$cond["ratio",1],
summary(model)$coeff$cond["ratio",1]+summary(model)$coeff$cond["ratio:stat_yearunaffected",1],
summary(model)$coeff$cond["ratio",1]+summary(model)$coeff$cond["ratio:stat_yeardelay",1],
summary(model)$coeff$cond["urban",1],
summary(model)$coeff$cond["urban",1]+summary(model)$coeff$cond["urban:stat_yearunaffected",1],
summary(model)$coeff$cond["urban",1]+summary(model)$coeff$cond["urban:stat_yeardelay",1],
summary(model)$coeff$cond["temp",1],
summary(model)$coeff$cond["temp",1]+summary(model)$coeff$cond["temp:stat_yearunaffected",1],
summary(model)$coeff$cond["temp",1]+summary(model)$coeff$cond["temp:stat_yeardelay",1],
summary(model)$coeff$cond["temp_trend",1],
summary(model)$coeff$cond["temp_trend",1]+summary(model)$coeff$cond["temp_trend:stat_yearunaffected",1],
summary(model)$coeff$cond["temp_trend",1]+summary(model)$coeff$cond["temp_trend:stat_yeardelay",1])
# collect phenology-model effects, with 95% Wald CIs and a significance flag
dat2=data.frame(est=est,sde=sde,group=rep(c("advance","unaffected","delay"),4),
varia=rep(c("Agriculture intensification","Urbanization","Inter-annual temp. changes","Temperature trend"),each=3),model="lmer")
dat2$cate="phenology"
dat2$lwr=dat2$est-1.96*dat2$sde
dat2$upr=dat2$est+1.96*dat2$sde
dat2$signi=">0.05"
dat2$signi[which(dat2$upr<0)]="<0.05"
dat2$signi[which(dat2$lwr>0)]="<0.05"
# stack both models' results and append to the across-span accumulator `bf`
b=rbind(dat1,dat2)
b$nderivs_pheno=nrow(bidonb)
b$nderivs_occ=nrow(bidono)
b$spani=j
if(j==0.1){bf=b}else{bf=rbind(bf,b)}
}
# ---- Assemble and plot the sensitivity-analysis results (fig. S5) ----
# Fix the order of the driver variables for the facet layout.
bf$varia=factor(bf$varia,c("Inter-annual temp. changes","Temperature trend","Urbanization","Agriculture intensification"))
bf$moy=bf$est
# Species counts per occupancy-trend group (used in the legend labels).
ponds1=unique(bidono[,c("species","ndelta2","stat_trend"),]) %>%
dplyr::group_by(stat_trend) %>% dplyr::summarise(n=length(species))
ponds1$cate="occupancy"
names(ponds1)[1]="group"
# Species counts per phenological-shift group.
ponds2=unique(bidonb[,c("species","ndelta","stat_year"),]) %>%
dplyr::group_by(stat_year) %>% dplyr::summarise(n=length(species))
ponds2$cate="phenology"
names(ponds2)[1]="group"
bf=merge(bf,rbind(ponds1,ponds2),by=c("group","cate"))
# Fix the display order of the six groups and append "(n=...)" to the labels.
bf$stat=factor(bf$group,c("decline","advance","stable","unaffected","increase","delay"))
bf=bf[order(bf$stat),]
bf$stat2=bf$stat
bf$stat=paste0(bf$stat," (n=",bf$n,")")
bf$stat=factor(bf$stat,unique(bf$stat))
bf$signi=factor(bf$signi,c(">0.05","<0.05"))
# Significance stars for interaction p-values.
# NOTE(review): bf$pvalinter is never created in this script, so the three
# subsetting lines below are no-ops and bf$star stays "ns" -- confirm intended.
bf$star="ns"
bf$star[which(bf$pvalinter<0.05)]="*"
bf$star[which(bf$pvalinter<0.01)]="**"
bf$star[which(bf$pvalinter<0.001)]="***"
labo=unique(bf[,c("cate","varia","star")])
labo=labo[order(labo$varia),]
# Panel (a): occupancy-change effects by trend group and smoothing parameter.
# NOTE(review): scale_shape_manual() is called twice in the same chain; the
# second call overrides the first -- probably leftover code to clean up.
pl1=ggplot(data=subset(bf,cate=="occupancy" & varia!="inter"),aes(x=as.factor(spani),y=moy,col=stat,shape=signi))+
geom_hline(yintercept=0,size=1.2)+
scale_shape_manual(values=c(19,21),guide=F)+scale_shape_manual(values=c(21,19),guide=F,na.value=15,drop=F)+
geom_pointrange(aes(ymin=lwr,ymax=upr),position=position_dodge(width = 0.50),fill="white")+
theme_bw()+ylab("Standardised effects")+
theme(panel.grid.minor=element_blank(),plot.title=element_text(size=14,face="bold"),
legend.title = element_blank(),axis.title.x=element_blank(),
strip.background = element_blank(),legend.position="bottom")+
scale_color_discrete()+ggtitle("a")+xlab("Maximum time-lag allowed (in years)")+
scale_colour_manual(values=c("darkorchid4","dodgerblue3",
"azure4"))+facet_wrap(~varia,nrow=1)
# Panel (b): phenology-change effects by shift group and smoothing parameter.
pl2=ggplot(data=subset(bf,cate=="phenology" & varia!="inter"),aes(x=as.factor(spani),y=moy,col=stat,shape=signi))+
geom_hline(yintercept=0,size=1.2)+
geom_pointrange(aes(ymin=lwr,ymax=upr),position=position_dodge(width = 0.50),fill="white")+
scale_shape_manual(values=c(19,21),guide=F)+scale_shape_manual(values=c(21,19),guide=F,na.value=15,drop=F)+
theme_bw()+ylab("Standardised effects \n")+
theme(panel.grid.minor=element_blank(),plot.title=element_text(size=14,face="bold"),legend.position="bottom",axis.title.x=element_blank(),
strip.background = element_blank(),legend.title = element_blank())+
scale_color_discrete()+ggtitle("b")+xlab("Maximum time-lag allowed (in years)")+
scale_colour_manual(values=c("firebrick4","orange","lemonchiffon3"))+
facet_wrap(~varia,nrow=1)
# Preview on the current device, then write the combined figure to PNG.
gridExtra::grid.arrange(pl1,pl2,bottom="Smoothing parameter value",nrow=2)
png(paste0("fig_s5.png"),width=1200,height=1000,res=140)
gridExtra::grid.arrange(pl1,pl2,bottom="Smoothing parameter value",nrow=2)
dev.off();
| /figure_s5.r | no_license | f-duchenne/Wild-bees-in-Belgium | R | false | false | 13,145 | r | setwd(dir="C:/Users/Francois/Documents/pheno abeilles belges/scripts finaux/article/data")
# Package dependencies for figure S5.
# All loads use library(), which errors immediately on a missing package;
# the original mixed in require(), which only returns FALSE and lets the
# script fail later with a confusing "could not find function" error.
library(ggplot2)
library(mgcv)
library(MASS)
library(doBy)
library(gridExtra)
library(lubridate)
library(chron)
library(dplyr)
library(rgbif)
library(reshape2)
library(car)
library(data.table)
library(lme4)
library(RColorBrewer)
library(phia)
library(ggsignif)
library(blme)
library(glmmTMB)
# Sensitivity analysis: re-run the full occupancy / phenology modelling
# pipeline for each loess smoothing span in 0.1..0.9 and accumulate the
# standardised effect estimates in `bf` (one set of rows per span value).
for(j in seq(0.1,0.9,0.1)){
spani=j
# Yearly occupancy / mean flight date (MFD) estimates and linear MFD shifts.
liste2=read.table("yearly_estimates_of_occupancy_and_mfd_only_for_years_withdata.txt",sep="\t",header=T)
liste=read.table("linear_mfd_shifts.txt",sep="\t",header=T,na.strings=c("","NA"))
liste2$species=as.character(liste2$species)
liste$species=as.character(liste$species)
# Nudge occupancy quantiles off the 0/1 boundaries so the logit stays finite.
liste2$quant_025[liste2$quant_025==0]=1e-16
liste2$quant_975[liste2$quant_975==1]=1-1e-16
liste2[,c("mean2","quant_0252","quant_9752")]=liste2[,c("mean","quant_025","quant_975")]
# Blank out estimates that did not converge (Gelman-Rubin rhat > 1.1).
liste2[which(liste2$rhat>1.1),c("mean2","quant_0252","quant_9752")]=NA
# Propagated standard error of a first difference: sqrt(se_t^2 + se_{t-1}^2).
# NOTE(review): at i == 1, x[i-1] is zero-length, which makes the RHS
# zero-length; `vec[1] <- numeric(0)` should raise "replacement has length
# zero" — confirm how this function is meant to behave at the first index.
err=function(x){
vec=c()
for(i in 1:length(x)){
vec[i]=sqrt(x[i]^2+x[i-1]^2)
}
return(vec)}
logit_func=function(x){log(x/(1-x))}
# Per species: year-to-year derivatives of occupancy (logit scale) and MFD,
# plus a precision-weighted linear trend in raw occupancy.
for(i in 1:nrow(liste)){
bidon2=subset(liste2,species==liste$species[i])
wci2=logit_func(bidon2$quant_9752)-logit_func(bidon2$quant_0252)
bidon2$occ_derivs=c(NA,diff(logit_func(bidon2$mean2)))
bidon2$occ_derivs_er=err(wci2)
bidon2$pheno_derivs=c(NA,diff(bidon2$fit))
bidon2$pheno_derivs_er=err(bidon2$se.fit)
wci=bidon2$quant_975-bidon2$quant_025
# Linear occupancy trend weighted by the inverse credible-interval width.
model3=lm(mean~Annee,data=bidon2,weights=1/wci)
liste$trend_effect[i]=model3$coeff[2]
liste$trend_pval[i]=Anova(model3)[1,4]
# Classify each species by demographic trend and by phenological shift.
liste$stat_trend[i]=if(liste$trend_pval[i]>0.05){"stable"}else{if(liste$trend_effect[i]>0){"increase"}else{"decline"}}
liste$stat_year[i]=if(liste$year_pval[i]>0.05){"unaffected"}else{if(liste$year_effect[i]>0){"delay"}else{"advance"}}
if(i==1){res=bidon2}else{res=rbind(res,bidon2)}
}
# Environmental covariates: loess-smoothed yearly changes in the "ratio"
# variable, temperature (trend and inter-annual) and urbanization.
tabvar=as.data.frame(fread("belgium_variables_SIG.txt",sep="\t",header=T))
newdat=data.frame(Annee=1902:2016)
tabvar=subset(tabvar,Annee>=1900)
model=loess(value~Annee,data=subset(tabvar,variable=="ratio" & !is.na(value)),span=spani)
tabvarb=cbind(newdat,c(NA,diff(predict(model,newdata=newdat))),"ratio")
names(tabvarb)=c("Annee","value","variable")
model=loess(value~Annee,data=subset(tabvar,variable=="temp" & !is.na(value) & Annee>=1902),span=spani)
tabvarc=cbind(newdat,c(NA,diff(predict(model,newdata=newdat))),"temp_trend")
names(tabvarc)=c("Annee","value","variable")
model=loess(value~Annee,data=subset(tabvar,variable=="temp" & !is.na(value) & Annee>=1902),span=spani)
tabvare=cbind(newdat,c(NA,diff(predict(model,newdata=newdat))),"temp")
names(tabvare)=c("Annee","value","variable")
# Inter-annual temperature change uses the raw year-to-year differences,
# overwriting the smoothed values computed just above.
tabvare$value=c(NA,diff(subset(tabvar,variable=="temp" & !is.na(value) & Annee>=1902)$value))
# Urbanization is always smoothed with span 0.2, independent of `spani`.
model=loess(value~Annee,data=subset(tabvar,variable=="urban" & !is.na(value)),span=0.2)
tabvarf=cbind(newdat,c(NA,diff(predict(model,newdata=newdat))),"urban")
names(tabvarf)=c("Annee","value","variable")
tabvar=rbind(tabvarb,tabvarc,tabvare,tabvarf)
tabvar=subset(tabvar,Annee>=1950)
# Scale each covariate (no centering) so effect sizes are comparable.
tabvar <- tabvar %>% dplyr::group_by(variable) %>% dplyr::mutate(value=scale(value,center=F,scale=T))
tabvar2=dcast(tabvar,Annee~variable,value.var="value")
final=merge(res,tabvar2,by="Annee")
final=merge(final,liste[,c("species","stat_trend","stat_year")],by="species")
# Count usable derivatives per species (NA and error-magnitude filters).
final=as.data.frame(final %>% dplyr::group_by(species) %>%
dplyr::mutate(ndelta=length(which(!is.na(pheno_derivs) & abs(pheno_derivs)<50)),
ndelta2=length(which(!is.na(occ_derivs) & occ_derivs_er<30))))
final$stat_trend=as.factor(final$stat_trend)
final$stat_year=as.factor(final$stat_year)
# Keep species with at least 25 usable year-to-year changes.
bidonb=subset(final,!is.na(pheno_derivs) & ndelta>=25 & abs(pheno_derivs)<50)
bidono=subset(final,!is.na(occ_derivs) & ndelta2>=25 & occ_derivs_er<30)
# Occupancy model: covariate x trend-group interactions with an
# Ornstein-Uhlenbeck temporal covariance within species (glmmTMB ou()).
bidono$Annee2=numFactor(bidono$Annee-1950)
model=glmmTMB(occ_derivs~(ratio+temp+temp_trend+urban)*stat_trend+
ou(Annee2+0 | species),data=bidono,weights=(1/occ_derivs_er)^0.2,
control=glmmTMBControl(optCtrl = list(iter.max=10000000, eval.max=10000000)))
# Standard errors of group-specific slopes, combining the reference slope
# and its interaction term: Var(a+b) = Var(a) + 2*Cov(a,b) + Var(b).
sde=c(sqrt(vcov(model)$cond["ratio","ratio"]),
sqrt(vcov(model)$cond["ratio","ratio"]+2*vcov(model)$cond["ratio","ratio:stat_trendstable"]+vcov(model)$cond["ratio:stat_trendstable","ratio:stat_trendstable"]),
sqrt(vcov(model)$cond["ratio","ratio"]+2*vcov(model)$cond["ratio","ratio:stat_trendincrease"]+vcov(model)$cond["ratio:stat_trendincrease","ratio:stat_trendincrease"]),
sqrt(vcov(model)$cond["urban","urban"]),
sqrt(vcov(model)$cond["urban","urban"]+2*vcov(model)$cond["urban","urban:stat_trendstable"]+vcov(model)$cond["urban:stat_trendstable","urban:stat_trendstable"]),
sqrt(vcov(model)$cond["urban","urban"]+2*vcov(model)$cond["urban","urban:stat_trendincrease"]+vcov(model)$cond["urban:stat_trendincrease","urban:stat_trendincrease"]),
sqrt(vcov(model)$cond["temp","temp"]),
sqrt(vcov(model)$cond["temp","temp"]+2*vcov(model)$cond["temp","temp:stat_trendstable"]+vcov(model)$cond["temp:stat_trendstable","temp:stat_trendstable"]),
sqrt(vcov(model)$cond["temp","temp"]+2*vcov(model)$cond["temp","temp:stat_trendincrease"]+vcov(model)$cond["temp:stat_trendincrease","temp:stat_trendincrease"]),
sqrt(vcov(model)$cond["temp_trend","temp_trend"]),
sqrt(vcov(model)$cond["temp_trend","temp_trend"]+2*vcov(model)$cond["temp_trend","temp_trend:stat_trendstable"]+vcov(model)$cond["temp_trend:stat_trendstable","temp_trend:stat_trendstable"]),
sqrt(vcov(model)$cond["temp_trend","temp_trend"]+2*vcov(model)$cond["temp_trend","temp_trend:stat_trendincrease"]+vcov(model)$cond["temp_trend:stat_trendincrease","temp_trend:stat_trendincrease"]))
# Group-specific point estimates: reference slope plus interaction offset.
est=c(summary(model)$coeff$cond["ratio",1],
summary(model)$coeff$cond["ratio",1]+summary(model)$coeff$cond["ratio:stat_trendstable",1],
summary(model)$coeff$cond["ratio",1]+summary(model)$coeff$cond["ratio:stat_trendincrease",1],
summary(model)$coeff$cond["urban",1],
summary(model)$coeff$cond["urban",1]+summary(model)$coeff$cond["urban:stat_trendstable",1],
summary(model)$coeff$cond["urban",1]+summary(model)$coeff$cond["urban:stat_trendincrease",1],
summary(model)$coeff$cond["temp",1],
summary(model)$coeff$cond["temp",1]+summary(model)$coeff$cond["temp:stat_trendstable",1],
summary(model)$coeff$cond["temp",1]+summary(model)$coeff$cond["temp:stat_trendincrease",1],
summary(model)$coeff$cond["temp_trend",1],
summary(model)$coeff$cond["temp_trend",1]+summary(model)$coeff$cond["temp_trend:stat_trendstable",1],
summary(model)$coeff$cond["temp_trend",1]+summary(model)$coeff$cond["temp_trend:stat_trendincrease",1])
dat1=data.frame(est=est,sde=sde,group=rep(c("decline","stable","increase"),4),
varia=rep(c("Agriculture intensification","Urbanization","Inter-annual temp. changes","Temperature trend"),each=3),model="lmer")
dat1$cate="occupancy"
# 95% Wald confidence intervals and a two-sided significance flag.
dat1$lwr=dat1$est-1.96*dat1$sde
dat1$upr=dat1$est+1.96*dat1$sde
dat1$signi=">0.05"
dat1$signi[which(dat1$upr<0)]="<0.05"
dat1$signi[which(dat1$lwr>0)]="<0.05"
rm(model)
# Phenology model: same structure, grouped by phenological shift class.
bidonb$Annee2=numFactor(bidonb$Annee-1950)
model=glmmTMB(pheno_derivs~(ratio+temp+temp_trend+urban)*stat_year+ou(Annee2+0|species),data=bidonb,weights=1/pheno_derivs_er,
control=glmmTMBControl(optCtrl = list(iter.max=100000000, eval.max=100000000)))
sde=c(sqrt(vcov(model)$cond["ratio","ratio"]),
sqrt(vcov(model)$cond["ratio","ratio"]+2*vcov(model)$cond["ratio","ratio:stat_yearunaffected"]+vcov(model)$cond["ratio:stat_yearunaffected","ratio:stat_yearunaffected"]),
sqrt(vcov(model)$cond["ratio","ratio"]+2*vcov(model)$cond["ratio","ratio:stat_yeardelay"]+vcov(model)$cond["ratio:stat_yeardelay","ratio:stat_yeardelay"]),
sqrt(vcov(model)$cond["urban","urban"]),
sqrt(vcov(model)$cond["urban","urban"]+2*vcov(model)$cond["urban","urban:stat_yearunaffected"]+vcov(model)$cond["urban:stat_yearunaffected","urban:stat_yearunaffected"]),
sqrt(vcov(model)$cond["urban","urban"]+2*vcov(model)$cond["urban","urban:stat_yeardelay"]+vcov(model)$cond["urban:stat_yeardelay","urban:stat_yeardelay"]),
sqrt(vcov(model)$cond["temp","temp"]),
sqrt(vcov(model)$cond["temp","temp"]+2*vcov(model)$cond["temp","temp:stat_yearunaffected"]+vcov(model)$cond["temp:stat_yearunaffected","temp:stat_yearunaffected"]),
sqrt(vcov(model)$cond["temp","temp"]+2*vcov(model)$cond["temp","temp:stat_yeardelay"]+vcov(model)$cond["temp:stat_yeardelay","temp:stat_yeardelay"]),
sqrt(vcov(model)$cond["temp_trend","temp_trend"]),
sqrt(vcov(model)$cond["temp_trend","temp_trend"]+2*vcov(model)$cond["temp_trend","temp_trend:stat_yearunaffected"]+vcov(model)$cond["temp_trend:stat_yearunaffected","temp_trend:stat_yearunaffected"]),
sqrt(vcov(model)$cond["temp_trend","temp_trend"]+2*vcov(model)$cond["temp_trend","temp_trend:stat_yeardelay"]+vcov(model)$cond["temp_trend:stat_yeardelay","temp_trend:stat_yeardelay"]))
est=c(summary(model)$coeff$cond["ratio",1],
summary(model)$coeff$cond["ratio",1]+summary(model)$coeff$cond["ratio:stat_yearunaffected",1],
summary(model)$coeff$cond["ratio",1]+summary(model)$coeff$cond["ratio:stat_yeardelay",1],
summary(model)$coeff$cond["urban",1],
summary(model)$coeff$cond["urban",1]+summary(model)$coeff$cond["urban:stat_yearunaffected",1],
summary(model)$coeff$cond["urban",1]+summary(model)$coeff$cond["urban:stat_yeardelay",1],
summary(model)$coeff$cond["temp",1],
summary(model)$coeff$cond["temp",1]+summary(model)$coeff$cond["temp:stat_yearunaffected",1],
summary(model)$coeff$cond["temp",1]+summary(model)$coeff$cond["temp:stat_yeardelay",1],
summary(model)$coeff$cond["temp_trend",1],
summary(model)$coeff$cond["temp_trend",1]+summary(model)$coeff$cond["temp_trend:stat_yearunaffected",1],
summary(model)$coeff$cond["temp_trend",1]+summary(model)$coeff$cond["temp_trend:stat_yeardelay",1])
dat2=data.frame(est=est,sde=sde,group=rep(c("advance","unaffected","delay"),4),
varia=rep(c("Agriculture intensification","Urbanization","Inter-annual temp. changes","Temperature trend"),each=3),model="lmer")
dat2$cate="phenology"
dat2$lwr=dat2$est-1.96*dat2$sde
dat2$upr=dat2$est+1.96*dat2$sde
dat2$signi=">0.05"
dat2$signi[which(dat2$upr<0)]="<0.05"
dat2$signi[which(dat2$lwr>0)]="<0.05"
# Stack both panels' results and record sample sizes and the span used.
b=rbind(dat1,dat2)
b$nderivs_pheno=nrow(bidonb)
b$nderivs_occ=nrow(bidono)
b$spani=j
if(j==0.1){bf=b}else{bf=rbind(bf,b)}
}
# Order the facet variable and attach per-group species counts (n),
# which are used below to build the "group (n=...)" legend labels.
bf$varia=factor(bf$varia,c("Inter-annual temp. changes","Temperature trend","Urbanization","Agriculture intensification"))
bf$moy=bf$est
# Species counts per demographic trend group (occupancy panel).
# NOTE(review): bidono/bidonb hold data from the *last* loop iteration
# (span = 0.9) only; the counts are assumed stable across spans — confirm.
ponds1=unique(bidono[,c("species","ndelta2","stat_trend"),]) %>%
dplyr::group_by(stat_trend) %>% dplyr::summarise(n=length(species))
ponds1$cate="occupancy"
names(ponds1)[1]="group"
# Species counts per phenology group (phenology panel).
ponds2=unique(bidonb[,c("species","ndelta","stat_year"),]) %>%
dplyr::group_by(stat_year) %>% dplyr::summarise(n=length(species))
ponds2$cate="phenology"
names(ponds2)[1]="group"
bf=merge(bf,rbind(ponds1,ponds2),by=c("group","cate"))
# Order groups for plotting and build legend labels with per-group sizes.
bf$stat=factor(bf$group,c("decline","advance","stable","unaffected","increase","delay"))
bf=bf[order(bf$stat),]
bf$stat2=bf$stat
bf$stat=paste0(bf$stat," (n=",bf$n,")")
bf$stat=factor(bf$stat,unique(bf$stat))
bf$signi=factor(bf$signi,c(">0.05","<0.05"))
# Significance stars for interaction p-values.
# NOTE(review): `bf` has no `pvalinter` column at this point (dat1/dat2 never
# create one), so these subsets are empty and every star stays "ns" — confirm
# whether pvalinter was meant to be computed upstream.
bf$star="ns"
bf$star[which(bf$pvalinter<0.05)]="*"
bf$star[which(bf$pvalinter<0.01)]="**"
bf$star[which(bf$pvalinter<0.001)]="***"
labo=unique(bf[,c("cate","varia","star")])
labo=labo[order(labo$varia),]
# Panel (a): standardised effects on occupancy change, by demographic trend
# group, one facet per covariate.
# Cleanup: the original stacked two scale_shape_manual() calls and both
# scale_color_discrete() and scale_colour_manual() in each plot; ggplot2
# keeps only the last scale per aesthetic, so the dead calls are removed.
# `guide = "none"` replaces the deprecated logical `guide = F`.
pl1=ggplot(data=subset(bf,cate=="occupancy" & varia!="inter"),aes(x=as.factor(spani),y=moy,col=stat,shape=signi))+
geom_hline(yintercept=0,size=1.2)+
scale_shape_manual(values=c(21,19),guide="none",na.value=15,drop=F)+
geom_pointrange(aes(ymin=lwr,ymax=upr),position=position_dodge(width = 0.50),fill="white")+
theme_bw()+ylab("Standardised effects")+
theme(panel.grid.minor=element_blank(),plot.title=element_text(size=14,face="bold"),
legend.title = element_blank(),axis.title.x=element_blank(),
strip.background = element_blank(),legend.position="bottom")+
ggtitle("a")+xlab("Maximum time-lag allowed (in years)")+
scale_colour_manual(values=c("darkorchid4","dodgerblue3",
"azure4"))+facet_wrap(~varia,nrow=1)
# Panel (b): same layout for phenological change, by phenology group.
pl2=ggplot(data=subset(bf,cate=="phenology" & varia!="inter"),aes(x=as.factor(spani),y=moy,col=stat,shape=signi))+
geom_hline(yintercept=0,size=1.2)+
geom_pointrange(aes(ymin=lwr,ymax=upr),position=position_dodge(width = 0.50),fill="white")+
scale_shape_manual(values=c(21,19),guide="none",na.value=15,drop=F)+
theme_bw()+ylab("Standardised effects \n")+
theme(panel.grid.minor=element_blank(),plot.title=element_text(size=14,face="bold"),legend.position="bottom",axis.title.x=element_blank(),
strip.background = element_blank(),legend.title = element_blank())+
ggtitle("b")+xlab("Maximum time-lag allowed (in years)")+
scale_colour_manual(values=c("firebrick4","orange","lemonchiffon3"))+
facet_wrap(~varia,nrow=1)
# Preview the assembled figure on the active device, then write it to disk.
gridExtra::grid.arrange(pl1,pl2,bottom="Smoothing parameter value",nrow=2)
# paste0() around a constant string was a no-op; pass the filename directly.
png("fig_s5.png",width=1200,height=1000,res=140)
gridExtra::grid.arrange(pl1,pl2,bottom="Smoothing parameter value",nrow=2)
dev.off()
|
# arrays: demo of building and summarising a 3-D array of sales figures.
# 2 companies, each company has 3 departments, each department has 4 salesmen.
?length
company=c("c1","c2")
dept=c("d1","d2","d3")
salesman=c("s1","s2","s3","s4")
company
dept
salesman
set.seed(1234) #keep amount constant
sales=ceiling(runif(2*3*4,50,100)) #assign random sales values between 50 and 100
sales
cat(sales)
mean(sales)
# Re-seeding with the same value reproduces the identical sales vector.
set.seed(1234)
sales=ceiling(runif(2*3*4,50,100))
sales
?array
# 4 x 3 x 2 array: rows = salesmen, columns = departments, slices = companies.
salesarray=array(sales,c(4,3,2),dimnames=list(salesman,dept,company))
salesarray
dimnames(salesarray)[3]  # names of the third dimension (companies)
salesarray[,2,]  # department d2 across all salesmen and both companies
apply(salesarray,c(1,2,3),length)  # trivially 1 per cell (element-wise length)
apply(salesarray,c(2,3),sum)  # total sales per department within each company
| /Data structures/arrays.R | no_license | Shubham-Pujan/Practicing_R | R | false | false | 567 | r | #arrays
# 2 companies, each company has 3 departments, each department has 4 salesmen.
# (Duplicate record of arrays.R in this dump; first line fused into the
# preceding metadata row.)
?length
company=c("c1","c2")
dept=c("d1","d2","d3")
salesman=c("s1","s2","s3","s4")
company
dept
salesman
set.seed(1234) #keep amount constant
sales=ceiling(runif(2*3*4,50,100)) #assign random sales values between 50 and 100
sales
cat(sales)
mean(sales)
# Re-seeding reproduces the identical sales vector.
set.seed(1234)
sales=ceiling(runif(2*3*4,50,100))
sales
?array
# 4 x 3 x 2 array: rows = salesmen, columns = departments, slices = companies.
salesarray=array(sales,c(4,3,2),dimnames=list(salesman,dept,company))
salesarray
dimnames(salesarray)[3]  # names of the third dimension (companies)
salesarray[,2,]  # department d2 across both companies
apply(salesarray,c(1,2,3),length)  # trivially 1 per cell
apply(salesarray,c(2,3),sum)  # total sales per department within each company
# Shiny UI for the "Crime Report" app: a dark-themed navbar with three tabs
# (About page, race-share pie chart, top-crimes bubble chart).
library(shiny)
library(plotly)
#install.packages("shinythemes")
library(shinythemes)
# NOTE(review): `crimeType` and `race` are not defined in this file; they are
# presumably provided by global.R, which Shiny sources before ui.R — confirm.
navbarPage(theme = shinytheme("darkly"), "Crime Report",
           # Tab 1: static report text rendered from report.md.
           tabPanel("About",
                    fluidRow(
                      column(5,includeMarkdown("report.md")
                      )
                      # column(7, img(class="img-polaroid",
                      #              src=("https://www.brennancenter.org/sites/default/files/styles/individual_node_page/public/blog/crime%20cuffs.jpg?itok=WP0o5xht")
                      #              )
                      #        )
                    )
           ),
           # Tab 2: share of arrests by race for a chosen crime type and age
           # range, shown as a pie chart or a table.
           tabPanel("Search Share of Each Race",
                    sidebarLayout(
                      sidebarPanel(
                        selectInput("Type",
                                    label = "Choose a Crime Type:",
                                    choices = c(crimeType),
                                    selected = "Robbery"
                        ),
                        hr(),
                        radioButtons("Age",
                                     label = "Choose an Age Range: ",
                                     choices = c("Total arrests" = "total",
                                                 "Under 18" = "under",
                                                 "Above 18" = "over"
                                     )
                        ),
                        helpText(hr("Note: click on a race type on the legend bar to exclude it from the graph"))
                      ),
                      mainPanel(
                        tabsetPanel(type = "tabs",
                                    tabPanel("Pie Chart", plotlyOutput("pie"),textOutput("pieAnalysis1"),
                                             textOutput("pieAnalysis2"),textOutput("pieAnalysis3"),textOutput("pieAnalysis4")),
                                    tabPanel("Table", tableOutput("pieTable"))
                        )
                      )
                    )
           ),
           # Tab 3: top-N crimes for a chosen race, shown as a bubble chart
           # or a table; N is controlled by a slider (1-30).
           tabPanel("Search Top Crime",
                    sidebarLayout(
                      sidebarPanel(
                        selectInput("Race",
                                    label = "Choose a Race:",
                                    choices = c(race),
                                    selected = "White"
                        ),
                        hr(),
                        sliderInput("Num", label = "Choose a number of crime you want ",
                                    min = 1, max = 30, value = 5
                        ),
                        helpText("View the top number of crimes criminals in this race were arrested for.")
                      ),
                      mainPanel(
                        tabsetPanel(type = "tabs",
                                    tabPanel("Bubble Chart", plotlyOutput("bubble"), textOutput("text1"),
                                             textOutput("text2"),textOutput("text3")),
                                    tabPanel("Table", tableOutput("bubbleTable"))
                        )
                      )
                    )
           )
)
| /ui.R | no_license | chl0908/Final-Project | R | false | false | 3,540 | r | library(shiny)
library(plotly)
#install.packages("shinythemes")
library(shinythemes)
# Duplicate record of ui.R in this dump (the leading `library(shiny)` line is
# fused into the preceding metadata row).
# NOTE(review): `crimeType` and `race` are presumably defined in global.R —
# confirm.
navbarPage(theme = shinytheme("darkly"), "Crime Report",
           # Tab 1: static report text rendered from report.md.
           tabPanel("About",
                    fluidRow(
                      column(5,includeMarkdown("report.md")
                      )
                      # column(7, img(class="img-polaroid",
                      #              src=("https://www.brennancenter.org/sites/default/files/styles/individual_node_page/public/blog/crime%20cuffs.jpg?itok=WP0o5xht")
                      #              )
                      #        )
                    )
           ),
           # Tab 2: share of arrests by race for a chosen crime type and age.
           tabPanel("Search Share of Each Race",
                    sidebarLayout(
                      sidebarPanel(
                        selectInput("Type",
                                    label = "Choose a Crime Type:",
                                    choices = c(crimeType),
                                    selected = "Robbery"
                        ),
                        hr(),
                        radioButtons("Age",
                                     label = "Choose an Age Range: ",
                                     choices = c("Total arrests" = "total",
                                                 "Under 18" = "under",
                                                 "Above 18" = "over"
                                     )
                        ),
                        helpText(hr("Note: click on a race type on the legend bar to exclude it from the graph"))
                      ),
                      mainPanel(
                        tabsetPanel(type = "tabs",
                                    tabPanel("Pie Chart", plotlyOutput("pie"),textOutput("pieAnalysis1"),
                                             textOutput("pieAnalysis2"),textOutput("pieAnalysis3"),textOutput("pieAnalysis4")),
                                    tabPanel("Table", tableOutput("pieTable"))
                        )
                      )
                    )
           ),
           # Tab 3: top-N crimes for a chosen race (slider picks N, 1-30).
           tabPanel("Search Top Crime",
                    sidebarLayout(
                      sidebarPanel(
                        selectInput("Race",
                                    label = "Choose a Race:",
                                    choices = c(race),
                                    selected = "White"
                        ),
                        hr(),
                        sliderInput("Num", label = "Choose a number of crime you want ",
                                    min = 1, max = 30, value = 5
                        ),
                        helpText("View the top number of crimes criminals in this race were arrested for.")
                      ),
                      mainPanel(
                        tabsetPanel(type = "tabs",
                                    tabPanel("Bubble Chart", plotlyOutput("bubble"), textOutput("text1"),
                                             textOutput("text2"),textOutput("text3")),
                                    tabPanel("Table", tableOutput("bubbleTable"))
                        )
                      )
                    )
           )
)
# Catalogue of allowed values and example SubFunction / Factor S4 objects for
# the eye-tracking pipeline: trajectory smoothing, event detection, and
# event / object statistic evaluation.
operations <- c("Event Statistic Evaluation", "Object Statistic Evaluation")
events <- c("Fixation", "Saccade", "Glissade", "Smooth Pursuit", "Artifact", "Gap")
factor_types <- c("numeric", "integer", "factor", "ordFactor")
factor_owners <- c("Experiment", "Subject", "Trial", "Stimulus", "Event Group", "Data Record")
applications <- c("EyesData", "EventData", "AOISequence", "AOIMatrix", "AOIVector")
# Sub Function structure and examples:
## Examples for data smoothing
movAvgFiltering <- new(Class = "SubFunction",
                       fun = movAvgFilt, # a function to evaluate
                       name = "Running Average Filtering", # name of a function
                       settings = list(fl = 3), # default settings to apply evaluating a function
                       description = "Smooth Trajectory using Running Average Filter", # description of a function
                       type = list(operation = "Trajectory Smoothing" # type of operation: one of c("Trajectory Smoothing", "Event Detection", "Event Statistic Evaluation", "Object Statistic Evaluation")
                       )
)
medFiltering <- new(Class = "SubFunction",
                    fun = medianFilt, # a function to evaluate
                    name = "Median Filtering", # name of a function
                    settings = list(fl = 3), # default settings to apply evaluating a function
                    description = "Smooth Trajectory using Median Filter", # description of a function
                    type = list(operation = "Trajectory Smoothing" # type of operation: one of c("Trajectory Smoothing", "Event Detection", "Event Statistic Evaluation", "Object Statistic Evaluation")
                    )
)
# NOTE(review): this assignment reuses the name of the filter function it
# wraps (fun = savGolFiltering). The RHS captures the function before the
# name is rebound, but afterwards `savGolFiltering` refers to the SubFunction
# object, shadowing the function — confirm this is intended (the other two
# examples use distinct names, e.g. movAvgFiltering vs movAvgFilt).
savGolFiltering <- new(Class = "SubFunction",
                       fun = savGolFiltering, # a function to evaluate
                       name = "Savitzky-Golay Filtering", # name of a function
                       settings = list(fl = 3, forder = 2, dorder = 1), # default settings to apply evaluating a function
                       description = "Smooth Trajectory using Savitzky-Golay Filter", # description of a function
                       type = list(operation = "Trajectory Smoothing" # type of operation: one of c("Trajectory Smoothing", "Event Detection", "Event Statistic Evaluation", "Object Statistic Evaluation")
                       )
)
## Examples for event detection
IVTDetection <- new(Class = "SubFunction",
                    fun = IVT, # a function to evaluate
                    name = "IVT Event Detector", # name of a function
                    settings = list(postProcess = F,
                                    VT = 30,
                                    angular = T,
                                    screenDist = 100,
                                    screenDim = c(1280, 1024),
                                    screenSize = c(33.7, 27),
                                    MaxTBetFix = 0.075,
                                    MaxDistBetFix = 0.5,
                                    minFixLen = 0.05,
                                    maxGapLen = 0.07,
                                    maxVel = 1000,
                                    maxAccel = 1000000,
                                    classifyGaps = F), # default settings to apply evaluating a function
                    description = "Events Detection by Velocity Threshold Algorithm", # description of a function
                    type = list(operation = "Event Detection" # type of operation: one of c("Trajectory Smoothing", "Event Detection", "Event Statistic Evaluation", "Object Statistic Evaluation")
                    )
)
## Examples for event parameters evaluation
valCode <- new(Class = "SubFunction",
               fun = getValCode, # a function to evaluate
               name = "Validity Code", # name of a function
               settings = list(), # settings to apply evaluating a function
               description = "Get validity code of event", # description of a function
               type = list(operation = "Event Statistic Evaluation", # type of operation: one of c("Trajectory Smoothing", "Event Detection", "Event Statistic Evaluation", "Object Statistic Evaluation")
                           events = c("Fixation", "Saccade", "Glissade", "Smooth Pursuit"), # to which event groups the fun should be applied
                           output = c(new(Class = "Factor",
                                          varName = "valCode", # name of resulting statistic
                                          description = "Validity code of event", # description of resulting statistic
                                          type = "factor", # type of resulting statistic: one of c("numeric", "integer", "factor", "ordFactor")
                                          levels = c("Invalid", "Valid"), # levels of resulting factor/ordFactor statistic
                                          owner = "Event Group"
                           )
                           )
               )
)
onOffsetDuration <- new(Class = "SubFunction",
                        fun = getOnOffSetDuration, # a function to evaluate
                        name = "On, OffSet and Duration", # name of a function
                        settings = list(), # settings to apply evaluating a function
                        description = "Get onset, offset and duration of event", # description of a function
                        type = list(operation = "Event Statistic Evaluation", # type of operation: one of c("Trajectory Smoothing", "Event Detection", "Event Statistic Evaluation", "Object Statistic Evaluation")
                                    events = c("Fixation", "Saccade", "Glissade", "Smooth Pursuit"), # to which event groups the fun should be applied
                                    output = c(new(Class = "Factor",
                                                   varName = "Onset", # name of resulting statistic
                                                   description = "Onset of event", # description of resulting statistic
                                                   type = "numeric", # type of resulting statistic: one of c("numeric", "integer", "factor", "ordFactor")
                                                   levels = NA, # levels of resulting factor/ordFactor statistic
                                                   owner = "Event Group"
                                    ),
                                    new(Class = "Factor",
                                        varName = "Offset", # name of resulting statistic
                                        description = "Offset of event", # description of resulting statistic
                                        type = "numeric", # type of resulting statistic: one of c("numeric", "integer", "factor", "ordFactor")
                                        levels = NA, # levels of resulting factor/ordFactor statistic
                                        owner = "Event Group"
                                    ),
                                    new(Class = "Factor",
                                        varName = "Duration", # name of resulting statistic
                                        description = "Duration of event", # description of resulting statistic
                                        type = "numeric", # type of resulting statistic: one of c("numeric", "integer", "factor", "ordFactor")
                                        levels = NA, # levels of resulting factor/ordFactor statistic
                                        owner = "Event Group"
                                    )
                                    )
                        )
)
## Example for EyesData object statistic evaluation
trajDuration <- new(Class = "SubFunction",
                    fun = trajDurationEstimator, # a function to evaluate
                    name = "Trajectory Duration", # name of a function
                    settings = list(), # settings to apply evaluating a function
                    description = "Get duration of a gaze trajectory", # description of a function
                    type = list(operation = "Object Statistic Evaluation", # type of operation: one of c("Trajectory Smoothing", "Event Detection", "Event Statistic Evaluation", "Object Statistic Evaluation")
                                # events = c("Fixation", "Saccade", "Glissade", "Smooth Pursuit"), # to which event groups the fun should be applied
                                output = c(new(Class = "Factor",
                                               varName = "trajDuration", # name of resulting statistic
                                               description = "Trajectory Duration", # description of resulting statistic
                                               type = "numeric", # type of resulting statistic: one of c("numeric", "integer", "factor", "ordFactor")
                                               levels = NA, # levels of resulting factor/ordFactor statistic
                                               owner = "Data Record"
                                )
                                ),
                                applyTo = c("EyesData") # to which object a function should be applied to:
                    )
)
| /SubFunctionsExamples.R | no_license | deslion/EyeTrackingProject | R | false | false | 9,424 | r | operations <- c("Event Statistic Evaluation", "Object Statistic Evaluation")
# Duplicate record of SubFunctionsExamples.R in this dump; the leading
# `operations <- ...` line is fused into the preceding metadata row.
events <- c("Fixation", "Saccade", "Glissade", "Smooth Pursuit", "Artifact", "Gap")
factor_types <- c("numeric", "integer", "factor", "ordFactor")
factor_owners <- c("Experiment", "Subject", "Trial", "Stimulus", "Event Group", "Data Record")
applications <- c("EyesData", "EventData", "AOISequence", "AOIMatrix", "AOIVector")
# Sub Function structure and examples:
## Examples for data smoothing
movAvgFiltering <- new(Class = "SubFunction",
                       fun = movAvgFilt, # a function to evaluate
                       name = "Running Average Filtering", # name of a function
                       settings = list(fl = 3), # default settings to apply evaluating a function
                       description = "Smooth Trajectory using Running Average Filter", # description of a function
                       type = list(operation = "Trajectory Smoothing" # type of operation: one of c("Trajectory Smoothing", "Event Detection", "Event Statistic Evaluation", "Object Statistic Evaluation")
                       )
)
medFiltering <- new(Class = "SubFunction",
                    fun = medianFilt, # a function to evaluate
                    name = "Median Filtering", # name of a function
                    settings = list(fl = 3), # default settings to apply evaluating a function
                    description = "Smooth Trajectory using Median Filter", # description of a function
                    type = list(operation = "Trajectory Smoothing" # type of operation: one of c("Trajectory Smoothing", "Event Detection", "Event Statistic Evaluation", "Object Statistic Evaluation")
                    )
)
# NOTE(review): `savGolFiltering` reuses the name of the filter function it
# wraps (fun = savGolFiltering); after assignment the function is shadowed by
# the SubFunction object — confirm intended.
savGolFiltering <- new(Class = "SubFunction",
                       fun = savGolFiltering, # a function to evaluate
                       name = "Savitzky-Golay Filtering", # name of a function
                       settings = list(fl = 3, forder = 2, dorder = 1), # default settings to apply evaluating a function
                       description = "Smooth Trajectory using Savitzky-Golay Filter", # description of a function
                       type = list(operation = "Trajectory Smoothing" # type of operation: one of c("Trajectory Smoothing", "Event Detection", "Event Statistic Evaluation", "Object Statistic Evaluation")
                       )
)
## Examples for event detection
IVTDetection <- new(Class = "SubFunction",
                    fun = IVT, # a function to evaluate
                    name = "IVT Event Detector", # name of a function
                    settings = list(postProcess = F,
                                    VT = 30,
                                    angular = T,
                                    screenDist = 100,
                                    screenDim = c(1280, 1024),
                                    screenSize = c(33.7, 27),
                                    MaxTBetFix = 0.075,
                                    MaxDistBetFix = 0.5,
                                    minFixLen = 0.05,
                                    maxGapLen = 0.07,
                                    maxVel = 1000,
                                    maxAccel = 1000000,
                                    classifyGaps = F), # default settings to apply evaluating a function
                    description = "Events Detection by Velocity Threshold Algorithm", # description of a function
                    type = list(operation = "Event Detection" # type of operation: one of c("Trajectory Smoothing", "Event Detection", "Event Statistic Evaluation", "Object Statistic Evaluation")
                    )
)
## Examples for event parameters evaluation
valCode <- new(Class = "SubFunction",
               fun = getValCode, # a function to evaluate
               name = "Validity Code", # name of a function
               settings = list(), # settings to apply evaluating a function
               description = "Get validity code of event", # description of a function
               type = list(operation = "Event Statistic Evaluation", # type of operation: one of c("Trajectory Smoothing", "Event Detection", "Event Statistic Evaluation", "Object Statistic Evaluation")
                           events = c("Fixation", "Saccade", "Glissade", "Smooth Pursuit"), # to which event groups the fun should be applied
                           output = c(new(Class = "Factor",
                                          varName = "valCode", # name of resulting statistic
                                          description = "Validity code of event", # description of resulting statistic
                                          type = "factor", # type of resulting statistic: one of c("numeric", "integer", "factor", "ordFactor")
                                          levels = c("Invalid", "Valid"), # levels of resulting factor/ordFactor statistic
                                          owner = "Event Group"
                           )
                           )
               )
)
onOffsetDuration <- new(Class = "SubFunction",
                        fun = getOnOffSetDuration, # a function to evaluate
                        name = "On, OffSet and Duration", # name of a function
                        settings = list(), # settings to apply evaluating a function
                        description = "Get onset, offset and duration of event", # description of a function
                        type = list(operation = "Event Statistic Evaluation", # type of operation: one of c("Trajectory Smoothing", "Event Detection", "Event Statistic Evaluation", "Object Statistic Evaluation")
                                    events = c("Fixation", "Saccade", "Glissade", "Smooth Pursuit"), # to which event groups the fun should be applied
                                    output = c(new(Class = "Factor",
                                                   varName = "Onset", # name of resulting statistic
                                                   description = "Onset of event", # description of resulting statistic
                                                   type = "numeric", # type of resulting statistic: one of c("numeric", "integer", "factor", "ordFactor")
                                                   levels = NA, # levels of resulting factor/ordFactor statistic
                                                   owner = "Event Group"
                                    ),
                                    new(Class = "Factor",
                                        varName = "Offset", # name of resulting statistic
                                        description = "Offset of event", # description of resulting statistic
                                        type = "numeric", # type of resulting statistic: one of c("numeric", "integer", "factor", "ordFactor")
                                        levels = NA, # levels of resulting factor/ordFactor statistic
                                        owner = "Event Group"
                                    ),
                                    new(Class = "Factor",
                                        varName = "Duration", # name of resulting statistic
                                        description = "Duration of event", # description of resulting statistic
                                        type = "numeric", # type of resulting statistic: one of c("numeric", "integer", "factor", "ordFactor")
                                        levels = NA, # levels of resulting factor/ordFactor statistic
                                        owner = "Event Group"
                                    )
                                    )
                        )
)
## Example for EyesData object statistic evaluation
trajDuration <- new(Class = "SubFunction",
                    fun = trajDurationEstimator, # a function to evaluate
                    name = "Trajectory Duration", # name of a function
                    settings = list(), # settings to apply evaluating a function
                    description = "Get duration of a gaze trajectory", # description of a function
                    type = list(operation = "Object Statistic Evaluation", # type of operation: one of c("Trajectory Smoothing", "Event Detection", "Event Statistic Evaluation", "Object Statistic Evaluation")
                                # events = c("Fixation", "Saccade", "Glissade", "Smooth Pursuit"), # to which event groups the fun should be applied
                                output = c(new(Class = "Factor",
                                               varName = "trajDuration", # name of resulting statistic
                                               description = "Trajectory Duration", # description of resulting statistic
                                               type = "numeric", # type of resulting statistic: one of c("numeric", "integer", "factor", "ordFactor")
                                               levels = NA, # levels of resulting factor/ordFactor statistic
                                               owner = "Data Record"
                                )
                                ),
                                applyTo = c("EyesData") # to which object a function should be applied to:
                    )
)
# Read the UCI household power consumption data: ";"-separated, "?" marks NA.
# Declaring na.strings lets read.csv parse Global_active_power as numeric
# directly, so the as.numeric(as.character(...)) round-trip (and its
# "NAs introduced by coercion" warning) is no longer needed.
fullData <- read.csv("~/Programming/Coursera/Exploratory Data Analysis/household_power_consumption.txt", sep = ";", na.strings = "?")
# Keep only the two days of interest (dates stored as d/m/yyyy strings).
sData <- fullData[fullData$Date == "1/2/2007" | fullData$Date == "2/2/2007",]
png(filename = "plot1.png")
hist(sData$Global_active_power, breaks = 17, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red")
dev.off() | /plot1.R | no_license | trident01/ExData_Plotting1 | R | false | false | 389 | r | fullData <- read.csv("~/Programming/Coursera/Exploratory Data Analysis/household_power_consumption.txt", sep = ";")
# Duplicate record of plot1.R in this dump; `fullData <- read.csv(...)` is
# fused into the preceding metadata row, and dev.off() follows on the next
# fused line. Subset the two days of interest (d/m/yyyy date strings).
sData <- fullData[fullData$Date == "1/2/2007" | fullData$Date == "2/2/2007",]
png(filename = "plot1.png")
# as.numeric(as.character(...)) coerces the "?"-containing column to numeric.
hist(as.numeric(as.character(sData$Global_active_power)), breaks = 17, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red")
dev.off() |
# Libraries used (you may need to install one or more of these libraries;
# in that case, use install.packages).
library(caTools)
library(rpart)
library(rpart.plot)
library(ROCR)
library(dplyr)
# Load the file into a variable called bcw using read.csv.
# Use the col.names parameter to pass a vector with the column names:
# Sample.number, Thickness, Uniformity.Size, Uniformity.Shape, Adhesion,
# Epithelial.Size, Nuclei, Chromatin, Nucleoli, Mitoses, Class.
# Example: col.names = c('name1', 'name2')
# Use na.strings = '?' so question marks are read as missing values.
bcw <- read.csv('datos/bcw.csv', header = F, col.names = c(
'Sample.number', 'Thickness', 'Uniformity.Size', 'Uniformity.Shape',
'Adhesion', 'Epithelial.Size', 'Nuclei', 'Chromatin', 'Nucleoli', 'Mitoses',
'Class'), na.strings = '?')
# Overwrite the Class column with its factor version.
bcw$Class <- factor(bcw$Class)
# Rename the diagnosis column to Class (kept commented out; this dataset
# already uses 'Class').
#bcw <- bcw %>%
#  rename(Class = diagnosis)
# Use str() to inspect the structure of the data set:
str(bcw)
glimpse(bcw)
# Use table() to summarise the observations in bcw by the class variable.
# You should see 458 values with class = 2 and 241 with class = 4.
table(bcw$Class)
# Use the table generated above to draw a bar chart with barplot().
# Remember to include main, xlab and ylab for the title and axis labels.
barplot(table(bcw$Class), main = 'Distribución de las clases',
ylab = 'Observaciones', xlab = 'Clase')
# Use set.seed to fix the random seed at 4161 (reproducible split).
set.seed(4161)
# The following lines create a logical vector used to split the original
# data set in two: a training set for the model and a test set. The split
# is stratified on the Class column, keeping 70% of the observations for
# training and 30% for testing.
# Step 1: create the logical vector.
splt <- sample.split(bcw$Class , SplitRatio = 0.7)
# Step 2: build the training data frame from the TRUE entries of splt —
# only the rows for which splt is TRUE, and all columns.
bcw.entrenamiento <- bcw[splt,]
# Step 3: build the test data frame by negating splt, selecting the rows
# that were FALSE in the previous step.
bcw.prueba <- bcw[!splt,]
# Utilicen la función nrow() para demostrar que en total seguimos trabajando con
# 699 registros aunque ahora tengamos 2 datasets.
nrow(bcw.entrenamiento) + nrow(bcw.prueba)
table(bcw.entrenamiento$Class)
table(bcw.prueba$Class)
# Creen dos gráficos de barra usando barplot(), uno sobre bcw.entrenamiento y
# otro bcw.prueba para demostrar que se mantiene (o es similar) la proporción de
# clase = 2 y clase = 4 en los 2 datasets.
barplot(table(bcw.entrenamiento$Class),
main = 'Distribución de las clases en bcw.entrenamiento',
ylab = 'Observaciones', xlab = 'Clase')
barplot(table(bcw.prueba$Class),
main = 'Distribución de las clases en bcw.prueba',
ylab = 'Observaciones', xlab = 'Clase')
## Modelo
# crear el modelo (esto lo veremos en detalle luego, pero debería haber algunas
# partes de la sintaxis que ya entiendan)
modelo.arbol <- rpart(Class ~ .,
data = bcw.entrenamiento[,-which(colnames(bcw.entrenamiento) == "Sample.number")],
method = 'class')
# predecir utilizando el conjunto de datos de prueba
predicciones <- predict(modelo.arbol, newdata = bcw.prueba, type = 'prob')
predicciones
rpart.plot(modelo.arbol,
shadow.col = "gray", #Agregar sombras
main = "Clasificación cáncer de mama \n(Arbol de decisión)\n")
## Evaluacion
# Utilicen la función table() para comparar el resultado de las predicciones con
# el valor de la columna Class en el conjunto de datos de prueba
# ejemplo: table(vector1, vector2)
# el resultado les va a decir cuántas observaciones eran realmente 2 y fueron
# clasificadas como 2, y cuántas eran 4 y fueron clasificadas como 4
# también les va a decir cuántas eran 2 y fueron clasificadas como 4, y cuáles
# eran 4 y fueron clasificadas como 2
predicciones <- predict(modelo.arbol, newdata = bcw.prueba, type = 'class')
data <- table(bcw.prueba$Class, predicciones)
# Las filas son los reales y las columnas son los predecidos.
print(data)
## Prediccion ROC
prediccionesROC = prediction(c(predicciones), c(bcw.prueba[,'Class']))
as.numeric(performance(prediccionesROC, "auc")@y.values)
plot(performance(prediccionesROC, "tpr", "fpr"),
colorize = T,
print.cutoffs.at = seq(0,1,by = 0.1),
text.adj = c(-0.2,1.7),
main = 'Curva ROC del modelo')
| /Clase5/arbol_de_decision.R | no_license | zamorraf/clases | R | false | false | 4,986 | r | #librerías utiliadas (puede que necesiten instalar una o más de estas librerías,
# en ese caso, utilicen install.packages)
library(caTools)
library(rpart)
library(rpart.plot)
library(ROCR)
library(dplyr)
#cargue el archivo a una variable que se llame bcw usando la función read.csv
#usen el parámetro col.names para pasarle un vector con los nombres de las
#columnas. nombres para las columnas: Sample.number, Thickness, Uniformity.Size,
#Uniformity.Shape, Adhesion, Epithelial.Size, Nuclei, Chromatin, Nucleoli,
#Mitoses, Class
#ejemplo: col.names = c('nombre1', 'nombre2')
#usen el parámetro na.strings = '?' para que interprete los signos de pregunta
#como valores faltantes
bcw <- read.csv('datos/bcw.csv', header = F, col.names = c(
'Sample.number', 'Thickness', 'Uniformity.Size', 'Uniformity.Shape',
'Adhesion', 'Epithelial.Size', 'Nuclei', 'Chromatin', 'Nucleoli', 'Mitoses',
'Class'), na.strings = '?')
#sobreescriban la columna Class con el factor de esa columna
bcw$Class <- factor(bcw$Class)
# Renmbrar la columna diagnosis por Class
#bcw <- bcw %>%
# rename(Class = diagnosis)
#Utilice la función str() para ver la estructura del conjunto de datos:
str(bcw)
glimpse(bcw)
# utilicen la función table() para generar un resumen de las observaciones en
# bcw por la variable clase. Deberían ver 458 valores con clase = 2 y 241 con
# clase = 4
table(bcw$Class)
# utilicen la tabla generada en el paso anterior para generar un gráfico de
# barras usando la función barplot(). Recuerden incluir los parámetros main,
# xlab y ylab para agregar el título y las etiquetas.
barplot(table(bcw$Class), main = 'Distribución de las clases',
ylab = 'Observaciones', xlab = 'Clase')
#usen la función set.seed para establecer la semilla con el valor 4161
set.seed(4161)
# las siguientes líneas de código van a crear un vector de valores lógicos este
# vector lo vamos a utilizar para dividir nuestro conjunto de datos original en
# dos: uno de entrenamiento para nuestro modelo y uno de prueba. la división se
# va a hacer con respecto a la columna Class, y vamos a dejar 70% de las
# observaciones en el de entrenamiento y 30% en el de prueba.
#paso 1: crear el vector lógico
splt <- sample.split(bcw$Class , SplitRatio = 0.7)
# paso 2: crear el data frame de entrenamiento usando los valores TRUE del
# vector splt solo las observaciones para las cuales el vector splt es
# verdadero, y todas las columnas.
bcw.entrenamiento <- bcw[splt,]
# paso 3: crear el data frame de prueba negando los valores de splt, para usar
# las observaciones que en el paso anterior eran falsas
bcw.prueba <- bcw[!splt,]
# Utilicen la función nrow() para demostrar que en total seguimos trabajando con
# 699 registros aunque ahora tengamos 2 datasets.
nrow(bcw.entrenamiento) + nrow(bcw.prueba)
table(bcw.entrenamiento$Class)
table(bcw.prueba$Class)
# Creen dos gráficos de barra usando barplot(), uno sobre bcw.entrenamiento y
# otro bcw.prueba para demostrar que se mantiene (o es similar) la proporción de
# clase = 2 y clase = 4 en los 2 datasets.
barplot(table(bcw.entrenamiento$Class),
main = 'Distribución de las clases en bcw.entrenamiento',
ylab = 'Observaciones', xlab = 'Clase')
barplot(table(bcw.prueba$Class),
main = 'Distribución de las clases en bcw.prueba',
ylab = 'Observaciones', xlab = 'Clase')
## Modelo
# crear el modelo (esto lo veremos en detalle luego, pero debería haber algunas
# partes de la sintaxis que ya entiendan)
modelo.arbol <- rpart(Class ~ .,
data = bcw.entrenamiento[,-which(colnames(bcw.entrenamiento) == "Sample.number")],
method = 'class')
# predecir utilizando el conjunto de datos de prueba
predicciones <- predict(modelo.arbol, newdata = bcw.prueba, type = 'prob')
predicciones
rpart.plot(modelo.arbol,
shadow.col = "gray", #Agregar sombras
main = "Clasificación cáncer de mama \n(Arbol de decisión)\n")
## Evaluacion
# Utilicen la función table() para comparar el resultado de las predicciones con
# el valor de la columna Class en el conjunto de datos de prueba
# ejemplo: table(vector1, vector2)
# el resultado les va a decir cuántas observaciones eran realmente 2 y fueron
# clasificadas como 2, y cuántas eran 4 y fueron clasificadas como 4
# también les va a decir cuántas eran 2 y fueron clasificadas como 4, y cuáles
# eran 4 y fueron clasificadas como 2
predicciones <- predict(modelo.arbol, newdata = bcw.prueba, type = 'class')
data <- table(bcw.prueba$Class, predicciones)
# Las filas son los reales y las columnas son los predecidos.
print(data)
## Prediccion ROC
prediccionesROC = prediction(c(predicciones), c(bcw.prueba[,'Class']))
as.numeric(performance(prediccionesROC, "auc")@y.values)
plot(performance(prediccionesROC, "tpr", "fpr"),
colorize = T,
print.cutoffs.at = seq(0,1,by = 0.1),
text.adj = c(-0.2,1.7),
main = 'Curva ROC del modelo')
|
#' Draw random samples from a user-specified probability distribution
#'
#' @param n number of samples to draw
#' @param pdf character string naming the distribution; one of "pois1",
#'   "poisson", "pois1_ln", "poisson_ln", "normal", "unif.disc",
#'   "unif.cont", or "multinom"
#' @param cur.par numeric vector of distribution parameters; only the first
#'   entry is used for single-parameter distributions (for "multinom" the
#'   whole vector gives the category probabilities)
#' @param RE random effects on the log scale; used only by the "_ln" variants
#' @return a vector of n draws from the requested distribution (invisibly
#'   NULL when pdf does not match a supported name)
#' @export
#' @keywords probability density
#' @author Paul B. Conn
switch_sample<-function(n,pdf,cur.par,RE){
  # Dispatch on the distribution name; each branch mirrors one supported
  # sampling model.  Falling off the end (unknown pdf) yields invisible NULL,
  # matching the behavior of an unmatched switch().
  if(pdf=="pois1"){
    rpois(n,cur.par[1])+1                        # Poisson shifted by 1 (support starts at 1)
  } else if(pdf=="poisson"){
    rpois(n,cur.par[1])
  } else if(pdf=="pois1_ln"){
    rpois(n,exp(cur.par[1]+cur.par[2]*RE))+1     # shifted Poisson with lognormal random effect
  } else if(pdf=="poisson_ln"){
    rpois(n,exp(cur.par[1]+cur.par[2]*RE))
  } else if(pdf=="normal"){
    rnorm(n,cur.par[1],cur.par[2])
  } else if(pdf=="unif.disc"){
    sample(cur.par[1]:cur.par[2],n,replace=TRUE) # discrete uniform on an integer range
  } else if(pdf=="unif.cont"){
    runif(n,cur.par[1],cur.par[2])
  } else if(pdf=="multinom"){
    sample(c(1:length(cur.par)),n,replace=TRUE,prob=cur.par)
  }
}
#' Draw starting values from hyperpriors of a specified probability density
#' function; note that initial values for sigma of lognormal random effects
#' are fixed to a small value (0.05) to prevent numerical errors
#' @param pdf probability density function ("pois1", "poisson", "pois1_ln",
#'   "poisson_ln", or "multinom")
#' @param cur.par a vector giving hyperprior parameters; the first two are
#'   used for the gamma/normal hyperpriors, while the full vector supplies
#'   Dirichlet parameters for "multinom"
#' @return a random draw from the requested hyperprior (invisibly NULL for an
#'   unrecognized pdf)
#' @export
#' @keywords probability density
#' @author Paul B. Conn
switch_sample_prior<-function(pdf,cur.par){
  switch(pdf,
         pois1=rgamma(1,cur.par[1],cur.par[2]),
         poisson=rgamma(1,cur.par[1],cur.par[2]),
         # sigma of the lognormal RE is fixed at 0.05 (see roxygen note above)
         pois1_ln=c(rnorm(1,cur.par[1],cur.par[2]),0.05),
         poisson_ln=c(rnorm(1,cur.par[1],cur.par[2]),0.05),
         multinom={
           # 'mc2d' is only needed for the Dirichlet draw, so check for it
           # lazily here instead of calling require() unconditionally (whose
           # return value was previously ignored); fail with a clear message
           # rather than a cryptic "could not find function" error.
           if(!requireNamespace("mc2d",quietly=TRUE))
             stop("Package 'mc2d' is required for pdf='multinom'",call.=FALSE)
           mc2d::rdirichlet(1,cur.par)
         }
  )
}
#' Evaluate the joint log density of a sample under one of several distributions
#'
#' @param x values to be evaluated
#' @param pdf probability density function ("pois1", "poisson", "pois1_ln",
#'   "poisson_ln", "normal", or "multinom")
#' @param cur.par a vector giving parameters for the specified distribution;
#'   only the first is used for single parameter distributions (for
#'   "multinom" the vector holds category probabilities)
#' @param RE random effects (used only by the "_ln" variants)
#' @return total log likelihood of the points in x (invisibly NULL for an
#'   unrecognized pdf)
#' @export
#' @keywords probability density
#' @author Paul B. Conn
switch_pdf<-function(x,pdf,cur.par,RE){
  # Guard-return chain: each branch sums the log density of every element of
  # x under the requested model.
  if(pdf=="pois1"){
    return(sum(dpois(x-1,cur.par[1],log=1)))   # shifted Poisson: support starts at 1
  }
  if(pdf=="poisson"){
    return(sum(dpois(x,cur.par[1],log=1)))
  }
  if(pdf=="pois1_ln"){
    return(sum(dpois(x-1,exp(cur.par[1]+cur.par[2]*RE),log=1)))
  }
  if(pdf=="poisson_ln"){
    return(sum(dpois(x,exp(cur.par[1]+cur.par[2]*RE),log=1)))
  }
  if(pdf=="normal"){
    return(sum(dnorm(x,cur.par[1],cur.par[2],log=1)))
  }
  if(pdf=="multinom"){
    return(sum(log(cur.par[x])))               # x holds category indices
  }
}
#' Stack data: convert a three-dimensional data-augmentation array into a
#' two-dimensional data frame containing only observations of "existing"
#' animals (rows beyond Obs.transect[i] for each transect are excluded).
#' @param Data three-d dataset (transect x observation slot x covariate)
#' @param Obs.transect current number of observations of animals in each transect (vector)
#' @param n.transects number of transects
#' @param stacked.names column names for new stacked dataset
#' @param factor.ind a named vector of indicator variables (TRUE/1 = factor/categorical variable, FALSE/0 = continuous variable)
#' @return a stacked dataset with factor columns re-established
#' @export
#' @keywords stack data
#' @author Paul B. Conn
stack_data<-function(Data,Obs.transect,n.transects,stacked.names,factor.ind){
  #convert from "sparse" 3-d data augmentation array to a rich 2-d dataframe for updating beta parameters
  if(n.transects==1)Stacked=Data
  else{
    # Seed the data frame with two dummy rows taken from transect 1 (this
    # fixes the column structure for subsequent rbind calls); the two seed
    # rows are removed again below.
    Stacked=as.data.frame(Data[1,1:2,])
    for(itrans in 1:n.transects){
      # Append only the rows actually observed on this transect.
      if(Obs.transect[itrans]>0)Stacked=rbind(Stacked,Data[itrans,1:Obs.transect[itrans],])
    }
    Stacked=Stacked[-c(1,2),]  # drop the two seed rows
  }
  colnames(Stacked)=stacked.names #gotta reestablish variable type since 3-d array doesn't hold it
  factor.cols=which(factor.ind[stacked.names]==TRUE)
  if(length(factor.cols)>0){
    for(icol in 1:length(factor.cols)){
      # Restore factor type for categorical covariates (lost when the values
      # were stored in the numeric 3-d array).
      Stacked[,factor.cols[icol]]=as.factor(Stacked[,factor.cols[icol]])
    }
  }
  Stacked
}
#' Stack data for misID updates: convert a four-dimensional data-augmentation
#' array into a two-dimensional matrix containing only groups observed at
#' least once (one row per observer record).
#' @param Data 4-d dataset (species x transect x observer-record x covariate)
#' @param G.obs matrix giving the total number of groups observed at least once by species and transect
#' @param g.tot.obs total number of observations for animals seen at least once
#' @param n.Observers vector giving number of observers per transect
#' @param n.transects number of transects
#' @param n.species number of species
#' @param stacked.names column names for new stacked dataset (not used inside this function)
#' @param factor.ind a vector of indicator variables (not used inside this function)
#' @return a stacked dataset (in matrix form)
#' @export
#' @keywords stack data
#' @author Paul B. Conn
stack_data_misID<-function(Data,G.obs,g.tot.obs,n.Observers,n.transects,n.species,stacked.names,factor.ind){
  #convert from "sparse" 4-d data augmentation array to a rich 2-d dataframe for updating misID parameters
  if(n.transects==1 & n.species==1)Stacked=Data[1,1,,]
  else{
    # G.tot.obs[isp, itrans] = observer records for that species/transect
    # (observed groups times the number of observers on the transect).
    G.tot.obs=G.obs
    Stacked=matrix(0,g.tot.obs,length(Data[1,1,1,]))  # preallocate output (rows x covariates)
    ipl=1  # row pointer: next free row in Stacked
    for(isp in 1:n.species){
      G.tot.obs[isp,]=G.obs[isp,]*n.Observers
      for(itrans in 1:n.transects){
        # Copy this species/transect block of observer records (if any),
        # then advance the row pointer past the block.
        if(G.obs[isp,itrans]>0)Stacked[ipl:(ipl+G.tot.obs[isp,itrans]-1),]=Data[isp,itrans,1:G.tot.obs[isp,itrans],]
        ipl=ipl+G.tot.obs[isp,itrans]
      }
    }
  }
  Stacked
}
#' function to produce a design matrix given a dataset and user-specified formula object
#' @param Cur.dat current dataset (matrix or data frame)
#' @param stacked.names column names for current dataset
#' @param factor.ind a named vector/list of indicators (TRUE = factor/categorical variable, FALSE = continuous variable)
#' @param Det.formula a formula object
#' @param Levels A list giving the levels for each factor variable; element
#'   names must match the factor column names
#' @return a design matrix
#' @export
#' @keywords model matrix
#' @author Paul B. Conn
get_mod_matrix<-function(Cur.dat,stacked.names,factor.ind,Det.formula,Levels){
  Cur.dat=as.data.frame(Cur.dat)
  colnames(Cur.dat)=stacked.names
  factor.cols=which(factor.ind[stacked.names]==TRUE)
  if(length(factor.cols)>0){
    for(icol in 1:length(factor.cols)){
      # Convert the column to a factor with the full set of levels so the
      # design matrix has a consistent set of dummy columns even when the
      # current data do not contain every level.  (Direct list indexing
      # replaces the original fragile eval(parse(...)) construction.)
      cur.name=names(factor.cols)[icol]
      Cur.dat[,factor.cols[icol]]=factor(Cur.dat[,factor.cols[icol]],levels=Levels[[cur.name]])
    }
  }
  DM=model.matrix(Det.formula,data=Cur.dat)
  DM
}
#' generate initial values for MCMC chain if not already specified by user
#' @param DM.hab design matrix for habitat model
#' @param DM.det design matrix for detection model
#' @param G.transect a vector of the number of groups of animals in area covered by each transect
#' @param Area.trans a vector giving the proportion of a strata covered by each transect
#' @param Area.hab a vector of the relative areas of each strata
#' @param Mapping a vector mapping each transect to it's associated strata
#' @param point.ind is point independence assumed (TRUE/FALSE)
#' @param spat.ind is spatial independence assumed? (TRUE/FALSE)
#' @param grp.mean pois1 parameter for group size
#' @return a list of initial parameter values
#' @export
#' @keywords initial values, mcmc
#' @author Paul B. Conn
generate_inits<-function(DM.hab,DM.det,G.transect,Area.trans,Area.hab,Mapping,point.ind,spat.ind,grp.mean){
  # Random starting values: detection parameters ~ N(0,1); habitat betas 0;
  # cross-observer correlation nonzero only under point independence; Nu
  # (log intensity) centered on a crude density estimate with noise.
  Par=list(det=rnorm(ncol(DM.det),0,1),hab=rep(0,ncol(DM.hab)),cor=ifelse(point.ind,runif(1,0,.8),0),
      Nu=log(max(G.transect)/mean(Area.trans)*exp(rnorm(length(Area.hab)))),Eta=rnorm(length(Area.hab)),
      tau.eta=runif(1,0.5,2),tau.nu=runif(1,0.5,2))
  # NOTE(review): the intercept below is set on the natural (not log) scale,
  # whereas generate_inits_misID wraps the analogous quantity in log() --
  # confirm which scale the habitat linear predictor expects.
  Par$hab[1]=mean(G.transect)/(mean(Area.trans)*mean(Area.hab))*exp(rnorm(1,0,1))
  # Initial number of groups per strata from the log intensity; total
  # abundance adds extra individuals (extra members per group ~ Poisson).
  Par$G=round(exp(Par$Nu)*Area.hab*exp(rnorm(length(Par$Nu))))
  Par$N=Par$G+rpois(length(Par$G),grp.mean*Par$G)
  if(spat.ind==1)Par$Eta=0*Par$Eta  # zero out spatial REs under spatial independence
  Par
}
#' generate initial values for misID model if not already specified by user
#' @param DM.hab.pois a list of design matrices for the Poisson habitat model (elements are named sp1,sp2, etc.)
#' @param DM.hab.bern If a hurdle model, a list of design matrices for the Bernoulli habitat model (elements are named sp1,sp2, etc.) (NULL if not hurdle)
#' @param DM.det design matrix for detection model
#' @param N.hab.pois.par vector giving number of parameters in the Poisson habitat model for each species
#' @param N.hab.bern.par vector giving number of parameters in the Bernoulli habitat model for each species (NULL if not hurdle)
#' @param G.transect a matrix of the number of groups of animals in area covered by each transect; each row gives a separate species
#' @param Area.trans a vector giving the proportion of a strata covered by each transect
#' @param Area.hab a vector of the relative areas of each strata
#' @param Mapping a vector mapping each transect to it's associated strata
#' @param point.ind is point independence assumed (TRUE/FALSE)
#' @param spat.ind is spatial independence assumed? (TRUE/FALSE)
#' @param grp.mean a vector giving the pois1 parameter for group size (one entry for each species)
#' @param misID if TRUE, indicates that misidentification is incorporated into modeling
#' @param misID.mat a matrix specifying which elements of the misID matrix are linked to model equations
#' @param N.par.misID a vector giving the number of parameters for each misID model (in multinomial logit space)
#' @return a list of initial parameter values
#' @export
#' @keywords initial values, mcmc
#' @author Paul B. Conn
generate_inits_misID<-function(DM.hab.pois,DM.hab.bern,DM.det,N.hab.pois.par,N.hab.bern.par,G.transect,Area.trans,Area.hab,Mapping,point.ind,spat.ind,grp.mean,misID,misID.mat,N.par.misID){
  i.hurdle=1-is.null(DM.hab.bern)  # 1 if a Bernoulli (hurdle) component is present
  n.species=nrow(G.transect)
  n.cells=length(Area.hab)
  MisID=NULL  # bug fix: define for misID==FALSE (was previously left undefined, causing an error below)
  if(misID){
    n.misID.eq=max(misID.mat)
    MisID=vector("list",n.misID.eq)
    for(itmp in 1:n.misID.eq)MisID[[itmp]]=runif(N.par.misID[itmp],-.5,.5)
    diag.mods=diag(misID.mat)
    diag.mods=diag.mods[which(diag.mods>0)]
    if(length(diag.mods)>0){
      for(itmp in 1:length(diag.mods))MisID[[diag.mods[itmp]]][1]=MisID[[diag.mods[itmp]]][1]+2 #ensure that the highest probability is for a non-misID
    }
  }
  hab.pois=matrix(0,n.species,max(N.hab.pois.par))
  hab.bern=NULL
  tau.eta.bern=NULL
  Eta.bern=NULL
  if(i.hurdle==1){
    hab.bern=matrix(0,n.species,max(N.hab.bern.par))
    tau.eta.bern=runif(n.species,0.5,2)
    Eta.bern=matrix(rnorm(n.species*n.cells),n.species,n.cells)
  }
  # Log intensity per species/cell, centered on a crude density estimate.
  Nu=matrix(0,n.species,n.cells)
  for(isp in 1:n.species){
    Nu[isp,]=log(max(G.transect[isp,])/mean(Area.trans)*exp(rnorm(length(Area.hab),0,0.1)))
  }
  Par=list(det=rnorm(ncol(DM.det),0,1),hab.pois=hab.pois,hab.bern=hab.bern,cor=ifelse(point.ind,runif(1,0,.8),0),
      Nu=Nu,Eta.pois=matrix(rnorm(n.species*n.cells),n.species,n.cells),Eta.bern=Eta.bern,
      tau.eta.pois=runif(n.species,0.5,2),tau.eta.bern=tau.eta.bern,tau.nu=runif(n.species,0.5,2),MisID=MisID)
  Par$hab.pois[,1]=log(apply(G.transect,1,'mean')/(mean(Area.trans)*mean(Area.hab))*exp(rnorm(n.species,0,1)))
  # Expected group counts per cell: multiply each column (cell) of exp(Nu) by
  # that cell's area.  Bug fix: use sweep() so Area.hab is applied across
  # columns -- the previous 'exp(Par$Nu)*Area.hab' recycled the area vector
  # down the rows, which is wrong whenever n.species > 1.
  Par$G=round(sweep(exp(Par$Nu),2,Area.hab,'*')*exp(rnorm(length(Par$Nu))))
  Par$N=0*Par$G  # bug fix: initialize N (same dims as G) before filling row by row
  for(isp in 1:n.species)Par$N[isp,]=Par$G[isp,]+rpois(n.cells,grp.mean[isp]*Par$G[isp,])
  if(spat.ind==1){
    Par$Eta.bern=0*Par$Eta.bern
    Par$Eta.pois=0*Par$Eta.pois
  }
  Par
}
#' Fill confusion array - one confusion matrix for each individual (DEPRECATED)
#' @param Confuse An 3-dimensional array, with dimensions (# of individuals, # of rows in misID.mat, # of cols of misID.mat)
#' @param Cov Data frame including all covariates for the misclassification model (individuals are on rows); defaults to an intercept-only design when NULL
#' @param Beta A list where each entry is a vector giving the parameters of the misID model
#' @param n.indiv Integer giving the number of individuals
#' @param misID.mat With true state on rows and assigned state on column, each positive entry provides an index to misID.models (i.e. what model to assume on multinomial logit space); a 0 indicates an impossible assigment; a negative number designates which column is to be obtained via subtraction
#' @param misID.formulas A formula vector providing linear model-type formulas for each positive value of misID.mat. If the same model is used in multiple columns it is assumed that all fixed effects (except the intercept) are shared
#' @param symm if TRUE, symmetric classification probabilities are applied (e.g. pi^12=pi^21)
#' @return A filled version of Confuse
#' @export
#' @author Paul B. Conn
get_confusion_array<-function(Confuse,Cov=NULL,Beta,n.indiv,misID.mat,misID.formulas,symm=TRUE){
  if(is.null(Cov)==1)Cov=data.frame(matrix(1,n.indiv,1))  # intercept-only covariates by default
  DM=vector("list",max(misID.mat))
  Pi=DM
  # ind.mat[i,j] holds the linear index of cell (i,j) of misID.mat, used to
  # address the per-cell lists DM and Pi.
  ind.mat=matrix(c(1:length(misID.mat)),nrow(misID.mat),ncol(misID.mat))
  # Unnormalized assignment weights per cell: 0 for impossible assignments,
  # 1 for the "reference" (negative) cell, exp(X*beta) for modeled cells.
  for(ipar in 1:length(misID.mat)){
    if(misID.mat[ipar]==0)Pi[[ipar]]=rep(0,n.indiv)
    if(misID.mat[ipar]<0)Pi[[ipar]]=rep(1,n.indiv)
    if(misID.mat[ipar]>0){
      DM[[ipar]]=model.matrix(misID.formulas[[misID.mat[ipar]]],data=Cov)
      Pi[[ipar]]=exp(DM[[ipar]]%*%Beta[[misID.mat[ipar]]])
    }
  }
  if(symm==TRUE){
    # NOTE(review): this branch hard-codes indices for a two-true-state
    # misID.mat (rows 1 and 2, with column 3 used as the reference for row
    # 2); it does not generalize to other dimensions.  The function is
    # DEPRECATED -- see get_confusion_mat() for the general version.
    for(iind in 1:n.indiv){
      for(icol in 1:ncol(misID.mat)){
        Confuse[iind,1,icol]=Pi[[ind.mat[1,icol]]][iind]
      }
      Confuse[iind,1,]=Confuse[iind,1,]/sum(Confuse[iind,1,])  # normalize row 1 to probabilities
      Pi[[ind.mat[2,3]]]=rep(1,n.indiv)
      # Back-solve the row-2 weight so that pi^21 matches pi^12 (symmetry).
      # NOTE(review): Pi[[ind.mat[2,2]]] enters without an [iind] subscript
      # here -- possibly a bug in this deprecated code; confirm before use.
      Pi[[ind.mat[2,1]]]=(Confuse[iind,1,2]+Confuse[iind,1,2]*Pi[[ind.mat[2,2]]])/(1-Confuse[iind,1,2])
      for(icol in 1:ncol(misID.mat))Confuse[iind,2,icol]=Pi[[ind.mat[2,icol]]][iind]
      Confuse[iind,2,]=Confuse[iind,2,]/sum(Confuse[iind,2,])
    }
  }
  else{
    # General (non-symmetric) case: each row of an individual's confusion
    # matrix is the normalized vector of assignment weights for that true state.
    for(iind in 1:n.indiv){
      for(irow in 1:nrow(misID.mat)){
        for(icol in 1:ncol(misID.mat))Confuse[iind,irow,icol]=Pi[[ind.mat[irow,icol]]][iind]
        Confuse[iind,irow,]=Confuse[iind,irow,]/sum(Confuse[iind,irow,])
      }
    }
  }
  Confuse
}
#' Fill a list with confusion matrices for each record
#' @param Cur.dat Matrix giving data (records and covariates) - multiple rows can be given (e.g. reflecting different observers)
#' @param stacked.names A character vector giving column names for the data
#' @param factor.ind An integer vector holding whether each column of data is to be treated as numeric or factor
#' @param Levels A list, each entry of which corresponds to a column name for factor variables and gives the possible levels of those factors
#' @param Beta A list where each entry is a vector giving the parameters of the misID model
#' @param misID.mat With true state on rows and assigned state on column, each positive entry provides an index to misID.models (i.e. what model to assume on multinomial logit space); a 0 indicates an impossible assigment; a negative number designates which column is to be obtained via subtraction
#' @param misID.models A formula vector providing linear model-type formulas for each positive value of misID.mat. If the same model is used in multiple columns it is assumed that all fixed effects (except the intercept) are shared
#' @param misID.symm if TRUE, symmetric classification probabilities are applied (e.g. pi^12=pi^21)
#' @return A list of confusion matrices, one for each row in Cur.dat
#' @export
#' @author Paul B. Conn
get_confusion_mat<-function(Cur.dat,Beta,misID.mat,misID.models,misID.symm=TRUE,stacked.names,factor.ind,Levels){
  Pi=vector("list",length(misID.mat))
  n.obs=nrow(Cur.dat)
  # ind.mat[i,j] holds the linear index of cell (i,j) of misID.mat, used to
  # address the per-cell weight list Pi.
  ind.mat=matrix(c(1:length(misID.mat)),nrow(misID.mat),ncol(misID.mat))
  Confuse=vector("list",n.obs)
  # Unnormalized assignment weights per cell: 0 for impossible assignments,
  # 1 for the "reference" (negative) cell, exp(X*beta) for modeled cells.
  for(ipar in 1:length(misID.mat)){
    if(misID.mat[ipar]==0)Pi[[ipar]]=rep(0,n.obs)
    if(misID.mat[ipar]<0)Pi[[ipar]]=rep(1,n.obs)
    if(misID.mat[ipar]>0){
      DM=get_mod_matrix(Cur.dat=Cur.dat,stacked.names=stacked.names,factor.ind=factor.ind,Det.formula=misID.models[[misID.mat[ipar]]],Levels=Levels)
      Pi[[ipar]]=exp(DM%*%Beta[[misID.mat[ipar]]])
    }
  }
  if(misID.symm==TRUE){
    # Below-diagonal weights are determined by the symmetry constraint, so
    # zero them out before filling.
    for(irow in 2:nrow(misID.mat)){
      for(icol in 1:(irow-1))Pi[[ind.mat[irow,icol]]]=rep(0,n.obs) #initialize to zero for entries set with symmetry constraint
    }
    for(iobs in 1:n.obs){
      Confuse[[iobs]]=matrix(0,nrow(misID.mat),ncol(misID.mat))
      #step one, calculate assignment probabilities for first row of confusion array
      for(icol in 1:ncol(misID.mat))Confuse[[iobs]][1,icol]=Pi[[ind.mat[1,icol]]][iobs]
      Confuse[[iobs]][1,]=Confuse[[iobs]][1,]/sum(Confuse[[iobs]][1,])
      #now, for remaining rows, substitute in confusion values from previous rows and calculate Pi values
      for(irow in 2:nrow(misID.mat)){
        sum.pi=0
        for(icol in 1:ncol(misID.mat))sum.pi=sum.pi+Pi[[ind.mat[irow,icol]]][iobs]
        # Copy the (already normalized) mirrored entries from previous rows...
        for(icol in 1:(irow-1))Confuse[[iobs]][irow,icol]=Confuse[[iobs]][icol,irow]
        sum.Conf=sum(Confuse[[iobs]][irow,])
        # ...then back-solve the corresponding weights so normalization of the
        # remaining (modeled) weights preserves the mirrored probabilities.
        for(icol in 1:(irow-1))Pi[[ind.mat[irow,icol]]][iobs]=Confuse[[iobs]][icol,irow]*sum.pi/(1-sum.Conf)
        for(icol in 1:ncol(misID.mat))Confuse[[iobs]][irow,icol]=Pi[[ind.mat[irow,icol]]][iobs]
        Confuse[[iobs]][irow,]=Confuse[[iobs]][irow,]/sum(Confuse[[iobs]][irow,])
      }
    }
  }
  else{
    for(iobs in 1:n.obs){
      # Bug fix: the original called matrix(0,dim(misID.mat)), which passes
      # the whole dimension vector as 'nrow'; build the matrix with explicit
      # nrow/ncol instead.
      Confuse[[iobs]]=matrix(0,nrow(misID.mat),ncol(misID.mat))
      for(irow in 1:nrow(misID.mat)){
        for(icol in 1:ncol(misID.mat))Confuse[[iobs]][irow,icol]=Pi[[ind.mat[irow,icol]]][iobs]
        Confuse[[iobs]][irow,]=Confuse[[iobs]][irow,]/sum(Confuse[[iobs]][irow,])
      }
    }
  }
  Confuse
}
#' compute the first derivative of log_lambda likelihood component for Langevin-Hastings
#' @param Mu expected value for all cells
#' @param Nu current observed values (all cells)
#' @param Sampled Vector giving the cell identities for all sampled cells
#' @param Area Proportional area of each sampled cell that is covered by one or more transects
#' @param N number of groups in each transect
#' @param var.nu variance of the overdispersion process
#' @return a vector of gradient values (one entry per sampled cell)
#' @export
#' @keywords gradient, Langevin-Hastings
#' @author Paul B. Conn
log_lambda_gradient<-function(Mu,Nu,Sampled,Area,N,var.nu){
  nu.samp <- Nu[Sampled]
  # Gaussian pull toward the regression mean plus the Poisson score term.
  (Mu[Sampled] - nu.samp) / var.nu + N - Area * exp(nu.samp)
}
#' compute the likelihood for nu parameters
#' @param Log.lambda Log of poisson intensities for total areas sampled in each sampled strata
#' @param DM the design matrix
#' @param Beta linear predictor parameters for the log of abundance intensity
#' @param Eta a vector of spatial random effects (default 0 = none)
#' @param SD standard deviation of the overdispersion process
#' @param N a vector giving the current iteration's number of groups in the area
#' @param Sampled Index for which cells were actually sampled
#' @param Area Total area sampled in each sampled cell
#' @return the log likelihood associated with the data and the current set of parameters
#' @export
#' @keywords log likelihood
#' @author Paul B. Conn
log_lambda_log_likelihood<-function(Log.lambda,DM,Beta,Eta=0,SD,N,Sampled,Area){
  # Expected log intensity for the sampled cells under the current
  # regression parameters and spatial random effects.
  mu.sampled <- (DM %*% Beta + Eta)[Sampled]
  # Gaussian overdispersion component...
  log.lik <- sum(dnorm(Log.lambda, mu.sampled, SD, log = 1))
  # ...plus the Poisson kernel for the group counts.
  log.lik + sum(N * Log.lambda - Area * exp(Log.lambda))
}
#' SIMULATE AN ICAR PROCESS
#' @param Q Precision matrix for the ICAR process (singular; its null space
#'   corresponds to the improper directions of the prior)
#' @return Spatial random effects: a vector of length nrow(Q), centered so
#'   that the null-space components (intercept, and linear trend when two
#'   eigenvalues are numerically zero) are removed
#' @export
#' @keywords ICAR, simulation
#' @author Devin Johnson
rrw <- function(Q){
  v <- eigen(Q, TRUE)  # symmetric eigendecomposition of the precision matrix
  # Inverse square roots of the positive eigenvalues; eigenvalues below the
  # numerical tolerance (the improper/null directions) contribute nothing.
  val.inv <- sqrt(ifelse(v$values>sqrt(.Machine$double.eps), 1/v$values, 0))
  P <- v$vectors
  # Draw from N(0, Q^-) using the eigen-based pseudo-inverse square root.
  sim <- P%*%diag(val.inv)%*%rnorm(dim(Q)[1], 0, 1)
  X <- rep(1,length(sim))
  # With two (numerically) zero eigenvalues (e.g. an RW2-type structure),
  # also project out a linear trend; otherwise just center on the constant.
  if(sum(val.inv==0)==2) X <- cbind(X, 1:length(sim))
  sim <- sim-X%*%solve(crossprod(X), crossprod(X,sim))  # least-squares projection removing null-space components
  return(sim)
}
#' Produce an adjacency matrix for a vector (1-D chain: each position is a
#' neighbor of the positions immediately before and after it)
#' @param x length of vector
#' @return x by x adjacency matrix with ones on the sub- and super-diagonal
#' @export
#' @keywords adjacency
#' @author Paul Conn
linear_adj <- function(x){
  Adj <- matrix(0, x, x)
  if (x > 1) {
    pos <- seq_len(x - 1)
    Adj[cbind(pos, pos + 1)] <- 1  # super-diagonal
    Adj[cbind(pos + 1, pos)] <- 1  # sub-diagonal
  }
  Adj
}
#' Produce an adjacency matrix for a square grid (queen's-move neighborhoods)
#' @param x number of cells on side of grid
#' @return x^2 by x^2 adjacency matrix; cells are indexed column-major
#' @export
#' @keywords adjacency
#' @author Paul Conn
square_adj <- function(x){
  Ind <- matrix(1:x^2, x, x)   # column-major cell indices
  Adj <- matrix(0, x^2, x^2)
  # Mark the (up to) eight queen's-move neighbors of every cell, clipping
  # offsets that would fall outside the grid; this replaces the original
  # nine-way corner/edge/interior case analysis with identical output.
  for (i in 1:x) {
    for (j in 1:x) {
      for (di in -1:1) {
        for (dj in -1:1) {
          if (di == 0 && dj == 0) next
          ni <- i + di
          nj <- j + dj
          if (ni >= 1 && ni <= x && nj >= 1 && nj <= x) {
            Adj[Ind[i, j], Ind[ni, nj]] <- 1
          }
        }
      }
    }
  }
  Adj
}
#' Produce an RW1 adjacency matrix for a rectangular grid for use with areal
#' spatial models (queen's-move neighborhoods)
#' @param x number of cells on horizontal side of grid
#' @param y number of cells on vertical side of grid
#' @param byrow If TRUE, cell indices are filled along rows (default is FALSE)
#' @return adjacency matrix
#' @export
#' @keywords adjacency
#' @author Paul Conn \email{paul.conn@@noaa.gov}
rect_adj <- function(x,y,byrow=FALSE){
  # Cell index layout, matching the original parameterization: fill a y-by-x
  # matrix (optionally by row), then transpose when byrow=TRUE so index
  # lookups walk the grid consistently.
  Ind <- matrix(1:(x*y), y, x, byrow = byrow)
  if (byrow) Ind <- t(Ind)
  n.row <- nrow(Ind)
  n.col <- ncol(Ind)
  Adj <- matrix(0, x*y, x*y)
  # Mark the (up to) eight queen's-move neighbors of every cell, clipping
  # offsets that would fall outside the grid; this replaces the original
  # nine-way corner/edge/interior case analysis with identical output.
  for (i in 1:n.row) {
    for (j in 1:n.col) {
      for (di in -1:1) {
        for (dj in -1:1) {
          if (di == 0 && dj == 0) next
          ni <- i + di
          nj <- j + dj
          if (ni >= 1 && ni <= n.row && nj >= 1 && nj <= n.col) {
            Adj[Ind[i, j], Ind[ni, nj]] <- 1
          }
        }
      }
    }
  }
  if (byrow) Adj <- t(Adj)  # no-op for a symmetric neighborhood; kept for fidelity
  Adj
}
#' Produce an RW2 Adjacency matrix for a rectangular grid for use with areal spatial models.
#' This formulation uses cofficients inspired by a thin plate spline, as described in Rue & Held, section 3.4.2
#' Here I'm outputting an adjacency matrix of 'neighbor weights' which makes Q construction for regular latices
#' easy to do when not trying to make inference about all cells (i.e., one can just
#' eliminate rows and columns associated with cells one isn't interested in and set Q=-Adj+Diag(sum(Adj))
#' @param x number of cells on horizontal side of grid
#' @param y number of cells on vertical side of grid
#' @param byrow If TRUE, cell indices are filled along rows (default is FALSE)
#' @return adjacency matrix
#' @export
#' @keywords adjacency
#' @author Paul Conn \email{paul.conn@@noaa.gov}
rect_adj_RW2 <- function(x,y,byrow=FALSE){
  cur.x=x+4 #make calculations on a larger grid and then cut off rows/columns at end
  cur.y=y+4
  Ind=matrix(c(1:(cur.x*cur.y)),cur.y,cur.x,byrow=byrow)
  if(byrow==TRUE)Ind=t(Ind)
  n.row=nrow(Ind)
  n.col=ncol(Ind)
  Adj=matrix(0,cur.x*cur.y,cur.x*cur.y)
  # Fill the thin-plate RW2 stencil for every cell of the padded grid; the
  # 2-cell pad guarantees all stencil neighbors exist, so no edge cases are
  # needed here (padded rows/columns are removed below).
  for(i in 3:(n.row-2)){
    for(j in 3:(n.col-2)){
      # orthogonal (rook-move) neighbors: weight 8
      Adj[Ind[i,j],Ind[i,j]+1]=8
      Adj[Ind[i,j],Ind[i,j]+n.row]=8
      Adj[Ind[i,j],Ind[i,j]-n.row]=8
      Adj[Ind[i,j],Ind[i,j]-1]=8
      # diagonal (bishop-move) neighbors: weight -2
      Adj[Ind[i,j],Ind[i,j]+n.row-1]=-2
      Adj[Ind[i,j],Ind[i,j]+n.row+1]=-2
      Adj[Ind[i,j],Ind[i,j]-n.row-1]=-2
      Adj[Ind[i,j],Ind[i,j]-n.row+1]=-2
      # orthogonal neighbors two cells away: weight -1
      Adj[Ind[i,j],Ind[i,j]+2]=-1
      Adj[Ind[i,j],Ind[i,j]+2*n.row]=-1
      Adj[Ind[i,j],Ind[i,j]-2]=-1
      Adj[Ind[i,j],Ind[i,j]-2*n.row]=-1
    }
  }
  #compile list of cells that need to be removed (the 2-cell pad on each side)
  I.rem=matrix(0,n.row,n.col)
  I.rem[c(1,2,n.row-1,n.row),]=1
  I.rem[,c(1,2,n.col-1,n.col)]=1
  Adj=Adj[-which(I.rem==1),-which(I.rem==1)]
  if(byrow==TRUE)Adj=t(Adj)
  return(Adj)
}
#' estimate optimal 'a' parameter for linex loss function
#' @param Pred.G Predicted group abundance (posterior draws on rows, transects on columns)
#' @param Obs.G Observed group abundance (same layout as Pred.G)
#' @param min.a Minimum value for linex 'a' parameter
#' @param max.a Maximum value for linex 'a' parameter
#' @return The optimal tuning parameter for linex loss function as determined by minimum sum of squares (the list returned by optimize(); see $minimum)
#' @export
#' @keywords linex
#' @author Paul B. Conn
calc_linex_a<-function(Pred.G,Obs.G,min.a=0.00001,max.a=1.0){
  # posterior-mean "observation" for each transect
  obs.mean=apply(Obs.G,2,mean)
  # sum of squared deviations between linex point predictions and obs.mean
  linex_ssq<-function(a,X,Y){
    lin.pred=-1/a*log(apply(exp(-a*X),2,'mean'))
    sum((Y-lin.pred)^2)
  }
  # one-dimensional minimization of the linex sum of squares over [min.a, max.a]
  optimize(f=linex_ssq,interval=c(min.a,max.a),X=Pred.G,Y=obs.mean)
}
#' plot 'observed' versus predicted values for abundance of each species at each transect
#' @param Out Output list from "mcmc_ds.R" (uses Out$Pred.N and Out$Obs.N; species x iteration x transect)
#' @return NULL
#' @export
#' @keywords diagnostics, plot
#' @author Paul B. Conn
plot_obs_pred<-function(Out){
  n.sp=dim(Out$Pred.N)[1]
  par(mfrow=c(n.sp,1))  # one panel per species
  for(sp in 1:n.sp){
    Obs.cur=Out$Obs.N[sp,,]
    Pred.cur=Out$Pred.N[sp,,]
    # tune the linex 'a' parameter for this species
    a.linex=calc_linex_a(Pred.cur,Obs.cur)$minimum
    obs.mean=apply(Obs.cur,2,'mean')
    pred.mean=apply(Pred.cur,2,'mean')
    axis.max=max(c(obs.mean,pred.mean))
    # posterior means (circles)
    plot(obs.mean,pred.mean,pch=1,xlim=c(0,axis.max),ylim=c(0,axis.max),xlab="Observed",ylab="Predicted")
    # posterior medians (triangles)
    points(obs.mean,apply(Pred.cur,2,'median'),pch=2)
    # linex point predictions (plus signs)
    linex.pred=-1/a.linex*log(apply(exp(-a.linex*Pred.cur),2,'mean'))
    points(obs.mean,linex.pred,pch=3)
    abline(a=0,b=1)  # 1:1 reference line
    legend(axis.max*.1,axis.max*.8,c("Mean","Median","Linex"),pch=c(1,2,3))
  }
}
#' calculate parameter estimates and confidence intervals for various loss functions
#' @param Out Output list from "mcmc_ds.R" (uses Out$Pred.N, Out$Obs.N and Out$Post$N)
#' @return summary.N list vector, with the first list index indicating species
#' @export
#' @keywords summary
#' @author Paul B. Conn
summary_N<-function(Out){
  n.sp=dim(Out$Pred.N)[1]
  summary.N=vector('list',n.sp)
  for(sp in 1:n.sp){
    # NOTE(review): 'a' is tuned on Pred.N vs Obs.N but applied to Post$N below;
    # presumably intentional (calibrate on predictions, summarize posterior) -- confirm.
    a.linex=calc_linex_a(Out$Pred.N[sp,,],Out$Obs.N[sp,,])$minimum
    post.sp=Out$Post$N[sp,,]
    # linex point estimate per cell, then totals under each loss function
    linex.est=-1/a.linex*log(apply(exp(-a.linex*post.sp),2,'mean'))
    summary.N[[sp]]=list(mean=sum(apply(post.sp,2,'mean')),
                         median=sum(apply(post.sp,2,'median')),
                         linex=sum(linex.est))
  }
  summary.N
}
#' Mrds probit detection and related functions
#'
#' For independent observers, probit.fct computes observer-specific detection functions,
#' conditional detection functions, delta dependence function, duplicate detection function (seen by both),
#' and pooled detection function (seen by at least one).
#'
#' The vectors of covariate values can be of different lengths because expand.grid is used to create a
#' dataframe of all unique combinations of the distances and covariate values and the detection and related
#' values are computed for each combination. The covariate vector observer=1:2 is automatically included.
#'
#' @param x vector of perpendicular distances
#' @param formula linear probit formula for detection using distance and other covariates
#' @param beta parameter values
#' @param rho maximum correlation at largest distance
#' @param ... any number of named vectors of covariates used in the formula
#' @return dat dataframe with distance, observer, any covariates specified in ... and detection probability p,
#'   conditional detection probability pc, duplicate detection dup, pooled detection pool and
#'   dependence pc/p=delta.
#' @export
#' @author Jeff Laake
#' @examples
#' test=probit.fct(0:10,~distance,c(1,-.15),.8,size=1:3)
#' par(mfrow=c(1,2))
#' with(test[test$observer==1,],
#' {plot(distance,p,ylim=c(0,1),xlab="Distance",ylab="Detection probability")
#' points(distance,pc,pch=2)
#' points(distance,dup,pch=3)
#' points(distance,pool,pch=4)
#' legend(1,.2,legend=c("Detection","Conditional detection","Duplicate detection","Pooled detection"),pch=1:4,bty="n")
#' plot(distance,delta,xlab="Distance",ylab="Dependence")
#' })
probit.fct=function(x,formula,beta,rho,...)
{
  require(mvtnorm)
  # all unique combinations of distance, observer (1:2) and any user covariates
  dat=expand.grid(distance=x,observer=1:2,...)
  design=model.matrix(formula,dat)
  # the linear predictor needs one coefficient per design-matrix column
  if(ncol(design)!=length(beta))stop("Mismatch between beta and formula")
  # linear predictor, partitioned by observer
  lin.pred=design%*%beta
  lp.obs1=lin.pred[dat$observer==1]
  lp.obs2=lin.pred[dat$observer==2]
  # correlation grows linearly with distance, reaching rho at the maximum distance
  dist1=dat$distance[dat$observer==1]
  rho.x=rho*dist1/max(dist1)
  # marginal detection probabilities and duplicate (seen-by-both) probability
  p.obs1=pnorm(lp.obs1,0,1)
  p.obs2=pnorm(lp.obs2,0,1)
  p.both=apply(cbind(lp.obs1,lp.obs2,rho.x),1,function(z)
        pmvnorm(lower=c(-z[1],-z[2]),corr=matrix(c(1,z[3],z[3],1),ncol=2,nrow=2)))
  # store marginal, conditional, duplicate and pooled probabilities by observer
  dat$p[dat$observer==1]=p.obs1
  dat$p[dat$observer==2]=p.obs2
  dat$pc[dat$observer==1]=p.both/p.obs2
  dat$pc[dat$observer==2]=p.both/p.obs1
  dat$dup[dat$observer==1]=p.both
  dat$dup[dat$observer==2]=p.both
  dat$pool[dat$observer==1]=p.obs1+p.obs2-p.both
  dat$pool[dat$observer==2]=p.obs1+p.obs2-p.both
  # dependence function delta = conditional / marginal detection probability
  dat$delta=dat$pc/dat$p
  return(dat)
}
#' function to convert HierarchicalDS MCMC list vector (used in estimation) into an mcmc object (cf. coda package)
#' @param MCMC list vector providing MCMC samples for each parameter type
#' @param N.hab.pois.par see help for mcmc_ds.R
#' @param N.hab.bern.par see help for mcmc_ds.R
#' @param Cov.par.n see help for mcmc_ds.R
#' @param Hab.pois.names see help for mcmc_ds.R
#' @param Hab.bern.names see help for mcmc_ds.R
#' @param Cov.names see help for mcmc_ds.R
#' @param Det.names see help for mcmc_ds.R
#' @param MisID.names see help for mcmc_ds.R
#' @param N.par.misID see help for mcmc_ds.R
#' @param misID.mat see help for mcmc_ds.R
#' @param misID see help for mcmc_ds.R
#' @param fix.tau.nu see help for mcmc_ds.R
#' @param spat.ind see help for mcmc_ds.R
#' @param point.ind see help for mcmc_ds.R
#' @export
#' @keywords MCMC, coda
#' @author Paul B. Conn
convert.HDS.to.mcmc<-function(MCMC,N.hab.pois.par,N.hab.bern.par,Cov.par.n,Hab.pois.names,Hab.bern.names,Det.names,Cov.names,MisID.names,N.par.misID=NULL,misID.mat=NULL,fix.tau.nu=FALSE,misID=TRUE,spat.ind=TRUE,point.ind=TRUE){
# flattens the per-parameter-type MCMC list into one iterations x parameters
# matrix with informative column names, then wraps it as a coda::mcmc object
require(coda)
# NOTE(review): this only cat()s a message rather than stop()ing; execution continues
if(misID==TRUE & (is.null(N.par.misID)|is.null(misID.mat)))cat("\n Error: must provide N.par.misID and misID.mat whenever misID=TRUE \n")
# i.ZIP: TRUE when Bernoulli (hurdle/ZIP) habitat parameters were fitted
i.ZIP=!is.na(N.hab.bern.par)[1]
n.species=nrow(MCMC$Hab.pois)
n.iter=length(MCMC$Hab.pois[1,,1])
# total number of output columns: abundance + group totals (2*n.species), habitat,
# detection, optional correlation, optional precisions, individual covariates, misID
n.col=n.species*2+sum(N.hab.pois.par)+ncol(MCMC$Det)+point.ind+(1-spat.ind)*n.species+(1-fix.tau.nu)*n.species+sum(Cov.par.n)*n.species+misID*sum(N.par.misID)
if(i.ZIP)n.col=n.col+sum(N.hab.bern.par)+(1-spat.ind)*n.species #for ZIP model
n.cells=dim(MCMC$G)[3]
Mat=matrix(0,n.iter,n.col)
# columns 1..n.species: total abundance per species
Mat[,1:n.species]=t(MCMC$N.tot)
counter=n.species
col.names=paste("Abund.sp",c(1:n.species),sep='')
for(isp in 1:n.species){
# next n.species columns: total number of groups per species (summed over cells)
Mat[,counter+isp]=rowSums(as.matrix(MCMC$G[isp,,],nrow=n.iter,ncol=n.cells)) #total abundance of groups
col.names=c(col.names,paste("Groups.sp",isp,sep=''))
}
counter=counter+n.species
for(isp in 1:n.species){ #habitat parameters
Mat[,(counter+1):(counter+N.hab.pois.par[isp])]=MCMC$Hab.pois[isp,,1:N.hab.pois.par[isp]]
col.names=c(col.names,paste("Hab.pois.sp",isp,Hab.pois.names[[isp]],sep=''))
counter=counter+sum(N.hab.pois.par[isp])
}
if(i.ZIP){
# Bernoulli (zero-inflation) habitat parameters, only for hurdle/ZIP models
for(isp in 1:n.species){ #habitat parameters
Mat[,(counter+1):(counter+N.hab.bern.par[isp])]=MCMC$Hab.bern[isp,,1:N.hab.bern.par[isp]]
col.names=c(col.names,paste("Hab.bern.sp",isp,Hab.bern.names[[isp]],sep=''))
counter=counter+sum(N.hab.bern.par[isp])
}
}
# detection model parameters
Mat[,(counter+1):(counter+ncol(MCMC$Det))]=as.matrix(MCMC$Det)
col.names=c(col.names,paste("Det.",Det.names,sep=''))
counter=counter+ncol(MCMC$Det)
if(point.ind==TRUE){
# single correlation parameter under point independence
Mat[,counter+1]=MCMC$cor
col.names=c(col.names,"rho")
counter=counter+1
}
if(spat.ind==FALSE){
# spatial random effect precisions (Poisson component)
Mat[,(counter+1):(counter+n.species)]=t(MCMC$tau.eta.pois)
col.names=c(col.names,paste("tau.eta.pois.sp",c(1:n.species),sep=''))
counter=counter+n.species
}
if(spat.ind==FALSE & i.ZIP){
# spatial random effect precisions (Bernoulli component, ZIP only)
Mat[,(counter+1):(counter+n.species)]=t(MCMC$tau.eta.bern)
col.names=c(col.names,paste("tau.eta.bern.sp",c(1:n.species),sep=''))
counter=counter+n.species
}
if(fix.tau.nu==FALSE){
# overdispersion precisions (only output when estimated)
Mat[,(counter+1):(counter+n.species)]=t(MCMC$tau.nu)
col.names=c(col.names,paste("tau.nu.sp",c(1:n.species),sep=''))
counter=counter+n.species
}
if(is.null(Cov.par.n)==FALSE){
# individual covariate parameters; Cov.par is stored padded to max.par per model
max.par=max(Cov.par.n)
for(isp in 1:n.species){
for(ipar in 1:length(Cov.par.n)){
Mat[,(counter+1):(counter+Cov.par.n[ipar])]=MCMC$Cov.par[isp,,((ipar-1)*max.par+1):((ipar-1)*max.par+Cov.par.n[ipar])]
counter=counter+Cov.par.n[ipar]
col.names=c(col.names,paste("Cov.sp",isp,".",Cov.names[[ipar]],sep=''))
}
}
}
if(misID==TRUE){
# misidentification model parameters, one block per misID model equation
for(imod in 1:max(misID.mat)){
Mat[,(counter+1):(counter+N.par.misID[imod])]=MCMC$MisID[[imod]]
counter=counter+N.par.misID[imod]
col.names=c(col.names,paste("misID.mod",imod,".",MisID.names[[imod]],sep=''))
}
}
colnames(Mat)=col.names
# wrap as a coda mcmc object for downstream diagnostics/summaries
Mat=mcmc(Mat)
Mat
}
#' function to export posterior summaries from an mcmc object to a table
#' @aliases table.mcmc
#' @S3method table mcmc
#' @method table mcmc
#' @param MCMC An mcmc object with columns referencing different parameter types (column names are used for plotting labels)
#' @param file A file name to output to (including path); if null (default), outputs to screen
#' @param type What type of table to produce (either "csv" or "tex")
#' @param a Value to use for credible intervals. For example, alpha=0.05 results in 95\% credible intervals
#' @export
#' @keywords MCMC, table
#' @author Paul B. Conn
table.mcmc<-function(MCMC,file=NULL,type="csv",a=0.05){
  require(xtable)  # only needed for type="tex"
  draws=as.matrix(MCMC)
  # one row per parameter: point estimates plus equal-tailed (1-a) credible interval
  out.tab=data.frame(matrix(0,ncol(draws),5))
  colnames(out.tab)=c("Parameter","Mean","Median","Lower","Upper")
  out.tab[,1]=colnames(draws)
  out.tab[,2]=colMeans(draws)
  out.tab[,3]=apply(draws,2,'median')
  out.tab[,4]=apply(draws,2,'quantile',a/2)
  out.tab[,5]=apply(draws,2,'quantile',1-a/2)
  if(is.null(file)){
    # no file given: show the table on screen
    print(out.tab)
  }
  else{
    if(type=="csv")write.csv(out.tab,file=file)
    if(type=="tex")print(xtable(out.tab),file=file)
    if(type!="csv" & type!="tex")cat("\n Error: unknown table type. No table was printed to file.")
  }
}
#' function to calculate posterior predictive loss given the output object from hierarchicalDS
#' @param Out Output object from running hierarchicalDS (uses Out$Pred.det, an iteration x transect x species x species array, and Out$Obs.det)
#' @param burnin Any additional #'s of values from beginning of chain to discard before calculating PPL statistic (default is 0)
#' @return A matrix with posterior variance (P), sums of squares (G) for the posterior mean and median predictions (compared to Observations), and total posterior loss (D)
#' @export
#' @keywords Posterior predictive loss
#' @author Paul B. Conn
post_loss<-function(Out,burnin=0){
  n.iter=dim(Out$Pred.det)[1]
  # retain post-burnin draws; drop=FALSE keeps the 4-d structure
  Draws=Out$Pred.det[(burnin+1):n.iter,,,,drop=FALSE]
  # cell-wise posterior summaries over retained draws (transect x species x species)
  mean.Pred=apply(Draws,c(2,3,4),'mean')
  median.Pred=apply(Draws,c(2,3,4),'median')
  var.Pred=apply(Draws,c(2,3,4),'var')
  # goodness-of-fit sums of squares versus observations
  ssq.mean=sum((Out$Obs.det-mean.Pred)^2)
  ssq.median=sum((Out$Obs.det-median.Pred)^2)
  Loss=matrix(0,2,3)
  colnames(Loss)=c("P","G","D")
  rownames(Loss)=c("mean","median")
  Loss[,1]=sum(var.Pred)            # P: total posterior predictive variance
  Loss[1,2]=ssq.mean                # G: fit of posterior mean
  Loss[2,2]=ssq.median              # G: fit of posterior median
  Loss[,3]=rowSums(Loss[1:2,1:2])   # D = P + G
  Loss
}
#' MCMC output from running example in Hierarchical DS
#'
#' @name sim_out
#' @docType data
#' @author Paul Conn \email{paul.conn@@noaa.gov}
#' @keywords data
NULL
#' function to sample from a specified probability density function
#' @param n number of samples desired
#' @param pdf probability density function (pois1, poisson, pois1_ln, poisson_ln, normal, unif.disc, unif.cont, multinom)
#' @param cur.par a vector giving parameters for the specified distribution; only the first is used for single parameter distributions
#' @param RE random effects, if present (only used by the *_ln distributions; default NULL)
#' @return a vector of length n samples from the desired distribution
#' @export
#' @keywords probability density
#' @author Paul B. Conn
switch_sample<-function(n,pdf,cur.par,RE=NULL){
  # RE now defaults to NULL so callers need not supply it for distributions that
  # do not involve a random effect (previously this relied on lazy evaluation)
  switch(pdf,
      pois1=rpois(n,cur.par[1])+1,       # Poisson shifted by 1 (support starts at 1)
      poisson=rpois(n,cur.par[1]),
      pois1_ln=rpois(n,exp(cur.par[1]+cur.par[2]*RE))+1,   # lognormal RE on log mean
      poisson_ln=rpois(n,exp(cur.par[1]+cur.par[2]*RE)),
      normal=rnorm(n,cur.par[1],cur.par[2]),
      unif.disc=sample(cur.par[1]:cur.par[2],n,replace=TRUE),
      unif.cont=runif(n,cur.par[1],cur.par[2]),
      multinom=sample(c(1:length(cur.par)),n,replace=TRUE,prob=cur.par)
  )
}
#' function to sample from hyperpriors of a specified probability density function; note that
#' initial values for sigma of lognormal random effects are fixed to a small value (0.05) to
#' prevent numerical errors
#' @param pdf probability density function (pois1, poisson, pois1_ln, poisson_ln, multinom)
#' @param cur.par a vector giving hyperparameters for the specified distribution; only the first is used for single parameter distributions
#' @return a draw from the relevant hyperprior (for the *_ln cases a length-2 vector with sigma fixed at 0.05)
#' @export
#' @keywords probability density
#' @author Paul B. Conn
switch_sample_prior<-function(pdf,cur.par){
  require(mc2d)  # provides rdirichlet for the multinomial case
  if(pdf=="pois1" | pdf=="poisson"){
    draw=rgamma(1,cur.par[1],cur.par[2])
  } else if(pdf=="pois1_ln" | pdf=="poisson_ln"){
    # draw the log-mean; fix the RE sigma at 0.05 (see note above)
    draw=c(rnorm(1,cur.par[1],cur.par[2]),0.05)
  } else if(pdf=="multinom"){
    draw=rdirichlet(1,cur.par)
  } else draw=NULL
  draw
}
#' function to calculate the joint (log) pdf for a sample of values from one of a number of pdfs
#' @param x values to be evaluated
#' @param pdf probability density function (pois1, poisson, pois1_ln, poisson_ln, normal, multinom)
#' @param cur.par a vector giving parameters for the specified distribution; only the first is used for single parameter distributions
#' @param RE random effects, if present (only used by the *_ln distributions)
#' @return total log likelihood of points
#' @export
#' @keywords probability density
#' @author Paul B. Conn
switch_pdf<-function(x,pdf,cur.par,RE){
  # accumulate the log likelihood over all values in x under the chosen pdf
  if(pdf=="pois1") ll=sum(dpois(x-1,cur.par[1],log=TRUE))
  else if(pdf=="poisson") ll=sum(dpois(x,cur.par[1],log=TRUE))
  else if(pdf=="pois1_ln") ll=sum(dpois(x-1,exp(cur.par[1]+cur.par[2]*RE),log=TRUE))
  else if(pdf=="poisson_ln") ll=sum(dpois(x,exp(cur.par[1]+cur.par[2]*RE),log=TRUE))
  else if(pdf=="normal") ll=sum(dnorm(x,cur.par[1],cur.par[2],log=TRUE))
  else if(pdf=="multinom") ll=sum(log(cur.par[x]))  # x holds category indices
  else ll=NULL
  ll
}
#' function to stack data (going from three dimensional array to a two dimensional array including only "existing" animals)
#' @param Data three-d dataset (transect x observation x covariate)
#' @param Obs.transect current number of observations of animals in each transect (vector)
#' @param n.transects number of transects
#' @param stacked.names column names for new stacked dataset
#' @param factor.ind a named vector of indicator variables (TRUE = factor/categorical variable, FALSE = continuous variable)
#' @return a stacked dataset
#' @export
#' @keywords stack data
#' @author Paul B. Conn
stack_data<-function(Data,Obs.transect,n.transects,stacked.names,factor.ind){
  #convert from "sparse" 3-d data augmentation array to a rich 2-d dataframe
  if(n.transects==1){
    Flat=Data
  }
  else{
    # seed with two dummy rows (removed below) so rbind has a data.frame to grow
    Flat=as.data.frame(Data[1,1:2,])
    for(tr in seq_len(n.transects)){
      if(Obs.transect[tr]>0)Flat=rbind(Flat,Data[tr,1:Obs.transect[tr],])
    }
    Flat=Flat[-c(1,2),]  # drop the dummy seed rows
  }
  # re-establish column names and factor types lost in the 3-d array representation
  colnames(Flat)=stacked.names
  fac.cols=which(factor.ind[stacked.names]==TRUE)
  for(ic in seq_along(fac.cols)){
    Flat[,fac.cols[ic]]=as.factor(Flat[,fac.cols[ic]])
  }
  Flat
}
#' function to stack data for misID updates (going from four dimensional array to a two dimensional array including observed groups)
#' @param Data 4-d dataset (species x transect x observation x covariate)
#' @param G.obs matrix giving the total number of groups observed at least once by species and transect
#' @param g.tot.obs total number of observations for animals seen at least once
#' @param n.Observers vector giving number of observers per transect
#' @param n.transects number of transects
#' @param n.species number of species
#' @param stacked.names column names for new stacked dataset (currently unused here)
#' @param factor.ind a vector of indicator variables (currently unused here)
#' @return a stacked dataset (in matrix form)
#' @export
#' @keywords stack data
#' @author Paul B. Conn
stack_data_misID<-function(Data,G.obs,g.tot.obs,n.Observers,n.transects,n.species,stacked.names,factor.ind){
  # NOTE(review): stacked.names and factor.ind are accepted but never used -- confirm intent
  if(n.transects==1 & n.species==1){
    Flat=Data[1,1,,]
  }
  else{
    # each observed group contributes one row per observer
    Rows.per=G.obs
    Flat=matrix(0,g.tot.obs,length(Data[1,1,1,]))
    pos=1
    for(sp in 1:n.species){
      Rows.per[sp,]=G.obs[sp,]*n.Observers
      for(tr in 1:n.transects){
        if(G.obs[sp,tr]>0)Flat[pos:(pos+Rows.per[sp,tr]-1),]=Data[sp,tr,1:Rows.per[sp,tr],]
        pos=pos+Rows.per[sp,tr]
      }
    }
  }
  Flat
}
#' function to produce a design matrix given a dataset and user-specified formula object
#' @param Cur.dat current dataset (matrix or data frame; rows = observations)
#' @param stacked.names column names for current dataset
#' @param factor.ind a named vector of indicator variables (TRUE = factor/categorical variable, FALSE = continuous variable)
#' @param Det.formula a formula object
#' @param Levels A named list giving the allowable levels for factor variables
#' @return a design matrix
#' @export
#' @keywords model matrix
#' @author Paul B. Conn
get_mod_matrix<-function(Cur.dat,stacked.names,factor.ind,Det.formula,Levels){
  Cur.dat=as.data.frame(Cur.dat)
  colnames(Cur.dat)=stacked.names
  factor.cols=which(factor.ind[stacked.names]==TRUE)
  if(length(factor.cols)>0){
    for(icol in 1:length(factor.cols)){
      # Direct list lookup replaces the previous eval(parse(text=...)) construction;
      # [[ ]] also avoids $ partial matching and works for non-syntactic names.
      Cur.dat[,factor.cols[icol]]=factor(Cur.dat[,factor.cols[icol]],
                                         levels=Levels[[names(factor.cols)[icol]]])
    }
  }
  DM=model.matrix(Det.formula,data=Cur.dat)
  DM
}
#' generate initial values for MCMC chain if not already specified by user
#' @param DM.hab design matrix for habitat model
#' @param DM.det design matrix for detection model
#' @param G.transect a vector of the number of groups of animals in area covered by each transect
#' @param Area.trans a vector giving the proportion of a strata covered by each transect
#' @param Area.hab a vector of the relative areas of each strata
#' @param Mapping a vector mapping each transect to it's associated strata
#' @param point.ind is point independence assumed (TRUE/FALSE)
#' @param spat.ind is spatial independence assumed? (TRUE/FALSE)
#' @param grp.mean pois1 parameter for group size
#' @return a list of initial parameter values
#' @export
#' @keywords initial values, mcmc
#' @author Paul B. Conn
generate_inits<-function(DM.hab,DM.det,G.transect,Area.trans,Area.hab,Mapping,point.ind,spat.ind,grp.mean){
  # NB: random draws below are kept in this order so results for a given seed
  # are reproducible relative to earlier versions of this function
  Par=list(det=rnorm(ncol(DM.det),0,1),
           hab=rep(0,ncol(DM.hab)),
           cor=ifelse(point.ind,runif(1,0,.8),0),  # detection correlation only under point independence
           Nu=log(max(G.transect)/mean(Area.trans)*exp(rnorm(length(Area.hab)))),
           Eta=rnorm(length(Area.hab)),
           tau.eta=runif(1,0.5,2),
           tau.nu=runif(1,0.5,2))
  # jittered intercept-scale starting value for the habitat model
  Par$hab[1]=mean(G.transect)/(mean(Area.trans)*mean(Area.hab))*exp(rnorm(1,0,1))
  # starting numbers of groups and individuals per strata (individuals >= groups)
  Par$G=round(exp(Par$Nu)*Area.hab*exp(rnorm(length(Par$Nu))))
  Par$N=Par$G+rpois(length(Par$G),grp.mean*Par$G)
  if(spat.ind==1)Par$Eta=0*Par$Eta  # zero out spatial effects under independence
  Par
}
#' generate initial values for misID model if not already specified by user
#' @param DM.hab.pois a list of design matrices for the Poisson habitat model (elements are named sp1,sp2, etc.)
#' @param DM.hab.bern If a hurdle model, a list of design matrices for the Bernoulli habitat model (elements are named sp1,sp2, etc.) (NULL if not hurdle)
#' @param DM.det design matrix for detection model
#' @param N.hab.pois.par vector giving number of parameters in the Poisson habitat model for each species
#' @param N.hab.bern.par vector giving number of parameters in the Bernoulli habitat model for each species (NULL if not hurdle)
#' @param G.transect a matrix of the number of groups of animals in area covered by each transect; each row gives a separate species
#' @param Area.trans a vector giving the proportion of a strata covered by each transect
#' @param Area.hab a vector of the relative areas of each strata
#' @param Mapping a vector mapping each transect to it's associated strata
#' @param point.ind is point independence assumed (TRUE/FALSE)
#' @param spat.ind is spatial independence assumed? (TRUE/FALSE)
#' @param grp.mean a vector giving the pois1 parameter for group size (one entry for each species)
#' @param misID if TRUE, indicates that misidentification is incorporated into modeling
#' @param misID.mat a matrix specifying which elements of the misID matrix are linked to model equations
#' @param N.par.misID a vector giving the number of parameters for each misID model (in multinomial logit space)
#' @return a list of initial parameter values
#' @export
#' @keywords initial values, mcmc
#' @author Paul B. Conn
generate_inits_misID<-function(DM.hab.pois,DM.hab.bern,DM.det,N.hab.pois.par,N.hab.bern.par,G.transect,Area.trans,Area.hab,Mapping,point.ind,spat.ind,grp.mean,misID,misID.mat,N.par.misID){
  i.hurdle=1-is.null(DM.hab.bern)
  n.species=nrow(G.transect)
  n.cells=length(Area.hab)
  MisID=NULL  # BUGFIX: previously undefined when misID==FALSE, causing an error below
  if(misID){
    n.misID.eq=max(misID.mat)
    MisID=vector("list",n.misID.eq)
    for(itmp in 1:n.misID.eq)MisID[[itmp]]=runif(N.par.misID[itmp],-.5,.5)
    diag.mods=diag(misID.mat)
    diag.mods=diag.mods[which(diag.mods>0)]
    if(length(diag.mods)>0){
      for(itmp in 1:length(diag.mods))MisID[[diag.mods[itmp]]][1]=MisID[[diag.mods[itmp]]][1]+2 #ensure that the highest probability is for a non-misID
    }
  }
  hab.pois=matrix(0,n.species,max(N.hab.pois.par))
  hab.bern=NULL
  tau.eta.bern=NULL
  Eta.bern=NULL
  if(i.hurdle==1){
    # extra Bernoulli-component pieces for hurdle (ZIP) models only
    hab.bern=matrix(0,n.species,max(N.hab.bern.par))
    tau.eta.bern=runif(n.species,0.5,2)
    Eta.bern=matrix(rnorm(n.species*n.cells),n.species,n.cells)
  }
  # jittered log-intensity starting values, one row per species
  Nu=matrix(0,n.species,n.cells)
  for(isp in 1:n.species){
    Nu[isp,]=log(max(G.transect[isp,])/mean(Area.trans)*exp(rnorm(length(Area.hab),0,0.1)))
  }
  Par=list(det=rnorm(ncol(DM.det),0,1),hab.pois=hab.pois,hab.bern=hab.bern,cor=ifelse(point.ind,runif(1,0,.8),0),
      Nu=Nu,Eta.pois=matrix(rnorm(n.species*n.cells),n.species,n.cells),Eta.bern=Eta.bern,
      tau.eta.pois=runif(n.species,0.5,2),tau.eta.bern=tau.eta.bern,tau.nu=runif(n.species,0.5,2),MisID=MisID)
  Par$hab.pois[,1]=log(apply(G.transect,1,'mean')/(mean(Area.trans)*mean(Area.hab))*exp(rnorm(n.species,0,1)))
  # BUGFIX: multiply each cell's intensity by its own area; the previous
  # exp(Par$Nu)*Area.hab recycled Area.hab down species rows (column-major)
  # rather than across cells whenever n.species>1
  Par$G=round(sweep(exp(Par$Nu),2,Area.hab,'*')*exp(rnorm(length(Par$Nu))))
  # BUGFIX: initialize N as a matrix before row-wise assignment (was NULL,
  # so Par$N[isp,]<- errored with "incorrect number of subscripts on matrix")
  Par$N=Par$G
  for(isp in 1:n.species)Par$N[isp,]=Par$G[isp,]+rpois(n.cells,grp.mean[isp]*Par$G[isp,])
  if(spat.ind==1){
    # zero out spatial random effects under spatial independence
    Par$Eta.bern=0*Par$Eta.bern
    Par$Eta.pois=0*Par$Eta.pois
  }
  Par
}
#' Fill confusion array - one confusion matrix for each individual (DEPRECATED)
#' @param Confuse An 3-dimensional array, with dimensions (# of individuals, # of rows in misID.mat, # of cols of misID.mat)
#' @param Cov Data frame including all covariates for the misclassification model (individuals are on rows)
#' @param Beta A list where each entry is a vector giving the parameters of the misID model
#' @param n.indiv Integer giving the number of individuals
#' @param misID.mat With true state on rows and assigned state on column, each positive entry provides an index to misID.models (i.e. what model to assume on multinomial logit space); a 0 indicates an impossible assigment; a negative number designates which column is to be obtained via subtraction
#' @param misID.formulas A formula vector providing linear model-type formulas for each positive value of misID.mat. If the same model is used in multiple columns it is assumed that all fixed effects (except the intercept) are shared
#' @param symm if TRUE, symmetric classification probabilities are applied (e.g. pi^12=pi^21)
#' @return A filled version of Confuse
#' @export
#' @author Paul B. Conn
get_confusion_array<-function(Confuse,Cov=NULL,Beta,n.indiv,misID.mat,misID.formulas,symm=TRUE){
# DEPRECATED: see get_confusion_mat for the record-based replacement.
# intercept-only covariates when none are supplied
if(is.null(Cov)==1)Cov=data.frame(matrix(1,n.indiv,1))
DM=vector("list",max(misID.mat))
Pi=DM
# ind.mat maps (row,col) positions of misID.mat to linear indices into Pi
ind.mat=matrix(c(1:length(misID.mat)),nrow(misID.mat),ncol(misID.mat))
# unnormalized multinomial-logit cell values for each individual:
# 0 = impossible assignment, 1 = reference cell, exp(X*beta) otherwise
for(ipar in 1:length(misID.mat)){
if(misID.mat[ipar]==0)Pi[[ipar]]=rep(0,n.indiv)
if(misID.mat[ipar]<0)Pi[[ipar]]=rep(1,n.indiv)
if(misID.mat[ipar]>0){
DM[[ipar]]=model.matrix(misID.formulas[[misID.mat[ipar]]],data=Cov)
Pi[[ipar]]=exp(DM[[ipar]]%*%Beta[[misID.mat[ipar]]])
}
}
if(symm==TRUE){
# NOTE(review): this branch hard-codes indices [1,..], [2,1], [2,2], [2,3],
# so it appears to assume exactly 2 true states and 3 assignment columns --
# TODO confirm before reuse; it also overwrites Pi entries inside the
# individual loop with quantities derived from a single individual's row.
for(iind in 1:n.indiv){
for(icol in 1:ncol(misID.mat)){
Confuse[iind,1,icol]=Pi[[ind.mat[1,icol]]][iind]
}
# normalize row 1 to probabilities
Confuse[iind,1,]=Confuse[iind,1,]/sum(Confuse[iind,1,])
Pi[[ind.mat[2,3]]]=rep(1,n.indiv)
# back-solve the symmetric cell pi^21 from the row-1 probabilities
Pi[[ind.mat[2,1]]]=(Confuse[iind,1,2]+Confuse[iind,1,2]*Pi[[ind.mat[2,2]]])/(1-Confuse[iind,1,2])
for(icol in 1:ncol(misID.mat))Confuse[iind,2,icol]=Pi[[ind.mat[2,icol]]][iind]
# normalize row 2 to probabilities
Confuse[iind,2,]=Confuse[iind,2,]/sum(Confuse[iind,2,])
}
}
else{
# no symmetry constraint: fill each row directly from Pi and normalize
for(iind in 1:n.indiv){
for(irow in 1:nrow(misID.mat)){
for(icol in 1:ncol(misID.mat))Confuse[iind,irow,icol]=Pi[[ind.mat[irow,icol]]][iind]
Confuse[iind,irow,]=Confuse[iind,irow,]/sum(Confuse[iind,irow,])
}
}
}
Confuse
}
#' Fill a list with confusion matrices for each record
#' @param Cur.dat Matrix giving data (records and covariates) - multiple rows can be given (e.g. reflecting different observers)
#' @param Beta A list where each entry is a vector giving the parameters of the misID model
#' @param misID.mat With true state on rows and assigned state on column, each positive entry provides an index to misID.models (i.e. what model to assume on multinomial logit space); a 0 indicates an impossible assigment; a negative number designates which column is to be obtained via subtraction
#' @param misID.models A formula vector providing linear model-type formulas for each positive value of misID.mat. If the same model is used in multiple columns it is assumed that all fixed effects (except the intercept) are shared
#' @param misID.symm if TRUE, symmetric classification probabilities are applied (e.g. pi^12=pi^21)
#' @param stacked.names A character vector giving column names for the data
#' @param factor.ind A vector indicating whether each column of data is to be treated as numeric or factor
#' @param Levels A list, each entry of which corresponds to a column name for factor variables and gives the possible levels of those factors
#' @return A list of confusion matrices, one for each row in Cur.dat
#' @export
#' @author Paul B. Conn
get_confusion_mat<-function(Cur.dat,Beta,misID.mat,misID.models,misID.symm=TRUE,stacked.names,factor.ind,Levels){
  Pi=vector("list",length(misID.mat))
  n.obs=nrow(Cur.dat)
  # ind.mat maps (row,col) positions of misID.mat to linear indices into Pi
  ind.mat=matrix(c(1:length(misID.mat)),nrow(misID.mat),ncol(misID.mat))
  Confuse=vector("list",n.obs)
  # unnormalized multinomial-logit cell values for each record:
  # 0 = impossible assignment, 1 = reference cell, exp(X*beta) otherwise
  for(ipar in 1:length(misID.mat)){
    if(misID.mat[ipar]==0)Pi[[ipar]]=rep(0,n.obs)
    if(misID.mat[ipar]<0)Pi[[ipar]]=rep(1,n.obs)
    if(misID.mat[ipar]>0){
      DM=get_mod_matrix(Cur.dat=Cur.dat,stacked.names=stacked.names,factor.ind=factor.ind,Det.formula=misID.models[[misID.mat[ipar]]],Levels=Levels)
      Pi[[ipar]]=exp(DM%*%Beta[[misID.mat[ipar]]])
    }
  }
  if(misID.symm==TRUE){
    # initialize lower-triangle cells to zero; they are set by symmetry below
    for(irow in 2:nrow(misID.mat)){
      for(icol in 1:(irow-1))Pi[[ind.mat[irow,icol]]]=rep(0,n.obs)
    }
    for(iobs in 1:n.obs){
      Confuse[[iobs]]=matrix(0,nrow(misID.mat),ncol(misID.mat))
      #step one, calculate assignment probabilities for first row of confusion array
      for(icol in 1:ncol(misID.mat))Confuse[[iobs]][1,icol]=Pi[[ind.mat[1,icol]]][iobs]
      Confuse[[iobs]][1,]=Confuse[[iobs]][1,]/sum(Confuse[[iobs]][1,])
      #now, for remaining rows, substitute in confusion values from previous rows and calculate Pi values
      for(irow in 2:nrow(misID.mat)){
        sum.pi=0
        for(icol in 1:ncol(misID.mat))sum.pi=sum.pi+Pi[[ind.mat[irow,icol]]][iobs]
        # copy symmetric entries from rows already normalized above
        for(icol in 1:(irow-1))Confuse[[iobs]][irow,icol]=Confuse[[iobs]][icol,irow]
        sum.Conf=sum(Confuse[[iobs]][irow,])
        # rescale the symmetric cells onto the Pi (unnormalized) scale
        for(icol in 1:(irow-1))Pi[[ind.mat[irow,icol]]][iobs]=Confuse[[iobs]][icol,irow]*sum.pi/(1-sum.Conf)
        for(icol in 1:ncol(misID.mat))Confuse[[iobs]][irow,icol]=Pi[[ind.mat[irow,icol]]][iobs]
        Confuse[[iobs]][irow,]=Confuse[[iobs]][irow,]/sum(Confuse[[iobs]][irow,])
      }
    }
  }
  else{
    for(iobs in 1:n.obs){
      # BUGFIX: was matrix(0,dim(misID.mat)); dim() returns a length-2 vector,
      # which is an invalid 'nrow' argument -- give nrow and ncol explicitly
      # (matching the construction used in the symmetric branch above)
      Confuse[[iobs]]=matrix(0,nrow(misID.mat),ncol(misID.mat))
      for(irow in 1:nrow(misID.mat)){
        for(icol in 1:ncol(misID.mat))Confuse[[iobs]][irow,icol]=Pi[[ind.mat[irow,icol]]][iobs]
        Confuse[[iobs]][irow,]=Confuse[[iobs]][irow,]/sum(Confuse[[iobs]][irow,])
      }
    }
  }
  Confuse
}
#' compute the first derivative of log_lambda likelihood component for Langevin-Hastings
#' @param Mu expected value for all cells
#' @param Nu current observed values (all cells)
#' @param Sampled Vector giving the cell identities for all sampled cells
#' @param Area Proportional area of each sampled cell that is covered by one or more transects
#' @param N number of groups in each transect
#' @param var.nu variance of the overdispersion process
#' @return a gradient value
#' @export
#' @keywords gradient, Langevin-Hastings
#' @author Paul B. Conn
log_lambda_gradient <- function(Mu, Nu, Sampled, Area, N, var.nu) {
  # Gradient of the log-lambda full conditional used in Langevin-Hastings
  # proposals: Gaussian shrinkage toward the expected value plus the Poisson
  # score for the sampled cells.
  mu.s <- Mu[Sampled]
  nu.s <- Nu[Sampled]
  (mu.s - nu.s) / var.nu + N - Area * exp(nu.s)
}
#' compute the likelihood for nu parameters
#' @param Log.lambda Log of poisson intensities for total areas sampled in each sampled strata
#' @param DM the design matrix
#' @param Beta linear predictor parameters for the log of abundance intensity
#' @param Eta a vector of spatial random effects
#' @param SD standard deviation of the overdispersion process
#' @param N a vector giving the current iteration's number of groups in the area
#' @param Sampled Index for which cells were actually sampled
#' @param Area Total area sampled in each sampled cell
#' @return the log likelihood associated with the data and the current set of parameters
#' @export
#' @keywords log likelihood
#' @author Paul B. Conn
log_lambda_log_likelihood <- function(Log.lambda, DM, Beta, Eta = 0, SD, N, Sampled, Area) {
  # Log likelihood for the sampled cells' log intensities: a normal kernel
  # around the linear predictor plus a Poisson kernel for the group counts.
  pred.log.lam <- (DM %*% Beta + Eta)[Sampled]
  normal.part <- sum(dnorm(Log.lambda, pred.log.lam, SD, log = TRUE))
  poisson.part <- sum(N * Log.lambda - Area * exp(Log.lambda))
  normal.part + poisson.part
}
#' SIMULATE AN ICAR PROCESS
#' @param Q Precision matrix for the ICAR process
#' @return Spatial random effects
#' @export
#' @keywords ICAR, simulation
#' @author Devin Johnson
rrw <- function(Q) {
  # Simulate a realization of an ICAR process from its (singular) precision
  # matrix Q via the spectral decomposition, then project out the improper
  # null space so the result is identifiable (sum-to-zero, and detrended
  # when the null space is two-dimensional).
  eig <- eigen(Q, TRUE)
  # Pseudo-inverse square roots of the eigenvalues; null-space modes get 0.
  val.inv <- sqrt(ifelse(eig$values > sqrt(.Machine$double.eps), 1 / eig$values, 0))
  sim <- eig$vectors %*% diag(val.inv) %*% rnorm(dim(Q)[1], 0, 1)
  X <- rep(1, length(sim))
  # Two zero eigenvalues (RW2-type prior): also remove the linear trend.
  if (sum(val.inv == 0) == 2) X <- cbind(X, 1:length(sim))
  sim - X %*% solve(crossprod(X), crossprod(X, sim))
}
#' Produce an adjacency matrix for a vector
#' @param x length of vector
#' @return adjacency matrix
#' @export
#' @keywords adjacency
#' @author Paul Conn
linear_adj <- function(x) {
  # First-order (RW1) adjacency matrix for a chain of x cells: cell i is a
  # neighbor of cells i-1 and i+1.  Replaces the original two-shifted-matrix
  # construction and handles the degenerate x = 1 case, which previously
  # errored on an out-of-bounds 2:x index.
  Adj <- matrix(0, x, x)
  for (i in seq_len(x - 1)) {
    Adj[i, i + 1] <- 1
    Adj[i + 1, i] <- 1
  }
  Adj
}
#' Produce an adjacency matrix for a square grid
#' @param x number of cells on side of grid
#' @return adjacency matrix
#' @export
#' @keywords adjacency
#' @author Paul Conn
square_adj <- function(x) {
  # Queens-move (kings-move) adjacency matrix for an x-by-x grid with
  # column-major cell indexing (Ind[i, j] = (j-1)*x + i).  Replaces the
  # original nine-case boundary enumeration with a single neighbor-offset
  # loop; output is unchanged for x >= 2, and the degenerate x = 1 case now
  # returns a 1x1 zero matrix instead of erroring on an out-of-bounds index.
  Ind <- matrix(seq_len(x^2), x, x)
  Adj <- matrix(0, x^2, x^2)
  for (i in seq_len(x)) {
    for (j in seq_len(x)) {
      # visit all 8 surrounding offsets, skipping the cell itself
      for (di in -1:1) {
        for (dj in -1:1) {
          if (di == 0 && dj == 0) next
          ii <- i + di
          jj <- j + dj
          if (ii >= 1 && ii <= x && jj >= 1 && jj <= x) {
            Adj[Ind[i, j], Ind[ii, jj]] <- 1
          }
        }
      }
    }
  }
  return(Adj)
}
#' Produce an RW1 adjacency matrix for a rectangular grid for use with areal spatial models (queens move)
#' @param x number of cells on horizontal side of grid
#' @param y number of cells on vertical side of grid
#' @param byrow If TRUE, cell indices are filled along rows (default is FALSE)
#' @return adjacency matrix
#' @export
#' @keywords adjacency
#' @author Paul Conn \email{paul.conn@@noaa.gov}
rect_adj <- function(x, y, byrow = FALSE) {
  # Queens-move (RW1) adjacency matrix for an x (horizontal) by y (vertical)
  # grid.  Cell indices run down columns by default; byrow = TRUE reproduces
  # the original row-major index layout (fill by row, then transpose).
  # The nine-case boundary enumeration is replaced by one neighbor-offset
  # loop, which also fixes spurious adjacencies the original produced for
  # 1-row/1-column grids (offsets +1 and +n.row coincide when n.row == 1).
  # The trailing t(Adj) from the original is dropped: the matrix is
  # symmetric by construction, so it was a no-op.
  Ind <- matrix(seq_len(x * y), y, x, byrow = byrow)
  if (byrow) Ind <- t(Ind)
  n.row <- nrow(Ind)
  n.col <- ncol(Ind)
  Adj <- matrix(0, x * y, x * y)
  for (i in seq_len(n.row)) {
    for (j in seq_len(n.col)) {
      # visit all 8 surrounding offsets, skipping the cell itself
      for (di in -1:1) {
        for (dj in -1:1) {
          if (di == 0 && dj == 0) next
          ii <- i + di
          jj <- j + dj
          if (ii >= 1 && ii <= n.row && jj >= 1 && jj <= n.col) {
            Adj[Ind[i, j], Ind[ii, jj]] <- 1
          }
        }
      }
    }
  }
  return(Adj)
}
#' Produce an RW2 Adjacency matrix for a rectangular grid for use with areal spatial models.
#' This formulation uses cofficients inspired by a thin plate spline, as described in Rue & Held, section 3.4.2
#' Here I'm outputting an adjacency matrix of 'neighbor weights' which makes Q construction for regular latices
#' easy to do when not trying to make inference about all cells (i.e., one can just
#' eliminate rows and columns associated with cells one isn't interested in and set Q=-Adj+Diag(sum(Adj))
#' @param x number of cells on horizontal side of grid
#' @param y number of cells on vertical side of grid
#' @param byrow If TRUE, cell indices are filled along rows (default is FALSE)
#' @return adjacency matrix
#' @export
#' @keywords adjacency
#' @author Paul Conn \email{paul.conn@@noaa.gov}
rect_adj_RW2 <- function(x,y,byrow=FALSE){
# Weights matrix for a second-order (RW2) intrinsic prior on an x-by-y grid,
# using the thin-plate-spline stencil of Rue & Held (sec. 3.4.2):
# first-order rook neighbors weight +8, diagonal neighbors -2, and
# second-order rook neighbors (two cells away) -1.  The precision matrix is
# then formed as Q = -Adj + diag(rowSums(Adj)), optionally after deleting
# rows/columns for cells not of interest.
cur.x=x+4 #make calculations on a larger grid and then cut off rows/columns at end
cur.y=y+4
Ind=matrix(c(1:(cur.x*cur.y)),cur.y,cur.x,byrow=byrow)
if(byrow==TRUE)Ind=t(Ind)
n.row=nrow(Ind)
n.col=ncol(Ind)
Adj=matrix(0,cur.x*cur.y,cur.x*cur.y)
# Only interior cells of the padded grid are visited, so every stencil
# offset below is guaranteed to stay in bounds.
for(i in 3:(n.row-2)){
for(j in 3:(n.col-2)){
#first-order rook neighbors (up/down/left/right): weight +8
Adj[Ind[i,j],Ind[i,j]+1]=8
Adj[Ind[i,j],Ind[i,j]+n.row]=8
Adj[Ind[i,j],Ind[i,j]-n.row]=8
Adj[Ind[i,j],Ind[i,j]-1]=8
#diagonal (bishops move) neighbors: weight -2
Adj[Ind[i,j],Ind[i,j]+n.row-1]=-2
Adj[Ind[i,j],Ind[i,j]+n.row+1]=-2
Adj[Ind[i,j],Ind[i,j]-n.row-1]=-2
Adj[Ind[i,j],Ind[i,j]-n.row+1]=-2
#second-order rook neighbors (two cells away): weight -1
Adj[Ind[i,j],Ind[i,j]+2]=-1
Adj[Ind[i,j],Ind[i,j]+2*n.row]=-1
Adj[Ind[i,j],Ind[i,j]-2]=-1
Adj[Ind[i,j],Ind[i,j]-2*n.row]=-1
}
}
#compile list of cells that need to be removed (the 2-cell padding border)
I.rem=matrix(0,n.row,n.col)
I.rem[c(1,2,n.row-1,n.row),]=1
I.rem[,c(1,2,n.col-1,n.col)]=1
Adj=Adj[-which(I.rem==1),-which(I.rem==1)]
# NOTE(review): Adj is symmetric by construction (weights depend only on the
# symmetric neighbor relation), so this transpose looks like a no-op --
# presumably kept for parity with rect_adj; confirm before removing.
if(byrow==TRUE)Adj=t(Adj)
return(Adj)
}
#' estimate optimal 'a' parameter for linex loss function
#' @param Pred.G Predicted group abundance
#' @param Obs.G Observed group abundance
#' @param min.a Minimum value for linex 'a' parameter
#' @param max.a Maximum value for linex 'a' parameter
#' @return The optimal tuning parameter for linex loss function as determined by minimum sum of squares
#' @export
#' @keywords linex
#' @author Paul B. Conn
calc_linex_a <- function(Pred.G, Obs.G, min.a = 0.00001, max.a = 1.0) {
  # Choose the linex-loss tuning parameter 'a' by minimizing the sum of
  # squared differences between the linex point predictions and the observed
  # column (transect) means.  Returns the optimize() result; the selected
  # parameter is in $minimum.
  obs.means <- apply(Obs.G, 2, mean)
  ssq <- function(a, X, Y) {
    # linex point estimate per column: -1/a * log(mean(exp(-a * X)))
    theta <- -1 / a * log(apply(exp(-a * X), 2, mean))
    sum((Y - theta)^2)
  }
  optimize(f = ssq, interval = c(min.a, max.a), X = Pred.G, Y = obs.means)
}
#' plot 'observed' versus predicted values for abundance of each species at each transect
#' @param Out Output list from "mcmc_ds.R"
#' @return NULL
#' @export
#' @keywords diagnostics, plot
#' @author Paul B. Conn
plot_obs_pred<-function(Out){
# Plot 'observed' vs. predicted abundance per transect, one panel per
# species.  Three point types are drawn against the posterior mean of the
# observed values: posterior mean (circle), posterior median (triangle),
# and the linex point estimate (cross), with a 1:1 reference line.
# Modifies the graphics state via par(mfrow=...) without restoring it.
n.species=dim(Out$Pred.N)[1]
par(mfrow=c(n.species,1))
for(isp in 1:n.species){
# optimal linex tuning parameter for this species' predictions
a.linex=calc_linex_a(Out$Pred.N[isp,,],Out$Obs.N[isp,,])$minimum
# common axis limit so the 1:1 line is meaningful
max.x=max(c(apply(Out$Obs.N[isp,,],2,'mean'),apply(Out$Pred.N[isp,,],2,'mean')))
plot(apply(Out$Obs.N[isp,,],2,'mean'),apply(Out$Pred.N[isp,,],2,'mean'),pch=1,xlim=c(0,max.x),ylim=c(0,max.x),xlab="Observed",ylab="Predicted")
points(apply(Out$Obs.N[isp,,],2,'mean'),apply(Out$Pred.N[isp,,],2,'median'),pch=2)
# linex point estimate per transect: -1/a * log(mean(exp(-a * X)))
Theta=exp(-a.linex*Out$Pred.N[isp,,])
Theta=-1/a.linex*log(apply(Theta,2,'mean'))
points(apply(Out$Obs.N[isp,,],2,'mean'),Theta,pch=3)
abline(a=0,b=1)
legend(max.x*.1,max.x*.8,c("Mean","Median","Linex"),pch=c(1,2,3))
}
}
#' calculate parameter estimates and confidence intervals for various loss functions
#' @param Out Output list from "mcmc_ds.R"
#' @return summary.N list vector, with the first list index indicating species
#' @export
#' @keywords summary
#' @author Paul B. Conn
summary_N<-function(Out){
# Posterior point estimates of total abundance for each species under three
# loss functions: squared error (mean), absolute error (median), and linex
# loss with the tuning parameter fitted by calc_linex_a().  Returns a list
# (one element per species) of lists with components mean, median, linex.
n.species=dim(Out$Pred.N)[1]
summary.N=vector('list',n.species)
for(isp in 1:n.species){
# NOTE(review): the linex parameter is tuned on Out$Pred.N/Out$Obs.N but
# applied to Out$Post$N -- confirm this mix of prediction and posterior
# arrays is intentional.
a.linex=calc_linex_a(Out$Pred.N[isp,,],Out$Obs.N[isp,,])$minimum
Theta=exp(-a.linex*Out$Post$N[isp,,])
Theta=-1/a.linex*log(apply(Theta,2,'mean'))
summary.N[[isp]]=list(mean=sum(apply(Out$Post$N[isp,,],2,'mean')),median=sum(apply(Out$Post$N[isp,,],2,'median')),linex=sum(Theta))
}
summary.N
}
#' Mrds probit detection and related functions
#'
#' For independent observers, probit.fct computes observer-specific detection functions,
#' conditional detection functions, delta dependence function, duplicate detection function (seen by both),
#' and pooled detection function (seen by at least one).
#'
#' The vectors of covariate values can be of different lengths because expand.grid is used to create a
#' dataframe of all unique combinations of the distances and covariate values and the detection and related
#' values are computed for each combination. The covariate vector observer=1:2 is automatically included.
#'
#' @param x vector of perpendicular distances
#' @param formula linear probit formula for detection using distance and other covariates
#' @param beta parameter values
#' @param rho maximum correlation at largest distance
#' @param ... any number of named vectors of covariates used in the formula
#' @return dat dataframe with distance, observer, any covariates specified in ... and detection probability p,
#' conditional detection probability pc, duplicate detection dup, pooled detection pool and
#' dependence pc/p=delta.
#' @export
#' @author Jeff Laake
#' @examples
#' test=probit.fct(0:10,~distance,c(1,-.15),.8,size=1:3)
#' par(mfrow=c(1,2))
#' with(test[test$observer==1,],
#' {plot(distance,p,ylim=c(0,1),xlab="Distance",ylab="Detection probability")
#' points(distance,pc,pch=2)
#' points(distance,dup,pch=3)
#' points(distance,pool,pch=4)
#' legend(1,.2,legend=c("Detection","Conditional detection","Duplicate detection","Pooled detection"),pch=1:4,bty="n")
#' plot(distance,delta,xlab="Distance",ylab="Dependence")
#' })
probit.fct=function(x,formula,beta,rho,...)
{
  # Mrds probit detection for two independent observers.  Builds a grid of
  # distance/covariate combinations via expand.grid, then computes
  # observer-specific detection probabilities (p), conditional detection
  # (pc), duplicate detection (dup), pooled detection (pool), and the
  # dependence function delta = pc/p.  Returns the augmented dataframe.
  #
  # Fail fast with a clear message if the bivariate-normal package is
  # missing; the original require() call only warned and the code then
  # failed later inside apply() with a confusing error.
  if(!requireNamespace("mvtnorm",quietly=TRUE))
    stop("probit.fct requires the 'mvtnorm' package")
  # Create dataframe and apply formula to get design matrix
  dat=expand.grid(distance=x,observer=1:2,...)
  xmat=model.matrix(formula,dat)
  # Make sure length of beta matches number of columns of design matrix
  if(ncol(xmat)!=length(beta))stop("Mismatch between beta and formula")
  # Compute XB and partition for 2 observers
  xbeta=xmat%*%beta
  xbeta1=xbeta[dat$observer==1]
  xbeta2=xbeta[dat$observer==2]
  # Compute rho values: correlation grows linearly with distance up to rho
  distance=dat$distance[dat$observer==1]
  rhox=rho*distance/max(distance)
  # Compute detection observer-specific p1,p2 and duplicate p3
  p1=pnorm(xbeta1,0,1)
  p2=pnorm(xbeta2,0,1)
  p3=apply(cbind(xbeta1,xbeta2,rhox),1,function(x)
    mvtnorm::pmvnorm(lower=c(-x[1],-x[2]),corr=matrix(c(1,x[3],x[3],1),ncol=2,nrow=2)))
  # Compute conditional detection prob
  p1c2=p3/p2
  p2c1=p3/p1
  # Store values in dataframe
  dat$p[dat$observer==1]=p1
  dat$p[dat$observer==2]=p2
  dat$pc[dat$observer==1]=p1c2
  dat$pc[dat$observer==2]=p2c1
  dat$dup[dat$observer==1]=p3
  dat$dup[dat$observer==2]=p3
  dat$pool[dat$observer==1]=p1+p2-p3
  dat$pool[dat$observer==2]=p1+p2-p3
  dat$delta=dat$pc/dat$p
  return(dat)
}
#' function to convert HierarchicalDS MCMC list vector (used in estimation) into an mcmc object (cf. coda package)
#' @param MCMC list vector providing MCMC samples for each parameter type
#' @param N.hab.pois.par see help for mcmc_ds.R
#' @param N.hab.bern.par see help for mcmc_ds.R
#' @param Cov.par.n see help for mcmc_ds.R
#' @param Hab.pois.names see help for mcmc_ds.R
#' @param Hab.bern.names see help for mcmc_ds.R
#' @param Cov.names see help for mcmc_ds.R
#' @param Det.names see help for mcmc_ds.R
#' @param MisID.names see help for mcmc_ds.R
#' @param N.par.misID see help for mcmc_ds.R
#' @param misID.mat see help for mcmc_ds.R
#' @param misID see help for mcmc_ds.R
#' @param fix.tau.nu see help for mcmc_ds.R
#' @param spat.ind see help for mcmc_ds.R
#' @param point.ind see help for mcmc_ds.R
#' @export
#' @keywords MCMC, coda
#' @author Paul B. Conn
convert.HDS.to.mcmc<-function(MCMC,N.hab.pois.par,N.hab.bern.par,Cov.par.n,Hab.pois.names,Hab.bern.names,Det.names,Cov.names,MisID.names,N.par.misID=NULL,misID.mat=NULL,fix.tau.nu=FALSE,misID=TRUE,spat.ind=TRUE,point.ind=TRUE){
# Flatten the HierarchicalDS MCMC list into a single (iterations x
# parameters) matrix with informative column names and wrap it as a
# coda::mcmc object.  Columns are appended group by group in a fixed order,
# tracked by 'counter'; which groups appear depends on the model switches
# (i.ZIP, point.ind, spat.ind, fix.tau.nu, misID).
require(coda)
# NOTE(review): a missing N.par.misID/misID.mat is reported via cat() but
# execution continues -- confirm that failing later is acceptable here.
if(misID==TRUE & (is.null(N.par.misID)|is.null(misID.mat)))cat("\n Error: must provide N.par.misID and misID.mat whenever misID=TRUE \n")
# ZIP model is signalled by a non-NA Bernoulli habitat parameter count
i.ZIP=!is.na(N.hab.bern.par)[1]
n.species=nrow(MCMC$Hab.pois)
n.iter=length(MCMC$Hab.pois[1,,1])
# total number of output columns across all parameter groups below
n.col=n.species*2+sum(N.hab.pois.par)+ncol(MCMC$Det)+point.ind+(1-spat.ind)*n.species+(1-fix.tau.nu)*n.species+sum(Cov.par.n)*n.species+misID*sum(N.par.misID)
if(i.ZIP)n.col=n.col+sum(N.hab.bern.par)+(1-spat.ind)*n.species #for ZIP model
n.cells=dim(MCMC$G)[3]
Mat=matrix(0,n.iter,n.col)
# group 1: total abundance per species
Mat[,1:n.species]=t(MCMC$N.tot)
counter=n.species
col.names=paste("Abund.sp",c(1:n.species),sep='')
# group 2: total number of groups per species (sum of G over cells)
for(isp in 1:n.species){
Mat[,counter+isp]=rowSums(as.matrix(MCMC$G[isp,,],nrow=n.iter,ncol=n.cells)) #total abundance of groups
col.names=c(col.names,paste("Groups.sp",isp,sep=''))
}
counter=counter+n.species
# group 3: Poisson habitat parameters
for(isp in 1:n.species){ #habitat parameters
Mat[,(counter+1):(counter+N.hab.pois.par[isp])]=MCMC$Hab.pois[isp,,1:N.hab.pois.par[isp]]
col.names=c(col.names,paste("Hab.pois.sp",isp,Hab.pois.names[[isp]],sep=''))
counter=counter+sum(N.hab.pois.par[isp])
}
# group 4 (ZIP only): Bernoulli habitat parameters
if(i.ZIP){
for(isp in 1:n.species){ #habitat parameters
Mat[,(counter+1):(counter+N.hab.bern.par[isp])]=MCMC$Hab.bern[isp,,1:N.hab.bern.par[isp]]
col.names=c(col.names,paste("Hab.bern.sp",isp,Hab.bern.names[[isp]],sep=''))
counter=counter+sum(N.hab.bern.par[isp])
}
}
# group 5: detection parameters
Mat[,(counter+1):(counter+ncol(MCMC$Det))]=as.matrix(MCMC$Det)
col.names=c(col.names,paste("Det.",Det.names,sep=''))
counter=counter+ncol(MCMC$Det)
# group 6: point-independence correlation parameter
if(point.ind==TRUE){
Mat[,counter+1]=MCMC$cor
col.names=c(col.names,"rho")
counter=counter+1
}
# group 7: spatial precision parameters (Poisson component)
if(spat.ind==FALSE){
Mat[,(counter+1):(counter+n.species)]=t(MCMC$tau.eta.pois)
col.names=c(col.names,paste("tau.eta.pois.sp",c(1:n.species),sep=''))
counter=counter+n.species
}
# group 8 (ZIP only): spatial precision parameters (Bernoulli component)
if(spat.ind==FALSE & i.ZIP){
Mat[,(counter+1):(counter+n.species)]=t(MCMC$tau.eta.bern)
col.names=c(col.names,paste("tau.eta.bern.sp",c(1:n.species),sep=''))
counter=counter+n.species
}
# group 9: overdispersion precision parameters
if(fix.tau.nu==FALSE){
Mat[,(counter+1):(counter+n.species)]=t(MCMC$tau.nu)
col.names=c(col.names,paste("tau.nu.sp",c(1:n.species),sep=''))
counter=counter+n.species
}
# group 10: individual covariate parameters; MCMC$Cov.par stores each
# covariate's parameters in fixed-width slots of size max(Cov.par.n)
if(is.null(Cov.par.n)==FALSE){
max.par=max(Cov.par.n)
for(isp in 1:n.species){
for(ipar in 1:length(Cov.par.n)){
Mat[,(counter+1):(counter+Cov.par.n[ipar])]=MCMC$Cov.par[isp,,((ipar-1)*max.par+1):((ipar-1)*max.par+Cov.par.n[ipar])]
counter=counter+Cov.par.n[ipar]
col.names=c(col.names,paste("Cov.sp",isp,".",Cov.names[[ipar]],sep=''))
}
}
}
# group 11: misidentification model parameters, one block per model
if(misID==TRUE){
for(imod in 1:max(misID.mat)){
Mat[,(counter+1):(counter+N.par.misID[imod])]=MCMC$MisID[[imod]]
counter=counter+N.par.misID[imod]
col.names=c(col.names,paste("misID.mod",imod,".",MisID.names[[imod]],sep=''))
}
}
colnames(Mat)=col.names
Mat=mcmc(Mat)
Mat
}
#' function to export posterior summaries from an mcmc object to a table
#' @aliases table.mcmc
#' @S3method table mcmc
#' @method table mcmc
#' @param MCMC An mcmc object with columns referencing different parameter types (column names are used for plotting labels)
#' @param file A file name to ouput to (including path); if null (default), outputs to screen
#' @param type What type of table to produce (either "csv" or "tex")
#' @param a Value to use for credible intervals. For example, alpha=0.05 results in 95\% credible intervals
#' @export
#' @keywords MCMC, table
#' @author Paul B. Conn
table.mcmc<-function(MCMC,file=NULL,type="csv",a=0.05){
  # Summarize posterior samples: per-column mean, median, and equal-tailed
  # (1-a) credible interval.  With file=NULL the table is printed to screen;
  # otherwise it is written as csv or LaTeX (xtable) depending on 'type'.
  Out.tab=data.frame(matrix(0,ncol(MCMC),5))
  colnames(Out.tab)=c("Parameter","Mean","Median","Lower","Upper")
  MCMC=as.matrix(MCMC)
  Out.tab[,1]=colnames(MCMC)
  Out.tab[,2]=colMeans(MCMC)
  Out.tab[,3]=apply(MCMC,2,'median')
  Out.tab[,4]=apply(MCMC,2,'quantile',a/2)
  Out.tab[,5]=apply(MCMC,2,'quantile',1-a/2)
  if(is.null(file))print(Out.tab)
  else{
    if(type=="csv")write.csv(Out.tab,file=file)
    if(type=="tex"){
      # xtable is only needed for LaTeX output, so load it lazily; the
      # original require() attached it unconditionally for every call and
      # only warned (rather than erroring) when the package was missing.
      if(!requireNamespace("xtable",quietly=TRUE))
        stop("type='tex' requires the 'xtable' package")
      Out.tab=xtable::xtable(Out.tab)
      print(Out.tab,file=file)
    }
    if(type!="csv" & type!="tex")cat("\n Error: unknown table type. No table was printed to file.")
  }
}
#' function to calculate posterior predictive loss given the output object from hierarchicalDS
#' @param Out Output object from running hierarchicalDS
#' @param burnin Any additional #'s of values from beginning of chain to discard before calculating PPL statistic (default is 0)
#' @return A matrix with posterior variance (P), sums of squares (G) for the posterior mean and median predictions (compared to Observations), and total posterior loss (D)
#' @export
#' @keywords Posterior predictive loss
#' @author Paul B. Conn
post_loss<-function(Out,burnin=0){
  # Posterior predictive loss (Gelfand-Ghosh style): P = summed predictive
  # variance, G = goodness-of-fit sum of squares against either the mean or
  # the median prediction, D = P + G.  Out$Pred.det is indexed
  # [iteration, transect, species1, species2]; Out$Obs.det matches the last
  # three dimensions.
  dims.Pred <- dim(Out$Pred.det)
  # drop burn-in iterations, keep the remaining 4-d structure
  keep <- Out$Pred.det[(burnin + 1):dims.Pred[1], , , , drop = FALSE]
  # summaries over iterations for every (transect, species1, species2) cell
  mean.Pred <- apply(keep, 2:4, mean)
  median.Pred <- apply(keep, 2:4, median)
  var.Pred <- apply(keep, 2:4, var)
  Loss <- matrix(0, 2, 3)
  colnames(Loss) <- c("P", "G", "D")
  rownames(Loss) <- c("mean", "median")
  Loss[, 1] <- sum(var.Pred)
  Loss[1, 2] <- sum((Out$Obs.det - mean.Pred)^2)
  Loss[2, 2] <- sum((Out$Obs.det - median.Pred)^2)
  Loss[, 3] <- rowSums(Loss[1:2, 1:2])
  Loss
}
#' MCMC output from running example in Hierarchical DS
#'
#' @name sim_out
#' @docType data
#' @author Paul Conn \email{paul.conn@@noaa.gov}
#' @keywords data
NULL |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/schedule_operations.R
\name{document_op_skip}
\alias{document_op_skip}
\title{Document if an operation scheduling did not result in an operation date}
\usage{
document_op_skip(op_skip, attribute_hru_i, mgt_j, prev_op, j_op, version)
}
\arguments{
\item{op_skip}{Tibble that documents the skipped operations}
\item{attribute_hru_i}{Tibble with one line that provides the static HRU}
\item{mgt_j}{j_th line of the mgt table that should be scheduled}
\item{prev_op}{Date of the previous operation in ymd() format.}
\item{j_op}{index of the operation in the mgt schedule table.}
\item{version}{Text string that provides the SWAT version}
}
\description{
Document if an operation scheduling did not result in an operation date
}
\keyword{internal}
| /man/document_op_skip.Rd | no_license | chrisschuerz/SWATfarmR | R | false | true | 825 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/schedule_operations.R
\name{document_op_skip}
\alias{document_op_skip}
\title{Document if an operation scheduling did not result in an operation date}
\usage{
document_op_skip(op_skip, attribute_hru_i, mgt_j, prev_op, j_op, version)
}
\arguments{
\item{op_skip}{Tibble that documents the skipped operations}
\item{attribute_hru_i}{Tibble with one line that provides the static HRU}
\item{mgt_j}{j_th line of the mgt table that should be scheduled}
\item{prev_op}{Date of the previous opeation in ymd() format.}
\item{j_op}{index of the operation in the mgt schedule table.}
\item{version}{Text string that provides the SWAT version}
}
\description{
Document if an operation scheduling did not result in an operation date
}
\keyword{internal}
|
# Quick-11b
# based on mtp.gtxr0
# 2019-03-01
# Jiangtao Gou
# Example: mtp.quick11b(pvec.sorted=c(0.01, 0.06,0.3, 0.7, 0.8), alpha=0.61,TRUE)
#
mtp.quick11b <- function (pvec.sorted, alpha, gc.is.included=FALSE) {
# Quick-11b stepwise multiple-testing procedure (variant of mtp.gtxr0).
# Given p-values sorted in increasing order and level alpha, returns the
# index of the largest rejected hypothesis (rej.idx; 0 if none) together
# with an operation count (init.count).
#
# NOTE(review): relies on two names defined elsewhere in the package:
# BinarySearch() and the environment 'pkev' holding global counters.  When
# gc.is.included=FALSE the counters are not reset here, yet
# pkev$global.count.FS is still read in the final return -- confirm the
# caller guarantees it is initialized in that case.
if (gc.is.included) {
pkev$global.count.FS <- 0
pkev$global.count.IS <- 0
}
#
pvec.length <- length(pvec.sorted)
#
# Critical constant ca: higher-order approximation when there are at least
# five hypotheses, first-order term only otherwise.
if (pvec.length >= 5) {
ca <- alpha*(pvec.length/2/(pvec.length-1) + alpha/12*(1 + 3/(pvec.length-1) + 2/(pvec.length-2)^2 -6/(pvec.length-1)/(pvec.length-2)^2))
} else {
ca <- alpha*(pvec.length/2/(pvec.length-1))
}
#
# Fast path: even the largest p-value is <= alpha, so everything is rejected.
if (pvec.sorted[pvec.length] <= alpha) {
#
rej.idx <- pvec.length
#
return (list(rej.idx=rej.idx, init.count=1))
} # End of if
#
# Fast path: even the smallest p-value exceeds ca, so nothing is rejected.
if (pvec.sorted[1] > ca) {
#
rej.idx <- 0
#
return (list(rej.idx=rej.idx, init.count=1))
} # End of if
#
# First stage: locate the largest index whose p-value is below ca.
det.idx <- BinarySearch(a=pvec.sorted[1:(pvec.length-1)], value=ca, low=1, high=pvec.length-1, gc.is.included, secondStage=FALSE)
#
#print(det.idx)
#
# Second stage: search within 1..det.idx using the adjusted threshold.
rej.idx <- BinarySearch(a=pvec.sorted[1:det.idx], value=alpha/(pvec.length-det.idx+1), low=1, high=det.idx, gc.is.included, secondStage=TRUE)
#
return (list(rej.idx=rej.idx, init.count=pkev$global.count.FS+1))
}
| /R/mtp_quick11b.R | no_license | cran/elitism | R | false | false | 1,245 | r | # Quick-11b
# based on mtp.gtxr0
# 2019-03-01
# Jiangtao Gou
# Example: mtp.quick11b(pvec.sorted=c(0.01, 0.06,0.3, 0.7, 0.8), alpha=0.61,TRUE)
#
mtp.quick11b <- function (pvec.sorted, alpha, gc.is.included=FALSE) {
if (gc.is.included) {
pkev$global.count.FS <- 0
pkev$global.count.IS <- 0
}
#
pvec.length <- length(pvec.sorted)
#
if (pvec.length >= 5) {
ca <- alpha*(pvec.length/2/(pvec.length-1) + alpha/12*(1 + 3/(pvec.length-1) + 2/(pvec.length-2)^2 -6/(pvec.length-1)/(pvec.length-2)^2))
} else {
ca <- alpha*(pvec.length/2/(pvec.length-1))
}
#
if (pvec.sorted[pvec.length] <= alpha) {
#
rej.idx <- pvec.length
#
return (list(rej.idx=rej.idx, init.count=1))
} # End of if
#
if (pvec.sorted[1] > ca) {
#
rej.idx <- 0
#
return (list(rej.idx=rej.idx, init.count=1))
} # End of if
#
det.idx <- BinarySearch(a=pvec.sorted[1:(pvec.length-1)], value=ca, low=1, high=pvec.length-1, gc.is.included, secondStage=FALSE)
#
#print(det.idx)
#
rej.idx <- BinarySearch(a=pvec.sorted[1:det.idx], value=alpha/(pvec.length-det.idx+1), low=1, high=det.idx, gc.is.included, secondStage=TRUE)
#
return (list(rej.idx=rej.idx, init.count=pkev$global.count.FS+1))
}
|
# Stacked calibrations of soil compositional properties with Alpha-MIR spectra
# M. Walsh, October 2019
# Required packages -------------------------------------------------------
is.installed <- function(pkg) {
  # TRUE for each element of `pkg` that names an installed package.
  pkg %in% rownames(installed.packages())
}
packages <- c("devtools","caret","pls","glmnet","randomForest","gbm","Cubist","bartMachine","plyr","doParallel")
install <- which(!is.installed(packages) == TRUE)
if (length(install) > 0) {install.packages(packages[install] )}
suppressPackageStartupMessages ({
require(devtools)
require(caret)
require(pls)
require(glmnet)
require(randomForest)
require(gbm)
require(Cubist)
require(bartMachine)
require(plyr)
require(doParallel) })
# Data setup --------------------------------------------------------------
# Run this first: https://github.com/mgwalsh/Soils/blob/master/Alpha_recal_data.R
# ... or
# source_https <- function(url, ...) {
# # load package
# require(RCurl)
# # parse and evaluate .R script
# sapply(c(url, ...), function(u) {
# eval(parse(text = getURL(u, followlocation = TRUE, cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl"))), envir = .GlobalEnv)
# })
# }
# source_https("https://github.com/mgwalsh/Soils/blob/master/Alpha_recal_data.R")
rm(list=setdiff(ls(), c("nbal"))) ## scrubs extraneous objects in memory
# set randomization seed
seed <- 1385321
set.seed(seed)
# split data into calibration and validation sets
gsIndex <- createDataPartition(nbal$Fv, p = 8/10, list=F, times = 1)
cal <- nbal[ gsIndex,]
val <- nbal[-gsIndex,]
# calibration labels
labs <- c("C") ## insert other labels (N,P,K ...) here!
lcal <- as.vector(t(cal[labs]))
# spectral calibration features
fcal <- cal[,15:1728]
fpca <- cal[,1729:1748] ## PCA variables
# PLS <pls> --------------------------------------------------------------
# start doParallel to parallelize model fitting
mc <- makeCluster(detectCores())
registerDoParallel(mc)
# control setup
set.seed(seed)
tc <- trainControl(method="repeatedcv", number=10, repeats=3, allowParallel=T)
tg <- expand.grid(ncomp=seq(2,80, by=2)) ## model tuning steps
pl <- train(fcal, lcal,
method = "pls",
preProc = c("center", "scale"),
tuneGrid = tg,
trControl = tc)
print(pl)
stopCluster(mc)
fname <- paste("./Results/", labs, "_pl.rds", sep = "")
saveRDS(pl, fname)
# Elastic net <glmnet> ----------------------------------------------------
# start doParallel to parallelize model fitting
mc <- makeCluster(detectCores())
registerDoParallel(mc)
# control setup
set.seed(seed)
tc <- trainControl(method="cv", allowParallel=T)
tg <- expand.grid(alpha = 0:1, lambda = seq(0.0001, 1, length = 10))
# model training
en <- train(fcal, lcal,
method = "glmnet",
preProc = c("center", "scale"),
family = "gaussian",
tuneGrid = tg,
trControl = tc)
print(en)
stopCluster(mc)
fname <- paste("./Results/", labs, "_en.rds", sep = "")
saveRDS(en, fname)
# Random forest <randomForest> --------------------------------------------
# Random forest with spectral PCA covariates
# start doParallel to parallelize model fitting
mc <- makeCluster(detectCores())
registerDoParallel(mc)
# control setup
set.seed(seed)
tc <- trainControl(method="cv", allowParallel=T)
tg <- expand.grid(mtry = seq(2,20, by=2)) ## model tuning
# model training
rf <- train(fpca, lcal,
method = "rf",
ntree = 501,
tuneGrid = tg,
trControl = tc)
print(rf)
stopCluster(mc)
fname <- paste("./Results/", labs, "_rf.rds", sep = "")
saveRDS(rf, fname)
# Generalized boosting <gbm> ----------------------------------------------
# Generalized boosting with spectral PCA variables
# start doParallel to parallelize model fitting
mc <- makeCluster(detectCores())
registerDoParallel(mc)
# control setup
set.seed(seed)
tc <- trainControl(method = "cv", allowParallel = T)
tg <- expand.grid(interaction.depth = seq(2,20, by=2), shrinkage = seq(0.02,0.1, by=0.02), n.trees = 501,
n.minobsinnode = 25) ## model tuning steps
gb <- train(fpca, lcal,
method = "gbm",
trControl = tc,
tuneGrid = tg)
print(gb)
stopCluster(mc)
fname <- paste("./Results/", labs, "_gb.rds", sep = "")
saveRDS(gb, fname)
# Cubist <Cubist> ---------------------------------------------------------
# Cubist with spectral PCA variables
# start doParallel to parallelize model fitting
mc <- makeCluster(detectCores())
registerDoParallel(mc)
# control setup
set.seed(seed)
tc <- trainControl(method="repeatedcv", number=10, repeats=3, allowParallel = T)
# tg <- needs tuning
cu <- train(fpca, lcal,
method = "cubist",
trControl = tc)
print(cu)
stopCluster(mc)
fname <- paste("./Results/", labs, "_cu.rds", sep = "")
saveRDS(cu, fname)
# BART <bartMachine> ------------------------------------------------------
# bartMachine with spectral PCA variables
# start doParallel to parallelize model fitting
mc <- makeCluster(detectCores())
registerDoParallel(mc)
# control setup
set.seed(seed)
tc <- trainControl(method="cv", 5, allowParallel = T)
# tg <- needs tuning
bm <- train(fpca, lcal,
method = "bartMachine",
trControl = tc)
print(bm)
stopCluster(mc)
fname <- paste("./Results/", labs, "_bm.rds", sep = "")
saveRDS(bm, fname)
# Stacking <glm> ----------------------------------------------------------
# validation-set labels
lval <- as.vector(t(val[labs]))
# validation-set features
fval <- val[,15:1728]
fpca <- val[,1729:1748] ## PCA variables
# validation set predictions
pl.pred <- predict(pl, fval)
en.pred <- predict(en, fval)
rf.pred <- predict(rf, fpca)
gb.pred <- predict(gb, fpca)
cu.pred <- predict(cu, fpca)
bm.pred <- predict(bm, fpca)
stack <- as.data.frame(cbind(pl.pred,en.pred,rf.pred,gb.pred,cu.pred,bm.pred))
names(stack) <- c("pl","en","rf","gb","cu","bm")
# fit stack with cross-validation
# start doParallel to parallelize model fitting
mc <- makeCluster(detectCores())
registerDoParallel(mc)
# model setup
set.seed(seed)
tc <- trainControl(method="repeatedcv", number=10, repeats=3, allowParallel=T)
st <- train(stack, lval,
method = "glmStepAIC",
trControl = tc)
print(st)
summary(st)
stopCluster(mc)
fname <- paste("./Results/", labs, "_st.rds", sep = "")
saveRDS(st, fname)
# write validation-set predictions
st.pred <- predict(st, stack)
preds <- cbind(lval, stack, st.pred)
names(preds) <- c(labs,"pl","en","rf","gb","cu","bm","st")
fname <- paste("./Results/", labs, "_preds.csv", sep = "")
write.csv(preds, fname)
| /Alpha_ens_preds.R | no_license | mgwalsh/Soils | R | false | false | 6,636 | r | # Stacked calibrations of soil compositional properties with Alpha-MIR spectra
# M. Walsh, October 2019
# Required packages -------------------------------------------------------
is.installed <- function(pkg) {is.element(pkg, installed.packages()[,1] )}
packages <- c("devtools","caret","pls","glmnet","randomForest","gbm","Cubist","bartMachine","plyr","doParallel")
install <- which(!is.installed(packages) == TRUE)
if (length(install) > 0) {install.packages(packages[install] )}
suppressPackageStartupMessages ({
require(devtools)
require(caret)
require(pls)
require(glmnet)
require(randomForest)
require(gbm)
require(Cubist)
require(bartMachine)
require(plyr)
require(doParallel) })
# Data setup --------------------------------------------------------------
# Run this first: https://github.com/mgwalsh/Soils/blob/master/Alpha_recal_data.R
# ... or
# source_https <- function(url, ...) {
# # load package
# require(RCurl)
# # parse and evaluate .R script
# sapply(c(url, ...), function(u) {
# eval(parse(text = getURL(u, followlocation = TRUE, cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl"))), envir = .GlobalEnv)
# })
# }
# source_https("https://github.com/mgwalsh/Soils/blob/master/Alpha_recal_data.R")
rm(list=setdiff(ls(), c("nbal"))) ## scrubs extraneous objects in memory
# set randomization seed
seed <- 1385321
set.seed(seed)
# split data into calibration and validation sets
gsIndex <- createDataPartition(nbal$Fv, p = 8/10, list=F, times = 1)
cal <- nbal[ gsIndex,]
val <- nbal[-gsIndex,]
# calibration labels
labs <- c("C") ## insert other labels (N,P,K ...) here!
lcal <- as.vector(t(cal[labs]))
# spectral calibration features
fcal <- cal[,15:1728]
fpca <- cal[,1729:1748] ## PCA variables
# PLS <pls> --------------------------------------------------------------
# start doParallel to parallelize model fitting
mc <- makeCluster(detectCores())
registerDoParallel(mc)
# control setup
set.seed(seed)
tc <- trainControl(method="repeatedcv", number=10, repeats=3, allowParallel=T)
tg <- expand.grid(ncomp=seq(2,80, by=2)) ## model tuning steps
pl <- train(fcal, lcal,
method = "pls",
preProc = c("center", "scale"),
tuneGrid = tg,
trControl = tc)
print(pl)
stopCluster(mc)
fname <- paste("./Results/", labs, "_pl.rds", sep = "")
saveRDS(pl, fname)
# Elastic net <glmnet> ----------------------------------------------------
# start doParallel to parallelize model fitting
mc <- makeCluster(detectCores())
registerDoParallel(mc)
# control setup
set.seed(seed)
tc <- trainControl(method="cv", allowParallel=T)
tg <- expand.grid(alpha = 0:1, lambda = seq(0.0001, 1, length = 10))
# model training
en <- train(fcal, lcal,
method = "glmnet",
preProc = c("center", "scale"),
family = "gaussian",
tuneGrid = tg,
trControl = tc)
print(en)
stopCluster(mc)
fname <- paste("./Results/", labs, "_en.rds", sep = "")
saveRDS(en, fname)
# Random forest <randomForest> --------------------------------------------
# Random forest with spectral PCA covariates
# start doParallel to parallelize model fitting
mc <- makeCluster(detectCores())
registerDoParallel(mc)
# control setup
set.seed(seed)
tc <- trainControl(method="cv", allowParallel=T)
tg <- expand.grid(mtry = seq(2,20, by=2)) ## model tuning
# model training
rf <- train(fpca, lcal,
method = "rf",
ntree = 501,
tuneGrid = tg,
trControl = tc)
print(rf)
stopCluster(mc)
fname <- paste("./Results/", labs, "_rf.rds", sep = "")
saveRDS(rf, fname)
# Generalized boosting <gbm> ----------------------------------------------
# Generalized boosting with spectral PCA variables
# start doParallel to parallelize model fitting
mc <- makeCluster(detectCores())
registerDoParallel(mc)
# control setup
set.seed(seed)
tc <- trainControl(method = "cv", allowParallel = T)
tg <- expand.grid(interaction.depth = seq(2,20, by=2), shrinkage = seq(0.02,0.1, by=0.02), n.trees = 501,
n.minobsinnode = 25) ## model tuning steps
gb <- train(fpca, lcal,
method = "gbm",
trControl = tc,
tuneGrid = tg)
print(gb)
stopCluster(mc)
fname <- paste("./Results/", labs, "_gb.rds", sep = "")
saveRDS(gb, fname)
# Cubist <Cubist> ---------------------------------------------------------
# Cubist with spectral PCA variables
# start doParallel to parallelize model fitting
mc <- makeCluster(detectCores())
registerDoParallel(mc)
# control setup
set.seed(seed)
tc <- trainControl(method="repeatedcv", number=10, repeats=3, allowParallel = T)
# tg <- needs tuning
cu <- train(fpca, lcal,
method = "cubist",
trControl = tc)
print(cu)
stopCluster(mc)
fname <- paste("./Results/", labs, "_cu.rds", sep = "")
saveRDS(cu, fname)
# BART <bartMachine> ------------------------------------------------------
# bartMachine with spectral PCA variables
# start doParallel to parallelize model fitting
mc <- makeCluster(detectCores())
registerDoParallel(mc)
# control setup
set.seed(seed)
tc <- trainControl(method="cv", 5, allowParallel = T)
# tg <- needs tuning
bm <- train(fpca, lcal,
method = "bartMachine",
trControl = tc)
print(bm)
stopCluster(mc)
fname <- paste("./Results/", labs, "_bm.rds", sep = "")
saveRDS(bm, fname)
# Stacking <glm> ----------------------------------------------------------
# validation-set labels
lval <- as.vector(t(val[labs]))
# validation-set features
fval <- val[,15:1728]
fpca <- val[,1729:1748] ## PCA variables
# validation set predictions
pl.pred <- predict(pl, fval)
en.pred <- predict(en, fval)
rf.pred <- predict(rf, fpca)
gb.pred <- predict(gb, fpca)
cu.pred <- predict(cu, fpca)
bm.pred <- predict(bm, fpca)
stack <- as.data.frame(cbind(pl.pred,en.pred,rf.pred,gb.pred,cu.pred,bm.pred))
names(stack) <- c("pl","en","rf","gb","cu","bm")
# fit stack with cross-validation
# start doParallel to parallelize model fitting
mc <- makeCluster(detectCores())
registerDoParallel(mc)
# model setup
set.seed(seed)
tc <- trainControl(method="repeatedcv", number=10, repeats=3, allowParallel=T)
st <- train(stack, lval,
method = "glmStepAIC",
trControl = tc)
print(st)
summary(st)
stopCluster(mc)
fname <- paste("./Results/", labs, "_st.rds", sep = "")
saveRDS(st, fname)
# write validation-set predictions
st.pred <- predict(st, stack)
preds <- cbind(lval, stack, st.pred)
names(preds) <- c(labs,"pl","en","rf","gb","cu","bm","st")
fname <- paste("./Results/", labs, "_preds.csv", sep = "")
write.csv(preds, fname)
|
aggregate_rows <- function(df.in, agg.var){
df.in.data <- df.in[-dim(df.in)[2]]
df.in.data <- apply(df.in.data, 2, as.numeric)
df.in.agg <- aggregate(df.in.data, list(agg.var), FUN=mean)
rownames(df.in.agg) <- df.in.agg$Group.1
df.in.agg <- df.in.agg[-1] ### This is our final DF
return(df.in.agg)
} | /scripts/aggregate_rows.R | no_license | ruwaa-mohamed/TCGA-BRCA | R | false | false | 313 | r | aggregate_rows <- function(df.in, agg.var){
df.in.data <- df.in[-dim(df.in)[2]]
df.in.data <- apply(df.in.data, 2, as.numeric)
df.in.agg <- aggregate(df.in.data, list(agg.var), FUN=mean)
rownames(df.in.agg) <- df.in.agg$Group.1
df.in.agg <- df.in.agg[-1] ### This is our final DF
return(df.in.agg)
} |
\alias{gdkScreenGetRootWindow}
\name{gdkScreenGetRootWindow}
\title{gdkScreenGetRootWindow}
\description{Gets the root window of \code{screen}.}
\usage{gdkScreenGetRootWindow(object)}
\arguments{\item{\code{object}}{[\code{\link{GdkScreen}}] a \code{\link{GdkScreen}}}}
\details{ Since 2.2}
\value{[\code{\link{GdkWindow}}] the root window}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /man/gdkScreenGetRootWindow.Rd | no_license | cran/RGtk2.10 | R | false | false | 416 | rd | \alias{gdkScreenGetRootWindow}
\name{gdkScreenGetRootWindow}
\title{gdkScreenGetRootWindow}
\description{Gets the root window of \code{screen}.}
\usage{gdkScreenGetRootWindow(object)}
\arguments{\item{\code{object}}{[\code{\link{GdkScreen}}] a \code{\link{GdkScreen}}}}
\details{ Since 2.2}
\value{[\code{\link{GdkWindow}}] the root window}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
#' BollingerBandBacktest Class
#'
#' Used to Backtest a Bollinger Band strategy with bar data
#' NOTE: Some variables may be legacy.
#' @param symbol Symbol
#' @param start.date Start Date
#' @param end.date End Date
#' @param granularity Granularity (in minutes, example: 15 = using 15 minute bars)
#' @param period Number of Bars used to create the Bollinger Bands
#' @param entry.thres Number of standard deviations used for entry signal
#' @param exit.thres Number of standard deviations used for exit signal
#' @param timeout Number of bars that if exeeded since trade entry, will cause the Trade to be exited
#' @param emode Entry mode (FALSE: enter position on signal, TRUE: enter position on next candle if candle is on uptick)
#' @param shares Quantity to trade
#' @param TickData username
#' @param TickData password
#' @return .Object Copy of the created object
#' @export
Backtest <- setClass(
"BollingerBandBacktest",
slots = c(
NAME = "character",
period = "integer",
entry.thres = "numeric",
exit.thres = "numeric",
timeout = "integer",
entry.mode = "logical",
shares = "integer",
duration = "integer",
wait.uptick.short = "logical",
wait.uptick.long = "logical",
bar = "BarTimeSeries",
symbol = "character",
position = "Position",
username = "character",
password = "character"
),
prototype = list(
NAME = "BollingerBandBacktest"
),
contains = "Backtest"
)
setMethod("initialize", "BollingerBandBacktest", function(.Object, symbol, start.date, end.date, granularity,
period, entry.thres , exit.thres, timeout, emode, shares, username, password){
#Initialize the base backtest class
#All BTs need this line and these variables fed in
.Object <- callNextMethod(.Object, start.date, end.date, period,username,password)
#Set timeseries and positions in backtester
#A BTs require these lines, symbol can be a list of symbols eg. (BAC:US,MHD:US)
.Object <- AddTimeSeries(.Object, symbol, granularity)
.Object <- AddPositions(.Object, symbol)
#This specific strategy's variables
.Object@symbol <- symbol#We do BB test on one symbol
.Object@period <- period#Period we require for BB calc (this sets static queue size)
.Object@entry.thres <- entry.thres #BB param
.Object@exit.thres <- exit.thres #BB param
.Object@timeout <- timeout #BB param
.Object@entry.mode <- emode #BB param
.Object@shares <- shares #Number of shares to trade
#Next thing user should call is Start() (Source in Backtest.R)
#This will:
#0. Initialize the BT
#1. Check if timestamp is within market hour and that there is new data
#2. If so then advance timestamp of all symbols (synchronously) and call Evaluate
#3. Record the equity and go to 1.
#4. Call Finalize and plot equity curve
#Evaluate, which by default in the generic method does nothing.
#Evaluate must be defined here in this class.
return(.Object)
})
#' Initialize BollingerBandBacktest
#' @param Obj BollingerBandBacktest object
#' @return Copy the BollingerBandBacktest object
setMethod(f="Initialize",
signature="BollingerBandBacktest",
definition=function(Obj){
Obj@duration <- 0L
Obj@wait.uptick.short = FALSE
Obj@wait.uptick.long = FALSE
return (Obj)
})
#' Evaluate BollingerBandBacktest
#' This method gets called at every in-market tick and the user
#' is able to pull all desired data to make a choice
#' Which he does in terms of placing trades.
#'
#' @param Obj BollingerBandBacktest object
#' @return Copy the BollingerBandBacktest object
setMethod(f="Evaluate",
signature="BollingerBandBacktest",
definition=function(Obj){
Obj@bar <- GetTimeSeries(Obj,Obj@symbol)# BarTS for our symbol
Obj@position <- GetPosition(Obj,Obj@symbol)# Position object for our symbol
#Have we waited long enough to fill queues? Queue are of length given when we initialized parent Backtest object
if (!isFilled(Obj@bar)){
return (Obj)
}
if(Volume(Obj@bar) <= 0 || Close(Obj@bar) <= 0){#Ticks with zero volume or price? Skip.
print(paste("Skipped:",ToString(Obj@bar)))
return(Obj)
}
#We can get lists of historical ticks ordered from oldest to newest data
#They are of length given when we initialized parent Backtest object
#If we haven't advanced far enough to fill the queue then list contains NAs e.g. (na,na,1,2,3)
closes <- Closes(Obj@bar)
#Timestamps, Opens, Highs, Lows, and Volumes can be loaded similarly.
#timestamps <- Timestamps(Obj@bar) #Maybe want try a time weighting?
#We can also get the most recent Close, Open, High, Low, etc.
#'Close(bar)' returns the same things as 'tail(Closes(bar),n=1)'
#i.e. the last item in the queue is the newest
#typical <- Open(Obj@bar) #Information we should have in reality but performs worse than below
typical <- (High(Obj@bar) + Open(Obj@bar) + Low(Obj@bar))/3 #Typical price within bar for current data point
maind <- mean(c(closes[1:Obj@period-1],typical))#We choose to replace the last point with typical
varstdev <- sd(c(closes[1:Obj@period-1],typical))
#Calculate BBs
upper.band.entry <- maind + (Obj@entry.thres * varstdev)
upper.band.exit <- maind + (Obj@exit.thres * varstdev)
lower.band.entry <- maind - (Obj@entry.thres * varstdev)
lower.band.exit <- maind - (Obj@exit.thres * varstdev)
#We update the unrealized gains of our position before doing any action
Obj@position <- CalcPl(Obj@position, Close(Obj@bar))
cur.time.stamp <- Timestamp(Obj@bar)#This is the timestamp of the last tick
#Standard BB now.
#If we have any shares 'GetPositionSize(Obj@position) > 0' and price above exit, then place a Trade
if(GetPositionSize(Obj@position) > 0 && Close(Obj@bar) >= lower.band.exit){
#Note: At the moment placing a trade guarantees it goes through at the specified price which may not reflect real life
#Here we chose whatever the close is.
#but with a bid-ask spread it might be lower for sells (and higher for buys) of longs.
#Feel free to do implement that in your strategy :)
Obj@position <- Trade(Obj@position, cur.time.stamp, -1L * GetPositionSize(Obj@position), Close(Obj@bar))
Obj@duration <- 0L#If this parameter gets too large we exit our position, see below.
print(paste("(Exit long) ", cur.time.stamp," lower.band.exit: " , lower.band.exit , "price: " , Close(Obj@bar),
", pnl: ", GetRealized(Obj@position) + GetUnrealized(Obj@position)))
}
if(GetPositionSize(Obj@position) < 0 && Close(Obj@bar) <= upper.band.exit){
Obj@position <- Trade(Obj@position, cur.time.stamp, -1L * GetPositionSize(Obj@position), Close(Obj@bar))
Obj@duration <- 0L
print(paste("(Exit short) ", cur.time.stamp," upper.band.exit: " , upper.band.exit , "price: " , Close(Obj@bar),", pnl: ",
GetRealized(Obj@position) + GetUnrealized(Obj@position)))
}
#As a test to yourself, see if you can understand what wait.uptick.short does.
if(GetPositionSize(Obj@position) == 0 && Close(Obj@bar) >= upper.band.entry){
if(Obj@entry.mode && !Obj@wait.uptick.short){
Obj@wait.uptick.short <- TRUE
}else{
Obj@position <- Trade(Obj@position, cur.time.stamp, -1L * Obj@shares, Close(Obj@bar))
Obj@wait.uptick.short <- FALSE
print(paste("(Enter short) ", cur.time.stamp," upper.band.entry: " , upper.band.entry , "price: " , Close(Obj@bar),", pnl: ",
GetRealized(Obj@position) + GetUnrealized(Obj@position)))
}
}
if(GetPositionSize(Obj@position) == 0 && Close(Obj@bar) <= lower.band.entry){
if(Obj@entry.mode&& !Obj@wait.uptick.long){
Obj@wait.uptick.long <- TRUE
}else{
Obj@position <- Trade(Obj@position, cur.time.stamp, Obj@shares, Close(Obj@bar))
Obj@wait.uptick.long <- FALSE
print(paste("(Enter long) ", cur.time.stamp," lower.band.entry: " , lower.band.entry , "price: " , Close(Obj@bar),
", pnl: ", GetRealized(Obj@position) + GetUnrealized(Obj@position)))
}
}
if(GetPositionSize(Obj@position) != 0){
Obj@duration <- Obj@duration + 1L
}
#Exit after a number of ticks
if(Obj@duration > Obj@timeout){
Obj@position <- Trade(Obj@position, cur.time.stamp, -1L * GetPositionSize(Obj@position), Close(Obj@bar))
Obj@duration <- 0L
}
#Tell base backtester of your positions and timeseries if you've made adjustments to them (best practice to always do this)
Obj<-SetPosition(Obj,Obj@symbol,Obj@position)
Obj<-SetTimeSeries(Obj,Obj@symbol,Obj@bar)
return (Obj)
})
#' Finalize BollingerBandBacktest
#' @param Obj BollingerBandBacktest object
#' @return Copy the BollingerBandBacktest object
setMethod(f="Finalize",
signature="BollingerBandBacktest",
definition=function(Obj){
#Obj@position <- Trade(Obj@position, Timestamp(Obj@bar), -1L * GetPositionSize(Obj@position), Close(Obj@bar))
print( paste("date: ", Obj@equity.time[length(Obj@equity.time)]," PnL: ", Obj@equity.value[length(Obj@equity.value)]))
print( paste("pnl: ", GetRealized(Obj@position) + GetUnrealized(Obj@position)))
return (Obj)
})
#' Starts Market Session
#'
#' Simulates the start of the trading day
#' @param Obj BollingerBandBacktest object
#' @return Copy the BollingerBandBacktest object
#' @export
setGeneric(name="StartSession",def=function(Obj){standardGeneric("StartSession")})
setMethod(f="StartSession",
signature="BollingerBandBacktest",
definition=function(Obj)
{
print('Start of day!')
return(Obj)
})
#' Ends Market Session
#'
#' Simulates the end of the trading day
#' @param Obj BollingerBandBacktest object
#' @return Copy the BollingerBandBacktest object
#' @export
setGeneric(name="EndSession",def=function(Obj){standardGeneric("EndSession")})
setMethod(f="EndSession",
signature="BollingerBandBacktest",
definition=function(Obj)
{
print('End of day!')
return(Obj)
})
| /Release/com.tactico.backtest/R/04BollingerBandBacktest.R | no_license | tactico/RTickDataBacktest | R | false | false | 11,143 | r | #' BollingerBandBacktest Class
#'
#' Used to Backtest a Bollinger Band strategy with bar data
#' NOTE: Some variables may be legacy.
#' @param symbol Symbol
#' @param start.date Start Date
#' @param end.date End Date
#' @param granularity Granularity (in minutes, example: 15 = using 15 minute bars)
#' @param period Number of Bars used to create the Bollinger Bands
#' @param entry.thres Number of standard deviations used for entry signal
#' @param exit.thres Number of standard deviations used for exit signal
#' @param timeout Number of bars that if exeeded since trade entry, will cause the Trade to be exited
#' @param emode Entry mode (FALSE: enter position on signal, TRUE: enter position on next candle if candle is on uptick)
#' @param shares Quantity to trade
#' @param TickData username
#' @param TickData password
#' @return .Object Copy of the created object
#' @export
Backtest <- setClass(
"BollingerBandBacktest",
slots = c(
NAME = "character",
period = "integer",
entry.thres = "numeric",
exit.thres = "numeric",
timeout = "integer",
entry.mode = "logical",
shares = "integer",
duration = "integer",
wait.uptick.short = "logical",
wait.uptick.long = "logical",
bar = "BarTimeSeries",
symbol = "character",
position = "Position",
username = "character",
password = "character"
),
prototype = list(
NAME = "BollingerBandBacktest"
),
contains = "Backtest"
)
setMethod("initialize", "BollingerBandBacktest", function(.Object, symbol, start.date, end.date, granularity,
period, entry.thres , exit.thres, timeout, emode, shares, username, password){
#Initialize the base backtest class
#All BTs need this line and these variables fed in
.Object <- callNextMethod(.Object, start.date, end.date, period,username,password)
#Set timeseries and positions in backtester
#A BTs require these lines, symbol can be a list of symbols eg. (BAC:US,MHD:US)
.Object <- AddTimeSeries(.Object, symbol, granularity)
.Object <- AddPositions(.Object, symbol)
#This specific strategy's variables
.Object@symbol <- symbol#We do BB test on one symbol
.Object@period <- period#Period we require for BB calc (this sets static queue size)
.Object@entry.thres <- entry.thres #BB param
.Object@exit.thres <- exit.thres #BB param
.Object@timeout <- timeout #BB param
.Object@entry.mode <- emode #BB param
.Object@shares <- shares #Number of shares to trade
#Next thing user should call is Start() (Source in Backtest.R)
#This will:
#0. Initialize the BT
#1. Check if timestamp is within market hour and that there is new data
#2. If so then advance timestamp of all symbols (synchronously) and call Evaluate
#3. Record the equity and go to 1.
#4. Call Finalize and plot equity curve
#Evaluate, which by default in the generic method does nothing.
#Evaluate must be defined here in this class.
return(.Object)
})
#' Initialize BollingerBandBacktest
#' @param Obj BollingerBandBacktest object
#' @return Copy the BollingerBandBacktest object
setMethod(f="Initialize",
signature="BollingerBandBacktest",
definition=function(Obj){
Obj@duration <- 0L
Obj@wait.uptick.short = FALSE
Obj@wait.uptick.long = FALSE
return (Obj)
})
#' Evaluate BollingerBandBacktest
#' This method gets called at every in-market tick and the user
#' is able to pull all desired data to make a choice
#' Which he does in terms of placing trades.
#'
#' @param Obj BollingerBandBacktest object
#' @return Copy the BollingerBandBacktest object
setMethod(f="Evaluate",
signature="BollingerBandBacktest",
definition=function(Obj){
Obj@bar <- GetTimeSeries(Obj,Obj@symbol)# BarTS for our symbol
Obj@position <- GetPosition(Obj,Obj@symbol)# Position object for our symbol
#Have we waited long enough to fill queues? Queue are of length given when we initialized parent Backtest object
if (!isFilled(Obj@bar)){
return (Obj)
}
if(Volume(Obj@bar) <= 0 || Close(Obj@bar) <= 0){#Ticks with zero volume or price? Skip.
print(paste("Skipped:",ToString(Obj@bar)))
return(Obj)
}
#We can get lists of historical ticks ordered from oldest to newest data
#They are of length given when we initialized parent Backtest object
#If we haven't advanced far enough to fill the queue then list contains NAs e.g. (na,na,1,2,3)
closes <- Closes(Obj@bar)
#Timestamps, Opens, Highs, Lows, and Volumes can be loaded similarly.
#timestamps <- Timestamps(Obj@bar) #Maybe want try a time weighting?
#We can also get the most recent Close, Open, High, Low, etc.
#'Close(bar)' returns the same things as 'tail(Closes(bar),n=1)'
#i.e. the last item in the queue is the newest
#typical <- Open(Obj@bar) #Information we should have in reality but performs worse than below
typical <- (High(Obj@bar) + Open(Obj@bar) + Low(Obj@bar))/3 #Typical price within bar for current data point
maind <- mean(c(closes[1:Obj@period-1],typical))#We choose to replace the last point with typical
varstdev <- sd(c(closes[1:Obj@period-1],typical))
#Calculate BBs
upper.band.entry <- maind + (Obj@entry.thres * varstdev)
upper.band.exit <- maind + (Obj@exit.thres * varstdev)
lower.band.entry <- maind - (Obj@entry.thres * varstdev)
lower.band.exit <- maind - (Obj@exit.thres * varstdev)
#We update the unrealized gains of our position before doing any action
Obj@position <- CalcPl(Obj@position, Close(Obj@bar))
cur.time.stamp <- Timestamp(Obj@bar)#This is the timestamp of the last tick
#Standard BB now.
#If we have any shares 'GetPositionSize(Obj@position) > 0' and price above exit, then place a Trade
if(GetPositionSize(Obj@position) > 0 && Close(Obj@bar) >= lower.band.exit){
#Note: At the moment placing a trade guarantees it goes through at the specified price which may not reflect real life
#Here we chose whatever the close is.
#but with a bid-ask spread it might be lower for sells (and higher for buys) of longs.
#Feel free to do implement that in your strategy :)
Obj@position <- Trade(Obj@position, cur.time.stamp, -1L * GetPositionSize(Obj@position), Close(Obj@bar))
Obj@duration <- 0L#If this parameter gets too large we exit our position, see below.
print(paste("(Exit long) ", cur.time.stamp," lower.band.exit: " , lower.band.exit , "price: " , Close(Obj@bar),
", pnl: ", GetRealized(Obj@position) + GetUnrealized(Obj@position)))
}
if(GetPositionSize(Obj@position) < 0 && Close(Obj@bar) <= upper.band.exit){
Obj@position <- Trade(Obj@position, cur.time.stamp, -1L * GetPositionSize(Obj@position), Close(Obj@bar))
Obj@duration <- 0L
print(paste("(Exit short) ", cur.time.stamp," upper.band.exit: " , upper.band.exit , "price: " , Close(Obj@bar),", pnl: ",
GetRealized(Obj@position) + GetUnrealized(Obj@position)))
}
#As a test to yourself, see if you can understand what wait.uptick.short does.
if(GetPositionSize(Obj@position) == 0 && Close(Obj@bar) >= upper.band.entry){
if(Obj@entry.mode && !Obj@wait.uptick.short){
Obj@wait.uptick.short <- TRUE
}else{
Obj@position <- Trade(Obj@position, cur.time.stamp, -1L * Obj@shares, Close(Obj@bar))
Obj@wait.uptick.short <- FALSE
print(paste("(Enter short) ", cur.time.stamp," upper.band.entry: " , upper.band.entry , "price: " , Close(Obj@bar),", pnl: ",
GetRealized(Obj@position) + GetUnrealized(Obj@position)))
}
}
if(GetPositionSize(Obj@position) == 0 && Close(Obj@bar) <= lower.band.entry){
if(Obj@entry.mode&& !Obj@wait.uptick.long){
Obj@wait.uptick.long <- TRUE
}else{
Obj@position <- Trade(Obj@position, cur.time.stamp, Obj@shares, Close(Obj@bar))
Obj@wait.uptick.long <- FALSE
print(paste("(Enter long) ", cur.time.stamp," lower.band.entry: " , lower.band.entry , "price: " , Close(Obj@bar),
", pnl: ", GetRealized(Obj@position) + GetUnrealized(Obj@position)))
}
}
if(GetPositionSize(Obj@position) != 0){
Obj@duration <- Obj@duration + 1L
}
#Exit after a number of ticks
if(Obj@duration > Obj@timeout){
Obj@position <- Trade(Obj@position, cur.time.stamp, -1L * GetPositionSize(Obj@position), Close(Obj@bar))
Obj@duration <- 0L
}
#Tell base backtester of your positions and timeseries if you've made adjustments to them (best practice to always do this)
Obj<-SetPosition(Obj,Obj@symbol,Obj@position)
Obj<-SetTimeSeries(Obj,Obj@symbol,Obj@bar)
return (Obj)
})
#' Finalize BollingerBandBacktest
#' @param Obj BollingerBandBacktest object
#' @return Copy the BollingerBandBacktest object
setMethod(f="Finalize",
signature="BollingerBandBacktest",
definition=function(Obj){
#Obj@position <- Trade(Obj@position, Timestamp(Obj@bar), -1L * GetPositionSize(Obj@position), Close(Obj@bar))
print( paste("date: ", Obj@equity.time[length(Obj@equity.time)]," PnL: ", Obj@equity.value[length(Obj@equity.value)]))
print( paste("pnl: ", GetRealized(Obj@position) + GetUnrealized(Obj@position)))
return (Obj)
})
#' Starts Market Session
#'
#' Simulates the start of the trading day
#' @param Obj BollingerBandBacktest object
#' @return Copy the BollingerBandBacktest object
#' @export
setGeneric(name="StartSession",def=function(Obj){standardGeneric("StartSession")})
setMethod(f="StartSession",
signature="BollingerBandBacktest",
definition=function(Obj)
{
print('Start of day!')
return(Obj)
})
#' Ends Market Session
#'
#' Simulates the end of the trading day
#' @param Obj BollingerBandBacktest object
#' @return Copy the BollingerBandBacktest object
#' @export
setGeneric(name="EndSession",def=function(Obj){standardGeneric("EndSession")})
setMethod(f="EndSession",
signature="BollingerBandBacktest",
definition=function(Obj)
{
print('End of day!')
return(Obj)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_core.R
\name{plot_core}
\alias{plot_core}
\title{Visualize OTU Core}
\usage{
plot_core(x, prevalences = seq(0.1, 1, 0.1), detections = 20,
plot.type = "lineplot", colours = NULL, min.prevalence = NULL,
taxa.order = NULL, horizontal = FALSE)
}
\arguments{
\item{x}{A \code{\link{phyloseq}} object or a core matrix}
\item{prevalences}{a vector of prevalence percentages in [0,1]}
\item{detections}{a vector of intensities around the data range,
or a scalar indicating the number of intervals in the data range.}
\item{plot.type}{Plot type ('lineplot' or 'heatmap')}
\item{colours}{colours for the heatmap}
\item{min.prevalence}{If minimum prevalence is set, then filter out those
rows (taxa) and columns (detections) that never exceed this
prevalence. This helps to zoom in on the actual core region
of the heatmap. Only affects the plot.type='heatmap'.}
\item{taxa.order}{Ordering of the taxa: a vector of names.}
\item{horizontal}{Logical. Horizontal figure.}
}
\value{
A list with three elements: the ggplot object and the data.
The data has a different form for the lineplot and heatmap.
Finally, the applied parameters are returned.
}
\description{
Core visualization (2D).
}
\examples{
data(atlas1006)
pseq <- subset_samples(atlas1006, DNA_extraction_method == 'r')
p <- plot_core(transform(pseq, "compositional"),
prevalences=seq(0.1, 1, .1), detections=seq(0.01, 1, length = 10))
}
\references{
A Salonen et al. The adult intestinal core microbiota is determined by
analysis depth and health status. Clinical Microbiology and Infection
18(S4):16 20, 2012.
To cite the microbiome R package, see citation('microbiome')
}
\author{
Contact: Leo Lahti \email{microbiome-admin@googlegroups.com}
}
\keyword{utilities}
| /man/plot_core.Rd | no_license | jykzel/microbiome | R | false | true | 1,816 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_core.R
\name{plot_core}
\alias{plot_core}
\title{Visualize OTU Core}
\usage{
plot_core(x, prevalences = seq(0.1, 1, 0.1), detections = 20,
plot.type = "lineplot", colours = NULL, min.prevalence = NULL,
taxa.order = NULL, horizontal = FALSE)
}
\arguments{
\item{x}{A \code{\link{phyloseq}} object or a core matrix}
\item{prevalences}{a vector of prevalence percentages in [0,1]}
\item{detections}{a vector of intensities around the data range,
or a scalar indicating the number of intervals in the data range.}
\item{plot.type}{Plot type ('lineplot' or 'heatmap')}
\item{colours}{colours for the heatmap}
\item{min.prevalence}{If minimum prevalence is set, then filter out those
rows (taxa) and columns (detections) that never exceed this
prevalence. This helps to zoom in on the actual core region
of the heatmap. Only affects the plot.type='heatmap'.}
\item{taxa.order}{Ordering of the taxa: a vector of names.}
\item{horizontal}{Logical. Horizontal figure.}
}
\value{
A list with three elements: the ggplot object and the data.
The data has a different form for the lineplot and heatmap.
Finally, the applied parameters are returned.
}
\description{
Core visualization (2D).
}
\examples{
data(atlas1006)
pseq <- subset_samples(atlas1006, DNA_extraction_method == 'r')
p <- plot_core(transform(pseq, "compositional"),
prevalences=seq(0.1, 1, .1), detections=seq(0.01, 1, length = 10))
}
\references{
A Salonen et al. The adult intestinal core microbiota is determined by
analysis depth and health status. Clinical Microbiology and Infection
18(S4):16 20, 2012.
To cite the microbiome R package, see citation('microbiome')
}
\author{
Contact: Leo Lahti \email{microbiome-admin@googlegroups.com}
}
\keyword{utilities}
|
\name{odfTableCaption}
\alias{odfTableCaption}
\title{Provide a Caption for a Table}
\description{
Provide a numbered caption for a table. Captions are automatically numbered,
and by default using arabic numerals, but letters or roman numerals can also
be specified via the numformat argument.
}
\usage{
odfTableCaption(caption, numformat='1', numlettersync=FALSE,
formula='Table+1', label='Table')
}
\arguments{
\item{caption}{the text portion of the caption}
\item{numformat}{the format to use the table number}
\item{numlettersync}{specifies the style of numbering to use if numformat is 'A' or 'a'}
\item{formula}{the formula to use for computing this table number from the previous}
\item{label}{the label to use for the caption. Defaults to 'Table'.}
}
\details{
This function should be called immediately after a call to odfTable in a code chunk
in an odfWeave document.
Legal values for numformat are 'A', 'a', 'I', 'i', and '1'.
If numformat is 'A' or 'a', numlettersync specifies what style of numbering
to use after the first 26 tables. If numlettersync is true, the next 26
tables will be numbered 'AA', 'BB', ..., 'ZZ', 'AAA', 'BBB', etc.
If numlettersync is false, the subsequent tables will be numbered 'AA', 'AB',
..., 'AZ', 'BA', 'BB', ..., 'BZ', etc.
The default formula, which numbers tables consecutively, is usually desired,
but you could specify a formula of 'Table+10' to have your tables
numbered 1, 11, 21, etc.
}
\examples{
\dontrun{
odfTableCaption("This is a very boring table")
}
}
\keyword{utilities}
| /man/odfTableCaption.Rd | no_license | cran/odfWeave | R | false | false | 1,577 | rd | \name{odfTableCaption}
\alias{odfTableCaption}
\title{Provide a Caption for a Table}
\description{
Provide a numbered caption for a table. Captions are automatically numbered,
and by default using arabic numerals, but letters or roman numerals can also
be specified via the numformat argument.
}
\usage{
odfTableCaption(caption, numformat='1', numlettersync=FALSE,
formula='Table+1', label='Table')
}
\arguments{
\item{caption}{the text portion of the caption}
\item{numformat}{the format to use the table number}
\item{numlettersync}{specifies the style of numbering to use if numformat is 'A' or 'a'}
\item{formula}{the formula to use for computing this table number from the previous}
\item{label}{the label to use for the caption. Defaults to 'Table'.}
}
\details{
This function should be called immediately after a call to odfTable in a code chunk
in an odfWeave document.
Legal values for numformat are 'A', 'a', 'I', 'i', and '1'.
If numformat is 'A' or 'a', numlettersync specifies what style of numbering
to use after the first 26 tables. If numlettersync is true, the next 26
tables will be numbered 'AA', 'BB', ..., 'ZZ', 'AAA', 'BBB', etc.
If numlettersync is false, the subsequent tables will be numbered 'AA', 'AB',
..., 'AZ', 'BA', 'BB', ..., 'BZ', etc.
The default formula, which numbers tables consecutively, is usually desired,
but you could specify a formula of 'Table+10' to have your tables
numbered 1, 11, 21, etc.
}
\examples{
\dontrun{
odfTableCaption("This is a very boring table")
}
}
\keyword{utilities}
|
context('GrabDRCs')
# Fix the RNG so the sampled cell subset is reproducible across runs.
set.seed(100)
#prefix = 'tests/testthat'
prefix = '.'
# Pick 100 random cells and build a reduced copy of the bundled test object.
to = sample(colnames(cellexalObj@data), 100)
part = reduceTo( cellexalObj, what='col', to=to)
# Merge the DRCs of `part` (with prefix 'part') into the full object.
merger = GrabDRCs( cellexalObj, part, prefix='part')
# DRCs 1-3 belong to the full object: all 1654 cells, 3 dimensions each.
for ( i in 1:3){
	expect_equal( dim(merger@drc[[i]]), c(1654, 3))
}
# DRCs 4-6 were grabbed from `part`: only the 100 sampled cells.
for ( i in 4:6){
	expect_equal( dim(merger@drc[[i]]), c(100, 3))
}
# Reduce the merged object to a second, partially overlapping cell set.
to2 = colnames(cellexalObj@data)[ c(1:49,150:200) ]
part2 = reduceTo( merger, what='col', to=to2)
OK = colnames(part2@data)
# Full-object DRCs should contain exactly the retained cells, in data order.
for ( i in 1:3){
	expect_equal(rownames( part2@drc[[i]]), OK)
}
# Grabbed DRCs should shrink to the overlap between `to2` and `to`.
OK = intersect(to2, to)
for ( i in 4:6){
	expect_equal( rownames(part2@drc[[i]]), OK)
}
for ( i in 1:3){
	expect_equal( dim(part2@drc[[i]]), c(100, 3))
}
# With seed 100 the overlap between `to2` and `to` is 5 cells.
for ( i in 4:6){
	expect_equal( dim(part2@drc[[i]]), c(5, 3))
}
# Cells in `to2` but not in `to`: the grabbed DRCs must end up empty.
to3 = setdiff(to2, to)
expect_true( length(to3) == 95)
part3 = reduceTo( merger, what='col', to=to3)
for ( i in 1:3){
	expect_equal( dim(part3@drc[[i]]), c(95, 3))
}
for ( i in 4:6){
	expect_equal( dim(part3@drc[[i]]), c(0, 3))
}
# Write any session output to a throwaway directory.
merger@outpath = tempdir(check = FALSE)
context('GrabDRCs - Object usability - linearSelections')
merger = sessionPath(merger)
# Reset selection bookkeeping so the object is clean for follow-up checks.
merger@userGroups=data.frame()
merger@usedObj$groupSelectedFrom = list()
merger@usedObj$linearSelections = list()
#selectionF = file.path(prefix,'data','SelectionHSPC_time.txt')
#merger = getDifferentials(merger, selectionF, 'wilcox')
#check(merger)
| /tests/testthat/test-GrabDRCs.R | no_license | sonejilab/cellexalvrR | R | false | false | 1,482 | r | context('GrabDRCs')
# Fix the RNG so the sampled cell subset is reproducible across runs.
set.seed(100)
#prefix = 'tests/testthat'
prefix = '.'
# Pick 100 random cells and build a reduced copy of the bundled test object.
to = sample(colnames(cellexalObj@data), 100)
part = reduceTo( cellexalObj, what='col', to=to)
# Merge the DRCs of `part` (with prefix 'part') into the full object.
merger = GrabDRCs( cellexalObj, part, prefix='part')
# DRCs 1-3 belong to the full object: all 1654 cells, 3 dimensions each.
for ( i in 1:3){
	expect_equal( dim(merger@drc[[i]]), c(1654, 3))
}
# DRCs 4-6 were grabbed from `part`: only the 100 sampled cells.
for ( i in 4:6){
	expect_equal( dim(merger@drc[[i]]), c(100, 3))
}
# Reduce the merged object to a second, partially overlapping cell set.
to2 = colnames(cellexalObj@data)[ c(1:49,150:200) ]
part2 = reduceTo( merger, what='col', to=to2)
OK = colnames(part2@data)
# Full-object DRCs should contain exactly the retained cells, in data order.
for ( i in 1:3){
	expect_equal(rownames( part2@drc[[i]]), OK)
}
# Grabbed DRCs should shrink to the overlap between `to2` and `to`.
OK = intersect(to2, to)
for ( i in 4:6){
	expect_equal( rownames(part2@drc[[i]]), OK)
}
for ( i in 1:3){
	expect_equal( dim(part2@drc[[i]]), c(100, 3))
}
# With seed 100 the overlap between `to2` and `to` is 5 cells.
for ( i in 4:6){
	expect_equal( dim(part2@drc[[i]]), c(5, 3))
}
# Cells in `to2` but not in `to`: the grabbed DRCs must end up empty.
to3 = setdiff(to2, to)
expect_true( length(to3) == 95)
part3 = reduceTo( merger, what='col', to=to3)
for ( i in 1:3){
	expect_equal( dim(part3@drc[[i]]), c(95, 3))
}
for ( i in 4:6){
	expect_equal( dim(part3@drc[[i]]), c(0, 3))
}
# Write any session output to a throwaway directory.
merger@outpath = tempdir(check = FALSE)
context('GrabDRCs - Object usability - linearSelections')
merger = sessionPath(merger)
# Reset selection bookkeeping so the object is clean for follow-up checks.
merger@userGroups=data.frame()
merger@usedObj$groupSelectedFrom = list()
merger@usedObj$linearSelections = list()
#selectionF = file.path(prefix,'data','SelectionHSPC_time.txt')
#merger = getDifferentials(merger, selectionF, 'wilcox')
#check(merger)
#' Finds peaks in a time series by using a sliding window.
#'
#' A point is a peak when the slope changes sign from positive to negative
#' there and no value inside the surrounding window rises above it.
#'
#' @param x The vector of numbers for which to identify peaks
#' @param npoints The number of points to either side of the local maxima.
#' @return Indices of the peaks in \code{x} (NULL when none are found).
#'
#' Author: user 'stas g' on stackexchange at
#'https://stats.stackexchange.com/questions/22974/how-to-find-local-peaks-valleys-in-a-series-of-data
find_local_maxima <- function (x, npoints = 3){
  # Second difference of the slope sign: a negative value at position i
  # marks a candidate maximum at x[i + 1].
  curvature <- diff(sign(diff(x, na.pad = FALSE)))
  candidates <- which(curvature < 0)
  kept <- lapply(candidates, function(i) {
    # Clamp the comparison window to the bounds of the series.
    lo <- max(i - npoints + 1, 1)
    hi <- min(i + npoints + 1, length(x))
    # Keep the candidate only if nothing in the window exceeds it.
    if (all(x[c(lo:i, (i + 2):hi)] <= x[i + 1])) i + 1 else numeric(0)
  })
  unlist(kept)
}
#' Finds peaks in a time series by using a sliding window.
#' Convenience wrapper around find_local_maxima().
#'
#' @param x The vector of numbers for which to identify peaks
#' @param npoints The number of points to either side of the local maxima.
#' @return Indices of the peaks in \code{x} (NULL when none are found).
#'
#' Author: user 'stas g' on stackexchange at
#'https://stats.stackexchange.com/questions/22974/how-to-find-local-peaks-valleys-in-a-series-of-data
find_peaks <- function(x, npoints = 3) {
  # Delegate directly; find_local_maxima() implements the window logic.
  find_local_maxima(x, npoints = npoints)
}
#' Finds troughs in a time series by using a sliding window.
#'
#' @param x The vector of numbers for which to identify troughs
#' @param npoints The number of points to either side of the local minima.
#' @return Indices of the troughs in \code{x} (NULL when none are found).
#'
#' Author: user 'stas g' on stackexchange at
#'https://stats.stackexchange.com/questions/22974/how-to-find-local-peaks-valleys-in-a-series-of-data
find_local_minima <- function (x, npoints = 3){
  # Negate the input to find local minima with the local maxima function.
  # Fix: forward `npoints` -- it was previously dropped, so the caller's
  # window size was silently ignored and the default of 3 was always used.
  find_local_maxima(-x, npoints = npoints)
}
#' Finds troughs in a time series by using a sliding window.
#' Wrapper around find_local_minima().
#'
#' @param x The vector of numbers for which to identify troughs
#' @param npoints The number of points to either side of the local minima.
#'
#' Author: user 'stas g' on stackexchange at
#'https://stats.stackexchange.com/questions/22974/how-to-find-local-peaks-valleys-in-a-series-of-data
find_troughs <- function(x, npoints = 3) {
  find_local_minima(x, npoints)
}
| /R/peaks.R | permissive | kahaaga/tstools | R | false | false | 2,192 | r | #' Finds peaks in a time series by using a sliding window.
#' Finds peaks in a time series by using a sliding window.
#'
#' A point is a peak when the slope changes sign from positive to negative
#' there and no value inside the surrounding window rises above it.
#'
#' @param x The vector of numbers for which to identify peaks
#' @param npoints The number of points to either side of the local maxima.
#' @return Indices of the peaks in \code{x} (NULL when none are found).
#'
#' Author: user 'stas g' on stackexchange at
#'https://stats.stackexchange.com/questions/22974/how-to-find-local-peaks-valleys-in-a-series-of-data
find_local_maxima <- function (x, npoints = 3){
  # Second difference of the slope sign: a negative value at position i
  # marks a candidate maximum at x[i + 1].
  curvature <- diff(sign(diff(x, na.pad = FALSE)))
  candidates <- which(curvature < 0)
  kept <- lapply(candidates, function(i) {
    # Clamp the comparison window to the bounds of the series.
    lo <- max(i - npoints + 1, 1)
    hi <- min(i + npoints + 1, length(x))
    # Keep the candidate only if nothing in the window exceeds it.
    if (all(x[c(lo:i, (i + 2):hi)] <= x[i + 1])) i + 1 else numeric(0)
  })
  unlist(kept)
}
#' Finds peaks in a time series by using a sliding window.
#' Convenience wrapper around find_local_maxima().
#'
#' @param x The vector of numbers for which to identify peaks
#' @param npoints The number of points to either side of the local maxima.
#' @return Indices of the peaks in \code{x} (NULL when none are found).
#'
#' Author: user 'stas g' on stackexchange at
#'https://stats.stackexchange.com/questions/22974/how-to-find-local-peaks-valleys-in-a-series-of-data
find_peaks <- function(x, npoints = 3) {
  # Delegate directly; find_local_maxima() implements the window logic.
  find_local_maxima(x, npoints = npoints)
}
#' Finds troughs in a time series by using a sliding window.
#'
#' @param x The vector of numbers for which to identify troughs
#' @param npoints The number of points to either side of the local minima.
#' @return Indices of the troughs in \code{x} (NULL when none are found).
#'
#' Author: user 'stas g' on stackexchange at
#'https://stats.stackexchange.com/questions/22974/how-to-find-local-peaks-valleys-in-a-series-of-data
find_local_minima <- function (x, npoints = 3){
  # Negate the input to find local minima with the local maxima function.
  # Fix: forward `npoints` -- it was previously dropped, so the caller's
  # window size was silently ignored and the default of 3 was always used.
  find_local_maxima(-x, npoints = npoints)
}
#' Finds troughs in a time series by using a sliding window.
#' Wrapper around find_local_minima().
#'
#' @param x The vector of numbers for which to identify troughs
#' @param npoints The number of points to either side of the local minima.
#'
#' Author: user 'stas g' on stackexchange at
#'https://stats.stackexchange.com/questions/22974/how-to-find-local-peaks-valleys-in-a-series-of-data
find_troughs <- function(x, npoints = 3) {
  find_local_minima(x, npoints)
}
|
library(elec)
### Name: audit.totals.to.OS
### Title: Converting total vote counts to Over Statements
### Aliases: audit.totals.to.OS
### ** Examples
## Generate a fake race, a fake audit, and then compute overstatements
# Simulate an election (see ?make.sample for the parameter meanings).
Z = make.sample(0.08, 150, per.winner=0.4, R=2.01)
Z
# Perturb the reported results into a plausible "truth"; num.off/amount.off
# presumably control how many precincts differ and by how much -- see docs.
Zb = make.ok.truth(Z, num.off=150, amount.off=5)
Zb
# Audit: draw 10 rows at random from the perturbed vote matrix Zb$V.
aud = Zb$V[ sample(1:Zb$N, 10), ]
aud
# Convert the audited totals into overstatement counts.
audit.totals.to.OS(Z, aud )
| /data/genthat_extracted_code/elec/examples/audit.totals.to.OS.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 400 | r | library(elec)
### Name: audit.totals.to.OS
### Title: Converting total vote counts to Over Statements
### Aliases: audit.totals.to.OS
### ** Examples
## Generate a fake race, a fake audit, and then compute overstatements
# Simulate an election (see ?make.sample for the parameter meanings).
Z = make.sample(0.08, 150, per.winner=0.4, R=2.01)
Z
# Perturb the reported results into a plausible "truth"; num.off/amount.off
# presumably control how many precincts differ and by how much -- see docs.
Zb = make.ok.truth(Z, num.off=150, amount.off=5)
Zb
# Audit: draw 10 rows at random from the perturbed vote matrix Zb$V.
aud = Zb$V[ sample(1:Zb$N, 10), ]
aud
# Convert the audited totals into overstatement counts.
audit.totals.to.OS(Z, aud )
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/time_to_ago.R
\name{time_to_ago}
\alias{time_to_ago}
\title{Convert a time into how long "ago" it was}
\usage{
time_to_ago(dat, sort = TRUE, to_character = TRUE, add_ago = TRUE)
}
\description{
Convert a time into how long "ago" it was
}
| /tbsim_app/man/time_to_ago.Rd | no_license | saviclab/TBsim | R | false | true | 316 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/time_to_ago.R
\name{time_to_ago}
\alias{time_to_ago}
\title{Convert a time into how long "ago" it was}
\usage{
time_to_ago(dat, sort = TRUE, to_character = TRUE, add_ago = TRUE)
}
\description{
Convert a time into how long "ago" it was
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/R6Classes_H5T.R
\docType{class}
\name{H5T_LOGICAL-class}
\alias{H5T_LOGICAL}
\alias{H5T_LOGICAL-class}
\title{Class for HDF5 logical datatypes. This is an enum with the 3 values FALSE, TRUE and NA mapped on values 0, 1 and 2.
Is transparently mapped onto a logical variable}
\value{
Object of class \code{\link[=H5T_LOGICAL-class]{H5T_LOGICAL}}.
}
\description{
Inherits from class \code{\link[=H5T-class]{H5T}}.
}
\section{Methods}{
\describe{
\item{\code{new}}{
\strong{Usage:}
\preformatted{new(include_NA = TRUE, id = NULL)}
Create a logical datatype. This is
internally represented by an ENUM-type
\strong{Parameters:}
\describe{
\item{id}{Internal use only}
}}
}}
\author{
Holger Hoefling
}
\seealso{
H5Class_overview, \code{\link[=H5T-class]{H5T}}, \code{\link[=H5T_ENUM-class]{H5T_ENUM}}
}
| /man/H5T_LOGICAL-class.Rd | permissive | Novartis/hdf5r | R | false | true | 880 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/R6Classes_H5T.R
\docType{class}
\name{H5T_LOGICAL-class}
\alias{H5T_LOGICAL}
\alias{H5T_LOGICAL-class}
\title{Class for HDF5 logical datatypes. This is an enum with the 3 values FALSE, TRUE and NA mapped on values 0, 1 and 2.
Is transparently mapped onto a logical variable}
\value{
Object of class \code{\link[=H5T_LOGICAL-class]{H5T_LOGICAL}}.
}
\description{
Inherits from class \code{\link[=H5T-class]{H5T}}.
}
\section{Methods}{
\describe{
\item{\code{new}}{
\strong{Usage:}
\preformatted{new(include_NA = TRUE, id = NULL)}
Create a logical datatype. This is
internally represented by an ENUM-type
\strong{Parameters:}
\describe{
\item{id}{Internal use only}
}}
}}
\author{
Holger Hoefling
}
\seealso{
H5Class_overview, \code{\link[=H5T-class]{H5T}}, \code{\link[=H5T_ENUM-class]{H5T_ENUM}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotTree.R
\name{plotTree}
\alias{plotTree}
\title{Plot tree}
\usage{
plotTree(
tree,
timeline = FALSE,
geo_units = list("epochs", "periods"),
geo = timeline,
time_bars = timeline,
node_age_bars = FALSE,
age_bars_color = "blue",
age_bars_colored_by = NULL,
node_labels = NULL,
node_labels_color = "black",
node_labels_size = 3,
node_labels_offset = 0,
tip_labels = TRUE,
tip_labels_italics = FALSE,
tip_labels_remove_underscore = TRUE,
tip_labels_color = "black",
tip_labels_size = 3,
tip_labels_offset = 0,
node_pp = FALSE,
node_pp_shape = 16,
node_pp_color = "black",
node_pp_size = "variable",
branch_color = "black",
color_branch_by = NULL,
line_width = 1,
tree_layout = "rectangular",
...
)
}
\arguments{
\item{tree}{(list of lists of treedata objects; no default) Name of a list of
lists of treedata objects, such as produced by readTrees(). This object
should only contain only one summary tree from one trace file. If it contains
multiple trees or multiple traces, only the first will be used.}
\item{timeline}{(logical; FALSE) Plot time tree with labeled x-axis with
timescale in MYA.}
\item{geo_units}{(list; list("epochs", "periods")) Which geological units to
include in the geo timescale.}
\item{geo}{(logical; timeline) Add a geological timeline? Defaults to the
same as timeline.}
\item{time_bars}{(logical; timeline) Add vertical gray bars to indicate
geological timeline units if geo == TRUE or regular time intervals (in MYA)
if geo == FALSE.}
\item{node_age_bars}{(logical; FALSE) Plot time tree with node age bars?}
\item{age_bars_color}{(character; "blue") Color for node age bars. If
age_bars_colored_by specifies a variable (not NULL), you must provide two
colors, low and high values for a gradient. Colors must be either
valid R color names or valid hex codes.}
\item{age_bars_colored_by}{(character; NULL) Specify column to color node age
bars by, such as "posterior". If null, all node age bars plotted the same
color, specified by age_bars_color}
\item{node_labels}{(character; NULL) Plot text labels at nodes, specified by
the name of the corresponding column in the tidytree object. If NULL, no
text is plotted.}
\item{node_labels_color}{(character; "black") Color to plot node_labels,
either as a valid R color name or a valid hex code.}
\item{node_labels_size}{(numeric; 3) Size of node labels}
\item{node_labels_offset}{(numeric; 0) Horizontal offset of node labels from
nodes.}
\item{tip_labels}{(logical; TRUE) Plot tip labels?}
\item{tip_labels_italics}{(logical; FALSE) Plot tip labels in italics?}
\item{tip_labels_remove_underscore}{(logical; TRUE) Remove underscores in tip
labels?}
\item{tip_labels_color}{(character; "black") Color to plot tip labels, either
as a valid R color name or a valid hex code.}
\item{tip_labels_size}{(numeric; 3) Size of tip labels}
\item{tip_labels_offset}{(numeric; 1) Horizontal offset of tip labels from
tree.}
\item{node_pp}{(logical; FALSE) Plot posterior probabilities as symbols at
nodes? Specify symbol aesthetics with node_pp_shape, node_pp_color, and
node_pp_size.}
\item{node_pp_shape}{(integer; 1) Integer corresponding to point shape
(value between 0-25). See ggplot2 documentation for details:
\url{https://ggplot2.tidyverse.org/articles/ggplot2-specs.html#point}}
\item{node_pp_color}{(character; "black") Color for node_pp symbols, either
as valid R color name(s) or hex code(s). Can be a single character string
specifying a single color, or a vector of length two specifying two colors
to form a gradient. In this case, posterior probabilities will be indicated
by color along the specified gradient.}
\item{node_pp_size}{(numeric or character; 1) Size for node_pp symbols.
If numeric, the size will be fixed at the specified value. If a character,
it should specify "variable", indicating that size should be scaled by the
posterior value. Size regulates the area of the shape, following ggplot2
best practices:
\url{https://ggplot2.tidyverse.org/reference/scale_size.html})}
\item{branch_color}{(character; "black") A single character string
specifying the color (R color name or hex code) for all branches OR a
vector of length 2 specifying two colors for a gradient, used to color the
branches according to the variable specified in color_branch_by. If only 1
color is provided and you specify color_branch_by, default colors will be
chosen (low = "#005ac8", high = "#fa7850").}
\item{color_branch_by}{(character; NULL ) Optional name of one quantitative
variable in the treedata object to color branches, such as a rate.}
\item{line_width}{(numeric; 1) Change line width for branches}
\item{tree_layout}{(character; "rectangular") Tree shape layout, passed
to ggtree(). Options are 'rectangular', 'cladogram', 'slanted', 'ellipse',
'roundrect', 'fan', 'circular', 'inward_circular', 'radial', 'equal_angle',
'daylight', or 'ape'.}
\item{...}{(various) Additional arguments passed to ggtree::ggtree().}
}
\value{
returns a single plot object.
}
\description{
Plots a single tree, such as an MCC or MAP tree.
}
\details{
Plots a single tree, such as an MCC or MAP tree, with
optionally labeled posterior probabilities at nodes, a
timescale plotted on the x - axis, and 95\% CI for node ages.
}
\examples{
\donttest{
# Example of standard tree plot
file <- system.file("extdata",
"sub_models/primates_cytb_GTR_MAP.tre",
package="RevGadgets")
tree <- readTrees(paths = file)
# Reroot tree before plotting
tree_rooted <- rerootPhylo(tree = tree, outgroup = "Galeopterus_variegatus")
# Plot
p <- plotTree(tree = tree_rooted, node_labels = "posterior");p
# Plot unladderized tree
p <- plotTree(tree = tree_rooted,
node_labels = "posterior",
ladderize = FALSE);p
# We can add a scale bar:
p + ggtree::geom_treescale(x = -0.35, y = -1)
# Example of coloring branches by rate
file <- system.file("extdata",
"relaxed_ou/relaxed_OU_MAP.tre",
package="RevGadgets")
tree <- readTrees(paths = file)
p <- plotTree(tree = tree,
node_age_bars = FALSE,
node_pp = FALSE,
tip_labels_remove_underscore = TRUE,
tip_labels_italics = FALSE,
color_branch_by = "branch_thetas",
line_width = 1.7) +
ggplot2::theme(legend.position=c(.1, .9));p
}
}
| /man/plotTree.Rd | no_license | mikeryanmay/RevGadgetsActionTest | R | false | true | 6,473 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotTree.R
\name{plotTree}
\alias{plotTree}
\title{Plot tree}
\usage{
plotTree(
tree,
timeline = FALSE,
geo_units = list("epochs", "periods"),
geo = timeline,
time_bars = timeline,
node_age_bars = FALSE,
age_bars_color = "blue",
age_bars_colored_by = NULL,
node_labels = NULL,
node_labels_color = "black",
node_labels_size = 3,
node_labels_offset = 0,
tip_labels = TRUE,
tip_labels_italics = FALSE,
tip_labels_remove_underscore = TRUE,
tip_labels_color = "black",
tip_labels_size = 3,
tip_labels_offset = 0,
node_pp = FALSE,
node_pp_shape = 16,
node_pp_color = "black",
node_pp_size = "variable",
branch_color = "black",
color_branch_by = NULL,
line_width = 1,
tree_layout = "rectangular",
...
)
}
\arguments{
\item{tree}{(list of lists of treedata objects; no default) Name of a list of
lists of treedata objects, such as produced by readTrees(). This object
should only contain only one summary tree from one trace file. If it contains
multiple trees or multiple traces, only the first will be used.}
\item{timeline}{(logical; FALSE) Plot time tree with labeled x-axis with
timescale in MYA.}
\item{geo_units}{(list; list("epochs", "periods")) Which geological units to
include in the geo timescale.}
\item{geo}{(logical; timeline) Add a geological timeline? Defaults to the
same as timeline.}
\item{time_bars}{(logical; timeline) Add vertical gray bars to indicate
geological timeline units if geo == TRUE or regular time intervals (in MYA)
if geo == FALSE.}
\item{node_age_bars}{(logical; FALSE) Plot time tree with node age bars?}
\item{age_bars_color}{(character; "blue") Color for node age bars. If
age_bars_colored_by specifies a variable (not NULL), you must provide two
colors, low and high values for a gradient. Colors must be either
valid R color names or valid hex codes.}
\item{age_bars_colored_by}{(character; NULL) Specify column to color node age
bars by, such as "posterior". If null, all node age bars plotted the same
color, specified by age_bars_color}
\item{node_labels}{(character; NULL) Plot text labels at nodes, specified by
the name of the corresponding column in the tidytree object. If NULL, no
text is plotted.}
\item{node_labels_color}{(character; "black") Color to plot node_labels,
either as a valid R color name or a valid hex code.}
\item{node_labels_size}{(numeric; 3) Size of node labels}
\item{node_labels_offset}{(numeric; 0) Horizontal offset of node labels from
nodes.}
\item{tip_labels}{(logical; TRUE) Plot tip labels?}
\item{tip_labels_italics}{(logical; FALSE) Plot tip labels in italics?}
\item{tip_labels_remove_underscore}{(logical; TRUE) Remove underscores in tip
labels?}
\item{tip_labels_color}{(character; "black") Color to plot tip labels, either
as a valid R color name or a valid hex code.}
\item{tip_labels_size}{(numeric; 3) Size of tip labels}
\item{tip_labels_offset}{(numeric; 1) Horizontal offset of tip labels from
tree.}
\item{node_pp}{(logical; FALSE) Plot posterior probabilities as symbols at
nodes? Specify symbol aesthetics with node_pp_shape, node_pp_color, and
node_pp_size.}
\item{node_pp_shape}{(integer; 1) Integer corresponding to point shape
(value between 0-25). See ggplot2 documentation for details:
\url{https://ggplot2.tidyverse.org/articles/ggplot2-specs.html#point}}
\item{node_pp_color}{(character; "black") Color for node_pp symbols, either
as valid R color name(s) or hex code(s). Can be a single character string
specifying a single color, or a vector of length two specifying two colors
to form a gradient. In this case, posterior probabilities will be indicated
by color along the specified gradient.}
\item{node_pp_size}{(numeric or character; 1) Size for node_pp symbols.
If numeric, the size will be fixed at the specified value. If a character,
it should specify "variable", indicating that size should be scaled by the
posterior value. Size regulates the area of the shape, following ggplot2
best practices:
\url{https://ggplot2.tidyverse.org/reference/scale_size.html})}
\item{branch_color}{(character; "black") A single character string
specifying the color (R color name or hex code) for all branches OR a
vector of length 2 specifying two colors for a gradient, used to color the
branches according to the variable specified in color_branch_by. If only 1
color is provided and you specify color_branch_by, default colors will be
chosen (low = "#005ac8", high = "#fa7850").}
\item{color_branch_by}{(character; NULL ) Optional name of one quantitative
variable in the treedata object to color branches, such as a rate.}
\item{line_width}{(numeric; 1) Change line width for branches}
\item{tree_layout}{(character; "rectangular") Tree shape layout, passed
to ggtree(). Options are 'rectangular', 'cladogram', 'slanted', 'ellipse',
'roundrect', 'fan', 'circular', 'inward_circular', 'radial', 'equal_angle',
'daylight', or 'ape'.}
\item{...}{(various) Additional arguments passed to ggtree::ggtree().}
}
\value{
returns a single plot object.
}
\description{
Plots a single tree, such as an MCC or MAP tree.
}
\details{
Plots a single tree, such as an MCC or MAP tree, with
optionally labeled posterior probabilities at nodes, a
timescale plotted on the x - axis, and 95\% CI for node ages.
}
\examples{
\donttest{
# Example of standard tree plot
file <- system.file("extdata",
"sub_models/primates_cytb_GTR_MAP.tre",
package="RevGadgets")
tree <- readTrees(paths = file)
# Reroot tree before plotting
tree_rooted <- rerootPhylo(tree = tree, outgroup = "Galeopterus_variegatus")
# Plot
p <- plotTree(tree = tree_rooted, node_labels = "posterior");p
# Plot unladderized tree
p <- plotTree(tree = tree_rooted,
node_labels = "posterior",
ladderize = FALSE);p
# We can add a scale bar:
p + ggtree::geom_treescale(x = -0.35, y = -1)
# Example of coloring branches by rate
file <- system.file("extdata",
"relaxed_ou/relaxed_OU_MAP.tre",
package="RevGadgets")
tree <- readTrees(paths = file)
p <- plotTree(tree = tree,
node_age_bars = FALSE,
node_pp = FALSE,
tip_labels_remove_underscore = TRUE,
tip_labels_italics = FALSE,
color_branch_by = "branch_thetas",
line_width = 1.7) +
ggplot2::theme(legend.position=c(.1, .9));p
}
}
|
#/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
#R script for running mQTL analysis for EWAS using GEM/matrixEQTL
#
#inputs: matrix of methylation beta values (EPIC), matrix of SNP genotypes (GSA),
# phenotype data
#
# authors. Ayden Saffari <ayden.saffari@lshtm.ac.uk> (MRC ING, LSHTM)
# Ashutosh Singh Tomar (CSIR, CCMB)
# Prachand Issarapu (CSIR, CCMB)
#
# NOT FOR DISTRIBUTION/ PUBLIC CONSUMPTION
#/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
library("GEM")
library("plyr")
library("dplyr")
library("reshape2")
library("ggplot2")
library("gamplotlib")
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#initialization
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
##########
#load data
##########
# EWAS DMP statistics and the CpGs making up the differentially
# methylated regions (DMRs) of interest.
res_DMPs_pcs <- readRDS("../R_objects/res_DMPs_pcs.rds")
DMRs_CpGs <- readRDS("../R_objects/EMPH_GMB_DMRs_CpGs.rds")
# Normalised EPIC methylation beta matrix (probes x samples).
norm_beta_fil <- readRDS("../R_objects/norm_beta_fil.rds")
#GMB_CpGs <- norm_beta_fil[which(rownames(norm_beta_fil) %in%
#    unique(c(res_DMPs_pcs$Name[
#    res_DMPs_pcs$adj.P.Val < 0.1],DMRs_CpGs))),]
# Restrict the beta matrix to the DMR CpGs only.
GMB_CpGs <- norm_beta_fil[which(rownames(norm_beta_fil) %in%
    DMRs_CpGs),]
# Methylation principal components used as covariates later on.
pcs <- readRDS("../R_objects/pcs.rds")
# NOTE(review): `pdata` has not been created yet at this point -- it is
# only loaded via readRDS() further down, which also overwrites this
# cbind(). On a fresh session this line errors; confirm the load order.
pdata <- cbind(pdata,pcs[,1:15])
# GSA genotyping sample sheet (maps array positions to sample IDs).
GSA_sample_sheet <- read.csv("/data/GSA/emphasis/EMPHASIS_GMB_GSA_Samplesheet.csv")
# PLINK additive-coded (0/1/2) genotype matrix, transposed .traw export.
GMB_SNPs <- read.table("../data/GSA_GMB_PLINKfiltered_a_hwe_geno_maf_recodeA_t.traw",
    sep="\t", head=T)
# Drop the .traw annotation columns, keeping SNP ID plus sample genotypes.
GMB_SNPs <- GMB_SNPs[,-c(1,3,4,5,6)]
#############################
#produce genotype summary stat table
#############################
# Per-SNP genotype counts: tabulate the 0/1/2/NA dosage codes per row.
summary_table <- apply(GMB_SNPs[,-1],1,function(x){summary(as.factor(x))})
summary_table <- ldply(summary_table,function(s){t(data.frame(unlist(s)))})
# Keep SNPs with at most 30 missing genotype calls.
summary_table_fil <- summary_table[!(summary_table$`NA's` > 30),]
# NOTE(review): this assigns the full rowname vector of GMB_SNPs to the
# FILTERED table (length mismatch if any rows were dropped above), and
# rownames(GMB_SNPs) are only set to SNP IDs further down -- verify.
summary_table_fil$SNP <- rownames(GMB_SNPs)
summary_table_fil <- summary_table_fil[,match(c("SNP","0", "1","2","NA's"),
    colnames(summary_table_fil))]
##########################
#reshape GSA data for GEM
###########################
# Use the SNP ID column as rownames, then drop it so only genotypes remain.
rownames(GMB_SNPs) <- GMB_SNPs[,1]
GMB_SNPs <- GMB_SNPs[,-1]
# NOTE(review): this re-loads `pdata`, discarding the earlier cbind() of
# the PCs -- presumably pdata.rds already carries the PC columns used
# below; verify.
pdata <- readRDS("../R_objects/pdata.rds")
#edit sample names to match those in sample sheet/beta matrix
# Rebuild sample names: keep the part after the "_2" separator, re-prefix
# with "2". NOTE(review): "[0:9]" is a three-character class (0, :, 9),
# not the digit range "[0-9]" -- confirm the split works as intended.
colnames(GMB_SNPs) <- paste0("2",sapply(colnames(GMB_SNPs),
    function(x){strsplit(x,"X?[0:9]*_2")[[1]][2]}))
#match sample order in sample sheet to GSA data
GSA_sample_sheet <- GSA_sample_sheet[match(colnames(GMB_SNPs),
    GSA_sample_sheet$Array.info),]
# Interactive sanity check: sheet rows now mirror the genotype columns.
all(GSA_sample_sheet$Array.info == colnames(GMB_SNPs))
#replace arrays with sample IDs for GSA
colnames(GMB_SNPs) <- GSA_sample_sheet$Sample.ID[
    GSA_sample_sheet$Array.info == colnames(GMB_SNPs)]
#change genotype coding to 1,2,3
#REMOVE -don't need to do this, 0,1,2 coding is equivalent
#GMB_SNPs_f <- t(as.data.frame(apply(GMB_SNPs,1,factor)))
#GMB_SNPs_f <- revalue(GMB_SNPs_f, c("0"="1", "1"="2", "2"="3"))
###########################
#reshape EPIC data for GEM
############################
#replace arrays with sample IDs for EPIC array
# Order the beta-matrix columns by the phenotype table, then relabel the
# columns with subject IDs.
GMB_CpGs <- GMB_CpGs[,match(rownames(pdata),colnames(GMB_CpGs))]
all(colnames(GMB_CpGs) == rownames(pdata))
colnames(GMB_CpGs) <- pdata$Subject_ID
GMB_CpGs <- as.data.frame(GMB_CpGs)
#dont need to do - add ID column and move to 1
#GMB_CpGs$ID <- rownames(GMB_CpGs)
#GMB_CpGs <- GMB_CpGs[,c(ncol(GMB_CpGs),1:(ncol(GMB_CpGs) - 1))]
# Keep only samples present on both platforms, in identical column order.
GMB_SNPs <- GMB_SNPs[,colnames(GMB_SNPs) %in% colnames(GMB_CpGs)]
GMB_CpGs <- GMB_CpGs[,match(colnames(GMB_SNPs),colnames(GMB_CpGs))]
#rownames(GMB_CpGs) <- GMB_CpGs$ID
#GMB_CpGs <- GMB_CpGs[,-1]
# Interactive sanity checks on alignment and dimensions.
all(colnames(GMB_SNPs) == colnames(GMB_CpGs))
dim(GMB_CpGs)
dim(GMB_SNPs)
#match pdata sample order to GSA and EPIC
pdata <- pdata[match(as.factor(colnames(GMB_SNPs)),pdata$Subject_ID),]
all(colnames(GMB_SNPs) == pdata$Subject_ID)
all(colnames(GMB_CpGs) == pdata$Subject_ID)
dim(pdata)
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# run GEM mQTL analysis
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#create env and cov objects
# env = GEM's "environment" variable: intervention group (MasterGroupNo),
# reshaped to a 1 x samples matrix.
env <- pdata[,colnames(pdata) %in% c("Subject_ID","MasterGroupNo")]
rownames(env) <- env$Subject_ID
env <- t(env[,colnames(env) == "MasterGroupNo",drop=F])
rownames(env)
# cov = covariates: methylation PCs, age, season (MooreSoC) and -- for
# the combined GxE file -- the group variable itself.
# NOTE(review): PC14 is absent (PC13, PC15) -- confirm this is deliberate.
cov <- pdata[,colnames(pdata) %in% c("Subject_ID","PC1","PC2","PC3","PC4",
    "PC5","PC6","PC7","PC8","PC9","PC10", "PC11","PC12","PC13","PC15","Age",
    "MooreSoC","MasterGroupNo"),drop=F]
# Recode season to numeric codes (dry=1, rainy=2), with "1" as reference.
cov$MooreSoC <- recode(cov$MooreSoC,dry="1",rainy="2")
cov$MooreSoC <- relevel(cov$MooreSoC,"1")
# Transpose to covariates-in-rows / samples-in-columns via melt + dcast.
cov <- dcast(melt(cov, id.var = "Subject_ID"), ... ~ Subject_ID )
rownames(cov) <- cov$variable
cov <- cov[,-1]
cov <- cov[,match(pdata$Subject_ID,colnames(cov))]
dim(cov)
#create combined cov file
# Reorder so the group row sits last (GxE covariate file -- see GEM docs).
cov_env <- rbind(cov[rownames(cov) != "MasterGroupNo",],
    cov[rownames(cov) == "MasterGroupNo",])
#remove mastergroup from cov
cov <- cov[rownames(cov) != "MasterGroupNo",]
#convert to numeric
cov_num <- sapply(cov[,], as.numeric)
rownames(cov_num) <- rownames(cov)
cov_env_num <- sapply(cov_env[,], as.numeric)
rownames(cov_env_num) <- rownames(cov_env)
env_num <- t(as.data.frame(sapply(env[,,drop=F], as.numeric)))
rownames(env_num) <- rownames(env)
colnames(env_num) <- colnames(env)
# Interactive sanity checks: dimensions and row labels of the matrices.
dim(cov_num)
rownames(cov_num)
dim(env_num)
rownames(env_num)
dim(cov_env_num)
rownames(cov_env_num)
# Interactive sanity checks: sample columns line up across all inputs.
all.equal(colnames(cov_num),as.character(pdata$Subject_ID))
all.equal(colnames(cov_num),colnames(GMB_SNPs))
all.equal(colnames(cov_num),colnames(GMB_CpGs))
all.equal(colnames(cov_num),colnames(env_num))
all.equal(colnames(env_num),as.character(pdata$Subject_ID))
all.equal(colnames(env_num),colnames(GMB_SNPs))
all.equal(colnames(env_num),colnames(GMB_CpGs))
all.equal(colnames(cov_env_num),colnames(cov_num))
#save as text files
# GEM reads its inputs from tab-separated text files on disk.
write.table(GMB_SNPs,"../data/GMB_SNPs.txt",sep="\t")
write.table(GMB_CpGs,"../data/GMB_CpGs.txt",sep="\t")
write.table(env_num,"../data/GMB_env.txt",sep="\t")
write.table(cov_num,"../data/GMB_cov.txt",sep="\t")
write.table(cov_env_num,"../data/GMB_gxe.txt",sep="\t")
#run GEM models
# Emodel: methylation vs the environment variable, adjusted for cov
# (p-value threshold 1; writes results plus a QQ plot).
GEM_Emodel("../data/GMB_env.txt", "../data/GMB_cov.txt", "../data/GMB_CpGs.txt",
    1,"../results/GEM/Result_Emodel.txt", "../results/GEM/Emodel_QQ.jpg",
    savePlot=T)
# Gmodel: methylation vs genotype (mQTLs), p-value cutoff 1e-04.
GEM_Gmodel("../data/GMB_SNPs.txt","../data/GMB_cov.txt","../data/GMB_CpGs.txt",
    1e-04, "../results/GEM/Result_Gmodel.txt")
# GxEmodel: genotype x environment interaction on methylation.
GEM_GxEmodel("../data/GMB_SNPs.txt", "../data/GMB_gxe.txt", "../data/GMB_CpGs.txt",
    1, "../results/GEM/Result_GEmodel.txt", topKplot = 1, savePlot=T)
#Run regression with additive genotype and interaction
# Assemble a per-sample frame with the top SNP and the CpGs of interest.
GxE_reg_top <- merge(t(GMB_SNPs[rownames(GMB_SNPs) %in%
    c("rs1423249"),]),
    t(GMB_CpGs[rownames(GMB_CpGs) %in% c("cg06837426","cg20673840","cg20451680",
    "cg14972155","cg20059697",
    "cg13106512","cg21180956"),]),by="row.names")
rownames(GxE_reg_top) <- GxE_reg_top$Row.names
GxE_reg_top <- GxE_reg_top[,-1]
GxE_reg_top <- GxE_reg_top[match(pdata$Subject_ID,
    rownames(GxE_reg_top)),]
# Attach phenotypes/covariates needed by the regression below.
GxE_reg_top <- merge(GxE_reg_top,pdata,by.x='row.names',by.y='Subject_ID')
#alternative with genotype as factor
#not run
# Sanity-check model: one CpG against covariates, group and additive genotype.
summary(lm(cg14972155 ~ PC1 + PC2 + PC3 + PC4 + PC5 + PC6 +
    PC7 + PC8 + PC9 + PC10 +
    PC11 + PC12 + PC13 + PC15 + Age + MooreSoC +
    MasterGroupNo +
    rs1423249,GxE_reg_top))
#meth x inter x geno plot
# Subset to the plotting variables and tidy labels for the figures below.
GxE_reg_top_fil <- GxE_reg_top[,colnames(GxE_reg_top) %in%
    c("Row.names","rs10239100","rs1423249","rs278368","cg20673840",
    "cg06837426","cg14972155","MasterGroupNo")]
GxE_reg_top_fil <- na.omit(GxE_reg_top_fil)
# Rename the group column and give it readable levels, control as reference.
colnames(GxE_reg_top_fil)[which(colnames(GxE_reg_top_fil) == "MasterGroupNo")] <- "intervention"
GxE_reg_top_fil$intervention <- revalue(GxE_reg_top_fil$intervention, c("1"="intervention","2"="control"))
GxE_reg_top_fil$intervention <- relevel(GxE_reg_top_fil$intervention,"control")
# Show genotypes as allele labels rather than 0/1/2 dosages.
GxE_reg_top_fil$rs1423249 <- as.factor(GxE_reg_top_fil$rs1423249)
GxE_reg_top_fil$rs1423249 <- revalue(GxE_reg_top_fil$rs1423249, c("0"="GG","1"="GA","2"="AA"))
# NOTE(review): stat_summary(fun.y=...) is deprecated in ggplot2 >= 3.3
# (use `fun =`); confirm against the pinned ggplot2 version.
#cg06837426 ~ rs1423249
# Scatter of methylation by genotype, with a line through the group means.
ggplot(GxE_reg_top_fil, aes(rs1423249,cg06837426),color=rs1423249) +
    geom_point(aes(color = rs1423249)) +
    scale_color_manual(values=c("#C04B8E","#C04B8E","#C04B8E")) +
    stat_summary(aes(y = cg06837426,group=rs1423249),fun.y=mean,colour="#252997",
    geom="line",group=1) +
    theme_gamplotlib() + theme(strip.background = element_blank(),
    panel.grid.major.x = element_blank(),legend.position="none",
    aspect.ratio=1) +
    scale_x_discrete() +
    ylab("methylation Beta value") + xlab("genotype") +
    ggtitle("cg06837426 ~ rs1423249")
ggsave("../results/GMB_mQTL_cg06837426_rs1423249_G_scatter.pdf",width=(4),
    height=3.5, units="in", dpi=300)
#cg06837426 ~ rs1423249:intervention
# Same CpG, split by genotype panels, comparing control vs intervention.
ggplot(GxE_reg_top_fil, aes(intervention,cg06837426),color=intervention) +
    geom_point(aes(color = intervention)) +
    scale_color_manual(values=c("#46617A","#00B8A2")) +
    stat_summary(aes(y = cg06837426,group=intervention), fun.y=mean,
    colour="#252997", geom="line",group=1) + facet_wrap( ~ rs1423249) +
    theme_gamplotlib() + theme(strip.background = element_blank(),
    panel.grid.major.x = element_blank(),legend.position="none",
    aspect.ratio=1) +
    scale_x_discrete(labels=c("control","inter.")) +
    ylab("methylation Beta value") + xlab("group") +
    ggtitle("cg06837426 ~ rs1423249:intervention")
ggsave("../results/GMB_mQTL_cg06837426_rs1423249_GxE_scatter.pdf",width=(4),
    height=3.5, units="in", dpi=300)
#cg20673840 ~ rs1423249
# Second CpG: methylation by genotype.
ggplot(GxE_reg_top_fil, aes(rs1423249,cg20673840),color=rs1423249) +
    geom_point(aes(color = rs1423249)) + scale_color_manual(values=c("#C04B8E",
    "#C04B8E","#C04B8E")) + stat_summary(aes(y = cg20673840,group=rs1423249),
    fun.y=mean,colour="#252997", geom="line",group=1) +
    theme_gamplotlib() + theme(strip.background = element_blank(),
    panel.grid.major.x = element_blank(),legend.position="none",
    aspect.ratio=1) +
    scale_x_discrete() +
    ylab("methylation Beta value") + xlab("genotype") +
    ggtitle("cg20673840 ~ rs1423249")
ggsave("../results/GMB_mQTL_cg20673840_rs1423249_G_scatter.pdf",width=(4),
    height=3.5, units="in", dpi=300)
#cg20673840 ~ rs1423249:intervention
#methylation by study arm, faceted by genotype, with per-arm mean line
ggplot(GxE_reg_top_fil, aes(intervention,cg20673840),color=intervention) +
geom_point(aes(color = intervention)) + scale_color_manual(values=c("#46617A",
"#00B8A2")) + stat_summary(aes(y = cg20673840,group=intervention), fun.y=mean,
colour="#252997", geom="line",group=1) + facet_wrap( ~ rs1423249) +
theme_gamplotlib() + theme(strip.background = element_blank(),
panel.grid.major.x = element_blank(),legend.position="none",
aspect.ratio=1) +
scale_x_discrete(labels=c("control","inter.")) +
ylab("methylation Beta value") + xlab("group") +
ggtitle("cg20673840 ~ rs1423249:intervention")
#fix: write into ../results/ like every other ggsave() in this section -- the
#original path omitted the directory and dropped the PDF in the working dir
ggsave("../results/GMB_mQTL_cg20673840_rs1423249_GxE_scatter.pdf",width=(4),
height=3.5, units="in", dpi=300)
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# run additional analyses
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#Model comparison per candidate CpG: fit G-only, E-only, G+E and GxE models
#and compare coefficients, adjusted R^2 and AIC across them.
#G only mQTLs
######
#factor for intervention vs control
GxE_reg_top$intervention <- relevel(GxE_reg_top$MasterGroupNo,"2")
#rs1423249
mQTL_cpgs <- c("cg06837426","cg20673840","cg20451680",
"cg14972155","cg20059697",
"cg13106512","cg21180956")
#G
###
print("G")
#substitute() splices each CpG name into the formula as the response variable
res_mQTL_cpgs <- lapply(mQTL_cpgs, function(x) {
lm(substitute(cpg ~ PC1 + PC2 + PC3 + PC4 + PC5 + PC6 + PC7 + PC8 + PC9 + PC10 +
PC11 + PC12 + PC13 + PC15 + Age + MooreSoC + rs1423249,
list(cpg = as.name(x))), data = GxE_reg_top)})
names(res_mQTL_cpgs) <- mQTL_cpgs
#coeffs and adj R sqrd
res_mQTL_cpgs_summ <- lapply(res_mQTL_cpgs,summary)
#row 18 = the term of interest (intercept + 16 covariate rows precede it);
#NOTE(review): positional indexing breaks if any coefficient is NA-dropped --
#consider indexing by coefficient name instead
lapply(res_mQTL_cpgs_summ,function(x){x$coefficients[c(18),]})
lapply(res_mQTL_cpgs_summ,function(x){x$adj.r.squared})
#AIC
lapply(res_mQTL_cpgs,AIC)
#E
###
print("E")
res_mQTL_cpgs <- lapply(mQTL_cpgs, function(x) {
lm(substitute(cpg ~ PC1 + PC2 + PC3 + PC4 + PC5 + PC6 + PC7 + PC8 + PC9 + PC10 +
PC11 + PC12 + PC13 + PC15 + Age + MooreSoC + intervention,
list(cpg = as.name(x))), data = GxE_reg_top)})
names(res_mQTL_cpgs) <- mQTL_cpgs
#coeffs and adj R sqrd
res_mQTL_cpgs_summ <- lapply(res_mQTL_cpgs,summary)
lapply(res_mQTL_cpgs_summ,function(x){x$coefficients[c(18),]})
lapply(res_mQTL_cpgs_summ,function(x){x$adj.r.squared})
#AIC
lapply(res_mQTL_cpgs,AIC)
#G+E
####
print("G + E")
res_mQTL_cpgs <- lapply(mQTL_cpgs, function(x) {
lm(substitute(cpg ~ PC1 + PC2 + PC3 + PC4 + PC5 + PC6 + PC7 + PC8 + PC9 + PC10 +
PC11 + PC12 + PC13 + PC15 + Age + MooreSoC + rs1423249 + intervention,
list(cpg = as.name(x))), data = GxE_reg_top)})
names(res_mQTL_cpgs) <- mQTL_cpgs
#coeffs and adj R sqrd
res_mQTL_cpgs_summ <- lapply(res_mQTL_cpgs,summary)
#rows 18,19 = genotype and intervention terms
lapply(res_mQTL_cpgs_summ,function(x){x$coefficients[c(18,19),]})
lapply(res_mQTL_cpgs_summ,function(x){x$adj.r.squared})
#AIC
lapply(res_mQTL_cpgs,AIC)
#GxE
####
print("G x E")
#NOTE(review): the interaction uses MasterGroupNo rather than the releveled
#'intervention' factor used by the E and G+E models, so the reference level
#may differ across models -- confirm this is intended
res_mQTL_cpgs <- lapply(mQTL_cpgs, function(x) {
lm(substitute(cpg ~ PC1 + PC2 + PC3 + PC4 + PC5 + PC6 + PC7 + PC8 + PC9 + PC10 +
PC11 + PC12 + PC13 + PC15 + Age + MooreSoC + rs1423249*MasterGroupNo,
list(cpg = as.name(x))), data = GxE_reg_top)})
names(res_mQTL_cpgs) <- mQTL_cpgs
#coeffs and adj R sqrd
res_mQTL_cpgs_summ <- lapply(res_mQTL_cpgs,summary)
#rows 18,19,20 = genotype, group and interaction terms
lapply(res_mQTL_cpgs_summ,function(x){x$coefficients[c(18,19,20),]})
lapply(res_mQTL_cpgs_summ,function(x){x$adj.r.squared})
#AIC
lapply(res_mQTL_cpgs,AIC)
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# redo mQTL with imputed data for chr 5 and 8
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# chr 5
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#read imputed genotypes (PLINK .traw) at two imputation-info thresholds;
#only the info>=0.9 set is used below, the 0.3 table is read but unused here
GMB_SNPs_chr5_3 <- read.table("/data/GSA/emphasis/imputed_geno_chr5_chr8/CHROMOSOME_5/PED_FORMAT/plink/fcgene_plink_chr5_info03_a_hwe_geno_maf_recodeA_t.traw", sep="\t", head=T)
GMB_SNPs_chr5_9 <- read.table("/data/GSA/emphasis/imputed_geno_chr5_chr8/CHROMOSOME_5/PED_FORMAT/plink/fcgene_plink_chr5_info09_a_hwe_geno_maf_recodeA_t.traw", sep="\t", head=T)
GMB_SNPs_chr5 <- GMB_SNPs_chr5_9
#drop annotation columns, keeping the SNP id column (used as rownames below)
#plus the per-sample genotype columns
GMB_SNPs_chr5 <- GMB_SNPs_chr5[,-c(1,3,4,5,6)]
##########################
#reshape GSA data for GEM
###########################
#stash the SNP ids, then drop that column so the matrix is samples-only
SNPs_chr5 <- GMB_SNPs_chr5[,1,drop=F]
GMB_SNPs_chr5 <- GMB_SNPs_chr5[,-1]
#edit sample names to match those in sample sheet/beta matrix
#NOTE(review): "[0:9]" is a character class of '0', ':' and '9' -- likely
#meant "[0-9]"; with FID_IID-style names the split still lands on '_', but
#confirm before reusing this pattern. paste0("", ...) is a no-op wrapper.
colnames(GMB_SNPs_chr5) <- paste0("",sapply(colnames(GMB_SNPs_chr5),
function(x){strsplit(x,"X?[0:9]*_")[[1]][2]}))
#match sample order in sample sheet to GSA data
GSA_sample_sheet_chr5 <- GSA_sample_sheet[match(colnames(GMB_SNPs_chr5),
GSA_sample_sheet$Sample.ID),]
#sanity check: should print TRUE
all(GSA_sample_sheet_chr5$Sample.ID == colnames(GMB_SNPs_chr5))
#add probe names back in
#rownames(GMB_SNPs_chr5) <- make.names(t(SNPs_chr5), unique=TRUE)
rownames(GMB_SNPs_chr5) <- t(SNPs_chr5)
###########################
#reshape EPIC data for GEM
############################
#restrict both matrices to the shared sample set, in the same column order
GMB_SNPs_chr5 <- GMB_SNPs_chr5[,colnames(GMB_SNPs_chr5) %in% colnames(GMB_CpGs)]
GMB_CpGs_chr5 <- GMB_CpGs[,match(colnames(GMB_SNPs_chr5),colnames(GMB_CpGs))]
all(colnames(GMB_SNPs_chr5) == colnames(GMB_CpGs_chr5))
dim(GMB_CpGs_chr5)
dim(GMB_SNPs_chr5)
#match pdata sample order to GSA and EPIC
pdata_chr5 <- pdata[match(as.factor(colnames(GMB_SNPs_chr5)),pdata$Subject_ID),]
all(colnames(GMB_SNPs_chr5) == pdata_chr5$Subject_ID)
all(colnames(GMB_CpGs_chr5) == pdata_chr5$Subject_ID)
dim(pdata_chr5)
################
#run GEM models
################
#create env and cov objects
#env: 1 x samples matrix holding the E variable (study arm)
env <- pdata_chr5[,colnames(pdata_chr5) %in% c("Subject_ID","MasterGroupNo")]
rownames(env) <- env$Subject_ID
env <- t(env[,colnames(env) == "MasterGroupNo",drop=F])
rownames(env)
#cov: covariates x samples matrix (ancestry PCs, age, season, arm)
cov <- pdata_chr5[,colnames(pdata_chr5) %in% c("Subject_ID","PC1","PC2","PC3","PC4",
"PC5","PC6","PC7","PC8","PC9","PC10", "PC11","PC12","PC13","PC15","Age",
"MooreSoC","MasterGroupNo"),drop=F]
cov$MooreSoC <- recode(cov$MooreSoC,dry="1",rainy="2")
cov$MooreSoC <- relevel(cov$MooreSoC,"1")
#melt/dcast transposes the data frame to variables x samples
cov <- dcast(melt(cov, id.var = "Subject_ID"), ... ~ Subject_ID )
rownames(cov) <- cov$variable
cov <- cov[,-1]
cov <- cov[,match(pdata_chr5$Subject_ID,colnames(cov))]
dim(cov)
#create combined cov file
#arm is moved to the last row; presumably GEM's GxE model treats the last
#covariate row as the interacting E variable -- TODO confirm with GEM docs
cov_env <- rbind(cov[rownames(cov) != "MasterGroupNo",],
cov[rownames(cov) == "MasterGroupNo",])
#remove mastergroup from cov
cov <- cov[rownames(cov) != "MasterGroupNo",]
#coerce everything to numeric matrices; sapply drops rownames, so restore them
cov_num <- sapply(cov[,], as.numeric)
rownames(cov_num) <- rownames(cov)
cov_env_num <- sapply(cov_env[,], as.numeric)
rownames(cov_env_num) <- rownames(cov_env)
env_num <- t(as.data.frame(sapply(env[,,drop=F], as.numeric)))
rownames(env_num) <- rownames(env)
colnames(env_num) <- colnames(env)
#sanity output: dimensions and row names
dim(cov_num)
rownames(cov_num)
dim(env_num)
rownames(env_num)
dim(cov_env_num)
rownames(cov_env_num)
#sample-order consistency checks (all should print TRUE)
all.equal(colnames(cov_num),as.character(pdata_chr5$Subject_ID))
all.equal(colnames(cov_num),colnames(GMB_SNPs_chr5))
all.equal(colnames(cov_num),colnames(GMB_CpGs_chr5))
all.equal(colnames(cov_num),colnames(env_num))
all.equal(colnames(env_num),as.character(pdata_chr5$Subject_ID))
all.equal(colnames(env_num),colnames(GMB_SNPs_chr5))
all.equal(colnames(env_num),colnames(GMB_CpGs_chr5))
all.equal(colnames(cov_env_num),colnames(cov_num))
#save as text files
write.table(GMB_SNPs_chr5,"../data/GMB_SNPs_chr5.txt",sep="\t")
write.table(GMB_CpGs_chr5,"../data/GMB_CpGs_chr5.txt",sep="\t")
write.table(env_num,"../data/GMB_env_chr5.txt",sep="\t")
write.table(cov_num,"../data/GMB_cov_chr5.txt",sep="\t")
write.table(cov_env_num,"../data/GMB_gxe_chr5.txt",sep="\t")
#run GEM models
GEM_Gmodel("../data/GMB_SNPs_chr5.txt",
"../data/GMB_cov_chr5.txt",
"../data/GMB_CpGs_chr5.txt",1e-04,
"../results/GEM/Result_Gmodel_chr5_imputed_9.txt")
GEM_GxEmodel("../data/GMB_SNPs_chr5.txt",
"../data/GMB_gxe_chr5.txt",
"../data/GMB_CpGs_chr5.txt", 1,
"../results/GEM/Result_GEmodel_chr5_imputed_9.txt",
topKplot = 1, savePlot=T)
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# chr 8
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#same pipeline as chr 5: imputed .traw at two info thresholds, 0.9 set used
GMB_SNPs_chr8_3 <- read.table("/data/GSA/emphasis/imputed_geno_chr5_chr8/CHROMOSOME_8/PED_FORMAT/plink/fcgene_plink_chr8_info03_a_hwe_geno_maf_recodeA_t.traw", sep="\t", head=T)
GMB_SNPs_chr8_9 <- read.table("/data/GSA/emphasis/imputed_geno_chr5_chr8/CHROMOSOME_8/PED_FORMAT/plink/fcgene_plink_chr8_info09_a_hwe_geno_maf_recodeA_t.traw", sep="\t", head=T)
GMB_SNPs_chr8 <- GMB_SNPs_chr8_9
#drop annotation columns, keeping SNP id + per-sample genotype columns
GMB_SNPs_chr8 <- GMB_SNPs_chr8[,-c(1,3,4,5,6)]
##########################
#reshape GSA data for GEM
###########################
SNPs_chr8 <- GMB_SNPs_chr8[,1,drop=F]
GMB_SNPs_chr8 <- GMB_SNPs_chr8[,-1]
#edit sample names to match those in sample sheet/beta matrix
#NOTE(review): "[0:9]" likely meant "[0-9]" -- see chr 5 section
colnames(GMB_SNPs_chr8) <- paste0("",sapply(colnames(GMB_SNPs_chr8),
function(x){strsplit(x,"X?[0:9]*_")[[1]][2]}))
#match sample order in sample sheet to GSA data
GSA_sample_sheet_chr8 <- GSA_sample_sheet[match(colnames(GMB_SNPs_chr8),
GSA_sample_sheet$Sample.ID),]
#sanity check: should print TRUE
all(GSA_sample_sheet_chr8$Sample.ID == colnames(GMB_SNPs_chr8))
#add probe names back in
rownames(GMB_SNPs_chr8) <- t(SNPs_chr8)
###########################
#reshape EPIC data for GEM
############################
#restrict both matrices to the shared sample set, in the same column order
GMB_SNPs_chr8 <- GMB_SNPs_chr8[,colnames(GMB_SNPs_chr8) %in% colnames(GMB_CpGs)]
GMB_CpGs_chr8 <- GMB_CpGs[,match(colnames(GMB_SNPs_chr8),colnames(GMB_CpGs))]
all(colnames(GMB_SNPs_chr8) == colnames(GMB_CpGs_chr8))
dim(GMB_CpGs_chr8)
dim(GMB_SNPs_chr8)
#match pdata sample order to GSA and EPIC
pdata_chr8 <- pdata[match(as.factor(colnames(GMB_SNPs_chr8)),pdata$Subject_ID),]
all(colnames(GMB_SNPs_chr8) == pdata_chr8$Subject_ID)
all(colnames(GMB_CpGs_chr8) == pdata_chr8$Subject_ID)
dim(pdata_chr8)
#create env and cov objects
#env: 1 x samples matrix holding the E variable (study arm)
env <- pdata_chr8[,colnames(pdata_chr8) %in% c("Subject_ID","MasterGroupNo")]
rownames(env) <- env$Subject_ID
env <- t(env[,colnames(env) == "MasterGroupNo",drop=F])
rownames(env)
#cov: covariates x samples matrix (ancestry PCs, age, season, arm)
cov <- pdata_chr8[,colnames(pdata_chr8) %in% c("Subject_ID","PC1","PC2","PC3","PC4",
"PC5","PC6","PC7","PC8","PC9","PC10", "PC11","PC12","PC13","PC15","Age","MooreSoC","MasterGroupNo"),drop=F]
cov$MooreSoC <- recode(cov$MooreSoC,dry="1",rainy="2")
cov$MooreSoC <- relevel(cov$MooreSoC,"1")
#melt/dcast transposes the data frame to variables x samples
cov <- dcast(melt(cov, id.var = "Subject_ID"), ... ~ Subject_ID )
rownames(cov) <- cov$variable
cov <- cov[,-1]
cov <- cov[,match(pdata_chr8$Subject_ID,colnames(cov))]
dim(cov)
#create combined cov file
#arm moved to the last row for GEM's GxE input -- see chr 5 section note
cov_env <- rbind(cov[rownames(cov) != "MasterGroupNo",],
cov[rownames(cov) == "MasterGroupNo",])
#remove mastergroup from cov
cov <- cov[rownames(cov) != "MasterGroupNo",]
#coerce to numeric matrices; sapply drops rownames, so restore them
cov_num <- sapply(cov[,], as.numeric)
rownames(cov_num) <- rownames(cov)
cov_env_num <- sapply(cov_env[,], as.numeric)
rownames(cov_env_num) <- rownames(cov_env)
env_num <- t(as.data.frame(sapply(env[,,drop=F], as.numeric)))
rownames(env_num) <- rownames(env)
colnames(env_num) <- colnames(env)
#sanity output: dimensions and row names
dim(cov_num)
rownames(cov_num)
dim(env_num)
rownames(env_num)
dim(cov_env_num)
rownames(cov_env_num)
#sample-order consistency checks (all should print TRUE)
all.equal(colnames(cov_num),as.character(pdata_chr8$Subject_ID))
all.equal(colnames(cov_num),colnames(GMB_SNPs_chr8))
all.equal(colnames(cov_num),colnames(GMB_CpGs_chr8))
all.equal(colnames(cov_num),colnames(env_num))
all.equal(colnames(env_num),as.character(pdata_chr8$Subject_ID))
all.equal(colnames(env_num),colnames(GMB_SNPs_chr8))
all.equal(colnames(env_num),colnames(GMB_CpGs_chr8))
all.equal(colnames(cov_env_num),colnames(cov_num))
#save as text files
write.table(GMB_SNPs_chr8,"../data/GMB_SNPs_chr8.txt",sep="\t")
write.table(GMB_CpGs_chr8,"../data/GMB_CpGs_chr8.txt",sep="\t")
write.table(env_num,"../data/GMB_env_chr8.txt",sep="\t")
write.table(cov_num,"../data/GMB_cov_chr8.txt",sep="\t")
write.table(cov_env_num,"../data/GMB_gxe_chr8.txt",sep="\t")
#run GEM models
GEM_Gmodel("../data/GMB_SNPs_chr8.txt",
"../data/GMB_cov_chr8.txt",
"../data/GMB_CpGs_chr8.txt",1e-04,
"../results/GEM/Result_Gmodel_chr8_imputed_9.txt")
#NOTE(review): savePlot=F here but savePlot=T for chr 5 -- confirm intended
GEM_GxEmodel("../data/GMB_SNPs_chr8.txt",
"../data/GMB_gxe_chr8.txt",
"../data/GMB_CpGs_chr8.txt", 1,
"../results/GEM/Result_GEmodel_chr8_imputed_9.txt",
topKplot = 1, savePlot=F)
| /EPIC_analysis/inter_EWAS_mQTL.R | no_license | EMPHASIS-STUDY/EWAS | R | false | false | 23,103 | r | #/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
#R script for running mQTL analysis for EWAS using GEM/matrixEQTL
#
#inputs: matrix of methylation beta values (EPIC), matrix of SNP genotypes (GSA),
# phenotype data
#
# authors. Ayden Saffari <ayden.saffari@lshtm.ac.uk> (MRC ING, LSHTM)
# Ashutosh Singh Tomar (CSIR, CCMB)
# Prachand Issarapu (CSIR, CCMB)
#
# NOT FOR DISTRIBUTION/ PUBLIC CONSUMPTION
#/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
library("GEM")
library("plyr")
library("dplyr")
library("reshape2")
library("ggplot2")
library("gamplotlib")
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#initialization
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
##########
#load data
##########
#differential methylation results, DMR CpG list and normalised betas
res_DMPs_pcs <- readRDS("../R_objects/res_DMPs_pcs.rds")
DMRs_CpGs <- readRDS("../R_objects/EMPH_GMB_DMRs_CpGs.rds")
norm_beta_fil <- readRDS("../R_objects/norm_beta_fil.rds")
#GMB_CpGs <- norm_beta_fil[which(rownames(norm_beta_fil) %in%
# unique(c(res_DMPs_pcs$Name[
# res_DMPs_pcs$adj.P.Val < 0.1],DMRs_CpGs))),]
#candidate CpGs = DMR CpGs only (the commented version also added DMPs)
GMB_CpGs <- norm_beta_fil[which(rownames(norm_beta_fil) %in%
DMRs_CpGs),]
pcs <- readRDS("../R_objects/pcs.rds")
#NOTE(review): pdata is used here before being read from disk further down
#(readRDS("../R_objects/pdata.rds")), and that later read overwrites this
#cbind -- the appended PCs appear to be discarded; confirm pdata.rds already
#contains PC columns, or reorder these statements
pdata <- cbind(pdata,pcs[,1:15])
GSA_sample_sheet <- read.csv("/data/GSA/emphasis/EMPHASIS_GMB_GSA_Samplesheet.csv")
#genotypes in PLINK .traw (additive 0/1/2) format, samples in columns
GMB_SNPs <- read.table("../data/GSA_GMB_PLINKfiltered_a_hwe_geno_maf_recodeA_t.traw",
sep="\t", head=T)
#drop annotation columns, keeping SNP id + per-sample genotype columns
GMB_SNPs <- GMB_SNPs[,-c(1,3,4,5,6)]
#############################
#produce genotype summary stat table
#############################
#per-SNP genotype counts (0/1/2 and missing)
summary_table <- apply(GMB_SNPs[,-1],1,function(x){summary(as.factor(x))})
summary_table <- ldply(summary_table,function(s){t(data.frame(unlist(s)))})
#NOTE(review): SNPs with zero missing calls presumably have NA in the `NA's`
#column, and !(NA > 30) is NA, which garbles the row subset -- confirm
summary_table_fil <- summary_table[!(summary_table$`NA's` > 30),]
#NOTE(review): this assigns rownames of the *unfiltered* GMB_SNPs (still the
#default integer rownames at this point, as SNP ids are set later) to the
#*filtered* table -- likely wrong IDs and a length mismatch if rows were
#dropped; probably meant the SNP id column subset by the same filter
summary_table_fil$SNP <- rownames(GMB_SNPs)
summary_table_fil <- summary_table_fil[,match(c("SNP","0", "1","2","NA's"),
colnames(summary_table_fil))]
##########################
#reshape GSA data for GEM
###########################
#move SNP ids into rownames so the matrix is samples-only
rownames(GMB_SNPs) <- GMB_SNPs[,1]
GMB_SNPs <- GMB_SNPs[,-1]
pdata <- readRDS("../R_objects/pdata.rds")
#edit sample names to match those in sample sheet/beta matrix
#NOTE(review): "[0:9]" is a character class of '0', ':' and '9' -- likely
#meant "[0-9]"; confirm the split behaves as intended on the array names
colnames(GMB_SNPs) <- paste0("2",sapply(colnames(GMB_SNPs),
function(x){strsplit(x,"X?[0:9]*_2")[[1]][2]}))
#match sample order in sample sheet to GSA data
GSA_sample_sheet <- GSA_sample_sheet[match(colnames(GMB_SNPs),
GSA_sample_sheet$Array.info),]
#sanity check: should print TRUE
all(GSA_sample_sheet$Array.info == colnames(GMB_SNPs))
#replace arrays with sample IDs for GSA
#relies on the elementwise == holding after the match() reorder above
colnames(GMB_SNPs) <- GSA_sample_sheet$Sample.ID[
GSA_sample_sheet$Array.info == colnames(GMB_SNPs)]
#change genotype coding to 1,2,3
#REMOVE -don't need to do this, 0,1,2 coding is equivalent
#GMB_SNPs_f <- t(as.data.frame(apply(GMB_SNPs,1,factor)))
#GMB_SNPs_f <- revalue(GMB_SNPs_f, c("0"="1", "1"="2", "2"="3"))
###########################
#reshape EPIC data for GEM
############################
#replace arrays with sample IDs for EPIC array
GMB_CpGs <- GMB_CpGs[,match(rownames(pdata),colnames(GMB_CpGs))]
all(colnames(GMB_CpGs) == rownames(pdata))
colnames(GMB_CpGs) <- pdata$Subject_ID
GMB_CpGs <- as.data.frame(GMB_CpGs)
#dont need to do - add ID column and move to 1
#GMB_CpGs$ID <- rownames(GMB_CpGs)
#GMB_CpGs <- GMB_CpGs[,c(ncol(GMB_CpGs),1:(ncol(GMB_CpGs) - 1))]
#restrict both matrices to the shared sample set, in the same column order
GMB_SNPs <- GMB_SNPs[,colnames(GMB_SNPs) %in% colnames(GMB_CpGs)]
GMB_CpGs <- GMB_CpGs[,match(colnames(GMB_SNPs),colnames(GMB_CpGs))]
#rownames(GMB_CpGs) <- GMB_CpGs$ID
#GMB_CpGs <- GMB_CpGs[,-1]
all(colnames(GMB_SNPs) == colnames(GMB_CpGs))
dim(GMB_CpGs)
dim(GMB_SNPs)
#match pdata sample order to GSA and EPIC
pdata <- pdata[match(as.factor(colnames(GMB_SNPs)),pdata$Subject_ID),]
all(colnames(GMB_SNPs) == pdata$Subject_ID)
all(colnames(GMB_CpGs) == pdata$Subject_ID)
dim(pdata)
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# run GEM mQTL analysis
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#create env and cov objects
#env: 1 x samples matrix holding the E variable (study arm)
env <- pdata[,colnames(pdata) %in% c("Subject_ID","MasterGroupNo")]
rownames(env) <- env$Subject_ID
env <- t(env[,colnames(env) == "MasterGroupNo",drop=F])
rownames(env)
#cov: covariates x samples matrix (ancestry PCs, age, season, arm)
cov <- pdata[,colnames(pdata) %in% c("Subject_ID","PC1","PC2","PC3","PC4",
"PC5","PC6","PC7","PC8","PC9","PC10", "PC11","PC12","PC13","PC15","Age",
"MooreSoC","MasterGroupNo"),drop=F]
#season of conception coded numerically (dry=1, rainy=2), dry as reference
cov$MooreSoC <- recode(cov$MooreSoC,dry="1",rainy="2")
cov$MooreSoC <- relevel(cov$MooreSoC,"1")
#melt/dcast transposes the data frame to variables x samples
cov <- dcast(melt(cov, id.var = "Subject_ID"), ... ~ Subject_ID )
rownames(cov) <- cov$variable
cov <- cov[,-1]
cov <- cov[,match(pdata$Subject_ID,colnames(cov))]
dim(cov)
#create combined cov file
#arm moved to the last row; presumably GEM's GxE model treats the last
#covariate row as the interacting E variable -- TODO confirm with GEM docs
cov_env <- rbind(cov[rownames(cov) != "MasterGroupNo",],
cov[rownames(cov) == "MasterGroupNo",])
#remove mastergroup from cov
cov <- cov[rownames(cov) != "MasterGroupNo",]
#convert to numeric
#sapply drops rownames, so restore them afterwards
cov_num <- sapply(cov[,], as.numeric)
rownames(cov_num) <- rownames(cov)
cov_env_num <- sapply(cov_env[,], as.numeric)
rownames(cov_env_num) <- rownames(cov_env)
env_num <- t(as.data.frame(sapply(env[,,drop=F], as.numeric)))
rownames(env_num) <- rownames(env)
colnames(env_num) <- colnames(env)
#sanity output: dimensions and row names
dim(cov_num)
rownames(cov_num)
dim(env_num)
rownames(env_num)
dim(cov_env_num)
rownames(cov_env_num)
#sample-order consistency checks (all should print TRUE)
all.equal(colnames(cov_num),as.character(pdata$Subject_ID))
all.equal(colnames(cov_num),colnames(GMB_SNPs))
all.equal(colnames(cov_num),colnames(GMB_CpGs))
all.equal(colnames(cov_num),colnames(env_num))
all.equal(colnames(env_num),as.character(pdata$Subject_ID))
all.equal(colnames(env_num),colnames(GMB_SNPs))
all.equal(colnames(env_num),colnames(GMB_CpGs))
all.equal(colnames(cov_env_num),colnames(cov_num))
#save as text files
#GEM reads its inputs from disk, hence the round trip through text files
write.table(GMB_SNPs,"../data/GMB_SNPs.txt",sep="\t")
write.table(GMB_CpGs,"../data/GMB_CpGs.txt",sep="\t")
write.table(env_num,"../data/GMB_env.txt",sep="\t")
write.table(cov_num,"../data/GMB_cov.txt",sep="\t")
write.table(cov_env_num,"../data/GMB_gxe.txt",sep="\t")
#run GEM models
GEM_Emodel("../data/GMB_env.txt", "../data/GMB_cov.txt", "../data/GMB_CpGs.txt",
1,"../results/GEM/Result_Emodel.txt", "../results/GEM/Emodel_QQ.jpg",
savePlot=T)
GEM_Gmodel("../data/GMB_SNPs.txt","../data/GMB_cov.txt","../data/GMB_CpGs.txt",
1e-04, "../results/GEM/Result_Gmodel.txt")
GEM_GxEmodel("../data/GMB_SNPs.txt", "../data/GMB_gxe.txt", "../data/GMB_CpGs.txt",
1, "../results/GEM/Result_GEmodel.txt", topKplot = 1, savePlot=T)
#Run regression with additive genotype and interaction
#build per-sample regression frame: the selected SNP's genotypes joined to the
#candidate CpG betas (both transposed so samples become rows), merged on sample ID
GxE_reg_top <- merge(t(GMB_SNPs[rownames(GMB_SNPs) %in%
c("rs1423249"),]),
t(GMB_CpGs[rownames(GMB_CpGs) %in% c("cg06837426","cg20673840","cg20451680",
"cg14972155","cg20059697",
"cg13106512","cg21180956"),]),by="row.names")
rownames(GxE_reg_top) <- GxE_reg_top$Row.names
#drop the Row.names helper column added by merge()
GxE_reg_top <- GxE_reg_top[,-1]
#reorder rows to the pdata sample order before joining phenotypes
GxE_reg_top <- GxE_reg_top[match(pdata$Subject_ID,
rownames(GxE_reg_top)),]
GxE_reg_top <- merge(GxE_reg_top,pdata,by.x='row.names',by.y='Subject_ID')
#alternative with genotype as factor
#not run
#single-CpG check: methylation ~ ancestry PCs + age + season + arm + genotype
#NOTE(review): PC14 is skipped (PC13 then PC15) -- confirm this is intentional
summary(lm(cg14972155 ~ PC1 + PC2 + PC3 + PC4 + PC5 + PC6 +
PC7 + PC8 + PC9 + PC10 +
PC11 + PC12 + PC13 + PC15 + Age + MooreSoC +
MasterGroupNo +
rs1423249,GxE_reg_top))
#meth x inter x geno plot
#Build the plotting frame: keep sample ID, candidate SNPs/CpGs and the study
#arm (only columns actually present are kept), drop incomplete rows, then
#recode arm and genotype to labelled factors with control as the baseline.
GxE_reg_top_fil <- GxE_reg_top[, colnames(GxE_reg_top) %in%
c("Row.names", "rs10239100", "rs1423249", "rs278368", "cg20673840",
"cg06837426", "cg14972155", "MasterGroupNo")]
GxE_reg_top_fil <- na.omit(GxE_reg_top_fil)
names(GxE_reg_top_fil)[names(GxE_reg_top_fil) == "MasterGroupNo"] <- "intervention"
#group 1 = intervention arm, group 2 = control; control is the reference level
GxE_reg_top_fil$intervention <- relevel(
revalue(GxE_reg_top_fil$intervention, c("1"="intervention","2"="control")),
"control")
#additive genotype dosage (0/1/2 copies of the alt allele) shown as GG/GA/AA
GxE_reg_top_fil$rs1423249 <- revalue(
as.factor(GxE_reg_top_fil$rs1423249), c("0"="GG","1"="GA","2"="AA"))
#NOTE(review): stat_summary(fun.y=) was deprecated in ggplot2 3.3 in favour of
#fun= -- works with a warning on newer versions; update when convenient
#cg06837426 ~ rs1423249
#methylation by genotype with a line through per-genotype means
ggplot(GxE_reg_top_fil, aes(rs1423249,cg06837426),color=rs1423249) +
geom_point(aes(color = rs1423249)) +
scale_color_manual(values=c("#C04B8E","#C04B8E","#C04B8E")) +
stat_summary(aes(y = cg06837426,group=rs1423249),fun.y=mean,colour="#252997",
geom="line",group=1) +
theme_gamplotlib() + theme(strip.background = element_blank(),
panel.grid.major.x = element_blank(),legend.position="none",
aspect.ratio=1) +
scale_x_discrete() +
ylab("methylation Beta value") + xlab("genotype") +
ggtitle("cg06837426 ~ rs1423249")
#ggsave() with no plot argument saves the last plot displayed
ggsave("../results/GMB_mQTL_cg06837426_rs1423249_G_scatter.pdf",width=(4),
height=3.5, units="in", dpi=300)
#cg06837426 ~ rs1423249:intervention
#methylation by study arm, faceted by genotype, with per-arm mean line
ggplot(GxE_reg_top_fil, aes(intervention,cg06837426),color=intervention) +
geom_point(aes(color = intervention)) +
scale_color_manual(values=c("#46617A","#00B8A2")) +
stat_summary(aes(y = cg06837426,group=intervention), fun.y=mean,
colour="#252997", geom="line",group=1) + facet_wrap( ~ rs1423249) +
theme_gamplotlib() + theme(strip.background = element_blank(),
panel.grid.major.x = element_blank(),legend.position="none",
aspect.ratio=1) +
scale_x_discrete(labels=c("control","inter.")) +
ylab("methylation Beta value") + xlab("group") +
ggtitle("cg06837426 ~ rs1423249:intervention")
ggsave("../results/GMB_mQTL_cg06837426_rs1423249_GxE_scatter.pdf",width=(4),
height=3.5, units="in", dpi=300)
#cg20673840 ~ rs1423249
#same genotype-only layout for the second CpG
ggplot(GxE_reg_top_fil, aes(rs1423249,cg20673840),color=rs1423249) +
geom_point(aes(color = rs1423249)) + scale_color_manual(values=c("#C04B8E",
"#C04B8E","#C04B8E")) + stat_summary(aes(y = cg20673840,group=rs1423249),
fun.y=mean,colour="#252997", geom="line",group=1) +
theme_gamplotlib() + theme(strip.background = element_blank(),
panel.grid.major.x = element_blank(),legend.position="none",
aspect.ratio=1) +
scale_x_discrete() +
ylab("methylation Beta value") + xlab("genotype") +
ggtitle("cg20673840 ~ rs1423249")
ggsave("../results/GMB_mQTL_cg20673840_rs1423249_G_scatter.pdf",width=(4),
height=3.5, units="in", dpi=300)
#cg20673840 ~ rs1423249:intervention
#methylation by study arm, faceted by genotype, with per-arm mean line
ggplot(GxE_reg_top_fil, aes(intervention,cg20673840),color=intervention) +
geom_point(aes(color = intervention)) + scale_color_manual(values=c("#46617A",
"#00B8A2")) + stat_summary(aes(y = cg20673840,group=intervention), fun.y=mean,
colour="#252997", geom="line",group=1) + facet_wrap( ~ rs1423249) +
theme_gamplotlib() + theme(strip.background = element_blank(),
panel.grid.major.x = element_blank(),legend.position="none",
aspect.ratio=1) +
scale_x_discrete(labels=c("control","inter.")) +
ylab("methylation Beta value") + xlab("group") +
ggtitle("cg20673840 ~ rs1423249:intervention")
#fix: write into ../results/ like every other ggsave() in this section -- the
#original path omitted the directory and dropped the PDF in the working dir
ggsave("../results/GMB_mQTL_cg20673840_rs1423249_GxE_scatter.pdf",width=(4),
height=3.5, units="in", dpi=300)
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# run additional analyses
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#Model comparison per candidate CpG: fit G-only, E-only, G+E and GxE models
#and compare coefficients, adjusted R^2 and AIC across them.
#G only mQTLs
######
#factor for intervention vs control
GxE_reg_top$intervention <- relevel(GxE_reg_top$MasterGroupNo,"2")
#rs1423249
mQTL_cpgs <- c("cg06837426","cg20673840","cg20451680",
"cg14972155","cg20059697",
"cg13106512","cg21180956")
#G
###
print("G")
#substitute() splices each CpG name into the formula as the response variable
res_mQTL_cpgs <- lapply(mQTL_cpgs, function(x) {
lm(substitute(cpg ~ PC1 + PC2 + PC3 + PC4 + PC5 + PC6 + PC7 + PC8 + PC9 + PC10 +
PC11 + PC12 + PC13 + PC15 + Age + MooreSoC + rs1423249,
list(cpg = as.name(x))), data = GxE_reg_top)})
names(res_mQTL_cpgs) <- mQTL_cpgs
#coeffs and adj R sqrd
res_mQTL_cpgs_summ <- lapply(res_mQTL_cpgs,summary)
#row 18 = the term of interest (intercept + 16 covariate rows precede it);
#NOTE(review): positional indexing breaks if any coefficient is NA-dropped --
#consider indexing by coefficient name instead
lapply(res_mQTL_cpgs_summ,function(x){x$coefficients[c(18),]})
lapply(res_mQTL_cpgs_summ,function(x){x$adj.r.squared})
#AIC
lapply(res_mQTL_cpgs,AIC)
#E
###
print("E")
res_mQTL_cpgs <- lapply(mQTL_cpgs, function(x) {
lm(substitute(cpg ~ PC1 + PC2 + PC3 + PC4 + PC5 + PC6 + PC7 + PC8 + PC9 + PC10 +
PC11 + PC12 + PC13 + PC15 + Age + MooreSoC + intervention,
list(cpg = as.name(x))), data = GxE_reg_top)})
names(res_mQTL_cpgs) <- mQTL_cpgs
#coeffs and adj R sqrd
res_mQTL_cpgs_summ <- lapply(res_mQTL_cpgs,summary)
lapply(res_mQTL_cpgs_summ,function(x){x$coefficients[c(18),]})
lapply(res_mQTL_cpgs_summ,function(x){x$adj.r.squared})
#AIC
lapply(res_mQTL_cpgs,AIC)
#G+E
####
print("G + E")
res_mQTL_cpgs <- lapply(mQTL_cpgs, function(x) {
lm(substitute(cpg ~ PC1 + PC2 + PC3 + PC4 + PC5 + PC6 + PC7 + PC8 + PC9 + PC10 +
PC11 + PC12 + PC13 + PC15 + Age + MooreSoC + rs1423249 + intervention,
list(cpg = as.name(x))), data = GxE_reg_top)})
names(res_mQTL_cpgs) <- mQTL_cpgs
#coeffs and adj R sqrd
res_mQTL_cpgs_summ <- lapply(res_mQTL_cpgs,summary)
#rows 18,19 = genotype and intervention terms
lapply(res_mQTL_cpgs_summ,function(x){x$coefficients[c(18,19),]})
lapply(res_mQTL_cpgs_summ,function(x){x$adj.r.squared})
#AIC
lapply(res_mQTL_cpgs,AIC)
#GxE
####
print("G x E")
#NOTE(review): the interaction uses MasterGroupNo rather than the releveled
#'intervention' factor used by the E and G+E models, so the reference level
#may differ across models -- confirm this is intended
res_mQTL_cpgs <- lapply(mQTL_cpgs, function(x) {
lm(substitute(cpg ~ PC1 + PC2 + PC3 + PC4 + PC5 + PC6 + PC7 + PC8 + PC9 + PC10 +
PC11 + PC12 + PC13 + PC15 + Age + MooreSoC + rs1423249*MasterGroupNo,
list(cpg = as.name(x))), data = GxE_reg_top)})
names(res_mQTL_cpgs) <- mQTL_cpgs
#coeffs and adj R sqrd
res_mQTL_cpgs_summ <- lapply(res_mQTL_cpgs,summary)
#rows 18,19,20 = genotype, group and interaction terms
lapply(res_mQTL_cpgs_summ,function(x){x$coefficients[c(18,19,20),]})
lapply(res_mQTL_cpgs_summ,function(x){x$adj.r.squared})
#AIC
lapply(res_mQTL_cpgs,AIC)
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# redo mQTL with imputed data for chr 5 and 8
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# chr 5
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#read imputed genotypes (PLINK .traw) at two imputation-info thresholds;
#only the info>=0.9 set is used below, the 0.3 table is read but unused here
GMB_SNPs_chr5_3 <- read.table("/data/GSA/emphasis/imputed_geno_chr5_chr8/CHROMOSOME_5/PED_FORMAT/plink/fcgene_plink_chr5_info03_a_hwe_geno_maf_recodeA_t.traw", sep="\t", head=T)
GMB_SNPs_chr5_9 <- read.table("/data/GSA/emphasis/imputed_geno_chr5_chr8/CHROMOSOME_5/PED_FORMAT/plink/fcgene_plink_chr5_info09_a_hwe_geno_maf_recodeA_t.traw", sep="\t", head=T)
GMB_SNPs_chr5 <- GMB_SNPs_chr5_9
#drop annotation columns, keeping the SNP id column (used as rownames below)
#plus the per-sample genotype columns
GMB_SNPs_chr5 <- GMB_SNPs_chr5[,-c(1,3,4,5,6)]
##########################
#reshape GSA data for GEM
###########################
#stash the SNP ids, then drop that column so the matrix is samples-only
SNPs_chr5 <- GMB_SNPs_chr5[,1,drop=F]
GMB_SNPs_chr5 <- GMB_SNPs_chr5[,-1]
#edit sample names to match those in sample sheet/beta matrix
#NOTE(review): "[0:9]" is a character class of '0', ':' and '9' -- likely
#meant "[0-9]"; with FID_IID-style names the split still lands on '_', but
#confirm before reusing this pattern. paste0("", ...) is a no-op wrapper.
colnames(GMB_SNPs_chr5) <- paste0("",sapply(colnames(GMB_SNPs_chr5),
function(x){strsplit(x,"X?[0:9]*_")[[1]][2]}))
#match sample order in sample sheet to GSA data
GSA_sample_sheet_chr5 <- GSA_sample_sheet[match(colnames(GMB_SNPs_chr5),
GSA_sample_sheet$Sample.ID),]
#sanity check: should print TRUE
all(GSA_sample_sheet_chr5$Sample.ID == colnames(GMB_SNPs_chr5))
#add probe names back in
#rownames(GMB_SNPs_chr5) <- make.names(t(SNPs_chr5), unique=TRUE)
rownames(GMB_SNPs_chr5) <- t(SNPs_chr5)
###########################
#reshape EPIC data for GEM
############################
#restrict both matrices to the shared sample set, in the same column order
GMB_SNPs_chr5 <- GMB_SNPs_chr5[,colnames(GMB_SNPs_chr5) %in% colnames(GMB_CpGs)]
GMB_CpGs_chr5 <- GMB_CpGs[,match(colnames(GMB_SNPs_chr5),colnames(GMB_CpGs))]
all(colnames(GMB_SNPs_chr5) == colnames(GMB_CpGs_chr5))
dim(GMB_CpGs_chr5)
dim(GMB_SNPs_chr5)
#match pdata sample order to GSA and EPIC
pdata_chr5 <- pdata[match(as.factor(colnames(GMB_SNPs_chr5)),pdata$Subject_ID),]
all(colnames(GMB_SNPs_chr5) == pdata_chr5$Subject_ID)
all(colnames(GMB_CpGs_chr5) == pdata_chr5$Subject_ID)
dim(pdata_chr5)
################
#run GEM models
################
#create env and cov objects
#env: 1 x samples matrix holding the E variable (study arm)
env <- pdata_chr5[,colnames(pdata_chr5) %in% c("Subject_ID","MasterGroupNo")]
rownames(env) <- env$Subject_ID
env <- t(env[,colnames(env) == "MasterGroupNo",drop=F])
rownames(env)
#cov: covariates x samples matrix (ancestry PCs, age, season, arm)
cov <- pdata_chr5[,colnames(pdata_chr5) %in% c("Subject_ID","PC1","PC2","PC3","PC4",
"PC5","PC6","PC7","PC8","PC9","PC10", "PC11","PC12","PC13","PC15","Age",
"MooreSoC","MasterGroupNo"),drop=F]
cov$MooreSoC <- recode(cov$MooreSoC,dry="1",rainy="2")
cov$MooreSoC <- relevel(cov$MooreSoC,"1")
#melt/dcast transposes the data frame to variables x samples
cov <- dcast(melt(cov, id.var = "Subject_ID"), ... ~ Subject_ID )
rownames(cov) <- cov$variable
cov <- cov[,-1]
cov <- cov[,match(pdata_chr5$Subject_ID,colnames(cov))]
dim(cov)
#create combined cov file
#arm is moved to the last row; presumably GEM's GxE model treats the last
#covariate row as the interacting E variable -- TODO confirm with GEM docs
cov_env <- rbind(cov[rownames(cov) != "MasterGroupNo",],
cov[rownames(cov) == "MasterGroupNo",])
#remove mastergroup from cov
cov <- cov[rownames(cov) != "MasterGroupNo",]
#coerce to numeric matrices; sapply drops rownames, so restore them
cov_num <- sapply(cov[,], as.numeric)
rownames(cov_num) <- rownames(cov)
cov_env_num <- sapply(cov_env[,], as.numeric)
rownames(cov_env_num) <- rownames(cov_env)
env_num <- t(as.data.frame(sapply(env[,,drop=F], as.numeric)))
rownames(env_num) <- rownames(env)
colnames(env_num) <- colnames(env)
#sanity output: dimensions and row names
dim(cov_num)
rownames(cov_num)
dim(env_num)
rownames(env_num)
dim(cov_env_num)
rownames(cov_env_num)
#sample-order consistency checks (all should print TRUE)
all.equal(colnames(cov_num),as.character(pdata_chr5$Subject_ID))
all.equal(colnames(cov_num),colnames(GMB_SNPs_chr5))
all.equal(colnames(cov_num),colnames(GMB_CpGs_chr5))
all.equal(colnames(cov_num),colnames(env_num))
all.equal(colnames(env_num),as.character(pdata_chr5$Subject_ID))
all.equal(colnames(env_num),colnames(GMB_SNPs_chr5))
all.equal(colnames(env_num),colnames(GMB_CpGs_chr5))
all.equal(colnames(cov_env_num),colnames(cov_num))
#save as text files
write.table(GMB_SNPs_chr5,"../data/GMB_SNPs_chr5.txt",sep="\t")
write.table(GMB_CpGs_chr5,"../data/GMB_CpGs_chr5.txt",sep="\t")
write.table(env_num,"../data/GMB_env_chr5.txt",sep="\t")
write.table(cov_num,"../data/GMB_cov_chr5.txt",sep="\t")
write.table(cov_env_num,"../data/GMB_gxe_chr5.txt",sep="\t")
#run GEM models
GEM_Gmodel("../data/GMB_SNPs_chr5.txt",
"../data/GMB_cov_chr5.txt",
"../data/GMB_CpGs_chr5.txt",1e-04,
"../results/GEM/Result_Gmodel_chr5_imputed_9.txt")
GEM_GxEmodel("../data/GMB_SNPs_chr5.txt",
"../data/GMB_gxe_chr5.txt",
"../data/GMB_CpGs_chr5.txt", 1,
"../results/GEM/Result_GEmodel_chr5_imputed_9.txt",
topKplot = 1, savePlot=T)
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# chr 8
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#same pipeline as chr 5: imputed .traw at two info thresholds, 0.9 set used
GMB_SNPs_chr8_3 <- read.table("/data/GSA/emphasis/imputed_geno_chr5_chr8/CHROMOSOME_8/PED_FORMAT/plink/fcgene_plink_chr8_info03_a_hwe_geno_maf_recodeA_t.traw", sep="\t", head=T)
GMB_SNPs_chr8_9 <- read.table("/data/GSA/emphasis/imputed_geno_chr5_chr8/CHROMOSOME_8/PED_FORMAT/plink/fcgene_plink_chr8_info09_a_hwe_geno_maf_recodeA_t.traw", sep="\t", head=T)
GMB_SNPs_chr8 <- GMB_SNPs_chr8_9
#drop annotation columns, keeping SNP id + per-sample genotype columns
GMB_SNPs_chr8 <- GMB_SNPs_chr8[,-c(1,3,4,5,6)]
##########################
#reshape GSA data for GEM
###########################
SNPs_chr8 <- GMB_SNPs_chr8[,1,drop=F]
GMB_SNPs_chr8 <- GMB_SNPs_chr8[,-1]
#edit sample names to match those in sample sheet/beta matrix
#NOTE(review): "[0:9]" likely meant "[0-9]" -- see chr 5 section
colnames(GMB_SNPs_chr8) <- paste0("",sapply(colnames(GMB_SNPs_chr8),
function(x){strsplit(x,"X?[0:9]*_")[[1]][2]}))
#match sample order in sample sheet to GSA data
GSA_sample_sheet_chr8 <- GSA_sample_sheet[match(colnames(GMB_SNPs_chr8),
GSA_sample_sheet$Sample.ID),]
#sanity check: should print TRUE
all(GSA_sample_sheet_chr8$Sample.ID == colnames(GMB_SNPs_chr8))
#add probe names back in
rownames(GMB_SNPs_chr8) <- t(SNPs_chr8)
###########################
#reshape EPIC data for GEM
############################
#restrict both matrices to the shared sample set, in the same column order
GMB_SNPs_chr8 <- GMB_SNPs_chr8[,colnames(GMB_SNPs_chr8) %in% colnames(GMB_CpGs)]
GMB_CpGs_chr8 <- GMB_CpGs[,match(colnames(GMB_SNPs_chr8),colnames(GMB_CpGs))]
all(colnames(GMB_SNPs_chr8) == colnames(GMB_CpGs_chr8))
dim(GMB_CpGs_chr8)
dim(GMB_SNPs_chr8)
#match pdata sample order to GSA and EPIC
pdata_chr8 <- pdata[match(as.factor(colnames(GMB_SNPs_chr8)),pdata$Subject_ID),]
all(colnames(GMB_SNPs_chr8) == pdata_chr8$Subject_ID)
all(colnames(GMB_CpGs_chr8) == pdata_chr8$Subject_ID)
dim(pdata_chr8)
#create env and cov objects
#env: 1 x samples matrix holding the E variable (study arm)
env <- pdata_chr8[,colnames(pdata_chr8) %in% c("Subject_ID","MasterGroupNo")]
rownames(env) <- env$Subject_ID
env <- t(env[,colnames(env) == "MasterGroupNo",drop=F])
rownames(env)
#cov: covariates x samples matrix (ancestry PCs, age, season, arm)
cov <- pdata_chr8[,colnames(pdata_chr8) %in% c("Subject_ID","PC1","PC2","PC3","PC4",
"PC5","PC6","PC7","PC8","PC9","PC10", "PC11","PC12","PC13","PC15","Age","MooreSoC","MasterGroupNo"),drop=F]
cov$MooreSoC <- recode(cov$MooreSoC,dry="1",rainy="2")
cov$MooreSoC <- relevel(cov$MooreSoC,"1")
#melt/dcast transposes the data frame to variables x samples
cov <- dcast(melt(cov, id.var = "Subject_ID"), ... ~ Subject_ID )
rownames(cov) <- cov$variable
cov <- cov[,-1]
cov <- cov[,match(pdata_chr8$Subject_ID,colnames(cov))]
dim(cov)
#create combined cov file
#arm moved to the last row for GEM's GxE input -- see chr 5 section note
cov_env <- rbind(cov[rownames(cov) != "MasterGroupNo",],
cov[rownames(cov) == "MasterGroupNo",])
#remove mastergroup from cov
cov <- cov[rownames(cov) != "MasterGroupNo",]
#coerce to numeric matrices; sapply drops rownames, so restore them
cov_num <- sapply(cov[,], as.numeric)
rownames(cov_num) <- rownames(cov)
cov_env_num <- sapply(cov_env[,], as.numeric)
rownames(cov_env_num) <- rownames(cov_env)
env_num <- t(as.data.frame(sapply(env[,,drop=F], as.numeric)))
rownames(env_num) <- rownames(env)
colnames(env_num) <- colnames(env)
#sanity output: dimensions and row names
dim(cov_num)
rownames(cov_num)
dim(env_num)
rownames(env_num)
dim(cov_env_num)
rownames(cov_env_num)
#sample-order consistency checks (all should print TRUE)
all.equal(colnames(cov_num),as.character(pdata_chr8$Subject_ID))
all.equal(colnames(cov_num),colnames(GMB_SNPs_chr8))
all.equal(colnames(cov_num),colnames(GMB_CpGs_chr8))
all.equal(colnames(cov_num),colnames(env_num))
all.equal(colnames(env_num),as.character(pdata_chr8$Subject_ID))
all.equal(colnames(env_num),colnames(GMB_SNPs_chr8))
all.equal(colnames(env_num),colnames(GMB_CpGs_chr8))
all.equal(colnames(cov_env_num),colnames(cov_num))
#save as text files
write.table(GMB_SNPs_chr8,"../data/GMB_SNPs_chr8.txt",sep="\t")
write.table(GMB_CpGs_chr8,"../data/GMB_CpGs_chr8.txt",sep="\t")
write.table(env_num,"../data/GMB_env_chr8.txt",sep="\t")
write.table(cov_num,"../data/GMB_cov_chr8.txt",sep="\t")
write.table(cov_env_num,"../data/GMB_gxe_chr8.txt",sep="\t")
#run GEM models
GEM_Gmodel("../data/GMB_SNPs_chr8.txt",
"../data/GMB_cov_chr8.txt",
"../data/GMB_CpGs_chr8.txt",1e-04,
"../results/GEM/Result_Gmodel_chr8_imputed_9.txt")
GEM_GxEmodel("../data/GMB_SNPs_chr8.txt",
"../data/GMB_gxe_chr8.txt",
"../data/GMB_CpGs_chr8.txt", 1,
"../results/GEM/Result_GEmodel_chr8_imputed_9.txt",
topKplot = 1, savePlot=F)
|
# Internal constant: column names assigned to the six fields of a Plink
# *.bim file (`chr`, `id`, `posg`, `pos`, `alt`, `ref`); see the Plink BIM
# format references in the roxygen block below.
bim_names <- c('chr', 'id', 'posg', 'pos', 'alt', 'ref')
#' Read Plink *.bim files
#'
#' This function reads a standard Plink *.bim file into a tibble with named columns.
#' It uses [readr::read_table()] to do it efficiently.
#'
#' @param file Input file (whatever is accepted by [readr::read_table()]).
#' If file as given does not exist and is missing the expected *.bim extension, the function adds the .bim extension and uses that path if that file exists.
#' Additionally, the .gz extension is added automatically if the file (after *.bim extension is added as needed) is still not found and did not already contain the .gz extension and adding it points to an existing file.
#' @param verbose If `TRUE` (default) function reports the path of the file being loaded (after autocompleting the extensions).
#'
#' @return A tibble with columns: `chr`, `id`, `posg`, `pos`, `alt`, `ref`.
#'
#' @examples
#' # to read "data.bim", run like this:
#' # bim <- read_bim("data")
#' # this also works
#' # bim <- read_bim("data.bim")
#'
#' # The following example is more awkward
#' # because package sample data has to be specified in this weird way:
#'
#' # read an existing Plink *.bim file
#' file <- system.file("extdata", 'sample.bim', package = "genio", mustWork = TRUE)
#' bim <- read_bim(file)
#' bim
#'
#' # can specify without extension
#' file <- sub('\\.bim$', '', file) # remove extension from this path on purpose
#' file # verify .bim is missing
#' bim <- read_bim(file) # loads too!
#' bim
#'
#' @seealso
#' [read_plink()] for reading a set of BED/BIM/FAM files.
#'
#' Plink BIM format references:
#' <https://www.cog-genomics.org/plink/1.9/formats#bim>
#' <https://www.cog-genomics.org/plink/2.0/formats#bim>
#'
#' @export
read_bim <- function(file, verbose = TRUE) {
  # All of the real work -- extension autocompletion (.bim/.gz), column
  # naming, and verbose reporting -- happens in the shared tabular reader.
  reader_args <- list(
    file = file,
    ext = 'bim',
    tib_names = bim_names,
    col_types = 'ccdicc',
    verbose = verbose
  )
  do.call(read_tab_generic, reader_args)
}
# internal constant
# Internal constant: column names assigned to the six fields of a Plink
# *.bim file (`chr`, `id`, `posg`, `pos`, `alt`, `ref`); see the Plink BIM
# format references in the roxygen block below.
bim_names <- c('chr', 'id', 'posg', 'pos', 'alt', 'ref')
#' Read Plink *.bim files
#'
#' This function reads a standard Plink *.bim file into a tibble with named columns.
#' It uses [readr::read_table()] to do it efficiently.
#'
#' @param file Input file (whatever is accepted by [readr::read_table()]).
#' If file as given does not exist and is missing the expected *.bim extension, the function adds the .bim extension and uses that path if that file exists.
#' Additionally, the .gz extension is added automatically if the file (after *.bim extension is added as needed) is still not found and did not already contain the .gz extension and adding it points to an existing file.
#' @param verbose If `TRUE` (default) function reports the path of the file being loaded (after autocompleting the extensions).
#'
#' @return A tibble with columns: `chr`, `id`, `posg`, `pos`, `alt`, `ref`.
#'
#' @examples
#' # to read "data.bim", run like this:
#' # bim <- read_bim("data")
#' # this also works
#' # bim <- read_bim("data.bim")
#'
#' # The following example is more awkward
#' # because package sample data has to be specified in this weird way:
#'
#' # read an existing Plink *.bim file
#' file <- system.file("extdata", 'sample.bim', package = "genio", mustWork = TRUE)
#' bim <- read_bim(file)
#' bim
#'
#' # can specify without extension
#' file <- sub('\\.bim$', '', file) # remove extension from this path on purpose
#' file # verify .bim is missing
#' bim <- read_bim(file) # loads too!
#' bim
#'
#' @seealso
#' [read_plink()] for reading a set of BED/BIM/FAM files.
#'
#' Plink BIM format references:
#' <https://www.cog-genomics.org/plink/1.9/formats#bim>
#' <https://www.cog-genomics.org/plink/2.0/formats#bim>
#'
#' @export
read_bim <- function(file, verbose = TRUE) {
  # All of the real work -- extension autocompletion (.bim/.gz), column
  # naming, and verbose reporting -- happens in the shared tabular reader.
  reader_args <- list(
    file = file,
    ext = 'bim',
    tib_names = bim_names,
    col_types = 'ccdicc',
    verbose = verbose
  )
  do.call(read_tab_generic, reader_args)
}
|
describe("verify_extracted_package", {
  tmp <- tempfile()
  on.exit(unlink(tmp, recursive = TRUE), add = TRUE)
  # Extract `pkgfile` into a freshly-cleaned temp dir and run the verifier.
  extract_and_check <- function(pkgfile) {
    unlink(tmp, recursive = TRUE)
    mkdirp(tmp)
    utils::untar(pkgfile, exdir = tmp)
    verify_extracted_package(pkgfile, tmp)
  }
  it("errors if archive doesn't contain a DESCRIPTION file", {
    pkg <- local_binary_package("test1")
    expect_error(
      extract_and_check(pkg),
      regexp = "'.*test1[.]tgz' is not a valid R package, it is an empty archive",
      class = "install_input_error"
    )
  })
  it("errors if archive DESCRIPTION is not in the root directory", {
    pkg <- local_binary_package("test2", "foo/DESCRIPTION" = character())
    expect_error(
      extract_and_check(pkg),
      regexp = "'.*test2[.]tgz' is not a valid binary, it does not contain 'test2/Meta/package.rds' and 'test2/DESCRIPTION'.",
      class = "install_input_error"
    )
  })
  it("can handle multiple DESCRIPTION files", {
    # Nested DESCRIPTION files (e.g. in test fixtures) must be ignored ...
    pkg <- local_binary_package(
      "test3",
      "DESCRIPTION" = c("Package: test3", "Built: 2017-01-01"),
      "tests/testthat/DESCRIPTION" = character(),
      "Meta/package.rds" = character()
    )
    expect_s3_class(extract_and_check(pkg)$desc, "description")
    # ... but a DESCRIPTION that only exists in a subdirectory is an error.
    pkg <- local_binary_package(
      "test4",
      "pkgdir/DESCRIPTION" = c("Package: test4", "Built: 2017-01-01"),
      "Meta/package.rds" = character()
    )
    expect_error(
      extract_and_check(pkg),
      regexp = "'.*test4[.]tgz' is not a valid binary, it does not contain 'test4/DESCRIPTION'.",
      class = "install_input_error"
    )
  })
  it("fails if the binary does not contain package.rds", {
    pkg <- local_binary_package("test5", "DESCRIPTION" = character())
    expect_error(
      extract_and_check(pkg),
      regexp = "'.*test5[.]tgz' is not a valid binary, it does not contain 'test5/Meta/package[.]rds'",
      class = "install_input_error"
    )
  })
  it("fails if the DESCRIPTION file is empty", {
    pkg <- local_binary_package(
      "test6",
      "DESCRIPTION" = character(),
      "Meta/package.rds" = character()
    )
    expect_error(
      extract_and_check(pkg),
      regexp = "'.*test6[.]tgz' is not a valid binary, 'test6/DESCRIPTION' is empty",
      class = "install_input_error"
    )
  })
  it("fails if the DESCRIPTION file has no 'Built' entry", {
    pkg <- local_binary_package(
      "test7",
      "DESCRIPTION" = c("Package: test7"),
      "Meta/package.rds" = character()
    )
    expect_error(
      extract_and_check(pkg),
      regexp = "'.*test7[.]tgz' is not a valid binary, no 'Built' entry in 'test7/DESCRIPTION'",
      class = "install_input_error"
    )
  })
})
# Fixed typo in the test description ("extrancted" -> "extracted").
test_that("verify_extracted_package errors", {
  # Each fixture directory under fixtures/packages reproduces one invalid
  # package layout; the verifier must reject all of them with a classed
  # input error whose message matches the given pattern.
  pkg_dir <- file.path("fixtures", "packages")
  expect_error(
    verify_extracted_package("bad1", file.path(pkg_dir, "bad1")),
    "single directory", class = "install_input_error")
  expect_error(
    verify_extracted_package("bad2", file.path(pkg_dir, "bad2")),
    "invalid", class = "install_input_error")
  expect_error(
    verify_extracted_package("bad3", file.path(pkg_dir, "bad3")),
    "Package", class = "install_input_error")
  expect_error(
    verify_extracted_package("bad4", file.path(pkg_dir, "bad4")),
    "package name mismatch", class = "install_input_error")
})
| /tests/testthat/test-install-verify.R | permissive | konradzdeb/pkgdepends | R | false | false | 3,016 | r |
describe("verify_extracted_package", {
  tmp <- tempfile()
  on.exit(unlink(tmp, recursive = TRUE), add = TRUE)
  # Extract `pkgfile` into a freshly-cleaned temp dir and run the verifier.
  extract_and_check <- function(pkgfile) {
    unlink(tmp, recursive = TRUE)
    mkdirp(tmp)
    utils::untar(pkgfile, exdir = tmp)
    verify_extracted_package(pkgfile, tmp)
  }
  it("errors if archive doesn't contain a DESCRIPTION file", {
    pkg <- local_binary_package("test1")
    expect_error(
      extract_and_check(pkg),
      regexp = "'.*test1[.]tgz' is not a valid R package, it is an empty archive",
      class = "install_input_error"
    )
  })
  it("errors if archive DESCRIPTION is not in the root directory", {
    pkg <- local_binary_package("test2", "foo/DESCRIPTION" = character())
    expect_error(
      extract_and_check(pkg),
      regexp = "'.*test2[.]tgz' is not a valid binary, it does not contain 'test2/Meta/package.rds' and 'test2/DESCRIPTION'.",
      class = "install_input_error"
    )
  })
  it("can handle multiple DESCRIPTION files", {
    # Nested DESCRIPTION files (e.g. in test fixtures) must be ignored ...
    pkg <- local_binary_package(
      "test3",
      "DESCRIPTION" = c("Package: test3", "Built: 2017-01-01"),
      "tests/testthat/DESCRIPTION" = character(),
      "Meta/package.rds" = character()
    )
    expect_s3_class(extract_and_check(pkg)$desc, "description")
    # ... but a DESCRIPTION that only exists in a subdirectory is an error.
    pkg <- local_binary_package(
      "test4",
      "pkgdir/DESCRIPTION" = c("Package: test4", "Built: 2017-01-01"),
      "Meta/package.rds" = character()
    )
    expect_error(
      extract_and_check(pkg),
      regexp = "'.*test4[.]tgz' is not a valid binary, it does not contain 'test4/DESCRIPTION'.",
      class = "install_input_error"
    )
  })
  it("fails if the binary does not contain package.rds", {
    pkg <- local_binary_package("test5", "DESCRIPTION" = character())
    expect_error(
      extract_and_check(pkg),
      regexp = "'.*test5[.]tgz' is not a valid binary, it does not contain 'test5/Meta/package[.]rds'",
      class = "install_input_error"
    )
  })
  it("fails if the DESCRIPTION file is empty", {
    pkg <- local_binary_package(
      "test6",
      "DESCRIPTION" = character(),
      "Meta/package.rds" = character()
    )
    expect_error(
      extract_and_check(pkg),
      regexp = "'.*test6[.]tgz' is not a valid binary, 'test6/DESCRIPTION' is empty",
      class = "install_input_error"
    )
  })
  it("fails if the DESCRIPTION file has no 'Built' entry", {
    pkg <- local_binary_package(
      "test7",
      "DESCRIPTION" = c("Package: test7"),
      "Meta/package.rds" = character()
    )
    expect_error(
      extract_and_check(pkg),
      regexp = "'.*test7[.]tgz' is not a valid binary, no 'Built' entry in 'test7/DESCRIPTION'",
      class = "install_input_error"
    )
  })
})
pkg_dir <- file.path("fixtures", "packages")
expect_error(
verify_extracted_package("bad1", file.path(pkg_dir, "bad1")),
"single directory", class = "install_input_error")
expect_error(
verify_extracted_package("bad2", file.path(pkg_dir, "bad2")),
"invalid", class = "install_input_error")
expect_error(
verify_extracted_package("bad3", file.path(pkg_dir, "bad3")),
"Package", class = "install_input_error")
expect_error(
verify_extracted_package("bad4", file.path(pkg_dir, "bad4")),
"package name mismatch", class = "install_input_error")
})
|
# Plot 4: 2x2 panel of Global Active Power, Voltage, Energy sub-metering,
# and Global Reactive Power for 2007-02-01 and 2007-02-02, saved to
# plot4.png (480x480).
setwd("C:/Users/helmac1/Documents/Personal/Coursera/Exploratory Data Program 1")
# read the file given in the assignment (semicolon-separated)
data <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";")
# merge column 1 and column 2 to create a combined date-time string
data$Datetime <- paste(as.character(data[, 1]), data[, 2])
# reformat the first column as a Date
data[, 1] <- as.Date(data$Date, "%d/%m/%Y")
# only use the data collected between 2007-02-01 and 2007-02-02
data <- subset(data, Date == "2007-02-01" | Date == "2007-02-02")
# coerce the plotted columns to numeric ("?" placeholders become NA)
data[, 3] <- as.numeric(as.character(data[, 3]))  # Global_active_power
data[, 4] <- as.numeric(as.character(data[, 4]))  # Global_reactive_power
data[, 5] <- as.numeric(as.character(data[, 5]))  # Voltage
data[, 7] <- as.numeric(as.character(data[, 7]))  # Sub_metering_1
# BUG FIX: column 7 was converted twice and column 8 (Sub_metering_2) was
# never converted, so the red sub-metering line was drawn from unconverted
# data.
data[, 8] <- as.numeric(as.character(data[, 8]))  # Sub_metering_2
data[, 9] <- as.numeric(as.character(data[, 9]))  # Sub_metering_3
# create a datetime object for the x axis
datetime <- strptime(data$Datetime, "%d/%m/%Y %H:%M:%S")
# sets up a 2x2 layout with slightly reduced text size
par(mfrow = c(2, 2), cex = 0.75)
# plots the four graphs in matrix order
plot(datetime, data[, 3], type = "l", xlab = "", ylab = "Global Active Power")
plot(datetime, data[, 5], type = "l", xlab = "datetime", ylab = "Voltage")
plot(datetime, data[, 7], type = "l", ylab = "Energy Submetering", xlab = "")
lines(datetime, data[, 8], type = "l", col = "red")
lines(datetime, data[, 9], type = "l", col = "blue")
# BUG FIX: 'lty=' was left empty; solid line keys are needed in the legend
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, lwd = 2.5, col = c("black", "red", "blue"), bty = "o")
plot(datetime, data[, 4], type = "l", xlab = "datetime", ylab = "Global_reactive_power")
# copy the screen device to a 480x480 png (opening png() before plotting
# would guarantee exact pixel dimensions, but dev.copy preserves the
# original workflow)
dev.copy(png, file = "plot4.png", width = 480, height = 480)
dev.off()
| /Plot4.R | no_license | corinnehelman/Datasciencecoursera | R | false | false | 1,746 | r |
# Plot 4: 2x2 panel of Global Active Power, Voltage, Energy sub-metering,
# and Global Reactive Power for 2007-02-01 and 2007-02-02, saved to
# plot4.png (480x480).
setwd("C:/Users/helmac1/Documents/Personal/Coursera/Exploratory Data Program 1")
# read the file given in the assignment (semicolon-separated)
data <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";")
# merge column 1 and column 2 to create a combined date-time string
data$Datetime <- paste(as.character(data[, 1]), data[, 2])
# reformat the first column as a Date
data[, 1] <- as.Date(data$Date, "%d/%m/%Y")
# only use the data collected between 2007-02-01 and 2007-02-02
data <- subset(data, Date == "2007-02-01" | Date == "2007-02-02")
# coerce the plotted columns to numeric ("?" placeholders become NA)
data[, 3] <- as.numeric(as.character(data[, 3]))  # Global_active_power
data[, 4] <- as.numeric(as.character(data[, 4]))  # Global_reactive_power
data[, 5] <- as.numeric(as.character(data[, 5]))  # Voltage
data[, 7] <- as.numeric(as.character(data[, 7]))  # Sub_metering_1
# BUG FIX: column 7 was converted twice and column 8 (Sub_metering_2) was
# never converted, so the red sub-metering line was drawn from unconverted
# data.
data[, 8] <- as.numeric(as.character(data[, 8]))  # Sub_metering_2
data[, 9] <- as.numeric(as.character(data[, 9]))  # Sub_metering_3
# create a datetime object for the x axis
datetime <- strptime(data$Datetime, "%d/%m/%Y %H:%M:%S")
# sets up a 2x2 layout with slightly reduced text size
par(mfrow = c(2, 2), cex = 0.75)
# plots the four graphs in matrix order
plot(datetime, data[, 3], type = "l", xlab = "", ylab = "Global Active Power")
plot(datetime, data[, 5], type = "l", xlab = "datetime", ylab = "Voltage")
plot(datetime, data[, 7], type = "l", ylab = "Energy Submetering", xlab = "")
lines(datetime, data[, 8], type = "l", col = "red")
lines(datetime, data[, 9], type = "l", col = "blue")
# BUG FIX: 'lty=' was left empty; solid line keys are needed in the legend
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, lwd = 2.5, col = c("black", "red", "blue"), bty = "o")
plot(datetime, data[, 4], type = "l", xlab = "datetime", ylab = "Global_reactive_power")
# copy the screen device to a 480x480 png (opening png() before plotting
# would guarantee exact pixel dimensions, but dev.copy preserves the
# original workflow)
dev.copy(png, file = "plot4.png", width = 480, height = 480)
dev.off()
|
# Online News Popularity regression study: load the data, pre-process it,
# then fit and compare linear, ridge, lasso and elastic-net models with
# caret/glmnet, scoring each by test-set RMSE.
# NOTE(review): install.packages() runs on every execution of the script;
# consider removing these lines (or guarding with requireNamespace()).
install.packages("dplyr")
install.packages("caret")
install.packages("ggplot2")
install.packages("GGally")
install.packages("elasticnet")
library(dplyr) # for data cleaning
library(caret) # for fitting models
library(ggplot2) # for visualizing density
library(GGally) # for correlation matrix
library(elasticnet) # for elastic net
setwd("~/Google Drive/2_SL/assignment2")
News = read.csv("OnlineNewsPopularity.csv", header = TRUE)
str(News)
# ==== data pre-processing ==== #
# proportion of missing values across the whole data frame
mean(is.na(News))
# # target variable density (the "crim" in the axis label looks copied
# # from another assignment; the variable plotted is shares)
ggplot(News, aes(shares))+stat_density(color="darkblue", fill="lightblue")+xlab("Shares (crim)")
# near zero variance feature
# NOTE(review): colDelete is computed but never used; the drop on the
# next line uses hard-coded column indices instead -- confirm they agree.
colDelete <- nearZeroVar(News, names = F)
# eliminate unnecessary variables
News <- News[,-c(1,2,6,23)]
# # determine correlation between predictors
# ggcorr(News, label = T, label_size = 2)+xlab('correlation coefficient between variables')
# split data in training (80%) and test (20%) set; seed fixed so the
# partition is reproducible
set.seed(100)
train_ind <- createDataPartition(News$shares, p = 0.8, list = F)
train <- News[train_ind, ]
test <- News[-train_ind, ]
# ==== fit multiple regression models ==== #
# prepare training scheme: 10-fold cross-validation
fitControl <- trainControl(method = "cv",
number = 10)
# ---- no regularisation ---- #
set.seed(2019)
# NOTE(review): 'preProces' is likely a typo for 'preProcess'; it still
# reaches caret through partial argument matching -- TODO confirm intent.
lmfit <- train(shares ~., data = train,
method = 'lm',
trControl = fitControl,
preProces = c('scale', 'center'))
# model coefficients
coef(lmfit$finalModel)
summary(lmfit)
# predict on test set and report RMSE
lmfit.pred <- predict(lmfit, test)
sqrt(mean((lmfit.pred - test$shares)^2))
# lmfit.train <- predict(lmfit, train)
# sqrt(mean((lmfit.train - train$crim)^2))
# diagnostic plots of the underlying lm fit
plot(lmfit$finalModel)
# ----- ridge regression (alpha = 0) ---- #
set.seed(2019)
ridge <- train(shares ~., data = train,
method='glmnet',
tuneGrid = expand.grid(alpha = 0,
lambda = seq(5188.9,5189,length = 50)),
trControl = fitControl,
preProcess = c('scale', 'center'))
# prediction: test-set RMSE
ridge.pred <- predict(ridge, test)
sqrt(mean((ridge.pred - test$shares)^2))
# ridge.train <- predict(ridge, train)
# sqrt(mean((ridge.train - train$shares)^2))
# ridge regression result
ridge
plot(ridge, xlab = "lambda in ridge regression" )
plot(ridge$finalModel, xvar = "lambda", label = T, xlab = "log lambda in ridge regression")
# mark the selected lambda on the coefficient-path plot
abline(v=log(5188.951), col = "darkblue")
plot(ridge$finalModel, xvar = "dev", label = T)
plot(varImp(ridge, scale = T))
ridge$bestTune
# ---- lasso (alpha = 1) ---- #
set.seed(2019)
lasso <- train(shares ~., train,
method = 'glmnet',
tuneGrid = expand.grid(alpha = 1,
lambda = seq(30, 31, length = 50)),
preProcess = c('scale','center'),
trControl = fitControl)
# prediction and model performance (test-set RMSE)
lasso.pred <- predict(lasso, test)
sqrt(mean((lasso.pred - test$shares)^2))
# lasso.train <- predict(lasso, train)
# sqrt(mean((lasso.train - train$crim)^2))
# best model
lasso$bestTune
# lasso result
lasso
plot(lasso, xlab = "lambda in lasso regression" )
plot(lasso$finalModel, xvar = "lambda", label = T, xlab = "log lambda in lasso")
abline(v=log(30.79592), col = "darkblue")
plot(lasso$finalModel, xvar = "dev", label = T)
plot(varImp(lasso, scale = T))
# ---- elastic net (alpha tuned between 0 and 1) ---- #
set.seed(2019)
elnet <- train(
shares ~ .,
data = train,
method = "glmnet",
preProcess = c('scale','center'),
trControl = fitControl,
tuneGrid = expand.grid(lambda = seq(34, 35, length = 10),
alpha = seq(0, 1, length = 50))
)
# best model
elnet$bestTune
coef(elnet$finalModel, s= elnet$bestTune$lambda)
# model predictions (test-set RMSE)
elnet.pred <- predict(elnet, test)
sqrt(mean((elnet.pred - test$shares)^2))
# result
plot(elnet)
# plot(elnet)
plot(elnet$finalModel, xvar = "lambda", label = T, xlab = "log lambda in elastic net")
abline(v=log(34), col = "darkblue")
plot(elnet$finalModel, xvar = "dev", label = T)
plot(varImp(elnet))
# comparison of cross-validated resampling results across the four models
model_list <- list(LinearModel = lmfit, Ridge = ridge, Lasso = lasso, ElasticNet = elnet)
res <- resamples(model_list)
summary(res)
xyplot(res, metric = "RMSE")
# Helper: extract the cross-validation results row that corresponds to a
# caret model's chosen (best) tuning parameters; row names are cleared so
# the single-row data frame prints cleanly.
get_best_result <- function(caret_fit) {
  is_best <- rownames(caret_fit$results) == rownames(caret_fit$bestTune)
  best_row <- caret_fit$results[which(is_best), ]
  rownames(best_row) <- NULL
  best_row
}
# report the best cross-validated result for each fitted model
get_best_result(elnet)
get_best_result(lasso)
get_best_result(ridge)
get_best_result(lmfit)
# packages needed
# Online News Popularity regression study: load the data, pre-process it,
# then fit and compare linear, ridge, lasso and elastic-net models with
# caret/glmnet, scoring each by test-set RMSE.
# NOTE(review): install.packages() runs on every execution of the script;
# consider removing these lines (or guarding with requireNamespace()).
install.packages("dplyr")
install.packages("caret")
install.packages("ggplot2")
install.packages("GGally")
install.packages("elasticnet")
library(dplyr) # for data cleaning
library(caret) # for fitting models
library(ggplot2) # for visualizing density
library(GGally) # for correlation matrix
library(elasticnet) # for elastic net
setwd("~/Google Drive/2_SL/assignment2")
News = read.csv("OnlineNewsPopularity.csv", header = TRUE)
str(News)
# ==== data pre-processing ==== #
# proportion of missing values across the whole data frame
mean(is.na(News))
# # target variable density (the "crim" in the axis label looks copied
# # from another assignment; the variable plotted is shares)
ggplot(News, aes(shares))+stat_density(color="darkblue", fill="lightblue")+xlab("Shares (crim)")
# near zero variance feature
# NOTE(review): colDelete is computed but never used; the drop on the
# next line uses hard-coded column indices instead -- confirm they agree.
colDelete <- nearZeroVar(News, names = F)
# eliminate unnecessary variables
News <- News[,-c(1,2,6,23)]
# # determine correlation between predictors
# ggcorr(News, label = T, label_size = 2)+xlab('correlation coefficient between variables')
# split data in training (80%) and test (20%) set; seed fixed so the
# partition is reproducible
set.seed(100)
train_ind <- createDataPartition(News$shares, p = 0.8, list = F)
train <- News[train_ind, ]
test <- News[-train_ind, ]
# ==== fit multiple regression models ==== #
# prepare training scheme: 10-fold cross-validation
fitControl <- trainControl(method = "cv",
number = 10)
# ---- no regularisation ---- #
set.seed(2019)
# NOTE(review): 'preProces' is likely a typo for 'preProcess'; it still
# reaches caret through partial argument matching -- TODO confirm intent.
lmfit <- train(shares ~., data = train,
method = 'lm',
trControl = fitControl,
preProces = c('scale', 'center'))
# model coefficients
coef(lmfit$finalModel)
summary(lmfit)
# predict on test set and report RMSE
lmfit.pred <- predict(lmfit, test)
sqrt(mean((lmfit.pred - test$shares)^2))
# lmfit.train <- predict(lmfit, train)
# sqrt(mean((lmfit.train - train$crim)^2))
# diagnostic plots of the underlying lm fit
plot(lmfit$finalModel)
# ----- ridge regression (alpha = 0) ---- #
set.seed(2019)
ridge <- train(shares ~., data = train,
method='glmnet',
tuneGrid = expand.grid(alpha = 0,
lambda = seq(5188.9,5189,length = 50)),
trControl = fitControl,
preProcess = c('scale', 'center'))
# prediction: test-set RMSE
ridge.pred <- predict(ridge, test)
sqrt(mean((ridge.pred - test$shares)^2))
# ridge.train <- predict(ridge, train)
# sqrt(mean((ridge.train - train$shares)^2))
# ridge regression result
ridge
plot(ridge, xlab = "lambda in ridge regression" )
plot(ridge$finalModel, xvar = "lambda", label = T, xlab = "log lambda in ridge regression")
# mark the selected lambda on the coefficient-path plot
abline(v=log(5188.951), col = "darkblue")
plot(ridge$finalModel, xvar = "dev", label = T)
plot(varImp(ridge, scale = T))
ridge$bestTune
# ---- lasso (alpha = 1) ---- #
set.seed(2019)
lasso <- train(shares ~., train,
method = 'glmnet',
tuneGrid = expand.grid(alpha = 1,
lambda = seq(30, 31, length = 50)),
preProcess = c('scale','center'),
trControl = fitControl)
# prediction and model performance (test-set RMSE)
lasso.pred <- predict(lasso, test)
sqrt(mean((lasso.pred - test$shares)^2))
# lasso.train <- predict(lasso, train)
# sqrt(mean((lasso.train - train$crim)^2))
# best model
lasso$bestTune
# lasso result
lasso
plot(lasso, xlab = "lambda in lasso regression" )
plot(lasso$finalModel, xvar = "lambda", label = T, xlab = "log lambda in lasso")
abline(v=log(30.79592), col = "darkblue")
plot(lasso$finalModel, xvar = "dev", label = T)
plot(varImp(lasso, scale = T))
# ---- elastic net (alpha tuned between 0 and 1) ---- #
set.seed(2019)
elnet <- train(
shares ~ .,
data = train,
method = "glmnet",
preProcess = c('scale','center'),
trControl = fitControl,
tuneGrid = expand.grid(lambda = seq(34, 35, length = 10),
alpha = seq(0, 1, length = 50))
)
# best model
elnet$bestTune
coef(elnet$finalModel, s= elnet$bestTune$lambda)
# model predictions (test-set RMSE)
elnet.pred <- predict(elnet, test)
sqrt(mean((elnet.pred - test$shares)^2))
# result
plot(elnet)
# plot(elnet)
plot(elnet$finalModel, xvar = "lambda", label = T, xlab = "log lambda in elastic net")
abline(v=log(34), col = "darkblue")
plot(elnet$finalModel, xvar = "dev", label = T)
plot(varImp(elnet))
# comparison of cross-validated resampling results across the four models
model_list <- list(LinearModel = lmfit, Ridge = ridge, Lasso = lasso, ElasticNet = elnet)
res <- resamples(model_list)
summary(res)
xyplot(res, metric = "RMSE")
# Helper: extract the cross-validation results row that corresponds to a
# caret model's chosen (best) tuning parameters; row names are cleared so
# the single-row data frame prints cleanly.
get_best_result <- function(caret_fit) {
  is_best <- rownames(caret_fit$results) == rownames(caret_fit$bestTune)
  best_row <- caret_fit$results[which(is_best), ]
  rownames(best_row) <- NULL
  best_row
}
# report the best cross-validated result for each fitted model
get_best_result(elnet)
get_best_result(lasso)
get_best_result(ridge)
get_best_result(lmfit)
|
################################
#### *** Water Supply Element
################################
# Pulls a VAHydro facility:river-segment model run from the OM server,
# windows it to the model period, and (below) summarizes unmet demand.
# dirs/URLs
#----------------------------------------------
site <- "http://deq1.bse.vt.edu/d.dh" #Specify the site of interest, either d.bet OR d.dh
#----------------------------------------------
# Load Libraries (config.R presumably supplies rest_uname/rest_pw and
# other helpers -- verify, since they are referenced but not defined here)
basepath='/var/www/R';
source(paste(basepath,'config.R',sep='/'))
#save_directory <- "/var/www/html/data/proj3/out"
save_directory <- "C:/Users/nrf46657/Desktop/GitHub/vahydro/R/permitting/Salem WTP"
library(hydrotools)
# authenticate against the VAHydro REST API
ds <- RomDataSource$new(site, rest_uname)
ds$get_token(rest_pw)
# Load Local libs
library(stringr)
library(ggplot2)
library(sqldf)
library(ggnewscale)
library(dplyr)
# Read Args (hard-coded below instead of taken from the command line)
# argst <- commandArgs(trailingOnly=T)
# pid <- as.integer(argst[1])
# elid <- as.integer(argst[2])
# runid <- as.integer(argst[3])
#omsite <- "http://deq1.bse.vt.edu"
pid <- 4827216 #Fac:Rseg model pid
elid <- 306768 #Fac:Rseg model om_element_connection
#runid <- 6011
runid <- 600
#facdat <- om_get_rundata(elid, runid, site = omsite)
# NOTE(review): omsite is used below but its assignment (above) is
# commented out, so this errors unless config.R defines omsite -- confirm.
# The literal 37 is an undocumented positional argument -- TODO check it
# against fn_get_runfile_info's signature.
finfo <- fn_get_runfile_info(elid, runid,37, site= omsite)
remote_url <- as.character(finfo$remote_url)
dat <- fn_get_runfile(elid, runid, site= omsite, cached = FALSE)
# model run period: default to a water-year window (Oct 1 through Sep 30)
syear = min(dat$year)
eyear = max(dat$year)
if (syear != eyear) {
sdate <- as.Date(paste0(syear,"-10-01"))
edate <- as.Date(paste0(eyear,"-09-30"))
} else {
# special case to handle 1 year model runs
# just omit January in order to provide a short warmup period.
sdate <- as.Date(paste0(syear,"-02-01"))
edate <- as.Date(paste0(eyear,"-12-31"))
}
cols <- names(dat)
# # does this have an impoundment sub-comp and is imp_off = 0?
# # check for local_impoundment, and if so, rename to impoundment for processing
# if("local_impoundment" %in% cols) {
# dat$impoundment_use_remain_mg <- dat$local_impoundment_use_remain_mg
# dat$impoundment_max_usable <- dat$local_impoundment_max_usable
# dat$impoundment_Qin <- dat$local_impoundment_Qin
# dat$impoundment_Qout <- dat$local_impoundment_Qout
# dat$impoundment_demand <- dat$local_impoundment_demand
# dat$impoundment <- dat$local_impoundment
# cols <- names(dat)
# }
# imp_enabled = FALSE
# if("impoundment" %in% cols) {
# imp_enabled = TRUE
# }
# pump_store = FALSE
# # rename ps_refill_pump_mgd to refill_pump_mgd
# if (!("refill_pump_mgd" %in% cols)) {
# if ("ps_refill_pump_mgd" %in% cols) {
# dat$refill_pump_mgd <- dat$ps_refill_pump_mgd
# }
# }
# if ("refill_pump_mgd" %in% cols) {
# max_pump <- max(dat$refill_pump_mgd)
# if (max_pump > 0) {
# # this is a pump store
# pump_store = TRUE
# }
# }
#
# yrdat will be used for generating the heatmap with calendar years:
# restrict the series to complete calendar years, i.e. from January 1 of
# the first full year through December 31 of the final year.
yrdat <- dat
yr_sdate <- as.Date(paste0((as.numeric(syear) + 1),"-01-01"))
yr_edate <- as.Date(paste0(eyear,"-12-31"))
yrdat <- window(yrdat, start = yr_sdate, end = yr_edate);
#
# # water year data frame
# dat <- window(dat, start = sdate, end = edate);
# mode(dat) <- 'numeric'
# scen.propname<-paste0('runid_', runid)
#
# # GETTING SCENARIO PROPERTY FROM VA HYDRO
# sceninfo <- list(
# varkey = 'om_scenario',
# propname = scen.propname,
# featureid = pid,
# entity_type = "dh_properties",
# bundle = "dh_properties"
# )
# # newschool
# #scenprop <- getProperty(sceninfo, site, scenprop)
# scenprop <- RomProperty$new( ds, sceninfo, TRUE)
#
# # POST PROPERTY IF IT IS NOT YET CREATED
# if (is.na(scenprop$pid) | is.null(scenprop$pid) ) {
# # create
# scenprop$save(TRUE)
# }
# vahydro_post_metric_to_scenprop(scenprop$pid, 'external_file', remote_url, 'logfile', NULL, ds)
#
# #omsite = site <- "http://deq2.bse.vt.edu"
# #dat <- fn_get_runfile(elid, runid, site= omsite, cached = FALSE);
# #amn <- 10.0 * mean(as.numeric(dat$Qreach))
#
# #dat <- window(dat, start = as.Date("1984-10-01"), end = as.Date("2014-09-30"));
# #boxplot(as.numeric(dat$Qreach) ~ dat$year, ylim=c(0,amn))
#
# datdf <- as.data.frame(dat)
# modat <- sqldf("select month, avg(base_demand_mgd) as base_demand_mgd from datdf group by month")
# #barplot(wd_mgd ~ month, data=modat)
# fname <- paste(
# save_directory,paste0('fig.monthly_demand.', elid, '.', runid, '.png'),
# sep = '/'
# )
# furl <- paste(
# save_url,paste0('fig.monthly_demand.',elid, '.', runid, '.png'),
# sep = '/'
# )
# png(fname)
# barplot(modat$base_demand_mgd ~ modat$month, xlab="Month", ylab="Base Demand (mgd)")
# dev.off()
# print(paste("Saved file: ", fname, "with URL", furl))
# vahydro_post_metric_to_scenprop(scenprop$pid, 'dh_image_file', furl, 'fig.monthly_demand', 0.0, ds)
#
# # Calculate
# base_demand_mgd <- mean(as.numeric(dat$base_demand_mgd) )
# if (is.na(base_demand_mgd)) {
# base_demand_mgd = 0.0
# }
# wd_mgd <- mean(as.numeric(dat$wd_mgd) )
# if (is.na(wd_mgd)) {
# wd_mgd = 0.0
# }
# gw_demand_mgd <- mean(as.numeric(dat$gw_demand_mgd) )
# if (is.na(gw_demand_mgd)) {
# gw_demand_mgd = 0.0
# }
# unmet_demand_mgd <- mean(as.numeric(dat$unmet_demand_mgd) )
# if (is.na(unmet_demand_mgd)) {
# unmet_demand_mgd = 0.0
# }
# ps_mgd <- mean(as.numeric(dat$discharge_mgd) )
# if (is.na(ps_mgd)) {
# ps_mgd = 0.0
# }
# Analyze unmet demands: build a zoo series of daily unmet demand (MGD)
# and use group2() rolling summaries to find the worst 90/30/7/1-day
# average shortfalls over the run.
# NOTE(review): group2() is not provided by any library loaded in the
# visible header (the commented notes below mention IHA) -- confirm it is
# attached via config.R or hydrotools.
uds <- zoo(as.numeric(dat$unmet_demand_mgd), order.by = index(dat));
udflows <- group2(uds, 'calendar');
unmet90 <- udflows["90 Day Max"];
ndx = which.max(as.numeric(unmet90[,"90 Day Max"]));
unmet90 = round(udflows[ndx,]$"90 Day Max",6);
unmet30 <- udflows["30 Day Max"];
ndx1 = which.max(as.numeric(unmet30[,"30 Day Max"]));
# NOTE(review): the line below indexes with ndx (the 90-day row), not the
# ndx1 computed just above -- likely a copy/paste bug; confirm and fix.
unmet30 = round(udflows[ndx,]$"30 Day Max",6);
unmet7 <- udflows["7 Day Max"];
ndx = which.max(as.numeric(unmet7[,"7 Day Max"]));
unmet7 = round(udflows[ndx,]$"7 Day Max",6);
unmet1 <- udflows["1 Day Max"];
ndx = which.max(as.numeric(unmet1[,"1 Day Max"]));
unmet1 = round(udflows[ndx,]$"1 Day Max",6);
# # post em up'
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'base_demand_mgd', base_demand_mgd, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'base_demand_mgy', base_demand_mgd * 365.0, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'wd_mgd', wd_mgd, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'wd_mgy', wd_mgd * 365.0, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'gw_demand_mgd', gw_demand_mgd, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'unmet_demand_mgd', unmet_demand_mgd, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'unmet_demand_mgy', unmet_demand_mgd * 365.0, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'ps_mgd', ps_mgd, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'unmet90_mgd', unmet90, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'unmet30_mgd', unmet30, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'unmet7_mgd', unmet7, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'unmet1_mgd', unmet1, ds)
# # Intake Flows
# iflows <- zoo(as.numeric(dat$Qintake), order.by = index(dat));
# uiflows <- group2(iflows, 'calendar')
# Qin30 <- uiflows["30 Day Min"];
# l30_Qintake <- min(Qin30["30 Day Min"]);
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'l30_Qintake', l30_Qintake, ds)
#
# # Define year at which highest 30 Day Max occurs (Lal's code, line 405)
# #defines critical period based on Qintake if there is no unmet demand
# if (sum(datdf$unmet_demand_mgd)==0) {
# # base it on flow since we have no unmet demand.
# ndx1 = which.min(as.numeric(Qin30[,"30 Day Min"]))
# u30_year2 = uiflows[ndx1,]$"year";
# } else {
# u30_year2 = udflows[ndx1,]$"year";
# }
#
# # Metrics that need Zoo (IHA)
# flows <- zoo(as.numeric(as.character( dat$Qintake )), order.by = index(dat));
# loflows <- group2(flows);
# l90 <- loflows["90 Day Min"];
# ndx = which.min(as.numeric(l90[,"90 Day Min"]));
# l90_Qout = round(loflows[ndx,]$"90 Day Min",6);
# l90_year = loflows[ndx,]$"year";
# ##### Define fname before graphing
# # hydroImpoundment lines 144-151
#
# fname <- paste(
# save_directory,
# paste0(
# 'fig.30daymax_unmet.',
# elid, '.', runid, '.png'
# ),
# sep = '/'
# )
#
# furl <- paste(
# save_url,
# paste0(
# 'fig.30daymax_unmet.',
# elid, '.', runid, '.png'
# ),
# sep = '/'
# )
#
# #png(fname)
#
# ##### Define data for graph, just within that defined year, and graph it
# # Lal's code, lines 410-446 (412 commented out)
# if (sum(datdf$unmet_demand_mgd)==0) {
# # base it on flow since we have no unmet demand.
# dsql <- paste(
# "select min(month) as dsmo, max(month) as demo
# from datdf
# where Qintake <= ", l30_Qintake,
# " and year = ",
# u30_year2
# )
# } else {
# dsql <- paste(
# "select min(month) as dsmo, max(month) as demo
# from datdf
# where unmet_demand_mgd > 0
# and year = ",
# u30_year2
# )
# }
# drange <- sqldf(dsql)
# # Drought range dates
# dsy <- u30_year2
# dey <- u30_year2
# dsmo <- as.integer(drange$dsmo) - 1
# demo <- as.integer(drange$demo) + 1
# if (dsmo < 1) {
# dsmo <- 12 + dsmo
# dsy <- dsy - 1
# }
# if (demo > 12) {
# demo <- demo - 12
# dey <- dey + 1
# }
# dsmo <- sprintf('%02i',dsmo)
# demo <- sprintf('%02i',demo)
# ddat2 <- window(
# dat,
# start = as.Date(paste0(dsy, "-", dsmo, "-01")),
# end = as.Date(paste0(dey,"-", demo, "-28") )
# );
#
# #dmx2 = max(ddat2$Qintake)
# if (pump_store || !imp_enabled) {
# flow_ts <- ddat2$Qintake
# flow_ts_name = "Source Stream"
# } else {
# flow_ts <- ddat2$impoundment_Qin
# flow_ts_name = "Inflow"
# }
#
# png(fname)
# par(mar = c(5,5,2,5))
# plot(
# flow_ts,
# xlab=paste0("Critical Period: ",u30_year2),
# ylim=c(0,max(flow_ts)),
# col="blue"
# )
# par(new = TRUE)
# plot(
# ddat2$base_demand_mgd,col='green',
# xlab="",
# ylab="",
# axes=FALSE,
# ylim=c(0,max(ddat2$base_demand_mgd))
# )
# lines(ddat2$unmet_demand_mgd,col='red')
# axis(side = 4)
# mtext(side = 4, line = 3, 'Base/Unmet Demand (mgd)')
# legend("topleft", c(flow_ts_name,"Base Demand","Unmet"),
# col = c("blue", "green","red"),
# lty = c(1,1,1,1),
# bg='white',cex=0.8) #ADD LEGEND
# dev.off()
# map2<-as.data.frame(ddat2$Qintake + (ddat2$discharge_mgd - ddat2$wd_mgd) * 1.547)
# colnames(map2)<-"flow"
# map2$date <- rownames(map2)
# map2$base_demand_mgd<-ddat2$base_demand_mgd * 1.547
# map2$unmetdemand<-ddat2$unmet_demand_mgd * 1.547
# df <- data.frame(as.Date(map2$date), map2$flow, map2$base_demand_mgd,map2$unmetdemand);
# colnames(df)<-c("date","flow","base_demand_mgd","unmetdemand")
# #options(scipen=5, width = 1400, height = 950)
# ggplot(df, aes(x=date)) +
# geom_line(aes(y=flow, color="Flow"), size=0.5) +
# geom_line(aes(y=base_demand_mgd, colour="Base demand"), size=0.5)+
# geom_line(aes(y=unmetdemand, colour="Unmet demand"), size=0.5)+
# theme_bw()+
# theme(legend.position="top",
# legend.title=element_blank(),
# legend.box = "horizontal",
# legend.background = element_rect(fill="white",
# size=0.5, linetype="solid",
# colour ="white"),
# legend.text=element_text(size=12),
# axis.text=element_text(size=12, color = "black"),
# axis.title=element_text(size=14, color="black"),
# axis.line = element_line(color = "black",
# size = 0.5, linetype = "solid"),
# axis.ticks = element_line(color="black"),
# panel.grid.major=element_line(color = "light grey"),
# panel.grid.minor=element_blank())+
# scale_colour_manual(values=c("purple","black","blue"))+
# guides(colour = guide_legend(override.aes = list(size=5)))+
# labs(y = "Flow (cfs)", x= paste("Critical Period:",u30_year2, sep=' '))
# #dev.off()
# print(fname)
# ggsave(fname,width=7,height=4.75)
#
##### Naming for saving and posting to VAHydro
# print(paste("Saved file: ", fname, "with URL", furl))
#
# vahydro_post_metric_to_scenprop(scenprop$pid, 'dh_image_file', furl, 'fig.30daymax_unmet', 0.0, ds)
##### HEATMAP
# includes code needed for both the heatmap with counts and heatmap with counts and averages
# Uses dat2 for heatmap calendar years
# make numeric versions of syear and eyear
# Coerce the zoo matrix to numeric storage and flatten it into a data
# frame so the sqldf queries below can operate on it.
mode(yrdat) <- "numeric"
yrdatdf <- as.data.frame(yrdat)
# Numeric year bounds for the calendar-year heatmap; the start year is
# bumped by one, matching the yr_sdate trim that drops the first
# (presumably partial) calendar year of the run.
num_syear <- 1 + as.numeric(syear)
num_eyear <- as.numeric(eyear)
#ADD FINAL UNMET COLUMN
#######################################################
# Derive final_unmet_demand_mgd: unmet demand credited with the unused
# portion of a groundwater allocation. The 2.6 constant is presumably a
# permitted GW capacity in MGD -- TODO confirm against the facility permit.
# NOTE(review): the threshold is "< 1", not "< 0", so any residual unmet
# demand under 1 MGD is also zeroed -- confirm this de-minimis cutoff is
# intentional.
yrdatdf <- sqldf("select *,
CASE WHEN (unmet_demand_mgd - (2.6 - gw_demand_mgd) < 1) THEN 0
ELSE unmet_demand_mgd - (2.6 - gw_demand_mgd)
END AS final_unmet_demand_mgd
from yrdatdf")
#colnames(yrdatdf)
#######################################################
# FOR QA PURPOSES ONLY
# Spot-check frame: all daily rows for October 2001, kept only for manual
# inspection -- not used by anything downstream.
yrdatdf_qa <- sqldf("select *
from yrdatdf
WHERE year = 2001 AND month = 10
")
#######################################################
# yrmodat <- sqldf("SELECT month months,
# year years,
# sum(unmet_demand_mgd) sum_unmet,
# count(*) count
# FROM yrdatdf
# WHERE unmet_demand_mgd > 0
# GROUP BY month, year") #Counts sum of unmet_days by month and year
#NEW VERSION -> USING FINAL UNMET DEMAND
# Per month/year: number of days with positive final unmet demand
# ("count") and the sum of that demand ("sum_unmet"). Month/year combos
# with no unmet days are absent here and re-added by the join below.
yrmodat <- sqldf("SELECT month months,
year years,
sum(final_unmet_demand_mgd) sum_unmet,
count(*) count
FROM yrdatdf
WHERE final_unmet_demand_mgd > 0
GROUP BY month, year") #Counts sum of unmet_days by month and year
#converts unmet_mgd sums to averages for cells
yrmodat$avg_unmet <- yrmodat$sum_unmet / yrmodat$count
#Join counts with original data frame to get missing month and year combos then selects just count month and year
# NOTE(review): "SELECT * ... GROUP BY" with bare (non-aggregated) columns
# relies on SQLite's non-standard behavior of returning an arbitrary row
# per group; it works here because the subsequent SELECT keeps only
# month, year, avg_unmet and count, which are constant within each group.
yrmodat <- sqldf("SELECT * FROM yrdatdf LEFT JOIN yrmodat ON yrmodat.years = yrdatdf.year AND yrmodat.months = yrdatdf.month group by month, year")
yrmodat <- sqldf('SELECT month, year, avg_unmet, count count_unmet_days FROM yrmodat GROUP BY month, year')
#Replace NA for count with 0s
# Months with no unmet days came through the LEFT JOIN as NA; zero them.
yrmodat[is.na(yrmodat)] = 0
########################################################### Calculating Totals
# monthly totals via sqldf
# mosum: total unmet days per calendar month across all years; plotted as
# an extra row at y = num_eyear + 1 (one past the last data year).
mosum <- sqldf("SELECT month, sum(count_unmet_days) count_unmet_days FROM yrmodat GROUP BY month")
mosum$year <- rep(num_eyear+1,12)
#JK addition 3/25/22: Cell of total days unmet in simulation period
# Single grand-total cell at grid position (month 13, totals row).
total_unmet_days <- sum(yrmodat$count_unmet_days)
total_unmet_days_cell <- data.frame("month" = 13,
"count_unmet_days" = as.numeric(total_unmet_days),
"year" = num_eyear+1)
#yearly sum
# yesum: total unmet days per year; plotted as an extra column at x = 13.
yesum <- sqldf("SELECT year, sum(count_unmet_days) count_unmet_days FROM yrmodat GROUP BY year")
yesum$month <- rep(13,length(yesum$year))
# yesum <- rbind(yesum,data.frame(year = "Total",
# count_unmet_days = 999,
# month = 13))
# create monthly averages
# moavg: average unmet days per month (total / number of heatmap years),
# shifted one row below the totals row on the y axis.
moavg<- sqldf('SELECT * FROM mosum')
moavg$year <- moavg$year + 1
moavg$avg <- round(moavg$count_unmet_days/((num_eyear-num_syear)+1),1)
# create yearly averages
# yeavg: average unmet days per year (yearly total / 12 months), shifted
# one column right of the totals column on the x axis.
yeavg<- sqldf('SELECT * FROM yesum')
yeavg$month <- yeavg$month + 1
yeavg$avg <- round(yeavg$count_unmet_days/12,1)
# Axis breaks for the heatmap grid: one y position per data year plus two
# extra rows (Totals, Avg); months 1-12 plus two extra columns on x.
y_breaks <- seq(from = syear, to = num_eyear + 2, by = 1)
x_breaks <- seq(from = 1, to = 14, by = 1)
# Matching axis labels, one per break position.
y_labs <- c(seq(from = syear, to = eyear, by = 1), "Totals", "Avg")
x_labs <- c(month.abb, "Totals", "Avg")
############################################################### Plot and Save count heatmap
# If loop makes sure plots are green if there is no unmet demand
# Two nearly identical branches: when the run has zero unmet days, every
# fill scale is forced to a flat green/neutral color; otherwise graded
# gradients are used. Each plot stacks four layers, with
# ggnewscale::new_scale_fill() opening a fresh fill scale per layer:
#   1. count_grid - month x year cells with unmet-day counts
#   2. unmet      - yearly totals column (x=13) and monthly totals row
#   3. total      - single grand-total cell
#   4. unmet_avg  - yearly/monthly average column/row
if (sum(mosum$count_unmet_days) == 0) {
# Layer 1: count grid (flat green scale since nothing is unmet)
count_grid <- ggplot() +
geom_tile(data=yrmodat, color='black',aes(x = month, y = year, fill = count_unmet_days)) +
geom_text(aes(label=yrmodat$count_unmet_days, x=yrmodat$month, y= yrmodat$year), size = 3.5, colour = "black") +
scale_fill_gradient2(low = "#00cc00", mid= "#00cc00", high = "#00cc00", guide = "colourbar",
name= 'Unmet Days') +
theme(panel.background = element_rect(fill = "transparent"))+
theme() + labs(title = 'Unmet Demand Heatmap', y=NULL, x=NULL) +
scale_x_continuous(expand=c(0,0), breaks= x_breaks, labels=x_labs, position='top') +
scale_y_reverse(expand=c(0,0), breaks=y_breaks, labels= y_labs) +
theme(axis.ticks= element_blank()) +
theme(plot.title = element_text(size = 12, face = "bold", hjust = 0.5)) +
theme(legend.title.align = 0.5)
# Layer 2: totals row and column on a fresh fill scale
unmet <- count_grid + new_scale_fill() +
geom_tile(data = yesum, color='black', aes(x = month, y = year, fill = count_unmet_days)) +
geom_tile(data = mosum, color='black', aes(x = month, y = year, fill = count_unmet_days)) +
geom_text(data = yesum, size = 3.5, color='black', aes(x = month, y = year, label = count_unmet_days)) +
geom_text(data = mosum, size = 3.5, color='black', aes(x = month, y = year, label = count_unmet_days)) +
scale_fill_gradient2(low = "#63D1F4", high = "#8A2BE2", mid="#63D1F4",
midpoint = mean(mosum$count_unmet_days), name= 'Total Unmet Days')
# Layer 3: grand-total cell. NOTE(review): both a constant fill="grey"
# and aes(fill = count_unmet_days) are supplied; the constant overrides
# the mapping, so the aes fill here has no visible effect.
total <- unmet + new_scale_fill() +
geom_tile(data = total_unmet_days_cell, color='black',fill="grey",aes(x = month, y = year, fill = count_unmet_days)) +
geom_text(data = total_unmet_days_cell, size = 3.5, color='black', aes(x = month, y = year, label = count_unmet_days))
#unmet_avg <- unmet + new_scale_fill()+
# Layer 4: average column/row (flat neutral scale in this branch)
unmet_avg <- total + new_scale_fill()+
geom_tile(data = yeavg, color='black', aes(x = month, y = year, fill = avg)) +
geom_tile(data = moavg, color='black', aes(x = month, y = year, fill = avg)) +
geom_text(data = yeavg, size = 3.5, color='black', aes(x = month, y = year, label = avg)) +
geom_text(data = moavg, size = 3.5, color='black', aes(x = month, y = year, label = avg))+
scale_fill_gradient2(low = "#FFF8DC", mid = "#FFF8DC", high ="#FFF8DC",
name= 'Average Unmet Days', midpoint = mean(yeavg$avg))
} else{
# Branch with unmet demand present: graded color scales.
# Layer 1: count grid, green -> yellow -> red centered at 15 days/month.
count_grid <- ggplot() +
geom_tile(data=yrmodat, color='black',aes(x = month, y = year, fill = count_unmet_days)) +
geom_text(aes(label=yrmodat$count_unmet_days, x=yrmodat$month, y= yrmodat$year), size = 3.5, colour = "black") +
scale_fill_gradient2(low = "#00cc00", high = "red",mid ='yellow',
midpoint = 15, guide = "colourbar",
name= 'Unmet Days') +
theme(panel.background = element_rect(fill = "transparent"))+
theme() + labs(title = 'Unmet Demand Heatmap', y=NULL, x=NULL) +
scale_x_continuous(expand=c(0,0), breaks= x_breaks, labels=x_labs, position='top') +
scale_y_reverse(expand=c(0,0), breaks=y_breaks, labels= y_labs) +
theme(axis.ticks= element_blank()) +
theme(plot.title = element_text(size = 12, face = "bold", hjust = 0.5)) +
theme(legend.title.align = 0.5)
# Layer 2: totals, blue -> purple gradient centered at the mean total
unmet <- count_grid + new_scale_fill() +
geom_tile(data = yesum, color='black', aes(x = month, y = year, fill = count_unmet_days)) +
geom_tile(data = mosum, color='black', aes(x = month, y = year, fill = count_unmet_days)) +
geom_text(data = yesum, size = 3.5, color='black', aes(x = month, y = year, label = count_unmet_days)) +
geom_text(data = mosum, size = 3.5, color='black', aes(x = month, y = year, label = count_unmet_days)) +
scale_fill_gradient2(low = "#63D1F4", high = "#8A2BE2", mid='#CAB8FF',
midpoint = mean(mosum$count_unmet_days), name= 'Total Unmet Days')
# Layer 3: grand-total cell (constant grey fill; see note above)
total <- unmet + new_scale_fill() +
geom_tile(data = total_unmet_days_cell, color='black',fill="grey",aes(x = month, y = year, fill = count_unmet_days)) +
geom_text(data = total_unmet_days_cell, size = 3.5, color='black', aes(x = month, y = year, label = count_unmet_days))
#unmet_avg <- unmet + new_scale_fill()+
# Layer 4: averages, cream -> tan gradient centered at the mean average
unmet_avg <- total + new_scale_fill()+
geom_tile(data = yeavg, color='black', aes(x = month, y = year, fill = avg)) +
geom_tile(data = moavg, color='black', aes(x = month, y = year, fill = avg)) +
geom_text(data = yeavg, size = 3.5, color='black', aes(x = month, y = year, label = avg)) +
geom_text(data = moavg, size = 3.5, color='black', aes(x = month, y = year, label = avg))+
scale_fill_gradient2(low = "#FFF8DC", mid = "#FFDEAD", high ="#DEB887",
name= 'Average Unmet Days', midpoint = mean(yeavg$avg))
}
# Write the unmet-days heatmap to
# <save_directory>/fig.unmet_heatmap_gw.<elid>.<runid>.png
fname2 <- file.path(
  save_directory,
  paste0("fig.unmet_heatmap_gw.", elid, ".", runid, ".png")
)
#furl2 <- paste(save_url, paste0('fig.unmet_heatmap.',elid, '.', runid, '.png'),sep = '/')
ggsave(fname2, plot = unmet_avg, width = 7, height = 7)
print(paste("File saved to save_directory:", fname2))
#vahydro_post_metric_to_scenprop(scenprop$pid, 'dh_image_file', furl2, 'fig.unmet_heatmap', 0.0, ds)
###################################### Plot and save Second unmet Demand Grid
# contains count/ Avg unmet demand mgd
# Same four-layer structure as the first heatmap, but each data cell is
# labeled "count / avg unmet (mgd)". NOTE(review): the two branches
# format the average differently -- round(x, 1) at text size 3.5 when
# there is no unmet demand vs. signif(x, digits = 1) at size 3 otherwise;
# confirm this inconsistency is intentional.
if (sum(mosum$count_unmet_days) == 0) {
# Layer 1: count grid with "count / avg" labels (flat green scale)
count_grid <- ggplot() +
geom_tile(data=yrmodat, color='black',aes(x = month, y = year, fill = count_unmet_days)) +
geom_text(aes(label=paste(yrmodat$count_unmet_days,' / ',round(yrmodat$avg_unmet,1), sep=''),
x=yrmodat$month, y= yrmodat$year), size = 3.5, colour = "black") +
scale_fill_gradient2(low = "#00cc00", mid= "#00cc00", high = "#00cc00", guide = "colourbar",
name= 'Unmet Days') +
theme(panel.background = element_rect(fill = "transparent"))+
theme() + labs(title = 'Unmet Demand Heatmap', y=NULL, x=NULL) +
scale_x_continuous(expand=c(0,0), breaks= x_breaks, labels=x_labs, position='top') +
scale_y_reverse(expand=c(0,0), breaks=y_breaks, labels= y_labs) +
theme(axis.ticks= element_blank()) +
theme(plot.title = element_text(size = 12, face = "bold", hjust = 0.5)) +
theme(legend.title.align = 0.5)
# Layer 2: totals row/column on a fresh fill scale
unmet <- count_grid + new_scale_fill() +
geom_tile(data = yesum, color='black', aes(x = month, y = year, fill = count_unmet_days)) +
geom_tile(data = mosum, color='black', aes(x = month, y = year, fill = count_unmet_days)) +
geom_text(data = yesum, size = 3.5, color='black', aes(x = month, y = year, label = count_unmet_days)) +
geom_text(data = mosum, size = 3.5, color='black', aes(x = month, y = year, label = count_unmet_days)) +
scale_fill_gradient2(low = "#63D1F4", high = "#8A2BE2", mid="#63D1F4",
midpoint = mean(mosum$count_unmet_days), name= 'Total Unmet Days')
# Layer 3: grand-total cell. NOTE(review): constant fill="grey"
# overrides the aes(fill = ...) mapping supplied alongside it.
total <- unmet + new_scale_fill() +
geom_tile(data = total_unmet_days_cell, color='black',fill="grey",aes(x = month, y = year, fill = count_unmet_days)) +
geom_text(data = total_unmet_days_cell, size = 3.5, color='black', aes(x = month, y = year, label = count_unmet_days))
#unmet_avg <- unmet + new_scale_fill()+
# Layer 4: average column/row (flat neutral scale in this branch)
unmet_avg <- total + new_scale_fill()+
geom_tile(data = yeavg, color='black', aes(x = month, y = year, fill = avg)) +
geom_tile(data = moavg, color='black', aes(x = month, y = year, fill = avg)) +
geom_text(data = yeavg, size = 3.5, color='black', aes(x = month, y = year, label = avg)) +
geom_text(data = moavg, size = 3.5, color='black', aes(x = month, y = year, label = avg))+
scale_fill_gradient2(low = "#FFF8DC", mid = "#FFF8DC", high ="#FFF8DC",
name= 'Average Unmet Days', midpoint = mean(yeavg$avg))
} else{
# Branch with unmet demand present: graded color scales.
# Layer 1: "count / avg" labels, green -> yellow -> red fill
count_grid <- ggplot() +
geom_tile(data=yrmodat, color='black',aes(x = month, y = year, fill = count_unmet_days)) +
geom_text(aes(label=paste(yrmodat$count_unmet_days,' / ',signif(yrmodat$avg_unmet,digits=1), sep=''),
x=yrmodat$month, y= yrmodat$year), size = 3, colour = "black") +
scale_fill_gradient2(low = "#00cc00", high = "red",mid ='yellow',
midpoint = 15, guide = "colourbar",
name= 'Unmet Days') +
theme(panel.background = element_rect(fill = "transparent"))+
theme() + labs(title = 'Unmet Demand Heatmap', y=NULL, x=NULL) +
scale_x_continuous(expand=c(0,0), breaks= x_breaks, labels=x_labs, position='top') +
scale_y_reverse(expand=c(0,0), breaks=y_breaks, labels= y_labs) +
theme(axis.ticks= element_blank()) +
theme(plot.title = element_text(size = 12, face = "bold", hjust = 0.5)) +
theme(legend.title.align = 0.5)
# Layer 2: totals, blue -> purple gradient centered at the mean total
unmet <- count_grid + new_scale_fill() +
geom_tile(data = yesum, color='black', aes(x = month, y = year, fill = count_unmet_days)) +
geom_tile(data = mosum, color='black', aes(x = month, y = year, fill = count_unmet_days)) +
geom_text(data = yesum, size = 3.5, color='black', aes(x = month, y = year, label = count_unmet_days)) +
geom_text(data = mosum, size = 3.5, color='black', aes(x = month, y = year, label = count_unmet_days)) +
scale_fill_gradient2(low = "#63D1F4", high = "#8A2BE2", mid='#CAB8FF',
midpoint = mean(mosum$count_unmet_days), name= 'Total Unmet Days')
# Layer 3: grand-total cell (constant grey fill; see note above)
total <- unmet + new_scale_fill() +
geom_tile(data = total_unmet_days_cell, color='black',fill="grey",aes(x = month, y = year, fill = count_unmet_days)) +
geom_text(data = total_unmet_days_cell, size = 3.5, color='black', aes(x = month, y = year, label = count_unmet_days))
#unmet_avg <- unmet + new_scale_fill()+
# Layer 4: averages, cream -> tan gradient centered at the mean average
unmet_avg <- total + new_scale_fill()+
geom_tile(data = yeavg, color='black', aes(x = month, y = year, fill = avg)) +
geom_tile(data = moavg, color='black', aes(x = month, y = year, fill = avg)) +
geom_text(data = yeavg, size = 3.5, color='black', aes(x = month, y = year, label = avg)) +
geom_text(data = moavg, size = 3.5, color='black', aes(x = month, y = year, label = avg))+
scale_fill_gradient2(low = "#FFF8DC", mid = "#FFDEAD", high ="#DEB887",
name= 'Average Unmet Days', midpoint = mean(yeavg$avg))
}
# Write the count/average heatmap to
# <save_directory>/fig.unmet_heatmap_amt_gw.<elid>.<runid>.png
fname3 <- file.path(
  save_directory,
  paste0("fig.unmet_heatmap_amt_gw.", elid, ".", runid, ".png")
)
# furl3 <- paste(save_url, paste0('fig.unmet_heatmap_amt.',elid, '.', runid, '.png'),sep = '/')
ggsave(fname3, plot = unmet_avg, width = 9.5, height = 6)
print('File saved to save_directory')
# vahydro_post_metric_to_scenprop(scenprop$pid, 'dh_image_file', furl3, 'fig.unmet_heatmap_amt', 0.0, ds)
# if("impoundment" %in% cols) {
# # Plot and analyze impoundment sub-comps
# dat$storage_pct <- as.numeric(dat$impoundment_use_remain_mg) * 3.07 / as.numeric(dat$impoundment_max_usable)
# #set the storage percent
# storage_pct <- mean(as.numeric(dat$storage_pct) )
# if (is.na(storage_pct)) {
# usable_pct_p0 <- 0
# usable_pct_p10 <- 0
# usable_pct_p50 <- 0
# } else {
# usable_pcts = quantile(as.numeric(dat$storage_pct), c(0,0.1,0.5) )
# usable_pct_p0 <- usable_pcts["0%"]
# usable_pct_p10 <- usable_pcts["10%"]
# usable_pct_p50 <- usable_pcts["50%"]
# }
# # post em up
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'usable_pct_p0', usable_pct_p0, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'usable_pct_p10', usable_pct_p10, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'usable_pct_p50', usable_pct_p50, ds)
#
#
# # this has an impoundment. Plot it up.
# # Now zoom in on critical drought period
# pdstart = as.Date(paste0(l90_year,"-06-01") )
# pdend = as.Date(paste0(l90_year, "-11-15") )
# datpd <- window(
# dat,
# start = pdstart,
# end = pdend
# );
# fname <- paste(
# save_directory,
# paste0(
# 'l90_imp_storage.',
# elid, '.', runid, '.png'
# ),
# sep = '/'
# )
# furl <- paste(
# save_url,
# paste0(
# 'l90_imp_storage.',
# elid, '.', runid, '.png'
# ),
# sep = '/'
# )
# png(fname)
# ymn <- 0
# ymx <- 100
#
# par(mar = c(5,5,2,5))
# plot(
# datpd$storage_pct * 100.0,
# ylim=c(ymn,ymx),
# main="Minimum Modeled Reservoir Storage Period",
# ylab="Reservoir Storage (%)",
# xlab=paste("Model Time Period",pdstart,"to",pdend)
# )
# par(new = TRUE)
# if (pump_store) {
# flow_ts <- datpd$Qreach
# } else {
# flow_ts <- datpd$impoundment_Qin
# }
# plot(flow_ts,col='blue', axes=FALSE, xlab="", ylab="")
# lines(datpd$Qout,col='green')
# lines(datpd$wd_mgd * 1.547,col='red')
# axis(side = 4)
# mtext(side = 4, line = 3, 'Flow/Demand (cfs)')
# dev.off()
# print(paste("Saved file: ", fname, "with URL", furl))
# vahydro_post_metric_to_scenprop(scenprop$pid, 'dh_image_file', furl, 'fig.l90_imp_storage', 0.0, ds)
#
# # l90 2 year
# # this has an impoundment. Plot it up.
# # Now zoom in on critical drought period
# pdstart = as.Date(paste0( (as.integer(l90_year) - 1),"-01-01") )
# pdend = as.Date(paste0(l90_year, "-12-31") )
# datpd <- window(
# dat,
# start = pdstart,
# end = pdend
# );
# fname <- paste(
# save_directory,
# paste0(
# 'l90_imp_storage.2yr.',
# elid, '.', runid, '.png'
# ),
# sep = '/'
# )
# furl <- paste(
# save_url,
# paste0(
# 'l90_imp_storage.2yr.',
# elid, '.', runid, '.png'
# ),
# sep = '/'
# )
# png(fname)
# ymn <- 1
# ymx <- 100
# par(mar = c(5,5,2,5))
# par(mar = c(1,5,2,5),mfrow = c(2,1))
# plot(
# datpd$storage_pct * 100.0,
# ylim=c(0,100),
# ylab="Reservoir Storage (%)",
# xlab="",
# main=paste("Storage and Flows",sdate,"to",edate)
# )
# ymx <- ceiling(
# pmax(
# max(datpd$Qreach)
# )
# )
# # if this is a pump store, refill_pump_mgd > 0
# # then, plot Qreach first, overlaying impoundment_Qin
# if (pump_store) {
# flow_ts <- datpd$Qreach
# } else {
# flow_ts <- datpd$impoundment_Qin
# }
# plot(
# flow_ts,
# col='blue',
# xlab="",
# ylab='Flow/Demand (cfs)',
# #ylim=c(0,ymx),
# log="y",
# yaxt="n" # supress labeling till we format
# )
# #legend()
# y_ticks <- axTicks(2)
# y_ticks_fmt <- format(y_ticks, scientific = FALSE)
# axis(2, at = y_ticks, labels = y_ticks_fmt)
# ymx <- ceiling(
# pmax(
# max(datpd$refill_pump_mgd),
# max(datpd$impoundment_demand * 1.547)
# )
# )
# #par(new = TRUE)
# #plot(datpd$refill_pump_mgd * 1.547,col='green',xlab="",ylab="")
# lines(datpd$refill_pump_mgd * 1.547,col='red')
# lines(datpd$impoundment_demand * 1.547,col='green')
# #axis(side = 4)
# #mtext(side = 4, line = 3, 'Flow/Demand (cfs)')
#
# dev.off()
# print(paste("Saved file: ", fname, "with URL", furl))
# vahydro_post_metric_to_scenprop(scenprop$pid, 'dh_image_file', furl, 'fig.l90_imp_storage.2yr', 0.0, ds)
#
# # All Periods
# # this has an impoundment. Plot it up.
# # Now zoom in on critical drought period
# datpd <- dat
# fname <- paste(
# save_directory,
# paste0(
# 'fig.imp_storage.all.',
# elid, '.', runid, '.png'
# ),
# sep = '/'
# )
# furl <- paste(
# save_url,
# paste0(
# 'fig.imp_storage.all.',
# elid, '.', runid, '.png'
# ),
# sep = '/'
# )
# png(fname)
# ymn <- 0
# ymx <- 100
# par(mar = c(5,5,2,5))
# par(mar = c(1,5,2,5),mfrow = c(2,1))
# plot(
# datpd$storage_pct * 100.0,
# ylim=c(0,100),
# ylab="Reservoir Storage (%)",
# xlab="",
# main=paste("Storage and Flows",sdate,"to",edate)
# )
# ymx <- ceiling(
# pmax(
# max(datpd$Qreach)
# )
# )
# # if this is a pump store, refill_pump_mgd > 0
# # then, plot Qreach first, overlaying impoundment_Qin
# if (pump_store) {
# flow_ts <- datpd$Qreach
# } else {
# flow_ts <- datpd$impoundment_Qin
# }
# plot(
# flow_ts,
# col='blue',
# xlab="",
# ylab='Flow/Demand (cfs)',
# #ylim=c(0,ymx),
# log="y",
# yaxt="n" # supress labeling till we format
# )
# y_ticks <- axTicks(2)
# y_ticks_fmt <- format(y_ticks, scientific = FALSE)
# axis(2, at = y_ticks, labels = y_ticks_fmt)
# ymx <- ceiling(
# pmax(
# max(datpd$refill_pump_mgd),
# max(datpd$impoundment_demand * 1.547)
# )
# )
# #par(new = TRUE)
# #plot(datpd$refill_pump_mgd * 1.547,col='green',xlab="",ylab="")
# if (pump_store) {
# lines(datpd$refill_pump_mgd * 1.547,col='red')
# }
# lines(datpd$impoundment_demand * 1.547,col='green')
# #axis(side = 4)
# #mtext(side = 4, line = 3, 'Flow/Demand (cfs)')
#
# dev.off()
# print(paste("Saved file: ", fname, "with URL", furl))
# vahydro_post_metric_to_scenprop(scenprop$pid, 'dh_image_file', furl, 'fig.imp_storage.all', 0.0, ds)
#
# # Low Elevation Period
# # Dat for Critical Period
# elevs <- zoo(dat$storage_pct, order.by = index(dat));
# loelevs <- group2(elevs);
# l90 <- loelevs["90 Day Min"];
# ndx = which.min(as.numeric(l90[,"90 Day Min"]));
# l90_elev = round(loelevs[ndx,]$"90 Day Min",6);
# l90_elevyear = loelevs[ndx,]$"year";
# l90_elev_start = as.Date(paste0(l90_elevyear - 2,"-01-01"))
# l90_elev_end = as.Date(paste0(l90_elevyear,"-12-31"))
# elevdatpd <- window(
# dat,
# start = l90_elev_start,
# end = l90_elev_end
# );
# datpd <- elevdatpd
# fname <- paste(
# save_directory,
# paste0(
# 'elev90_imp_storage.all.',
# elid, '.', runid, '.png'
# ),
# sep = '/'
# )
# furl <- paste(
# save_url,
# paste0(
# 'elev90_imp_storage.all.',
# elid, '.', runid, '.png'
# ),
# sep = '/'
# )
# png(fname)
# ymn <- 1
# ymx <- 100
# par(mar = c(5,5,2,5))
# plot(
# datpd$storage_pct * 100.0,
# ylim=c(ymn,ymx),
# main="Summer/Fall of L-90 Period",
# ylab="Reservoir Storage (%)",
# xlab=paste("Model Time Period",l90_elev_start,"to",l90_elev_end)
# )
# par(new = TRUE)
# if (pump_store) {
# flow_ts <- datpd$Qreach
# } else {
# flow_ts <- datpd$impoundment_Qin
# }
# plot(flow_ts,col='blue', axes=FALSE, xlab="", ylab="")
# lines(datpd$Qout,col='green')
# lines(datpd$wd_mgd * 1.547,col='red')
# axis(side = 4)
# mtext(side = 4, line = 3, 'Flow/Demand (cfs)')
# dev.off()
# print(paste("Saved file: ", fname, "with URL", furl))
# vahydro_post_metric_to_scenprop(scenprop$pid, 'dh_image_file', furl, 'elev90_imp_storage.all', 0.0, ds)
#
# } | /R/permitting/Salem WTP/unmet_grid_code.R | no_license | HARPgroup/vahydro | R | false | false | 34,651 | r | ################################
#### *** Water Supply Element
################################
# dirs/URLs
#----------------------------------------------
site <- "http://deq1.bse.vt.edu/d.dh" #Specify the site of interest, either d.bet OR d.dh
#----------------------------------------------
# Load Libraries
# config.R sourced from the server-standard path; it presumably defines
# the REST credentials (rest_uname / rest_pw) used below -- TODO confirm.
basepath='/var/www/R';
source(paste(basepath,'config.R',sep='/'))
#save_directory <- "/var/www/html/data/proj3/out"
# NOTE(review): hard-coded user-specific Windows path; parameterize or
# update before running on another machine.
save_directory <- "C:/Users/nrf46657/Desktop/GitHub/vahydro/R/permitting/Salem WTP"
library(hydrotools)
# authenticate
# Open a VAHydro REST session against `site` and obtain an auth token.
ds <- RomDataSource$new(site, rest_uname)
ds$get_token(rest_pw)
# Load Local libs
library(stringr)
library(ggplot2)
library(sqldf)
library(ggnewscale)
library(dplyr)
# Read Args
# argst <- commandArgs(trailingOnly=T)
# pid <- as.integer(argst[1])
# elid <- as.integer(argst[2])
# runid <- as.integer(argst[3])

# Base URL of the OM model-run file server.
# BUG FIX: this assignment was previously commented out, so the
# fn_get_runfile_info()/fn_get_runfile() calls below failed with
# "object 'omsite' not found" in a fresh session. Define it here, but
# keep any value already supplied by the environment (e.g. config.R).
if (!exists("omsite")) {
  omsite <- "http://deq1.bse.vt.edu"
}

# Hard-coded model identifiers for the Salem WTP Fac:Rseg model run
pid <- 4827216 #Fac:Rseg model pid
elid <- 306768 #Fac:Rseg model om_element_connection
#runid <- 6011
runid <- 600

# Fetch run metadata and the full model output time series (zoo object).
# The third argument (37) is passed through to fn_get_runfile_info --
# TODO confirm its meaning against the hydrotools documentation.
#facdat <- om_get_rundata(elid, runid, site = omsite)
finfo <- fn_get_runfile_info(elid, runid, 37, site = omsite)
remote_url <- as.character(finfo$remote_url)
dat <- fn_get_runfile(elid, runid, site = omsite, cached = FALSE)

# Simulation period bounds: calendar years present in the run output
syear <- min(dat$year)
eyear <- max(dat$year)
# Analysis window. Multi-year runs use a water-year span: Oct 1 of the
# first year through Sep 30 of the last. A single-year run instead keeps
# Feb 1 - Dec 31, omitting January as a short warmup period.
if (syear == eyear) {
  # special case to handle 1 year model runs
  sdate <- as.Date(sprintf("%s-02-01", syear))
  edate <- as.Date(sprintf("%s-12-31", eyear))
} else {
  sdate <- as.Date(sprintf("%s-10-01", syear))
  edate <- as.Date(sprintf("%s-09-30", eyear))
}
# Column names in this run's output, used later for capability checks
cols <- names(dat)
# # does this have an impoundment sub-comp and is imp_off = 0?
# # check for local_impoundment, and if so, rename to impoundment for processing
# if("local_impoundment" %in% cols) {
# dat$impoundment_use_remain_mg <- dat$local_impoundment_use_remain_mg
# dat$impoundment_max_usable <- dat$local_impoundment_max_usable
# dat$impoundment_Qin <- dat$local_impoundment_Qin
# dat$impoundment_Qout <- dat$local_impoundment_Qout
# dat$impoundment_demand <- dat$local_impoundment_demand
# dat$impoundment <- dat$local_impoundment
# cols <- names(dat)
# }
# imp_enabled = FALSE
# if("impoundment" %in% cols) {
# imp_enabled = TRUE
# }
# pump_store = FALSE
# # rename ps_refill_pump_mgd to refill_pump_mgd
# if (!("refill_pump_mgd" %in% cols)) {
# if ("ps_refill_pump_mgd" %in% cols) {
# dat$refill_pump_mgd <- dat$ps_refill_pump_mgd
# }
# }
# if ("refill_pump_mgd" %in% cols) {
# max_pump <- max(dat$refill_pump_mgd)
# if (max_pump > 0) {
# # this is a pump store
# pump_store = TRUE
# }
# }
#
# yrdat: run output trimmed to whole calendar years, used for the
# calendar-year heatmaps; the first (partial) year is dropped.
yr_sdate <- as.Date(sprintf("%s-01-01", as.numeric(syear) + 1))
yr_edate <- as.Date(sprintf("%s-12-31", eyear))
yrdat <- window(dat, start = yr_sdate, end = yr_edate)
#
# # water year data frame
# dat <- window(dat, start = sdate, end = edate);
# mode(dat) <- 'numeric'
# scen.propname<-paste0('runid_', runid)
#
# # GETTING SCENARIO PROPERTY FROM VA HYDRO
# sceninfo <- list(
# varkey = 'om_scenario',
# propname = scen.propname,
# featureid = pid,
# entity_type = "dh_properties",
# bundle = "dh_properties"
# )
# # newschool
# #scenprop <- getProperty(sceninfo, site, scenprop)
# scenprop <- RomProperty$new( ds, sceninfo, TRUE)
#
# # POST PROPERTY IF IT IS NOT YET CREATED
# if (is.na(scenprop$pid) | is.null(scenprop$pid) ) {
# # create
# scenprop$save(TRUE)
# }
# vahydro_post_metric_to_scenprop(scenprop$pid, 'external_file', remote_url, 'logfile', NULL, ds)
#
# #omsite = site <- "http://deq2.bse.vt.edu"
# #dat <- fn_get_runfile(elid, runid, site= omsite, cached = FALSE);
# #amn <- 10.0 * mean(as.numeric(dat$Qreach))
#
# #dat <- window(dat, start = as.Date("1984-10-01"), end = as.Date("2014-09-30"));
# #boxplot(as.numeric(dat$Qreach) ~ dat$year, ylim=c(0,amn))
#
# datdf <- as.data.frame(dat)
# modat <- sqldf("select month, avg(base_demand_mgd) as base_demand_mgd from datdf group by month")
# #barplot(wd_mgd ~ month, data=modat)
# fname <- paste(
# save_directory,paste0('fig.monthly_demand.', elid, '.', runid, '.png'),
# sep = '/'
# )
# furl <- paste(
# save_url,paste0('fig.monthly_demand.',elid, '.', runid, '.png'),
# sep = '/'
# )
# png(fname)
# barplot(modat$base_demand_mgd ~ modat$month, xlab="Month", ylab="Base Demand (mgd)")
# dev.off()
# print(paste("Saved file: ", fname, "with URL", furl))
# vahydro_post_metric_to_scenprop(scenprop$pid, 'dh_image_file', furl, 'fig.monthly_demand', 0.0, ds)
#
# # Calculate
# base_demand_mgd <- mean(as.numeric(dat$base_demand_mgd) )
# if (is.na(base_demand_mgd)) {
# base_demand_mgd = 0.0
# }
# wd_mgd <- mean(as.numeric(dat$wd_mgd) )
# if (is.na(wd_mgd)) {
# wd_mgd = 0.0
# }
# gw_demand_mgd <- mean(as.numeric(dat$gw_demand_mgd) )
# if (is.na(gw_demand_mgd)) {
# gw_demand_mgd = 0.0
# }
# unmet_demand_mgd <- mean(as.numeric(dat$unmet_demand_mgd) )
# if (is.na(unmet_demand_mgd)) {
# unmet_demand_mgd = 0.0
# }
# ps_mgd <- mean(as.numeric(dat$discharge_mgd) )
# if (is.na(ps_mgd)) {
# ps_mgd = 0.0
# }
# Analyze unmet demands: build a daily zoo series of unmet demand, use
# IHA group2() to get rolling n-day maxima per calendar year, then report
# the worst n-day average unmet demand (mgd) over the whole simulation.
uds <- zoo(as.numeric(dat$unmet_demand_mgd), order.by = index(dat))
udflows <- group2(uds, 'calendar')

# Worst 90-day average unmet demand
unmet90 <- udflows["90 Day Max"]
ndx <- which.max(as.numeric(unmet90[, "90 Day Max"]))
unmet90 <- round(udflows[ndx, ]$"90 Day Max", 6)

# Worst 30-day average unmet demand.
# BUG FIX: this previously indexed udflows with ndx (the row of the
# 90-day max), reporting the 30-day value from the wrong year whenever
# the 30- and 90-day maxima occur in different years. ndx1 is kept as a
# separate name (not reused as ndx) because downstream code uses it to
# locate the critical 30-day year.
unmet30 <- udflows["30 Day Max"]
ndx1 <- which.max(as.numeric(unmet30[, "30 Day Max"]))
unmet30 <- round(udflows[ndx1, ]$"30 Day Max", 6)

# Worst 7-day average unmet demand
unmet7 <- udflows["7 Day Max"]
ndx <- which.max(as.numeric(unmet7[, "7 Day Max"]))
unmet7 <- round(udflows[ndx, ]$"7 Day Max", 6)

# Worst single-day unmet demand
unmet1 <- udflows["1 Day Max"]
ndx <- which.max(as.numeric(unmet1[, "1 Day Max"]))
unmet1 <- round(udflows[ndx, ]$"1 Day Max", 6)
# # post em up'
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'base_demand_mgd', base_demand_mgd, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'base_demand_mgy', base_demand_mgd * 365.0, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'wd_mgd', wd_mgd, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'wd_mgy', wd_mgd * 365.0, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'gw_demand_mgd', gw_demand_mgd, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'unmet_demand_mgd', unmet_demand_mgd, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'unmet_demand_mgy', unmet_demand_mgd * 365.0, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'ps_mgd', ps_mgd, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'unmet90_mgd', unmet90, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'unmet30_mgd', unmet30, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'unmet7_mgd', unmet7, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'unmet1_mgd', unmet1, ds)
# # Intake Flows
# iflows <- zoo(as.numeric(dat$Qintake), order.by = index(dat));
# uiflows <- group2(iflows, 'calendar')
# Qin30 <- uiflows["30 Day Min"];
# l30_Qintake <- min(Qin30["30 Day Min"]);
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'l30_Qintake', l30_Qintake, ds)
#
# # Define year at which highest 30 Day Max occurs (Lal's code, line 405)
# #defines critical period based on Qintake if there is no unmet demand
# if (sum(datdf$unmet_demand_mgd)==0) {
# # base it on flow since we have no unmet demand.
# ndx1 = which.min(as.numeric(Qin30[,"30 Day Min"]))
# u30_year2 = uiflows[ndx1,]$"year";
# } else {
# u30_year2 = udflows[ndx1,]$"year";
# }
#
# # Metrics that need Zoo (IHA)
# flows <- zoo(as.numeric(as.character( dat$Qintake )), order.by = index(dat));
# loflows <- group2(flows);
# l90 <- loflows["90 Day Min"];
# ndx = which.min(as.numeric(l90[,"90 Day Min"]));
# l90_Qout = round(loflows[ndx,]$"90 Day Min",6);
# l90_year = loflows[ndx,]$"year";
# ##### Define fname before graphing
# # hydroImpoundment lines 144-151
#
# fname <- paste(
# save_directory,
# paste0(
# 'fig.30daymax_unmet.',
# elid, '.', runid, '.png'
# ),
# sep = '/'
# )
#
# furl <- paste(
# save_url,
# paste0(
# 'fig.30daymax_unmet.',
# elid, '.', runid, '.png'
# ),
# sep = '/'
# )
#
# #png(fname)
#
# ##### Define data for graph, just within that defined year, and graph it
# # Lal's code, lines 410-446 (412 commented out)
# if (sum(datdf$unmet_demand_mgd)==0) {
# # base it on flow since we have no unmet demand.
# dsql <- paste(
# "select min(month) as dsmo, max(month) as demo
# from datdf
# where Qintake <= ", l30_Qintake,
# " and year = ",
# u30_year2
# )
# } else {
# dsql <- paste(
# "select min(month) as dsmo, max(month) as demo
# from datdf
# where unmet_demand_mgd > 0
# and year = ",
# u30_year2
# )
# }
# drange <- sqldf(dsql)
# # Drought range dates
# dsy <- u30_year2
# dey <- u30_year2
# dsmo <- as.integer(drange$dsmo) - 1
# demo <- as.integer(drange$demo) + 1
# if (dsmo < 1) {
# dsmo <- 12 + dsmo
# dsy <- dsy - 1
# }
# if (demo > 12) {
# demo <- demo - 12
# dey <- dey + 1
# }
# dsmo <- sprintf('%02i',dsmo)
# demo <- sprintf('%02i',demo)
# ddat2 <- window(
# dat,
# start = as.Date(paste0(dsy, "-", dsmo, "-01")),
# end = as.Date(paste0(dey,"-", demo, "-28") )
# );
#
# #dmx2 = max(ddat2$Qintake)
# if (pump_store || !imp_enabled) {
# flow_ts <- ddat2$Qintake
# flow_ts_name = "Source Stream"
# } else {
# flow_ts <- ddat2$impoundment_Qin
# flow_ts_name = "Inflow"
# }
#
# png(fname)
# par(mar = c(5,5,2,5))
# plot(
# flow_ts,
# xlab=paste0("Critical Period: ",u30_year2),
# ylim=c(0,max(flow_ts)),
# col="blue"
# )
# par(new = TRUE)
# plot(
# ddat2$base_demand_mgd,col='green',
# xlab="",
# ylab="",
# axes=FALSE,
# ylim=c(0,max(ddat2$base_demand_mgd))
# )
# lines(ddat2$unmet_demand_mgd,col='red')
# axis(side = 4)
# mtext(side = 4, line = 3, 'Base/Unmet Demand (mgd)')
# legend("topleft", c(flow_ts_name,"Base Demand","Unmet"),
# col = c("blue", "green","red"),
# lty = c(1,1,1,1),
# bg='white',cex=0.8) #ADD LEGEND
# dev.off()
# map2<-as.data.frame(ddat2$Qintake + (ddat2$discharge_mgd - ddat2$wd_mgd) * 1.547)
# colnames(map2)<-"flow"
# map2$date <- rownames(map2)
# map2$base_demand_mgd<-ddat2$base_demand_mgd * 1.547
# map2$unmetdemand<-ddat2$unmet_demand_mgd * 1.547
# df <- data.frame(as.Date(map2$date), map2$flow, map2$base_demand_mgd,map2$unmetdemand);
# colnames(df)<-c("date","flow","base_demand_mgd","unmetdemand")
# #options(scipen=5, width = 1400, height = 950)
# ggplot(df, aes(x=date)) +
# geom_line(aes(y=flow, color="Flow"), size=0.5) +
# geom_line(aes(y=base_demand_mgd, colour="Base demand"), size=0.5)+
# geom_line(aes(y=unmetdemand, colour="Unmet demand"), size=0.5)+
# theme_bw()+
# theme(legend.position="top",
# legend.title=element_blank(),
# legend.box = "horizontal",
# legend.background = element_rect(fill="white",
# size=0.5, linetype="solid",
# colour ="white"),
# legend.text=element_text(size=12),
# axis.text=element_text(size=12, color = "black"),
# axis.title=element_text(size=14, color="black"),
# axis.line = element_line(color = "black",
# size = 0.5, linetype = "solid"),
# axis.ticks = element_line(color="black"),
# panel.grid.major=element_line(color = "light grey"),
# panel.grid.minor=element_blank())+
# scale_colour_manual(values=c("purple","black","blue"))+
# guides(colour = guide_legend(override.aes = list(size=5)))+
# labs(y = "Flow (cfs)", x= paste("Critical Period:",u30_year2, sep=' '))
# #dev.off()
# print(fname)
# ggsave(fname,width=7,height=4.75)
#
##### Naming for saving and posting to VAHydro
# print(paste("Saved file: ", fname, "with URL", furl))
#
# vahydro_post_metric_to_scenprop(scenprop$pid, 'dh_image_file', furl, 'fig.30daymax_unmet', 0.0, ds)
##### HEATMAP
# includes code needed for both the heatmap with counts and heatmap with counts and averages
# Uses dat2 for heatmap calendar years
# make numeric versions of syear and eyear
# ---- Heatmap data preparation ----
# Builds a daily data frame (yrdatdf) from the numeric model-output matrix
# `yrdat` (columns include year, month, unmet_demand_mgd, gw_demand_mgd, ...)
# and aggregates unmet-demand days into month/year cells for the heatmaps.
# NOTE(review): num_syear skips the first simulation year (spin-up?) --
# confirm the +1 offset is intentional.
num_syear <- as.numeric(syear) + 1
num_eyear <- as.numeric(eyear)
# Coerce the matrix storage mode so every column becomes numeric when
# converted to a data frame.
mode(yrdat) <- 'numeric'
yrdatdf <- as.data.frame(yrdat)
#ADD FINAL UNMET COLUMN
#######################################################
# "Final" unmet demand credits unused groundwater capacity (a 2.6 mgd cap
# minus actual gw use) against the surface-water shortfall; a residual under
# 1 mgd is treated as fully met (0).
# NOTE(review): 2.6 is a hard-coded groundwater-capacity constant and the
# "< 1" cutoff is a magic tolerance -- confirm both against the source permit.
yrdatdf <- sqldf("select *,
CASE WHEN (unmet_demand_mgd - (2.6 - gw_demand_mgd) < 1) THEN 0
ELSE unmet_demand_mgd - (2.6 - gw_demand_mgd)
END AS final_unmet_demand_mgd
from yrdatdf")
#colnames(yrdatdf)
#######################################################
# FOR QA PURPOSES ONLY
# Spot-check slice (October 2001) for manually verifying the derived column.
yrdatdf_qa <- sqldf("select *
from yrdatdf
WHERE year = 2001 AND month = 10
")
#######################################################
# yrmodat <- sqldf("SELECT month months,
# year years,
# sum(unmet_demand_mgd) sum_unmet,
# count(*) count
# FROM yrdatdf
# WHERE unmet_demand_mgd > 0
# GROUP BY month, year") #Counts sum of unmet_days by month and year
#NEW VERSION -> USING FINAL UNMET DEMAND
# One row per (month, year) cell: total unmet volume and count of unmet days.
yrmodat <- sqldf("SELECT month months,
year years,
sum(final_unmet_demand_mgd) sum_unmet,
count(*) count
FROM yrdatdf
WHERE final_unmet_demand_mgd > 0
GROUP BY month, year") #Counts sum of unmet_days by month and year
#converts unmet_mgd sums to averages for cells
yrmodat$avg_unmet <- yrmodat$sum_unmet / yrmodat$count
#Join counts with original data frame to get missing month and year combos then selects just count month and year
# NOTE(review): GROUP BY without aggregates lets SQLite pick an arbitrary row
# per group; harmless here because only month/year/avg_unmet/count survive
# the second SELECT.
yrmodat <- sqldf("SELECT * FROM yrdatdf LEFT JOIN yrmodat ON yrmodat.years = yrdatdf.year AND yrmodat.months = yrdatdf.month group by month, year")
yrmodat <- sqldf('SELECT month, year, avg_unmet, count count_unmet_days FROM yrmodat GROUP BY month, year')
#Replace NA for count with 0s
# Month/year combos with zero unmet days are absent from the aggregate above;
# the left join leaves NA there, so zero-fill.
yrmodat[is.na(yrmodat)] = 0
########################################################### Calculating Totals
# Grid layout used below: months 1-12 across and years down; column 13 holds
# yearly totals, column 14 yearly averages; row num_eyear+1 holds monthly
# totals and row num_eyear+2 monthly averages.
# monthly totals via sqldf
mosum <- sqldf("SELECT month, sum(count_unmet_days) count_unmet_days FROM yrmodat GROUP BY month")
# Monthly-totals row sits one row below the last data year.
mosum$year <- rep(num_eyear+1,12)
#JK addition 3/25/22: Cell of total days unmet in simulation period
# Single grand-total cell at grid position (month 13, totals row).
total_unmet_days <- sum(yrmodat$count_unmet_days)
total_unmet_days_cell <- data.frame("month" = 13,
"count_unmet_days" = as.numeric(total_unmet_days),
"year" = num_eyear+1)
#yearly sum
yesum <- sqldf("SELECT year, sum(count_unmet_days) count_unmet_days FROM yrmodat GROUP BY year")
# Yearly totals occupy the 13th column of the grid.
yesum$month <- rep(13,length(yesum$year))
# yesum <- rbind(yesum,data.frame(year = "Total",
# count_unmet_days = 999,
# month = 13))
# create monthly averages
# Copy of mosum (sqldf round-trip); averages plot one row below the totals.
moavg<- sqldf('SELECT * FROM mosum')
moavg$year <- moavg$year + 1
moavg$avg <- round(moavg$count_unmet_days/((num_eyear-num_syear)+1),1)
# create yearly averages
# Copy of yesum; averages occupy the 14th column, right of the totals column.
yeavg<- sqldf('SELECT * FROM yesum')
yeavg$month <- yeavg$month + 1
yeavg$avg <- round(yeavg$count_unmet_days/12,1)
# create x and y axis breaks
y_breaks <- seq(syear,num_eyear+2,1)
x_breaks <- seq(1,14,1)
# create x and y labels
# y axis: one label per simulated year plus the Totals and Avg rows.
y_labs <- c(seq(syear,eyear,1),'Totals', 'Avg')
x_labs <- c(month.abb,'Totals','Avg')
############################################################### Plot and Save count heatmap
# Builds the first unmet-demand heatmap: per-cell day counts, plus totals
# (column 13 / row num_eyear+1), a grand-total cell, and averages
# (column 14 / row num_eyear+2).  ggnewscale::new_scale_fill() gives each
# layer group its own fill scale.  The first branch forces an all-green grid
# when the scenario has no unmet demand at all.
# Fixes vs. original: the grand-total tile mapped fill inside aes() while also
# setting a constant fill = "grey"; the constant overrides the mapping, so the
# dead aes() fill is removed.  geom_text layers now pass data = yrmodat
# instead of reaching into yrmodat$... inside aes().
if (sum(mosum$count_unmet_days) == 0) {
  # No unmet demand anywhere: use a degenerate all-green gradient.
  count_grid <- ggplot() +
    geom_tile(data = yrmodat, color = 'black',
              aes(x = month, y = year, fill = count_unmet_days)) +
    geom_text(data = yrmodat, size = 3.5, colour = "black",
              aes(x = month, y = year, label = count_unmet_days)) +
    scale_fill_gradient2(low = "#00cc00", mid = "#00cc00", high = "#00cc00",
                         guide = "colourbar", name = 'Unmet Days') +
    labs(title = 'Unmet Demand Heatmap', y = NULL, x = NULL) +
    scale_x_continuous(expand = c(0, 0), breaks = x_breaks, labels = x_labs, position = 'top') +
    scale_y_reverse(expand = c(0, 0), breaks = y_breaks, labels = y_labs) +
    theme(panel.background = element_rect(fill = "transparent"),
          axis.ticks = element_blank(),
          plot.title = element_text(size = 12, face = "bold", hjust = 0.5),
          legend.title.align = 0.5)
  # Totals row/column on their own (blue) fill scale.
  unmet <- count_grid + new_scale_fill() +
    geom_tile(data = yesum, color = 'black', aes(x = month, y = year, fill = count_unmet_days)) +
    geom_tile(data = mosum, color = 'black', aes(x = month, y = year, fill = count_unmet_days)) +
    geom_text(data = yesum, size = 3.5, color = 'black', aes(x = month, y = year, label = count_unmet_days)) +
    geom_text(data = mosum, size = 3.5, color = 'black', aes(x = month, y = year, label = count_unmet_days)) +
    scale_fill_gradient2(low = "#63D1F4", high = "#8A2BE2", mid = "#63D1F4",
                         midpoint = mean(mosum$count_unmet_days), name = 'Total Unmet Days')
  # Grand-total cell: constant grey fill (no fill mapping needed).
  total <- unmet + new_scale_fill() +
    geom_tile(data = total_unmet_days_cell, color = 'black', fill = "grey",
              aes(x = month, y = year)) +
    geom_text(data = total_unmet_days_cell, size = 3.5, color = 'black',
              aes(x = month, y = year, label = count_unmet_days))
  # Averages row/column on a third fill scale.
  unmet_avg <- total + new_scale_fill() +
    geom_tile(data = yeavg, color = 'black', aes(x = month, y = year, fill = avg)) +
    geom_tile(data = moavg, color = 'black', aes(x = month, y = year, fill = avg)) +
    geom_text(data = yeavg, size = 3.5, color = 'black', aes(x = month, y = year, label = avg)) +
    geom_text(data = moavg, size = 3.5, color = 'black', aes(x = month, y = year, label = avg)) +
    scale_fill_gradient2(low = "#FFF8DC", mid = "#FFF8DC", high = "#FFF8DC",
                         name = 'Average Unmet Days', midpoint = mean(yeavg$avg))
} else {
  # Unmet demand present: green -> yellow -> red gradient centered at 15 days.
  count_grid <- ggplot() +
    geom_tile(data = yrmodat, color = 'black',
              aes(x = month, y = year, fill = count_unmet_days)) +
    geom_text(data = yrmodat, size = 3.5, colour = "black",
              aes(x = month, y = year, label = count_unmet_days)) +
    scale_fill_gradient2(low = "#00cc00", high = "red", mid = 'yellow',
                         midpoint = 15, guide = "colourbar",
                         name = 'Unmet Days') +
    labs(title = 'Unmet Demand Heatmap', y = NULL, x = NULL) +
    scale_x_continuous(expand = c(0, 0), breaks = x_breaks, labels = x_labs, position = 'top') +
    scale_y_reverse(expand = c(0, 0), breaks = y_breaks, labels = y_labs) +
    theme(panel.background = element_rect(fill = "transparent"),
          axis.ticks = element_blank(),
          plot.title = element_text(size = 12, face = "bold", hjust = 0.5),
          legend.title.align = 0.5)
  unmet <- count_grid + new_scale_fill() +
    geom_tile(data = yesum, color = 'black', aes(x = month, y = year, fill = count_unmet_days)) +
    geom_tile(data = mosum, color = 'black', aes(x = month, y = year, fill = count_unmet_days)) +
    geom_text(data = yesum, size = 3.5, color = 'black', aes(x = month, y = year, label = count_unmet_days)) +
    geom_text(data = mosum, size = 3.5, color = 'black', aes(x = month, y = year, label = count_unmet_days)) +
    scale_fill_gradient2(low = "#63D1F4", high = "#8A2BE2", mid = '#CAB8FF',
                         midpoint = mean(mosum$count_unmet_days), name = 'Total Unmet Days')
  total <- unmet + new_scale_fill() +
    geom_tile(data = total_unmet_days_cell, color = 'black', fill = "grey",
              aes(x = month, y = year)) +
    geom_text(data = total_unmet_days_cell, size = 3.5, color = 'black',
              aes(x = month, y = year, label = count_unmet_days))
  unmet_avg <- total + new_scale_fill() +
    geom_tile(data = yeavg, color = 'black', aes(x = month, y = year, fill = avg)) +
    geom_tile(data = moavg, color = 'black', aes(x = month, y = year, fill = avg)) +
    geom_text(data = yeavg, size = 3.5, color = 'black', aes(x = month, y = year, label = avg)) +
    geom_text(data = moavg, size = 3.5, color = 'black', aes(x = month, y = year, label = avg)) +
    scale_fill_gradient2(low = "#FFF8DC", mid = "#FFDEAD", high = "#DEB887",
                         name = 'Average Unmet Days', midpoint = mean(yeavg$avg))
}
# Output path: fig.unmet_heatmap_gw.<element id>.<run id>.png under save_directory.
fname2 <- paste(save_directory,paste0('fig.unmet_heatmap_gw.',elid, '.', runid, '.png'),sep = '/')
#furl2 <- paste(save_url, paste0('fig.unmet_heatmap.',elid, '.', runid, '.png'),sep = '/')
# Save the fully layered count heatmap assembled above.
ggsave(fname2,plot = unmet_avg, width= 7, height=7)
print(paste('File saved to save_directory:', fname2))
#vahydro_post_metric_to_scenprop(scenprop$pid, 'dh_image_file', furl2, 'fig.unmet_heatmap', 0.0, ds)
###################################### Plot and save Second unmet Demand Grid
# contains count/ Avg unmet demand mgd
# Second unmet-demand heatmap: same grid as the first, but each month/year
# cell is labelled "count / average unmet mgd".  Same layer structure
# (counts -> totals -> grand total -> averages) with per-group fill scales
# via ggnewscale::new_scale_fill().
# Fixes vs. original: removed the dead aes() fill on the grand-total tile
# (the constant fill = "grey" overrides it) and pass data = yrmodat to
# geom_text instead of yrmodat$... inside aes().
# NOTE(review): the zero branch formats the average with round(x, 1) at
# size 3.5 while the non-zero branch uses signif(x, 1) at size 3 -- confirm
# the inconsistency is intentional before unifying.
if (sum(mosum$count_unmet_days) == 0) {
  # No unmet demand: all-green grid.
  count_grid <- ggplot() +
    geom_tile(data = yrmodat, color = 'black',
              aes(x = month, y = year, fill = count_unmet_days)) +
    geom_text(data = yrmodat, size = 3.5, colour = "black",
              aes(x = month, y = year,
                  label = paste(count_unmet_days, ' / ', round(avg_unmet, 1), sep = ''))) +
    scale_fill_gradient2(low = "#00cc00", mid = "#00cc00", high = "#00cc00",
                         guide = "colourbar", name = 'Unmet Days') +
    labs(title = 'Unmet Demand Heatmap', y = NULL, x = NULL) +
    scale_x_continuous(expand = c(0, 0), breaks = x_breaks, labels = x_labs, position = 'top') +
    scale_y_reverse(expand = c(0, 0), breaks = y_breaks, labels = y_labs) +
    theme(panel.background = element_rect(fill = "transparent"),
          axis.ticks = element_blank(),
          plot.title = element_text(size = 12, face = "bold", hjust = 0.5),
          legend.title.align = 0.5)
  unmet <- count_grid + new_scale_fill() +
    geom_tile(data = yesum, color = 'black', aes(x = month, y = year, fill = count_unmet_days)) +
    geom_tile(data = mosum, color = 'black', aes(x = month, y = year, fill = count_unmet_days)) +
    geom_text(data = yesum, size = 3.5, color = 'black', aes(x = month, y = year, label = count_unmet_days)) +
    geom_text(data = mosum, size = 3.5, color = 'black', aes(x = month, y = year, label = count_unmet_days)) +
    scale_fill_gradient2(low = "#63D1F4", high = "#8A2BE2", mid = "#63D1F4",
                         midpoint = mean(mosum$count_unmet_days), name = 'Total Unmet Days')
  total <- unmet + new_scale_fill() +
    geom_tile(data = total_unmet_days_cell, color = 'black', fill = "grey",
              aes(x = month, y = year)) +
    geom_text(data = total_unmet_days_cell, size = 3.5, color = 'black',
              aes(x = month, y = year, label = count_unmet_days))
  unmet_avg <- total + new_scale_fill() +
    geom_tile(data = yeavg, color = 'black', aes(x = month, y = year, fill = avg)) +
    geom_tile(data = moavg, color = 'black', aes(x = month, y = year, fill = avg)) +
    geom_text(data = yeavg, size = 3.5, color = 'black', aes(x = month, y = year, label = avg)) +
    geom_text(data = moavg, size = 3.5, color = 'black', aes(x = month, y = year, label = avg)) +
    scale_fill_gradient2(low = "#FFF8DC", mid = "#FFF8DC", high = "#FFF8DC",
                         name = 'Average Unmet Days', midpoint = mean(yeavg$avg))
} else {
  # Unmet demand present: green -> yellow -> red gradient centered at 15 days.
  count_grid <- ggplot() +
    geom_tile(data = yrmodat, color = 'black',
              aes(x = month, y = year, fill = count_unmet_days)) +
    geom_text(data = yrmodat, size = 3, colour = "black",
              aes(x = month, y = year,
                  label = paste(count_unmet_days, ' / ', signif(avg_unmet, digits = 1), sep = ''))) +
    scale_fill_gradient2(low = "#00cc00", high = "red", mid = 'yellow',
                         midpoint = 15, guide = "colourbar",
                         name = 'Unmet Days') +
    labs(title = 'Unmet Demand Heatmap', y = NULL, x = NULL) +
    scale_x_continuous(expand = c(0, 0), breaks = x_breaks, labels = x_labs, position = 'top') +
    scale_y_reverse(expand = c(0, 0), breaks = y_breaks, labels = y_labs) +
    theme(panel.background = element_rect(fill = "transparent"),
          axis.ticks = element_blank(),
          plot.title = element_text(size = 12, face = "bold", hjust = 0.5),
          legend.title.align = 0.5)
  unmet <- count_grid + new_scale_fill() +
    geom_tile(data = yesum, color = 'black', aes(x = month, y = year, fill = count_unmet_days)) +
    geom_tile(data = mosum, color = 'black', aes(x = month, y = year, fill = count_unmet_days)) +
    geom_text(data = yesum, size = 3.5, color = 'black', aes(x = month, y = year, label = count_unmet_days)) +
    geom_text(data = mosum, size = 3.5, color = 'black', aes(x = month, y = year, label = count_unmet_days)) +
    scale_fill_gradient2(low = "#63D1F4", high = "#8A2BE2", mid = '#CAB8FF',
                         midpoint = mean(mosum$count_unmet_days), name = 'Total Unmet Days')
  total <- unmet + new_scale_fill() +
    geom_tile(data = total_unmet_days_cell, color = 'black', fill = "grey",
              aes(x = month, y = year)) +
    geom_text(data = total_unmet_days_cell, size = 3.5, color = 'black',
              aes(x = month, y = year, label = count_unmet_days))
  unmet_avg <- total + new_scale_fill() +
    geom_tile(data = yeavg, color = 'black', aes(x = month, y = year, fill = avg)) +
    geom_tile(data = moavg, color = 'black', aes(x = month, y = year, fill = avg)) +
    geom_text(data = yeavg, size = 3.5, color = 'black', aes(x = month, y = year, label = avg)) +
    geom_text(data = moavg, size = 3.5, color = 'black', aes(x = month, y = year, label = avg)) +
    scale_fill_gradient2(low = "#FFF8DC", mid = "#FFDEAD", high = "#DEB887",
                         name = 'Average Unmet Days', midpoint = mean(yeavg$avg))
}
# Output path: fig.unmet_heatmap_amt_gw.<element id>.<run id>.png under save_directory.
fname3 <- paste(save_directory, paste0('fig.unmet_heatmap_amt_gw.', elid, '.', runid, '.png'), sep = '/')
# furl3 <- paste(save_url, paste0('fig.unmet_heatmap_amt.',elid, '.', runid, '.png'),sep = '/')
# Save the count/average heatmap assembled above.
ggsave(fname3, plot = unmet_avg, width = 9.5, height = 6)
# Report the actual path, consistent with the first heatmap's status message.
print(paste('File saved to save_directory:', fname3))
# vahydro_post_metric_to_scenprop(scenprop$pid, 'dh_image_file', furl3, 'fig.unmet_heatmap_amt', 0.0, ds)
# if("impoundment" %in% cols) {
# # Plot and analyze impoundment sub-comps
# dat$storage_pct <- as.numeric(dat$impoundment_use_remain_mg) * 3.07 / as.numeric(dat$impoundment_max_usable)
# #set the storage percent
# storage_pct <- mean(as.numeric(dat$storage_pct) )
# if (is.na(storage_pct)) {
# usable_pct_p0 <- 0
# usable_pct_p10 <- 0
# usable_pct_p50 <- 0
# } else {
# usable_pcts = quantile(as.numeric(dat$storage_pct), c(0,0.1,0.5) )
# usable_pct_p0 <- usable_pcts["0%"]
# usable_pct_p10 <- usable_pcts["10%"]
# usable_pct_p50 <- usable_pcts["50%"]
# }
# # post em up
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'usable_pct_p0', usable_pct_p0, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'usable_pct_p10', usable_pct_p10, ds)
# vahydro_post_metric_to_scenprop(scenprop$pid, 'om_class_Constant', NULL, 'usable_pct_p50', usable_pct_p50, ds)
#
#
# # this has an impoundment. Plot it up.
# # Now zoom in on critical drought period
# pdstart = as.Date(paste0(l90_year,"-06-01") )
# pdend = as.Date(paste0(l90_year, "-11-15") )
# datpd <- window(
# dat,
# start = pdstart,
# end = pdend
# );
# fname <- paste(
# save_directory,
# paste0(
# 'l90_imp_storage.',
# elid, '.', runid, '.png'
# ),
# sep = '/'
# )
# furl <- paste(
# save_url,
# paste0(
# 'l90_imp_storage.',
# elid, '.', runid, '.png'
# ),
# sep = '/'
# )
# png(fname)
# ymn <- 0
# ymx <- 100
#
# par(mar = c(5,5,2,5))
# plot(
# datpd$storage_pct * 100.0,
# ylim=c(ymn,ymx),
# main="Minimum Modeled Reservoir Storage Period",
# ylab="Reservoir Storage (%)",
# xlab=paste("Model Time Period",pdstart,"to",pdend)
# )
# par(new = TRUE)
# if (pump_store) {
# flow_ts <- datpd$Qreach
# } else {
# flow_ts <- datpd$impoundment_Qin
# }
# plot(flow_ts,col='blue', axes=FALSE, xlab="", ylab="")
# lines(datpd$Qout,col='green')
# lines(datpd$wd_mgd * 1.547,col='red')
# axis(side = 4)
# mtext(side = 4, line = 3, 'Flow/Demand (cfs)')
# dev.off()
# print(paste("Saved file: ", fname, "with URL", furl))
# vahydro_post_metric_to_scenprop(scenprop$pid, 'dh_image_file', furl, 'fig.l90_imp_storage', 0.0, ds)
#
# # l90 2 year
# # this has an impoundment. Plot it up.
# # Now zoom in on critical drought period
# pdstart = as.Date(paste0( (as.integer(l90_year) - 1),"-01-01") )
# pdend = as.Date(paste0(l90_year, "-12-31") )
# datpd <- window(
# dat,
# start = pdstart,
# end = pdend
# );
# fname <- paste(
# save_directory,
# paste0(
# 'l90_imp_storage.2yr.',
# elid, '.', runid, '.png'
# ),
# sep = '/'
# )
# furl <- paste(
# save_url,
# paste0(
# 'l90_imp_storage.2yr.',
# elid, '.', runid, '.png'
# ),
# sep = '/'
# )
# png(fname)
# ymn <- 1
# ymx <- 100
# par(mar = c(5,5,2,5))
# par(mar = c(1,5,2,5),mfrow = c(2,1))
# plot(
# datpd$storage_pct * 100.0,
# ylim=c(0,100),
# ylab="Reservoir Storage (%)",
# xlab="",
# main=paste("Storage and Flows",sdate,"to",edate)
# )
# ymx <- ceiling(
# pmax(
# max(datpd$Qreach)
# )
# )
# # if this is a pump store, refill_pump_mgd > 0
# # then, plot Qreach first, overlaying impoundment_Qin
# if (pump_store) {
# flow_ts <- datpd$Qreach
# } else {
# flow_ts <- datpd$impoundment_Qin
# }
# plot(
# flow_ts,
# col='blue',
# xlab="",
# ylab='Flow/Demand (cfs)',
# #ylim=c(0,ymx),
# log="y",
# yaxt="n" # supress labeling till we format
# )
# #legend()
# y_ticks <- axTicks(2)
# y_ticks_fmt <- format(y_ticks, scientific = FALSE)
# axis(2, at = y_ticks, labels = y_ticks_fmt)
# ymx <- ceiling(
# pmax(
# max(datpd$refill_pump_mgd),
# max(datpd$impoundment_demand * 1.547)
# )
# )
# #par(new = TRUE)
# #plot(datpd$refill_pump_mgd * 1.547,col='green',xlab="",ylab="")
# lines(datpd$refill_pump_mgd * 1.547,col='red')
# lines(datpd$impoundment_demand * 1.547,col='green')
# #axis(side = 4)
# #mtext(side = 4, line = 3, 'Flow/Demand (cfs)')
#
# dev.off()
# print(paste("Saved file: ", fname, "with URL", furl))
# vahydro_post_metric_to_scenprop(scenprop$pid, 'dh_image_file', furl, 'fig.l90_imp_storage.2yr', 0.0, ds)
#
# # All Periods
# # this has an impoundment. Plot it up.
# # Now zoom in on critical drought period
# datpd <- dat
# fname <- paste(
# save_directory,
# paste0(
# 'fig.imp_storage.all.',
# elid, '.', runid, '.png'
# ),
# sep = '/'
# )
# furl <- paste(
# save_url,
# paste0(
# 'fig.imp_storage.all.',
# elid, '.', runid, '.png'
# ),
# sep = '/'
# )
# png(fname)
# ymn <- 0
# ymx <- 100
# par(mar = c(5,5,2,5))
# par(mar = c(1,5,2,5),mfrow = c(2,1))
# plot(
# datpd$storage_pct * 100.0,
# ylim=c(0,100),
# ylab="Reservoir Storage (%)",
# xlab="",
# main=paste("Storage and Flows",sdate,"to",edate)
# )
# ymx <- ceiling(
# pmax(
# max(datpd$Qreach)
# )
# )
# # if this is a pump store, refill_pump_mgd > 0
# # then, plot Qreach first, overlaying impoundment_Qin
# if (pump_store) {
# flow_ts <- datpd$Qreach
# } else {
# flow_ts <- datpd$impoundment_Qin
# }
# plot(
# flow_ts,
# col='blue',
# xlab="",
# ylab='Flow/Demand (cfs)',
# #ylim=c(0,ymx),
# log="y",
# yaxt="n" # supress labeling till we format
# )
# y_ticks <- axTicks(2)
# y_ticks_fmt <- format(y_ticks, scientific = FALSE)
# axis(2, at = y_ticks, labels = y_ticks_fmt)
# ymx <- ceiling(
# pmax(
# max(datpd$refill_pump_mgd),
# max(datpd$impoundment_demand * 1.547)
# )
# )
# #par(new = TRUE)
# #plot(datpd$refill_pump_mgd * 1.547,col='green',xlab="",ylab="")
# if (pump_store) {
# lines(datpd$refill_pump_mgd * 1.547,col='red')
# }
# lines(datpd$impoundment_demand * 1.547,col='green')
# #axis(side = 4)
# #mtext(side = 4, line = 3, 'Flow/Demand (cfs)')
#
# dev.off()
# print(paste("Saved file: ", fname, "with URL", furl))
# vahydro_post_metric_to_scenprop(scenprop$pid, 'dh_image_file', furl, 'fig.imp_storage.all', 0.0, ds)
#
# # Low Elevation Period
# # Dat for Critical Period
# elevs <- zoo(dat$storage_pct, order.by = index(dat));
# loelevs <- group2(elevs);
# l90 <- loelevs["90 Day Min"];
# ndx = which.min(as.numeric(l90[,"90 Day Min"]));
# l90_elev = round(loelevs[ndx,]$"90 Day Min",6);
# l90_elevyear = loelevs[ndx,]$"year";
# l90_elev_start = as.Date(paste0(l90_elevyear - 2,"-01-01"))
# l90_elev_end = as.Date(paste0(l90_elevyear,"-12-31"))
# elevdatpd <- window(
# dat,
# start = l90_elev_start,
# end = l90_elev_end
# );
# datpd <- elevdatpd
# fname <- paste(
# save_directory,
# paste0(
# 'elev90_imp_storage.all.',
# elid, '.', runid, '.png'
# ),
# sep = '/'
# )
# furl <- paste(
# save_url,
# paste0(
# 'elev90_imp_storage.all.',
# elid, '.', runid, '.png'
# ),
# sep = '/'
# )
# png(fname)
# ymn <- 1
# ymx <- 100
# par(mar = c(5,5,2,5))
# plot(
# datpd$storage_pct * 100.0,
# ylim=c(ymn,ymx),
# main="Summer/Fall of L-90 Period",
# ylab="Reservoir Storage (%)",
# xlab=paste("Model Time Period",l90_elev_start,"to",l90_elev_end)
# )
# par(new = TRUE)
# if (pump_store) {
# flow_ts <- datpd$Qreach
# } else {
# flow_ts <- datpd$impoundment_Qin
# }
# plot(flow_ts,col='blue', axes=FALSE, xlab="", ylab="")
# lines(datpd$Qout,col='green')
# lines(datpd$wd_mgd * 1.547,col='red')
# axis(side = 4)
# mtext(side = 4, line = 3, 'Flow/Demand (cfs)')
# dev.off()
# print(paste("Saved file: ", fname, "with URL", furl))
# vahydro_post_metric_to_scenprop(scenprop$pid, 'dh_image_file', furl, 'elev90_imp_storage.all', 0.0, ds)
#
# } |
# run_analysis.R -- Getting and Cleaning Data course project.
# Reads the UCI HAR train/test splits, merges them into one data set with
# descriptive column names, extracts the mean/std measurement columns, and
# writes the result to tidy.txt.
# NOTE(review): the hard-coded absolute setwd() makes this non-portable;
# kept so the existing invocation still works.
setwd("/Documentation/Video Trainings/InProgress/Data Science Track/Getting and Cleaning Data/Project/GettingAndCleaningData");
# Lookup tables: activity id -> label, and the 561 feature names.
# NOTE(review): activity_labels is loaded but never applied below.
activity_labels <- read.table("activity_labels.txt", header=FALSE, sep=" ", stringsAsFactors=FALSE)
features_labels <- read.table("features.txt", header=FALSE, sep=" ", stringsAsFactors=FALSE)
names(features_labels) <- c("id", "features")
# Measurements (X), activity ids (y), and subject ids for both splits.
x_train <- read.table("./train/X_train.txt")
x_test <- read.table("./test/X_test.txt")
y_train <- read.table("./train/y_train.txt")
y_test <- read.table("./test/y_test.txt")
subject_train <- read.table("./train/subject_train.txt")
subject_test <- read.table("./test/subject_test.txt")
# Column-bind subject + activity + measurements per split, then stack
# test rows above train rows.
data_train <- cbind.data.frame(subject_train, y_train, x_train)
data_test <- cbind.data.frame(subject_test, y_test, x_test)
data <- rbind(data_test, data_train)
names(data) <- c("subject_id", "activity", features_labels$features)
# Keep every column whose name mentions "mean" or "std" (fixed-string match,
# so meanFreq() variants are included too).  T/F shorthand replaced with
# TRUE, and the stray top-level names(data) debug line removed.
mean_std_index <- sort(union(grep("mean", names(data), fixed=TRUE), grep("std", names(data), fixed=TRUE)))
data_mean_std <- data[, mean_std_index]
write.table(data_mean_std, "tidy.txt", sep=" ")
| /run_analysis.R | no_license | bogerm/GettingAndCleaningData | R | false | false | 1,107 | r | setwd("/Documentation/Video Trainings/InProgress/Data Science Track/Getting and Cleaning Data/Project/GettingAndCleaningData");
activity_labels <- read.table("activity_labels.txt", header=FALSE, sep=" ",stringsAsFactors=FALSE)
features_labels <- read.table("features.txt",header=FALSE, sep=" ",stringsAsFactors=FALSE)
names(features_labels) <- c("id", "features")
x_train <- read.table("./train/X_train.txt")
x_test <- read.table("./test/X_test.txt")
y_train <- read.table("./train/y_train.txt")
y_test <- read.table("./test/y_test.txt")
subject_train <- read.table("./train/subject_train.txt")
subject_test <- read.table("./test/subject_test.txt")
data_train <- cbind.data.frame(subject_train, y_train, x_train)
data_test <- cbind.data.frame(subject_test, y_test, x_test)
data <- rbind(data_test, data_train)
names(data) <- c("subject_id", "activity", features_labels$features)
names(data)
mean_std_index <- sort(union(grep("mean", names(data), fixed=T), grep("std", names(data), fixed=T)))
data_mean_std <- data[, mean_std_index]
write.table(data_mean_std,"tidy.txt",sep=" ")
|
test_that("GDAL doesn't work (part 3)", {
  # Skip outright when no GDAL binaries are available on this machine.
  hasGDAL <- findGDAL()
  if (!isTRUE(hasGDAL))
    skip("no GDAL installation found")

  # Set up raster/sf with a throwaway temp cache and force the GDAL code path
  # (reproducible.useGDAL = TRUE).  Unused local coords2 and dead commented
  # alternates from the original were removed.
  testInitOut <- testInit(c("raster", "sf"), tmpFileExt = c(".grd", ".tif"),
                          opts = list(
                            "rasterTmpDir" = tempdir2(rndstr(1, 6)),
                            "reproducible.inputPaths" = NULL,
                            "reproducible.overwrite" = TRUE,
                            'reproducible.useGDAL' = TRUE)
  )
  on.exit({
    testOnExit(testInitOut)
  }, add = TRUE)
  options("reproducible.cachePath" = tmpdir)

  # Study area: a single lat/long polygon over western Canada, reprojected to
  # a Lambert conformal conic for the non-lat/long cases.
  coords <- structure(c(-122.98, -116.1, -99.2, -106, -122.98, 59.9, 65.73, 63.58, 54.79, 59.9),
                      .Dim = c(5L, 2L))
  Sr1 <- Polygon(coords)
  Srs1 <- Polygons(list(Sr1), "s1")
  StudyArea <- SpatialPolygons(list(Srs1), 1L)
  crs(StudyArea) <- crsToUse
  nonLatLongProj <- paste("+proj=lcc +lat_1=49 +lat_2=77 +lat_0=0 +lon_0=-95",
                          "+x_0=0 +y_0=0 +ellps=GRS80 +units=m +no_defs")
  nc <- sf::st_as_sf(StudyArea)
  nc1 <- sf::st_transform(nc, nonLatLongProj)
  # Shrink the study area so the mask differs from a plain crop.
  ncSmall <- sf::st_buffer(nc1, dist = -2000)
  rasterBig <- raster(extent(nc), vals = as.integer(runif(n = 1056, min = 0, max = 10)),
                      res = c(0.5, 0.5),
                      crs = crs(nc))
  rasterSmall <- raster(extent(ncSmall), vals = 1, res = c(10000, 10000), crs = crs(ncSmall))
  # The extent of a negatively buffered SpatialPolygonsDataFrame doesn't change
  rasterSmall <- rasterize(ncSmall, rasterSmall)

  # postProcess() with useGDAL = 'force' must return a raster in rasterSmall's CRS.
  ccc <- testthat::capture_output(
    out <- postProcess(rasterBig,
                       studyArea = ncSmall,
                       rasterToMatch = rasterSmall,
                       useGDAL = 'force')
  )
  expect_true(compareCRS(out, rasterSmall))

  # Direct call with only rasterToMatch: result must align with rasterSmall.
  out2 <- cropReprojMaskWGDAL(rasterBig, useSAcrs = FALSE,
                              rasterToMatch = rasterSmall, dots = list(),
                              cores = 1)
  expect_true(raster::compareRaster(out2, rasterSmall))

  # studyArea only, useSAcrs = FALSE: the input's CRS is preserved.
  ccc <- testthat::capture_output(
    out3 <- cropReprojMaskWGDAL(rasterBig, ncSmall, useSAcrs = FALSE,
                                dots = list(),
                                cores = 1)
  )
  expect_true(raster::compareCRS(out3, rasterBig))

  # useSAcrs = TRUE must be rejected for these studyArea/CRS combinations.
  ccc <- testthat::capture_output(
    expect_error(out3a <- cropReprojMaskWGDAL(rasterBig, ncSmall, useSAcrs = TRUE,
                                              dots = list(),
                                              cores = 1), regexp = "Cannot set useSAcrs to TRUE")
  )
  ncSmallCRSNonLatLong <- sf::st_transform(ncSmall, crs = st_crs(rasterSmall))
  ccc <- testthat::capture_output(
    expect_error(out3b <- cropReprojMaskWGDAL(rasterBig, ncSmallCRSNonLatLong, useSAcrs = TRUE,
                                              dots = list(),
                                              cores = 1), regexp = "Cannot set useSAcrs to TRUE")
  )

  # A disk-backed input must behave the same as the in-memory one.
  rasterBigOnDisk <- writeRaster(rasterBig, file = tmpfile[2], overwrite = TRUE)
  out4 <- cropReprojMaskWGDAL(rasterBigOnDisk, rasterToMatch = rasterSmall,
                              dots = list(),
                              cores = 1)
  expect_true(raster::compareRaster(out4, rasterSmall))
})
| /tests/testthat/test-gdal.R | no_license | shaoyoucheng/reproducible | R | false | false | 3,643 | r | test_that("GDAL doesn't work (part 3)", {
## Skip this test unless a system GDAL installation can be located.
hasGDAL <- findGDAL()
if (!isTRUE(hasGDAL))
skip("no GDAL installation found")
## Set up the reproducible-package test fixture: temp raster dir, no input
## paths, overwriting allowed, and GDAL post-processing switched on.
## (rgeos variant retained below as commented-out history.)
#if (requireNamespace("rgeos")) {
#testInitOut <- testInit(c("raster", "sf", "rgeos"), tmpFileExt = c(".grd", ".tif"),
testInitOut <- testInit(c("raster", "sf"), tmpFileExt = c(".grd", ".tif"),
opts = list(
"rasterTmpDir" = tempdir2(rndstr(1,6)),
"reproducible.inputPaths" = NULL,
"reproducible.overwrite" = TRUE,
'reproducible.useGDAL' = TRUE)
)
## Tear the fixture down (restore options, delete temp files) when the test ends.
on.exit({
testOnExit(testInitOut)
}, add = TRUE)
options("reproducible.cachePath" = tmpdir)
#test GDAL
## Build a lat/long study-area polygon from hard-coded corner coordinates.
## NOTE(review): `coords2` is constructed but never used in this test.
coords <- structure(c(-122.98, -116.1, -99.2, -106, -122.98, 59.9, 65.73, 63.58, 54.79, 59.9),
.Dim = c(5L, 2L))
coords2 <- structure(c(-115.98, -116.1, -99.2, -106, -122.98, 59.9, 65.73, 63.58, 54.79, 59.9),
.Dim = c(5L, 2L))
Sr1 <- Polygon(coords)
Srs1 <- Polygons(list(Sr1), "s1")
StudyArea <- SpatialPolygons(list(Srs1), 1L)
## `crsToUse` comes from the test helpers — presumably a lat/long CRS; confirm there.
crs(StudyArea) <- crsToUse
## A projected (non-lat/long) CRS to exercise reprojection paths.
nonLatLongProj <- paste("+proj=lcc +lat_1=49 +lat_2=77 +lat_0=0 +lon_0=-95",
"+x_0=0 +y_0=0 +ellps=GRS80 +units=m +no_defs")
nc <- sf::st_as_sf(StudyArea)#system.file("shape/nc.shp", package="sf"))
nc1 <- sf::st_transform(nc, nonLatLongProj)
## Shrink the study area so masking actually removes cells.
#ncSmall <- sf::st_buffer(nc1, dist = -10000)
ncSmall <- sf::st_buffer(nc1, dist = -2000)
## A coarse lat/long raster covering the full area, and a fine projected raster
## covering only the buffered area, rasterized from the small polygon.
rasterBig <- raster(extent(nc), vals = as.integer(runif(n = 1056, min = 0, max = 10)),
res = c(0.5, 0.5),
crs = crs(nc))
rasterSmall <- raster(extent(ncSmall), vals = 1, res = c(10000, 10000), crs = crs(ncSmall))
#The extent of a negatively buffered SpatialPolygonsDataFrame doesn't change
rasterSmall <- rasterize(ncSmall, rasterSmall)
## postProcess with useGDAL = 'force' must reproject onto rasterSmall's CRS.
ccc <- testthat::capture_output(
out <- postProcess(rasterBig,
studyArea = ncSmall,
rasterToMatch = rasterSmall,
useGDAL = 'force')
)
expect_true(compareCRS(out, rasterSmall))
## Direct call: matching a rasterToMatch should reproduce its grid exactly.
out2 <- cropReprojMaskWGDAL(rasterBig, useSAcrs = FALSE,
rasterToMatch = rasterSmall, dots = list(),
cores = 1)
expect_true(raster::compareRaster(out2, rasterSmall))
## With only a studyArea (no rasterToMatch), the input raster's CRS is kept.
ccc <- testthat::capture_output(
out3 <- cropReprojMaskWGDAL(rasterBig, ncSmall, useSAcrs = FALSE,
dots = list(),
cores = 1)
)
expect_true(raster::compareCRS(out3, rasterBig))
## useSAcrs = TRUE is rejected in this configuration — expect a clear error.
ccc <- testthat::capture_output(
expect_error(out3a <- cropReprojMaskWGDAL(rasterBig, ncSmall, useSAcrs = TRUE,
dots = list(),
cores = 1), regexp = "Cannot set useSAcrs to TRUE")
)
## Same rejection even when the study area is already in the target CRS.
ncSmallCRSNonLatLong <- sf::st_transform(ncSmall, crs = st_crs(rasterSmall))
ccc <- testthat::capture_output(
expect_error(out3b <- cropReprojMaskWGDAL(rasterBig, ncSmallCRSNonLatLong, useSAcrs = TRUE,
dots = list(),
cores = 1), regexp = "Cannot set useSAcrs to TRUE")
)
## On-disk input must behave identically to the in-memory raster.
rasterBigOnDisk <- writeRaster(rasterBig, file = tmpfile[2], overwrite = TRUE)
out4 <- cropReprojMaskWGDAL(rasterBigOnDisk, rasterToMatch = rasterSmall,
dots = list(),
cores = 1)
expect_true(raster::compareRaster(out4, rasterSmall))
# }
})
|
# 10 Days of Statistics, Day 1 (challenge 2): population standard deviation.
# stdin layout: line 1 = number of values, line 2 = space-separated integers.
input <- suppressWarnings(readLines(file("stdin", open = "r")))
input <- as.matrix(as.data.frame(t(input)))
count <- as.integer(input[1])
obs <- as.integer(strsplit(input[2:count][1], " ")[[1]])
avg <- mean(obs)
# Population (not sample) standard deviation: divide by n rather than n - 1.
stdev <- sqrt(sum((obs - avg)^2) / count)
# The grader expects exactly one decimal place.
cat(format(round(stdev, 1), nsmall = 1))
| /R/day-1-1.R | no_license | nelanz/10-Days-of-Statistics | R | false | false | 382 | r | # Second challenge for Day 1 of 10 Days of Statistics
# Calculating the standard deviation
## Read all of stdin: line 1 holds the count n, line 2 the space-separated values.
data <- suppressWarnings(readLines(file("stdin", open = "r")))
## Reshape the character vector into a 1-row matrix.
## NOTE(review): this conversion is a no-op for the indexing below; plain
## vector indexing (data[1], data[2]) would behave identically.
data <- as.matrix(as.data.frame(t(data)))
n <- as.integer(data[1])
## `data[2:n][1]` is simply `data[2]` — the line of values — split on spaces.
values <- as.integer(strsplit(data[2:n][1], " ")[[1]])
miu <- mean(values)
## Population standard deviation: divides by n, not n - 1.
sigma <- sqrt((sum((values - miu)^2))/n)
## Print with exactly one decimal place, as the challenge requires.
cat(format(round(sigma, 1), nsmall = 1))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rdf.R
\name{rdf_add}
\alias{rdf_add}
\title{add a triple (subject, predicate, object) to the rdf graph}
\usage{
rdf_add(x, subject, predicate, object, subjectType = as.character(NA),
objectType = as.character(NA), datatype_uri = as.character(NA))
}
\arguments{
\item{x}{an rdf graph object}
\item{subject}{character string containing the subject}
\item{predicate}{character string containing the predicate}
\item{object}{character string containing the object}
\item{subjectType}{the Node type of the subject, i.e. "blank", "uri"}
\item{objectType}{the Node type of the object, i.e. "blank", "uri"}
\item{datatype_uri}{the datatype URI to associate with an object literal value}
}
\value{
the rdf graph object.
}
\description{
add a triple (subject, predicate, object) to the rdf graph
}
\details{
Since the rdf graph object simply contains external pointers
to the model object in C code, note that the input object is modified
directly.
}
\examples{
x <- rdf()
rdf_add(x,
subject="http://www.dajobe.org/",
predicate="http://purl.org/dc/elements/1.1/language",
object="en")
}
| /man/rdf_add.Rd | no_license | annakrystalli/rdflib | R | false | true | 1,176 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rdf.R
\name{rdf_add}
\alias{rdf_add}
\title{add a triple (subject, predicate, object) to the rdf graph}
\usage{
rdf_add(x, subject, predicate, object, subjectType = as.character(NA),
objectType = as.character(NA), datatype_uri = as.character(NA))
}
\arguments{
\item{x}{an rdf graph object}
\item{subject}{character string containing the subject}
\item{predicate}{character string containing the predicate}
\item{object}{character string containing the object}
\item{subjectType}{the Node type of the subject, i.e. "blank", "uri"}
\item{objectType}{the Node type of the object, i.e. "blank", "uri"}
\item{datatype_uri}{the datatype URI to associate with an object literal value}
}
\value{
the rdf graph object.
}
\description{
add a triple (subject, predicate, object) to the rdf graph
}
\details{
Since the rdf graph object simply contains external pointers
to the model object in C code, note that the input object is modified
directly.
}
\examples{
x <- rdf()
rdf_add(x,
subject="http://www.dajobe.org/",
predicate="http://purl.org/dc/elements/1.1/language",
object="en")
}
|
## Depth curves - model and application
## adult
## produces probability curves for depth, and application to sample node data (time series) for adult and Juvenile
## also data distributions
## packages
## NOTE(review): tidyr, dplyr, ggplot2 and the first scales are already attached
## by tidyverse, and scales is loaded twice — harmless but redundant.
library(tidyverse)
library(tidyr)
library(sm)
library(lubridate) # work with dates
library(dplyr) # data manipulation (filter, summarize, mutate)
library(ggplot2) # graphics
library(gridExtra) # tile several plots next to each other
library(scales)
library(data.table)
library(zoo)
library(scales)
## function to find roots
## Loads RootLinearInterpolant() used below to find curve crossings.
load(file="root_interpolation_function.Rdata")
## define root equation
## Loads expression_Q() used below to build threshold expressions.
load(file="expression_Q_limit_function.RData")
# Combine with hydraulic data -------------------------------------------
## upload habitat curve data
fitdata <- read.csv("output_data/old_data/adult_depth_prob_curve_data.csv")
## upload hydraulic data
setwd("input_data/HecRas")
h <- list.files(pattern="hydraulic")
length(h) ## 20
## set wd back to main
## NOTE(review): hard-coded absolute path only works on the author's machine;
## prefer list.files(path = "input_data/HecRas", pattern = "hydraulic") so no
## setwd() round-trip is needed.
setwd("/Users/katieirving/Documents/git/flow_eco_mech")
## For every HecRas node export: build depth ~ Q curves, map depth to habitat
## probability via the fitted spline, derive Q limits for three probability
## thresholds, and write per-node figures, RData and CSV summaries.
for(n in 1: length(h)) {
NodeData <- read.csv(file=paste("input_data/HecRas/", h[n], sep=""))
F34D <- read.csv("input_data/HecRas/hydraulic_ts_F34D.csv") ## for dates
## format hydraulic data
## Borrow the datetime column from the F34D file (assumed aligned row-for-row
## with every node file — TODO confirm).
NodeData <- NodeData %>%
mutate(Q_ts.datetime = F34D$Q_ts.datetime)
hydraul <-NodeData[,-1]
## change some names
hydraul <- hydraul %>%
rename(DateTime = Q_ts.datetime, node = Gage, Q = Flow)
## define node name
NodeName <- unique(hydraul$node)
## convert units and change names
## ft -> cm (depth), lb/ft^2 -> Pa (shear), lb/ft^2 -> W/m^2-ish stream power,
## ft/s -> m/s (velocity); the imperial source columns are then dropped.
hyd_dep <- hydraul %>%
mutate(depth_cm_LOB = (Hydr..Depth..ft..LOB*0.3048)*100,
depth_cm_MC = (Hydr..Depth..ft..MC*0.3048)*100,
depth_cm_ROB = (Hydr..Depth..ft..ROB*0.3048)*100) %>%
mutate(shear_pa_LOB = (Shear..lb.sq.ft..LOB/0.020885),
shear_pa_MC = (Shear..lb.sq.ft..MC/0.020885),
shear_pa_ROB = (Shear..lb.sq.ft..ROB/0.020885)) %>%
mutate(sp_w_LOB = (Shear..lb.sq.ft..LOB*4.44822)/0.3048,
sp_w_MC = (Shear..lb.sq.ft..MC*4.44822)/0.3048,
sp_w_ROB = (Shear..lb.sq.ft..ROB*4.44822)/0.3048) %>%
mutate(vel_m_LOB = (Avg..Vel...ft.s..LOB*0.3048),
vel_m_MC = (Avg..Vel...ft.s..MC*0.3048),
vel_m_ROB = (Avg..Vel...ft.s..ROB*0.3048)) %>%
select(-contains("ft")) %>%
mutate(date_num = seq(1,length(DateTime), 1))
## take only depth variable
hyd_dep <- hyd_dep %>% select(DateTime, node, Q, contains("depth"), date_num)
# ## melt channel position data
## Long format: one row per timestep x channel position (LOB/MC/ROB).
hyd_dep<-reshape2::melt(hyd_dep, id=c("DateTime","Q", "node", "date_num"))
labels <- c(depth_cm_LOB = "Left Over Bank", depth_cm_MC = "Main Channel", depth_cm_ROB = "Right Over Bank")
### node figure for depth ~ Q
file_name <- paste("figures/Application_curves/nodes/", NodeName, "_Depth_Q.png", sep="")
png(file_name, width = 500, height = 600)
ggplot(hyd_dep, aes(x = Q, y=value)) +
geom_line(aes( group = variable, lty = variable)) +
scale_linetype_manual(values= c("dotted", "solid", "dashed"),
breaks=c("depth_cm_LOB", "depth_cm_MC", "depth_cm_ROB"))+
facet_wrap(~variable, scales="free_x", nrow=3, labeller=labeller(variable = labels)) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "none") +
labs(title = paste(NodeName, ": Depth ~ Q"),
y = "Depth (cm)",
x = "Q (cfs)") #+ theme_bw(base_size = 15)
dev.off()
## change NAs to 0 in concrete overbanks
hyd_dep[is.na(hyd_dep)] <- 0
## use smooth spline to predict on new data set
## Spline fitted on the habitat curve (depth -> probability), then evaluated
## at every observed depth per channel position.
new_values <-smooth.spline(fitdata$depth_fit, fitdata$prob_fit)
all_data <- hyd_dep %>%
group_by(variable) %>%
mutate(prob_fit = predict(new_values, value)$y) %>%
rename(depth_cm = value)
## save out
save(all_data, file=paste("output_data/F1_", NodeName, "_SAS_adult_depth_discharge_probability.RData", sep=""))
# format probability time series ------------------------------------------
## look at data using lubridate etc
## format date time
all_data$DateTime<-as.POSIXct(all_data$DateTime,
format = "%Y-%m-%d %H:%M",
tz = "GMT")
## create year, month, day and hour columns and add water year
## Water year starts in October: Oct-Dec belong to the calendar year, Jan-Sep
## to the previous one.
all_data <- all_data %>%
mutate(month = month(DateTime)) %>%
mutate(year = year(DateTime)) %>%
mutate(day = day(DateTime)) %>%
mutate(hour = hour(DateTime)) %>%
mutate(water_year = ifelse(month == 10 | month == 11 | month == 12, year, year-1))
save(all_data, file=paste("output_data/F1_", NodeName, "_SAS_depth_adult_discharge_probs_2010_2017_TS.RData", sep=""))
### define dataframes for 2nd loop
## Q Limits
## 12 rows: up to 4 Q crossings for each of the 3 probability thresholds;
## one column per channel position. Unfilled slots stay NA.
limits <- as.data.frame(matrix(ncol=3, nrow=12)) %>%
rename(LOB = V1, MC = V2, ROB = V3)
rownames(limits)<-c("Low_Prob_1", "Low_Prob_2", "Low_Prob_3", "Low_Prob_4",
"Med_Prob_1", "Med_Prob_2", "Med_Prob_3", "Med_Prob_4",
"High_Prob_1", "High_Prob_2", "High_Prob_3", "High_Prob_4")
time_statsx <- NULL
days_data <- NULL
## define positions
positions <- unique(all_data$variable)
# probability as a function of discharge -----------------------------------
for(p in 1:length(positions)) {
new_data <- all_data %>%
filter(variable == positions[p])
## define position
## e.g. "depth_cm_MC" -> "MC"
PositionName <- str_split(positions[p], "_", 3)[[1]]
PositionName <- PositionName[3]
## bind shallow and deeper depths by 0.1 - 10cm & 120cm
## change all prob_fit lower than 0.1 to 0.1
new_data[which(new_data$prob_fit < 0.1),"prob_fit"] <- 0.1
peak <- new_data %>%
filter(prob_fit == max(prob_fit)) #%>%
peakQ <- max(peak$Q)
min_limit <- filter(new_data, depth_cm >= 3)
min_limit <- min(min_limit$Q)
## Main channel curves
## find roots for each probability
## NOTE(review): the interpolated 0.1 roots are computed and then immediately
## overwritten with the full Q range on the next line — presumably deliberate
## (0.1 is the floor everywhere after the clamp above), but confirm; otherwise
## the first assignment is dead code.
newx1a <- RootLinearInterpolant(new_data$Q, new_data$prob_fit, 0.1)
newx1a <- c(min(new_data$Q), max(new_data$Q))
newx2a <- RootLinearInterpolant(new_data$Q, new_data$prob_fit, 0.2)
## If more than four crossings, keep only the outermost pair.
if(length(newx2a) > 4) {
newx2a <- c(newx2a[1], newx2a[length(newx2a)])
} else {
newx2a <- newx2a
}
newx3a <- RootLinearInterpolant(new_data$Q, new_data$prob_fit, 0.3)
## If the whole curve sits above 0.3, the limit is the minimum Q.
if(min(new_data$prob_fit)>0.3) {
newx3a <- min(new_data$Q)
} else {
newx3a <- newx3a
}
if(length(newx3a) > 4) {
newx3a <- c(newx3a[1], newx3a[length(newx3a)])
} else {
newx3a <- newx3a
}
## MAKE DF OF Q LIMITS
## Missing roots (vectors shorter than 4) fill their slots with NA.
limits[,p] <- c(newx1a[1], newx1a[2],newx1a[3], newx1a[4],
newx2a[1], newx2a[2],newx2a[3], newx2a[4],
newx3a[1], newx3a[2],newx3a[3],newx3a[4])
# create year_month column
new_datax <- new_data %>% unite(month_year, c(water_year,month), sep="-", remove=F)
# dataframe for stats -----------------------------------------------------
## define critical period or season for adult as all year is critical
winter <- c(1,2,3,4,11,12) ## winter months
summer <- c(5:10) ## summer months
new_datax <- new_datax %>%
mutate(season = ifelse(month %in% winter, "winter", "summer") )
## define equation for roots
## produces percentage of time for each year and season within year for each threshold
## Main channel curves
## expression_Q() returns an unevaluated expression over Q; `limit` is
## substituted with the relevant root vector so eval() can run per group below.
low_thresh <- expression_Q(newx1a, peakQ)
low_thresh <-as.expression(do.call("substitute", list(low_thresh[[1]], list(limit = as.name("newx1a")))))
med_thresh <- expression_Q(newx2a, peakQ)
med_thresh <-as.expression(do.call("substitute", list(med_thresh[[1]], list(limit = as.name("newx2a")))))
high_thresh <- expression_Q(newx3a, peakQ)
high_thresh <-as.expression(do.call("substitute", list(high_thresh[[1]], list(limit = as.name("newx3a")))))
###### calculate amount of time
## Percent of hours inside each threshold band, annually and by season.
time_stats <- new_datax %>%
dplyr::group_by(water_year) %>%
dplyr::mutate(Low = sum(eval(low_thresh))/length(DateTime)*100) %>%
dplyr::mutate(Medium = sum(eval(med_thresh))/length(DateTime)*100) %>%
dplyr::mutate(High = sum(eval(high_thresh))/length(DateTime)*100) %>%
ungroup() %>%
dplyr::group_by(water_year, season) %>%
dplyr::mutate(Low.Seasonal = sum(eval(low_thresh))/length(DateTime)*100) %>%
dplyr::mutate(Medium.Seasonal = sum(eval(med_thresh))/length(DateTime)*100) %>%
dplyr::mutate(High.Seasonal = sum(eval(high_thresh))/length(DateTime)*100) %>%
distinct(water_year, Low , Medium , High , Low.Seasonal, Medium.Seasonal, High.Seasonal) %>%
mutate(position= paste(PositionName), Node = NodeName)
time_statsx <- rbind(time_statsx, time_stats)
### count days per month
## rleid() numbers each consecutive run of in-threshold hours; the running
## row_number() within a run is the consecutive-hours counter.
new_datax <- new_datax %>%
ungroup() %>%
group_by(month, day, water_year, ID01 = data.table::rleid(eval(low_thresh))) %>%
mutate(Low = if_else(eval(low_thresh), row_number(), 0L)) %>%
ungroup() %>%
group_by(month, day, water_year, ID02 = data.table::rleid(eval(med_thresh))) %>%
mutate(Medium = if_else(eval(med_thresh), row_number(), 0L)) %>%
ungroup() %>%
group_by(month, day, water_year, ID03 = data.table::rleid(eval(high_thresh))) %>%
mutate(High = if_else(eval(high_thresh), row_number(), 0L)) %>%
mutate(position= paste(PositionName)) #%>%
# select(Q, month, water_year, day, ID01, Low, ID02, Medium, ID03, High, position, DateTime, node)
days_data <- rbind(days_data, new_datax)
} ## end 2nd loop
## limits
## note that 0.1 upper/lower limit is max/min Q to adhere to 0.1 bound
limits <- limits %>%
mutate(Species ="SAS", Life_Stage = "Adult", Hydraulic = "Depth", Node = NodeName)
write.csv(limits, paste("output_data/F1_",NodeName,"_SAS_adult_depth_Q_limits.csv", sep=""))
## Clamp for plotting only, mirroring the per-position clamp inside the loop.
all_data[which(all_data$prob_fit < 0.1),"prob_fit"] <- 0.1
file_name = paste("figures/Application_curves/Depth/", NodeName, "_SAS_adult_depth_prob_Q_thresholds.png", sep ="")
png(file_name, width = 500, height = 600)
## Probability ~ Q per position, with the Q-limit crossings overlaid:
## green = 0.1, red = 0.2, blue = 0.3; columns of `limits` are LOB/MC/ROB.
ggplot(all_data, aes(x = Q, y=prob_fit)) +
geom_line(aes(group = variable, lty = variable)) +
scale_linetype_manual(values= c("dotted", "solid", "dashed"))+
# name="Cross\nSection\nPosition",
# breaks=c("depth_cm_LOB", "depth_cm_MC", "depth_cm_ROB"),
# labels = c("LOB", "MC", "ROB")) +
facet_wrap(~variable, scales="free_x", nrow=3, labeller=labeller(variable = labels)) +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.1, x=limits[1,2]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.1, x=limits[2,2]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.1, x=limits[3,2]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.1, x=limits[4,2]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.2, x=limits[5,2]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.2, x=limits[6,2]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.2, x=limits[7,2]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.2, x=limits[8,2]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.3, x=limits[9,2]), color="blue") +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.3, x=limits[10,2]), color="blue") +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.3, x=limits[11,2]), color="blue") +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.3, x=limits[12,2]), color="blue") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.1, x=limits[1,1]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.1, x=limits[2,1]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.1, x=limits[3,1]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.1, x=limits[4,1]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.2, x=limits[5,1]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.2, x=limits[6,1]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.2, x=limits[7,1]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.2, x=limits[8,1]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.3, x=limits[9,1]), color="blue") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.3, x=limits[10,1]), color="blue") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.3, x=limits[11,1]), color="blue") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.3, x=limits[12,1]), color="blue") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.1, x=limits[1,3]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.1, x=limits[2,3]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.1, x=limits[3,3]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.1, x=limits[4,3]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.2, x=limits[5,3]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.2, x=limits[6,3]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.2, x=limits[7,3]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.2, x=limits[8,3]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.3, x=limits[9,3]), color="blue") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.3, x=limits[10,3]), color="blue") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.3, x=limits[11,3]), color="blue") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.3, x=limits[12,3]), color="blue") +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "none") +
labs(title = paste(NodeName, ": Adult/Depth: Probability ~ Q", sep=""),
y = "Probability",
x = "Q (cfs)") #+ theme_bw(base_size = 15)
dev.off()
## percentage time
melt_time<-reshape2::melt(time_statsx, id=c("season", "position", "water_year", "Node"))
melt_time <- melt_time %>%
rename( Probability_Threshold = variable) %>%
mutate(Species ="SAS", Life_Stage = "Adult", Hydraulic = "Depth", Node = NodeName)
write.csv(melt_time, paste("output_data/F1_", NodeName, "_SAS_adult_depth_time_stats.csv", sep=""))
### days per month
days_data <- select(days_data, c(Q, month, water_year, day, ID01, Low, ID02, Medium, ID03, High, position, DateTime, node) )# all probs
melt_data<-reshape2::melt(days_data, id=c("ID01", "ID02", "ID03", "day", "month", "water_year", "Q", "position", "node"))
melt_data <- rename(melt_data, Probability_Threshold = variable,
consec_hours = value)
## count how many full days i.e. 24 hours
## A "day" counts when a consecutive run reaches >= 24 hours.
total_days01 <- melt_data %>%
filter(Probability_Threshold == "Low") %>%
group_by(ID01, day, month, water_year, position) %>%
summarise(n_hours = max(consec_hours)) %>%
mutate(n_days_low = ifelse(n_hours >= 24, 1, 0)) # %>%
## count the number of days in each month
total_days_per_month01 <- total_days01 %>%
group_by(month, water_year, position) %>%
summarise(days_per_month_low = sum(n_days_low))
total_days02 <- melt_data %>%
filter(Probability_Threshold == "Medium") %>%
group_by(ID02, day, month, water_year, position) %>%
summarise(n_hours = max(consec_hours)) %>%
mutate(n_days_medium = ifelse(n_hours >= 24, 1, 0)) # %>%
total_days_per_month02 <- total_days02 %>%
group_by(month, water_year, position) %>%
summarise(days_per_month_medium = sum(n_days_medium))
# total_days_per_month02
total_days03 <- melt_data %>%
filter(Probability_Threshold == "High") %>%
group_by(ID03, day, month, water_year, position) %>%
summarise(n_hours = max(consec_hours)) %>%
mutate(n_days_high = ifelse(n_hours >= 24, 1, 0)) # %>%
total_days_per_month03 <- total_days03 %>%
group_by(month, water_year, position) %>%
summarise(days_per_month_high = sum(n_days_high))
## combine all thresholds
## cbind assumes the three summaries have identical month/year/position rows
## — TODO confirm (holds when every group appears for all three thresholds).
total_days <- cbind( total_days_per_month01,total_days_per_month02[,4], total_days_per_month03[,4])
# # create year_month column
total_days <- ungroup(total_days) %>%
unite(month_year, water_year:month, sep="-", remove=F) %>%
mutate(Node= paste(NodeName)) #%>%
## convert month year to date format
total_days$month_year <- zoo::as.yearmon(total_days$month_year)
total_days$month_year <- as.Date(total_days$month_year)
## change names of columns
total_days <- rename(total_days, Low = days_per_month_low, Medium = days_per_month_medium, High = days_per_month_high)
## define seasons
winter <- c(1,2,3,4,11,12) ## winter months
summer <- c(5:10) ## summer months
total_days <- total_days %>%
mutate(season = ifelse(month %in% winter, "winter", "summer") )
# ## melt data
melt_days<-reshape2::melt(total_days, id=c("month_year", "water_year", "month", "season", "position", "Node"))
melt_days <- melt_days %>%
rename(Probability_Threshold = variable, n_days = value) %>%
mutate(Species ="SAS", Life_Stage = "Adult", Hydraulic = "Depth")
## save df
write.csv(melt_days, paste("output_data/F1_", NodeName, "_SAS_adult_depth_total_days_long.csv", sep="") )
} ## end 1st loop
# Velocity ----------------------------------------------------------------
## upload habitat curve data (velocity -> probability fit for adult SAS)
fitdata <- read.csv("output_data/adult_velocity_prob_curve_data.csv")
## upload hydraulic data: list the per-node HecRas exports
setwd("input_data/HecRas")
h <- list.files(pattern="hydraulic")
length(h) ## 18
## set wd back to main
## NOTE(review): hard-coded absolute path only works on the author's machine;
## consider list.files(path = "input_data/HecRas", ...) to avoid setwd().
setwd("/Users/katieirving/Documents/git/flow_eco_mech")
## (removed leftover debugging assignment `n=1`; the loop below sets `n` itself)
for(n in 1: length(h)) {
NodeData <- read.csv(file=paste("input_data/HecRas/", h[n], sep=""))
F34D <- read.csv("input_data/HecRas/hydraulic_ts_F34D.csv") ## for dates
## format hydraulic data
NodeData <- NodeData %>%
mutate(Q_ts.datetime = F34D$Q_ts.datetime)
hydraul <-NodeData[,-1]
## change some names
hydraul <- hydraul %>%
rename(DateTime = Q_ts.datetime, node = Gage, Q = Flow)
## define node name
NodeName <- unique(hydraul$node)
## convert units and change names
hyd_vel <- hydraul %>%
mutate(depth_cm_LOB = (Hydr..Depth..ft..LOB*0.3048)*100,
depth_cm_MC = (Hydr..Depth..ft..MC*0.3048)*100,
depth_cm_ROB = (Hydr..Depth..ft..ROB*0.3048)*100) %>%
mutate(shear_pa_LOB = (Shear..lb.sq.ft..LOB/0.020885),
shear_pa_MC = (Shear..lb.sq.ft..MC/0.020885),
shear_pa_ROB = (Shear..lb.sq.ft..ROB/0.020885)) %>%
mutate(sp_w_LOB = (Shear..lb.sq.ft..LOB*4.44822)/0.3048,
sp_w_MC = (Shear..lb.sq.ft..MC*4.44822)/0.3048,
sp_w_ROB = (Shear..lb.sq.ft..ROB*4.44822)/0.3048) %>%
mutate(vel_m_LOB = (Avg..Vel...ft.s..LOB*0.3048),
vel_m_MC = (Avg..Vel...ft.s..MC*0.3048),
vel_m_ROB = (Avg..Vel...ft.s..ROB*0.3048)) %>%
select(-contains("ft")) %>%
mutate(date_num = seq(1,length(DateTime), 1))
## take only depth variable
hyd_vel <- hyd_vel %>% select(DateTime, node, Q, contains("vel"), date_num)
# ## melt channel position data
hyd_vel<-reshape2::melt(hyd_vel, id=c("DateTime","Q", "node", "date_num"))
labels <- c(vel_m_LOB = "Left Over Bank", vel_m_MC = "Main Channel", vel_m_ROB = "Right Over Bank")
### node figure for depth ~ Q
file_name <- paste("figures/Application_curves/nodes/", NodeName, "_Velocity_Q.png", sep="")
png(file_name, width = 500, height = 600)
ggplot(hyd_vel, aes(x = Q, y=value)) +
geom_line(aes( group = variable, lty = variable)) +
scale_linetype_manual(values= c("dotted", "solid", "dashed"),
breaks=c("vel_m_LOB", "vel_m_MC", "vel_m_ROB"))+
facet_wrap(~variable, scales="free_x", nrow=3, labeller=labeller(variable = labels)) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "none") +
labs(title = paste(NodeName, ": Velocity ~ Q"),
y = "Velocity (m/s)",
x = "Q (cfs)") #+ theme_bw(base_size = 15)
dev.off()
## change NAs to 0 in concrete overbanks
hyd_vel[is.na(hyd_vel)] <- 0
## use smooth spline to predict on new data set
new_values <-smooth.spline(fitdata$velocity_fit, fitdata$prob_fit)
all_data <- hyd_vel %>%
group_by(variable) %>%
mutate(prob_fit = predict(new_values, value)$y) %>%
rename(vel_m = value)
## save out
save(all_data, file=paste("output_data/F1_", NodeName, "_SAS_adult_velocity_discharge_probability.RData", sep=""))
# format probability time series ------------------------------------------
## look at data using lubridate etc
## format date time
all_data$DateTime<-as.POSIXct(all_data$DateTime,
format = "%Y-%m-%d %H:%M",
tz = "GMT")
## create year, month, day and hour columns and add water year
all_data <- all_data %>%
mutate(month = month(DateTime)) %>%
mutate(year = year(DateTime)) %>%
mutate(day = day(DateTime)) %>%
mutate(hour = hour(DateTime)) %>%
mutate(water_year = ifelse(month == 10 | month == 11 | month == 12, year, year-1))
save(all_data, file=paste("output_data/F1_", NodeName, "_SAS_velocity_adult_discharge_probs_2010_2017_TS.RData", sep=""))
### define dataframes for 2nd loop
## Q Limits
limits <- as.data.frame(matrix(ncol=3, nrow=12)) %>%
rename(LOB = V1, MC = V2, ROB = V3)
rownames(limits)<-c("Low_Prob_1", "Low_Prob_2", "Low_Prob_3", "Low_Prob_4",
"Med_Prob_1", "Med_Prob_2", "Med_Prob_3", "Med_Prob_4",
"High_Prob_1", "High_Prob_2", "High_Prob_3", "High_Prob_4")
time_statsx <- NULL
days_data <- NULL
## define positions
positions <- unique(all_data$variable)
# probability as a function of discharge -----------------------------------
for(p in 1:length(positions)) {
new_data <- all_data %>%
filter(variable == positions[p])
## define position
PositionName <- str_split(positions[p], "_", 3)[[1]]
PositionName <- PositionName[3]
## bind shallow and deeper depths by 0.1 - 10cm & 120cm
## change all prob_fit lower than 0.1 to 0.1
peak <- new_data %>%
filter(prob_fit == max(prob_fit)) #%>%
peakQ <- max(peak$Q)
min_limit <- filter(new_data, vel_m >0)
min_limit <- min(min_limit$Q)
## Main channel curves
## find roots for each probability
## Q values where the velocity-probability curve crosses 0.1.
newx1a <- RootLinearInterpolant(new_data$Q, new_data$prob_fit, 0.1)
## If more than four crossings are found, keep only the first and last.
## Fix: the original referenced `newx1aR`, an undefined object, which raised
## "object 'newx1aR' not found" whenever this branch was taken.
if (length(newx1a) > 4) {
  newx1a <- c(newx1a[1], newx1a[length(newx1a)])
}
newx2a <- RootLinearInterpolant(new_data$Q, new_data$prob_fit, 0.2)
if(length(newx2a) > 4) {
newx2a <- c(newx2a[1], newx2a[length(newx2a)])
} else {
newx2a <- newx2a
}
newx3a <- RootLinearInterpolant(new_data$Q, new_data$prob_fit, 0.3)
if(min(new_data$prob_fit)>0.3) {
newx3a <- min(new_data$Q)
} else {
newx3a <- newx3a
}
if(length(newx3a) > 4) {
newx3a <- c(newx3a[1], newx3a[length(newx3a)])
} else {
newx3a <- newx3a
}
## MAKE DF OF Q LIMITS
limits[,p] <- c(newx1a[1], newx1a[2],newx1a[3], newx1a[4],
newx2a[1], newx2a[2],newx2a[3], newx2a[4],
newx3a[1], newx3a[2],newx3a[3],newx3a[4])
# create year_month column
new_datax <- new_data %>% unite(month_year, c(water_year,month), sep="-", remove=F)
# dataframe for stats -----------------------------------------------------
## define critical period or season for adult as all year is critical
winter <- c(1,2,3,4,11,12) ## winter months
summer <- c(5:10) ## summer months
new_datax <- new_datax %>%
mutate(season = ifelse(month %in% winter, "winter", "summer") )
## define equation for roots
## produces percentage of time for each year and season within year for each threshold
## Main channel curves
low_thresh <- expression_Q(newx1a, peakQ)
low_thresh <-as.expression(do.call("substitute", list(low_thresh[[1]], list(limit = as.name("newx1a")))))
med_thresh <- expression_Q(newx2a, peakQ)
med_thresh <-as.expression(do.call("substitute", list(med_thresh[[1]], list(limit = as.name("newx2a")))))
high_thresh <- expression_Q(newx3a, peakQ)
high_thresh <-as.expression(do.call("substitute", list(high_thresh[[1]], list(limit = as.name("newx3a")))))
###### calculate amount of time
time_stats <- new_datax %>%
dplyr::group_by(water_year) %>%
dplyr::mutate(Low = sum(eval(low_thresh))/length(DateTime)*100) %>%
dplyr::mutate(Medium = sum(eval(med_thresh))/length(DateTime)*100) %>%
dplyr::mutate(High = sum(eval(high_thresh))/length(DateTime)*100) %>%
ungroup() %>%
dplyr::group_by(water_year, season) %>%
dplyr::mutate(Low.Seasonal = sum(eval(low_thresh))/length(DateTime)*100) %>%
dplyr::mutate(Medium.Seasonal = sum(eval(med_thresh))/length(DateTime)*100) %>%
dplyr::mutate(High.Seasonal = sum(eval(high_thresh))/length(DateTime)*100) %>%
distinct(water_year, Low , Medium , High , Low.Seasonal, Medium.Seasonal, High.Seasonal) %>%
mutate(position= paste(PositionName), Node = NodeName)
time_statsx <- rbind(time_statsx, time_stats)
### count days per month
new_datax <- new_datax %>%
ungroup() %>%
group_by(month, day, water_year, ID01 = data.table::rleid(eval(low_thresh))) %>%
mutate(Low = if_else(eval(low_thresh), row_number(), 0L)) %>%
ungroup() %>%
group_by(month, day, water_year, ID02 = data.table::rleid(eval(med_thresh))) %>%
mutate(Medium = if_else(eval(med_thresh), row_number(), 0L)) %>%
ungroup() %>%
group_by(month, day, water_year, ID03 = data.table::rleid(eval(high_thresh))) %>%
mutate(High = if_else(eval(high_thresh), row_number(), 0L)) %>%
mutate(position= paste(PositionName)) #%>%
# select(Q, month, water_year, day, ID01, Low, ID02, Medium, ID03, High, position, DateTime, node)
days_data <- rbind(days_data, new_datax)
} ## end 2nd loop
## limits
## note that 0.1 upper/lower limit is max/min Q to adhere to 0.1 bound
limits <- limits %>%
mutate(Species ="SAS", Life_Stage = "Adult", Hydraulic = "Velocity", Node = NodeName)
write.csv(limits, paste("output_data/F1_",NodeName,"_SAS_adult_velocity_Q_limits.csv", sep=""))
## plot thresholds
## Fix: filename said "depth" for the velocity-threshold figure (copy-paste
## from the depth section); renamed to match the depth section's naming scheme.
file_name = paste("figures/Application_curves/Velocity/", NodeName, "_SAS_adult_velocity_prob_Q_thresholds.png", sep ="")
png(file_name, width = 500, height = 600)
ggplot(all_data, aes(x = Q, y=prob_fit)) +
geom_line(aes(group = variable, lty = variable)) +
scale_linetype_manual(values= c("dotted", "solid", "dashed"))+
# name="Cross\nSection\nPosition",
# breaks=c("depth_cm_LOB", "depth_cm_MC", "depth_cm_ROB"),
# labels = c("LOB", "MC", "ROB")) +
facet_wrap(~variable, scales="free_x", nrow=3, labeller=labeller(variable = labels)) +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.1, x=limits[1,2]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.1, x=limits[2,2]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.1, x=limits[3,2]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.1, x=limits[4,2]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.2, x=limits[5,2]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.2, x=limits[6,2]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.2, x=limits[7,2]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.2, x=limits[8,2]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.3, x=limits[9,2]), color="blue") +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.3, x=limits[10,2]), color="blue") +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.3, x=limits[11,2]), color="blue") +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.3, x=limits[12,2]), color="blue") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.1, x=limits[1,1]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.1, x=limits[2,1]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.1, x=limits[3,1]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.1, x=limits[4,1]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.2, x=limits[5,1]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.2, x=limits[6,1]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.2, x=limits[7,1]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.2, x=limits[8,1]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.3, x=limits[9,1]), color="blue") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.3, x=limits[10,1]), color="blue") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.3, x=limits[11,1]), color="blue") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.3, x=limits[12,1]), color="blue") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.1, x=limits[1,3]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.1, x=limits[2,3]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.1, x=limits[3,3]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.1, x=limits[4,3]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.2, x=limits[5,3]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.2, x=limits[6,3]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.2, x=limits[7,3]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.2, x=limits[8,3]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.3, x=limits[9,3]), color="blue") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.3, x=limits[10,3]), color="blue") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.3, x=limits[11,3]), color="blue") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.3, x=limits[12,3]), color="blue") +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "none") +
labs(title = paste(NodeName, ": Adult/Velocity: Probability ~ Q", sep=""),
y = "Probability",
x = "Q (cfs)") #+ theme_bw(base_size = 15)
dev.off()
## percentage time
melt_time<-reshape2::melt(time_statsx, id=c("season", "position", "water_year", "Node"))
melt_time <- melt_time %>%
rename( Probability_Threshold = variable) %>%
mutate(Species ="SAS", Life_Stage = "Adult", Hydraulic = "Velocity", Node = NodeName)
write.csv(melt_time, paste("output_data/F1_", NodeName, "_SAS_adult_velocity_time_stats.csv", sep=""))
### days per month
days_data <- select(days_data, c(Q, month, water_year, day, ID01, Low, ID02, Medium, ID03, High, position, DateTime, node) )# all probs
melt_data<-reshape2::melt(days_data, id=c("ID01", "ID02", "ID03", "day", "month", "water_year", "Q", "position", "node"))
melt_data <- rename(melt_data, Probability_Threshold = variable,
consec_hours = value)
## count how many full days i.e. 24 hours
total_days01 <- melt_data %>%
filter(Probability_Threshold == "Low") %>%
group_by(ID01, day, month, water_year, position) %>%
summarise(n_hours = max(consec_hours)) %>%
mutate(n_days_low = ifelse(n_hours >= 24, 1, 0)) # %>%
## count the number of days in each month
total_days_per_month01 <- total_days01 %>%
group_by(month, water_year, position) %>%
summarise(days_per_month_low = sum(n_days_low))
total_days02 <- melt_data %>%
filter(Probability_Threshold == "Medium") %>%
group_by(ID02, day, month, water_year, position) %>%
summarise(n_hours = max(consec_hours)) %>%
mutate(n_days_medium = ifelse(n_hours >= 24, 1, 0)) # %>%
total_days_per_month02 <- total_days02 %>%
group_by(month, water_year, position) %>%
summarise(days_per_month_medium = sum(n_days_medium))
# total_days_per_month02
total_days03 <- melt_data %>%
filter(Probability_Threshold == "High") %>%
group_by(ID03, day, month, water_year, position) %>%
summarise(n_hours = max(consec_hours)) %>%
mutate(n_days_high = ifelse(n_hours >= 24, 1, 0)) # %>%
total_days_per_month03 <- total_days03 %>%
group_by(month, water_year, position) %>%
summarise(days_per_month_high = sum(n_days_high))
## combine all thresholds
total_days <- cbind( total_days_per_month01,total_days_per_month02[,4], total_days_per_month03[,4])
# # create year_month column
total_days <- ungroup(total_days) %>%
unite(month_year, water_year:month, sep="-", remove=F) %>%
mutate(Node= paste(NodeName)) #%>%
## convert month year to date format
total_days$month_year <- zoo::as.yearmon(total_days$month_year)
total_days$month_year <- as.Date(total_days$month_year)
## change names of columns
total_days <- rename(total_days, Low = days_per_month_low, Medium = days_per_month_medium, High = days_per_month_high)
## define seasons
winter <- c(1,2,3,4,11,12) ## winter months
summer <- c(5:10) ## summer months
total_days <- total_days %>%
mutate(season = ifelse(month %in% winter, "winter", "summer") )
# ## melt data
melt_days<-reshape2::melt(total_days, id=c("month_year", "water_year", "month", "season", "position", "Node"))
melt_days <- melt_days %>%
rename(Probability_Threshold = variable, n_days = value) %>%
mutate(Species ="SAS", Life_Stage = "Adult", Hydraulic = "Velocity")
## save df
write.csv(melt_days, paste("output_data/F1_", NodeName, "_SAS_adult_velocity_total_days_long.csv", sep="") )
} ## end 1st loop
| /scripts/Auto/F1_auto_sas_adult_depth_velocity.R | no_license | ksirving/flow_eco_mech | R | false | false | 34,965 | r | ## Depth curves - model and application
## adult
## produces probability curves for depth, and application to sample node data (time series) for adult and Juvenile
## also data distributions
## packages
library(tidyverse)
library(tidyr)
library(sm)
library(lubridate) # work with dates
library(dplyr) # data manipulation (filter, summarize, mutate)
library(ggplot2) # graphics
library(gridExtra) # tile several plots next to each other
library(scales)
library(data.table)
library(zoo)
library(scales)
## function to find roots
load(file="root_interpolation_function.Rdata")
## define root equation
load(file="expression_Q_limit_function.RData")
# Combine with hydraulic data -------------------------------------------
## upload habitat curve data
## fitted depth-suitability curve (depth_fit -> prob_fit), produced upstream
fitdata <- read.csv("output_data/old_data/adult_depth_prob_curve_data.csv")
## upload hydraulic data
## NOTE(review): setwd() with relative/absolute paths is fragile; consider
## file.path()/here::here() instead of changing the working directory.
setwd("input_data/HecRas")
h <- list.files(pattern="hydraulic")
length(h) ## 20
## set wd back to main
## NOTE(review): hard-coded absolute path; breaks on any other machine.
setwd("/Users/katieirving/Documents/git/flow_eco_mech")
for(n in 1: length(h)) {
## Read this node's hydraulic series; timestamps are taken from the F34D
## file because the per-node files carry no DateTime column.
NodeData <- read.csv(file=paste("input_data/HecRas/", h[n], sep=""))
F34D <- read.csv("input_data/HecRas/hydraulic_ts_F34D.csv") ## for dates
## format hydraulic data
NodeData <- NodeData %>%
mutate(Q_ts.datetime = F34D$Q_ts.datetime)
hydraul <-NodeData[,-1]
## change some names
hydraul <- hydraul %>%
rename(DateTime = Q_ts.datetime, node = Gage, Q = Flow)
## define node name
NodeName <- unique(hydraul$node)
## convert units and change names:
## depth ft -> cm (*0.3048*100), shear lb/ft^2 -> Pa (/0.020885),
## stream power from shear, velocity ft/s -> m/s (*0.3048)
hyd_dep <- hydraul %>%
mutate(depth_cm_LOB = (Hydr..Depth..ft..LOB*0.3048)*100,
depth_cm_MC = (Hydr..Depth..ft..MC*0.3048)*100,
depth_cm_ROB = (Hydr..Depth..ft..ROB*0.3048)*100) %>%
mutate(shear_pa_LOB = (Shear..lb.sq.ft..LOB/0.020885),
shear_pa_MC = (Shear..lb.sq.ft..MC/0.020885),
shear_pa_ROB = (Shear..lb.sq.ft..ROB/0.020885)) %>%
mutate(sp_w_LOB = (Shear..lb.sq.ft..LOB*4.44822)/0.3048,
sp_w_MC = (Shear..lb.sq.ft..MC*4.44822)/0.3048,
sp_w_ROB = (Shear..lb.sq.ft..ROB*4.44822)/0.3048) %>%
mutate(vel_m_LOB = (Avg..Vel...ft.s..LOB*0.3048),
vel_m_MC = (Avg..Vel...ft.s..MC*0.3048),
vel_m_ROB = (Avg..Vel...ft.s..ROB*0.3048)) %>%
select(-contains("ft")) %>%
mutate(date_num = seq(1,length(DateTime), 1))
## take only depth variable
hyd_dep <- hyd_dep %>% select(DateTime, node, Q, contains("depth"), date_num)
# ## melt channel position data (one row per DateTime x channel position)
hyd_dep<-reshape2::melt(hyd_dep, id=c("DateTime","Q", "node", "date_num"))
labels <- c(depth_cm_LOB = "Left Over Bank", depth_cm_MC = "Main Channel", depth_cm_ROB = "Right Over Bank")
### node figure for depth ~ Q
file_name <- paste("figures/Application_curves/nodes/", NodeName, "_Depth_Q.png", sep="")
png(file_name, width = 500, height = 600)
## BUG FIX: ggplot objects are not auto-printed inside a for loop, so the
## png device previously captured an empty file; print() renders the plot.
print(ggplot(hyd_dep, aes(x = Q, y=value)) +
geom_line(aes( group = variable, lty = variable)) +
scale_linetype_manual(values= c("dotted", "solid", "dashed"),
breaks=c("depth_cm_LOB", "depth_cm_MC", "depth_cm_ROB"))+
facet_wrap(~variable, scales="free_x", nrow=3, labeller=labeller(variable = labels)) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "none") +
labs(title = paste(NodeName, ": Depth ~ Q"),
y = "Depth (cm)",
x = "Q (cfs)")) #+ theme_bw(base_size = 15)
dev.off()
## change NAs to 0 in concrete overbanks
hyd_dep[is.na(hyd_dep)] <- 0
## use smooth spline to predict on new data set:
## probability of suitable habitat for each observed depth, per position
new_values <-smooth.spline(fitdata$depth_fit, fitdata$prob_fit)
all_data <- hyd_dep %>%
group_by(variable) %>%
mutate(prob_fit = predict(new_values, value)$y) %>%
rename(depth_cm = value)
## save out
save(all_data, file=paste("output_data/F1_", NodeName, "_SAS_adult_depth_discharge_probability.RData", sep=""))
# format probability time series ------------------------------------------
## look at data using lubridate etc
## format date time
all_data$DateTime<-as.POSIXct(all_data$DateTime,
format = "%Y-%m-%d %H:%M",
tz = "GMT")
## create year, month, day and hour columns and add water year
## water year starts in October: Oct-Dec belong to the calendar year,
## Jan-Sep are assigned to the previous year
all_data <- all_data %>%
mutate(month = month(DateTime)) %>%
mutate(year = year(DateTime)) %>%
mutate(day = day(DateTime)) %>%
mutate(hour = hour(DateTime)) %>%
mutate(water_year = ifelse(month == 10 | month == 11 | month == 12, year, year-1))
save(all_data, file=paste("output_data/F1_", NodeName, "_SAS_depth_adult_discharge_probs_2010_2017_TS.RData", sep=""))
### define dataframes for 2nd loop
## Q Limits: 12 rows = 3 probability thresholds x up to 4 curve crossings,
## 1 column per channel position (filled inside the position loop below)
limits <- as.data.frame(matrix(ncol=3, nrow=12)) %>%
rename(LOB = V1, MC = V2, ROB = V3)
rownames(limits)<-c("Low_Prob_1", "Low_Prob_2", "Low_Prob_3", "Low_Prob_4",
"Med_Prob_1", "Med_Prob_2", "Med_Prob_3", "Med_Prob_4",
"High_Prob_1", "High_Prob_2", "High_Prob_3", "High_Prob_4")
## accumulators filled by the position loop (grown by rbind per iteration)
time_statsx <- NULL
days_data <- NULL
## define positions
positions <- unique(all_data$variable)
# probability as a function of discharge -----------------------------------
## For each channel position: find the discharges (Q) where the suitability
## curve crosses the 0.1/0.2/0.3 probability thresholds, then compute percent
## time and consecutive-hour runs above each threshold.
for(p in 1:length(positions)) {
new_data <- all_data %>%
filter(variable == positions[p])
## define position: third token of e.g. "depth_cm_MC" -> "MC"
PositionName <- str_split(positions[p], "_", 3)[[1]]
PositionName <- PositionName[3]
## bind shallow and deeper depths by 0.1 - 10cm & 120cm
## change all prob_fit lower than 0.1 to 0.1 (floor the curve)
new_data[which(new_data$prob_fit < 0.1),"prob_fit"] <- 0.1
peak <- new_data %>%
filter(prob_fit == max(prob_fit)) #%>%
peakQ <- max(peak$Q)
## min_limit computed but not used below -- presumably diagnostic; TODO confirm
min_limit <- filter(new_data, depth_cm >= 3)
min_limit <- min(min_limit$Q)
## Main channel curves
## find roots for each probability
newx1a <- RootLinearInterpolant(new_data$Q, new_data$prob_fit, 0.1)
## NOTE: the interpolated 0.1 roots are deliberately overwritten with the
## full Q range (curve was floored at 0.1 above, so bounds are min/max Q)
newx1a <- c(min(new_data$Q), max(new_data$Q))
newx2a <- RootLinearInterpolant(new_data$Q, new_data$prob_fit, 0.2)
## keep only outermost pair when more than 4 crossings are found
if(length(newx2a) > 4) {
newx2a <- c(newx2a[1], newx2a[length(newx2a)])
} else {
newx2a <- newx2a
}
newx3a <- RootLinearInterpolant(new_data$Q, new_data$prob_fit, 0.3)
## curve never drops below 0.3: threshold is met from the lowest Q
if(min(new_data$prob_fit)>0.3) {
newx3a <- min(new_data$Q)
} else {
newx3a <- newx3a
}
if(length(newx3a) > 4) {
newx3a <- c(newx3a[1], newx3a[length(newx3a)])
} else {
newx3a <- newx3a
}
## MAKE DF OF Q LIMITS (missing roots become NA)
limits[,p] <- c(newx1a[1], newx1a[2],newx1a[3], newx1a[4],
newx2a[1], newx2a[2],newx2a[3], newx2a[4],
newx3a[1], newx3a[2],newx3a[3],newx3a[4])
# create year_month column
new_datax <- new_data %>% unite(month_year, c(water_year,month), sep="-", remove=F)
# dataframe for stats -----------------------------------------------------
## define critical period or season for adult as all year is critical
winter <- c(1,2,3,4,11,12) ## winter months
summer <- c(5:10) ## summer months
new_datax <- new_datax %>%
mutate(season = ifelse(month %in% winter, "winter", "summer") )
## define equation for roots: expression_Q() builds a logical expression on Q
## (e.g. Q within the root bounds); substitute() binds its `limit` symbol to
## the relevant root vector so eval() below tests each row's Q
low_thresh <- expression_Q(newx1a, peakQ)
low_thresh <-as.expression(do.call("substitute", list(low_thresh[[1]], list(limit = as.name("newx1a")))))
med_thresh <- expression_Q(newx2a, peakQ)
med_thresh <-as.expression(do.call("substitute", list(med_thresh[[1]], list(limit = as.name("newx2a")))))
high_thresh <- expression_Q(newx3a, peakQ)
high_thresh <-as.expression(do.call("substitute", list(high_thresh[[1]], list(limit = as.name("newx3a")))))
###### calculate amount of time: % of hourly records meeting each threshold,
###### annually and per season within each water year
time_stats <- new_datax %>%
dplyr::group_by(water_year) %>%
dplyr::mutate(Low = sum(eval(low_thresh))/length(DateTime)*100) %>%
dplyr::mutate(Medium = sum(eval(med_thresh))/length(DateTime)*100) %>%
dplyr::mutate(High = sum(eval(high_thresh))/length(DateTime)*100) %>%
ungroup() %>%
dplyr::group_by(water_year, season) %>%
dplyr::mutate(Low.Seasonal = sum(eval(low_thresh))/length(DateTime)*100) %>%
dplyr::mutate(Medium.Seasonal = sum(eval(med_thresh))/length(DateTime)*100) %>%
dplyr::mutate(High.Seasonal = sum(eval(high_thresh))/length(DateTime)*100) %>%
distinct(water_year, Low , Medium , High , Low.Seasonal, Medium.Seasonal, High.Seasonal) %>%
mutate(position= paste(PositionName), Node = NodeName)
time_statsx <- rbind(time_statsx, time_stats)
### count days per month: rleid() numbers each consecutive run of TRUE/FALSE,
### and row_number() within a run counts consecutive hours meeting the threshold
new_datax <- new_datax %>%
ungroup() %>%
group_by(month, day, water_year, ID01 = data.table::rleid(eval(low_thresh))) %>%
mutate(Low = if_else(eval(low_thresh), row_number(), 0L)) %>%
ungroup() %>%
group_by(month, day, water_year, ID02 = data.table::rleid(eval(med_thresh))) %>%
mutate(Medium = if_else(eval(med_thresh), row_number(), 0L)) %>%
ungroup() %>%
group_by(month, day, water_year, ID03 = data.table::rleid(eval(high_thresh))) %>%
mutate(High = if_else(eval(high_thresh), row_number(), 0L)) %>%
mutate(position= paste(PositionName)) #%>%
# select(Q, month, water_year, day, ID01, Low, ID02, Medium, ID03, High, position, DateTime, node)
days_data <- rbind(days_data, new_datax)
} ## end 2nd loop
## limits
## note that 0.1 upper/lower limit is max/min Q to adhere to 0.1 bound
limits <- limits %>%
mutate(Species ="SAS", Life_Stage = "Adult", Hydraulic = "Depth", Node = NodeName)
write.csv(limits, paste("output_data/F1_",NodeName,"_SAS_adult_depth_Q_limits.csv", sep=""))
## floor probabilities at 0.1 for plotting
all_data[which(all_data$prob_fit < 0.1),"prob_fit"] <- 0.1
file_name <- paste("figures/Application_curves/Depth/", NodeName, "_SAS_adult_depth_prob_Q_thresholds.png", sep ="")
png(file_name, width = 500, height = 600)
## BUG FIX: a bare ggplot call inside a for loop is not auto-printed, so the
## png previously came out empty; build the plot object and print() it.
## Points mark the Q threshold crossings: green = 0.1, red = 0.2, blue = 0.3.
p <- ggplot(all_data, aes(x = Q, y=prob_fit)) +
geom_line(aes(group = variable, lty = variable)) +
scale_linetype_manual(values= c("dotted", "solid", "dashed"))+
# name="Cross\nSection\nPosition",
# breaks=c("depth_cm_LOB", "depth_cm_MC", "depth_cm_ROB"),
# labels = c("LOB", "MC", "ROB")) +
facet_wrap(~variable, scales="free_x", nrow=3, labeller=labeller(variable = labels)) +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.1, x=limits[1,2]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.1, x=limits[2,2]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.1, x=limits[3,2]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.1, x=limits[4,2]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.2, x=limits[5,2]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.2, x=limits[6,2]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.2, x=limits[7,2]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.2, x=limits[8,2]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.3, x=limits[9,2]), color="blue") +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.3, x=limits[10,2]), color="blue") +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.3, x=limits[11,2]), color="blue") +
geom_point(data = subset(all_data, variable =="depth_cm_MC"), aes(y=0.3, x=limits[12,2]), color="blue") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.1, x=limits[1,1]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.1, x=limits[2,1]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.1, x=limits[3,1]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.1, x=limits[4,1]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.2, x=limits[5,1]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.2, x=limits[6,1]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.2, x=limits[7,1]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.2, x=limits[8,1]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.3, x=limits[9,1]), color="blue") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.3, x=limits[10,1]), color="blue") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.3, x=limits[11,1]), color="blue") +
geom_point(data = subset(all_data, variable =="depth_cm_LOB"), aes(y=0.3, x=limits[12,1]), color="blue") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.1, x=limits[1,3]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.1, x=limits[2,3]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.1, x=limits[3,3]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.1, x=limits[4,3]), color="green") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.2, x=limits[5,3]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.2, x=limits[6,3]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.2, x=limits[7,3]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.2, x=limits[8,3]), color="red") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.3, x=limits[9,3]), color="blue") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.3, x=limits[10,3]), color="blue") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.3, x=limits[11,3]), color="blue") +
geom_point(data = subset(all_data, variable =="depth_cm_ROB"), aes(y=0.3, x=limits[12,3]), color="blue") +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "none") +
labs(title = paste(NodeName, ": Adult/Depth: Probability ~ Q", sep=""),
y = "Probability",
x = "Q (cfs)") #+ theme_bw(base_size = 15)
print(p)
dev.off()
## percentage time: long-format % of time per threshold, tagged with species
## life stage and hydraulic variable, then written to csv
melt_time<-reshape2::melt(time_statsx, id=c("season", "position", "water_year", "Node"))
melt_time <- melt_time %>%
rename( Probability_Threshold = variable) %>%
mutate(Species ="SAS", Life_Stage = "Adult", Hydraulic = "Depth", Node = NodeName)
write.csv(melt_time, paste("output_data/F1_", NodeName, "_SAS_adult_depth_time_stats.csv", sep=""))
### days per month
days_data <- select(days_data, c(Q, month, water_year, day, ID01, Low, ID02, Medium, ID03, High, position, DateTime, node) )# all probs
melt_data<-reshape2::melt(days_data, id=c("ID01", "ID02", "ID03", "day", "month", "water_year", "Q", "position", "node"))
melt_data <- rename(melt_data, Probability_Threshold = variable,
consec_hours = value)
## count how many full days i.e. 24 hours
## a "day" counts only if a single consecutive run reaches >= 24 hours
total_days01 <- melt_data %>%
filter(Probability_Threshold == "Low") %>%
group_by(ID01, day, month, water_year, position) %>%
summarise(n_hours = max(consec_hours)) %>%
mutate(n_days_low = ifelse(n_hours >= 24, 1, 0)) # %>%
## count the number of days in each month
total_days_per_month01 <- total_days01 %>%
group_by(month, water_year, position) %>%
summarise(days_per_month_low = sum(n_days_low))
total_days02 <- melt_data %>%
filter(Probability_Threshold == "Medium") %>%
group_by(ID02, day, month, water_year, position) %>%
summarise(n_hours = max(consec_hours)) %>%
mutate(n_days_medium = ifelse(n_hours >= 24, 1, 0)) # %>%
total_days_per_month02 <- total_days02 %>%
group_by(month, water_year, position) %>%
summarise(days_per_month_medium = sum(n_days_medium))
# total_days_per_month02
total_days03 <- melt_data %>%
filter(Probability_Threshold == "High") %>%
group_by(ID03, day, month, water_year, position) %>%
summarise(n_hours = max(consec_hours)) %>%
mutate(n_days_high = ifelse(n_hours >= 24, 1, 0)) # %>%
total_days_per_month03 <- total_days03 %>%
group_by(month, water_year, position) %>%
summarise(days_per_month_high = sum(n_days_high))
## combine all thresholds
## NOTE(review): cbind assumes the three monthly summaries have identical row
## order (same month/water_year/position grouping) -- a join would be safer; TODO confirm
total_days <- cbind( total_days_per_month01,total_days_per_month02[,4], total_days_per_month03[,4])
# # create year_month column
total_days <- ungroup(total_days) %>%
unite(month_year, water_year:month, sep="-", remove=F) %>%
mutate(Node= paste(NodeName)) #%>%
## convert month year to date format
total_days$month_year <- zoo::as.yearmon(total_days$month_year)
total_days$month_year <- as.Date(total_days$month_year)
## change names of columns
total_days <- rename(total_days, Low = days_per_month_low, Medium = days_per_month_medium, High = days_per_month_high)
## define seasons
winter <- c(1,2,3,4,11,12) ## winter months
summer <- c(5:10) ## summer months
total_days <- total_days %>%
mutate(season = ifelse(month %in% winter, "winter", "summer") )
# ## melt data
melt_days<-reshape2::melt(total_days, id=c("month_year", "water_year", "month", "season", "position", "Node"))
melt_days <- melt_days %>%
rename(Probability_Threshold = variable, n_days = value) %>%
mutate(Species ="SAS", Life_Stage = "Adult", Hydraulic = "Depth")
## save df
write.csv(melt_days, paste("output_data/F1_", NodeName, "_SAS_adult_depth_total_days_long.csv", sep="") )
} ## end 1st loop
# Velocity ----------------------------------------------------------------
## upload habitat curve data (fitted velocity-suitability curve)
fitdata <- read.csv("output_data/adult_velocity_prob_curve_data.csv")
## upload hydraulic data
## NOTE(review): setwd() + hard-coded absolute path below is fragile;
## consider file.path()/here::here() instead.
setwd("input_data/HecRas")
h <- list.files(pattern="hydraulic")
length(h) ## 18
## set wd back to main
setwd("/Users/katieirving/Documents/git/flow_eco_mech")
## (removed stray debug assignment `n=1`; the loop below sets n itself)
for(n in 1: length(h)) {
## Read this node's hydraulic series; timestamps come from the F34D file
## (the per-node files carry no DateTime column).
NodeData <- read.csv(file=paste("input_data/HecRas/", h[n], sep=""))
F34D <- read.csv("input_data/HecRas/hydraulic_ts_F34D.csv") ## for dates
## format hydraulic data
NodeData <- NodeData %>%
mutate(Q_ts.datetime = F34D$Q_ts.datetime)
hydraul <-NodeData[,-1]
## change some names
hydraul <- hydraul %>%
rename(DateTime = Q_ts.datetime, node = Gage, Q = Flow)
## define node name
NodeName <- unique(hydraul$node)
## convert units and change names:
## depth ft -> cm, shear lb/ft^2 -> Pa, stream power, velocity ft/s -> m/s
hyd_vel <- hydraul %>%
mutate(depth_cm_LOB = (Hydr..Depth..ft..LOB*0.3048)*100,
depth_cm_MC = (Hydr..Depth..ft..MC*0.3048)*100,
depth_cm_ROB = (Hydr..Depth..ft..ROB*0.3048)*100) %>%
mutate(shear_pa_LOB = (Shear..lb.sq.ft..LOB/0.020885),
shear_pa_MC = (Shear..lb.sq.ft..MC/0.020885),
shear_pa_ROB = (Shear..lb.sq.ft..ROB/0.020885)) %>%
mutate(sp_w_LOB = (Shear..lb.sq.ft..LOB*4.44822)/0.3048,
sp_w_MC = (Shear..lb.sq.ft..MC*4.44822)/0.3048,
sp_w_ROB = (Shear..lb.sq.ft..ROB*4.44822)/0.3048) %>%
mutate(vel_m_LOB = (Avg..Vel...ft.s..LOB*0.3048),
vel_m_MC = (Avg..Vel...ft.s..MC*0.3048),
vel_m_ROB = (Avg..Vel...ft.s..ROB*0.3048)) %>%
select(-contains("ft")) %>%
mutate(date_num = seq(1,length(DateTime), 1))
## take only velocity variable (comment previously said "depth" -- stale copy-paste)
hyd_vel <- hyd_vel %>% select(DateTime, node, Q, contains("vel"), date_num)
# ## melt channel position data
hyd_vel<-reshape2::melt(hyd_vel, id=c("DateTime","Q", "node", "date_num"))
labels <- c(vel_m_LOB = "Left Over Bank", vel_m_MC = "Main Channel", vel_m_ROB = "Right Over Bank")
### node figure for velocity ~ Q
file_name <- paste("figures/Application_curves/nodes/", NodeName, "_Velocity_Q.png", sep="")
png(file_name, width = 500, height = 600)
## BUG FIX: ggplot objects are not auto-printed inside a for loop, so the
## png device previously captured an empty file; print() renders the plot.
print(ggplot(hyd_vel, aes(x = Q, y=value)) +
geom_line(aes( group = variable, lty = variable)) +
scale_linetype_manual(values= c("dotted", "solid", "dashed"),
breaks=c("vel_m_LOB", "vel_m_MC", "vel_m_ROB"))+
facet_wrap(~variable, scales="free_x", nrow=3, labeller=labeller(variable = labels)) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "none") +
labs(title = paste(NodeName, ": Velocity ~ Q"),
y = "Velocity (m/s)",
x = "Q (cfs)")) #+ theme_bw(base_size = 15)
dev.off()
## change NAs to 0 in concrete overbanks
hyd_vel[is.na(hyd_vel)] <- 0
## use smooth spline to predict suitability probability for each velocity
new_values <-smooth.spline(fitdata$velocity_fit, fitdata$prob_fit)
all_data <- hyd_vel %>%
group_by(variable) %>%
mutate(prob_fit = predict(new_values, value)$y) %>%
rename(vel_m = value)
## save out
save(all_data, file=paste("output_data/F1_", NodeName, "_SAS_adult_velocity_discharge_probability.RData", sep=""))
# format probability time series ------------------------------------------
## look at data using lubridate etc
## format date time
all_data$DateTime<-as.POSIXct(all_data$DateTime,
format = "%Y-%m-%d %H:%M",
tz = "GMT")
## create year, month, day and hour columns and add water year
## water year starts in October: Oct-Dec keep the calendar year,
## Jan-Sep are assigned to the previous year
all_data <- all_data %>%
mutate(month = month(DateTime)) %>%
mutate(year = year(DateTime)) %>%
mutate(day = day(DateTime)) %>%
mutate(hour = hour(DateTime)) %>%
mutate(water_year = ifelse(month == 10 | month == 11 | month == 12, year, year-1))
save(all_data, file=paste("output_data/F1_", NodeName, "_SAS_velocity_adult_discharge_probs_2010_2017_TS.RData", sep=""))
### define dataframes for 2nd loop
## Q Limits: 12 rows = 3 probability thresholds x up to 4 curve crossings,
## one column per channel position (filled inside the position loop)
limits <- as.data.frame(matrix(ncol=3, nrow=12)) %>%
rename(LOB = V1, MC = V2, ROB = V3)
rownames(limits)<-c("Low_Prob_1", "Low_Prob_2", "Low_Prob_3", "Low_Prob_4",
"Med_Prob_1", "Med_Prob_2", "Med_Prob_3", "Med_Prob_4",
"High_Prob_1", "High_Prob_2", "High_Prob_3", "High_Prob_4")
## accumulators filled by the position loop (grown by rbind per iteration)
time_statsx <- NULL
days_data <- NULL
## define positions
positions <- unique(all_data$variable)
# probability as a function of discharge -----------------------------------
for(p in 1:length(positions)) {
## subset the time series to the current channel position
new_data <- all_data %>%
filter(variable == positions[p])
## define position: third token of e.g. "vel_m_MC" -> "MC"
PositionName <- str_split(positions[p], "_", 3)[[1]]
PositionName <- PositionName[3]
## bind shallow and deeper depths by 0.1 - 10cm & 120cm
## change all prob_fit lower than 0.1 to 0.1
## NOTE(review): unlike the depth loop, no clamping line follows this comment
## here -- prob_fit is left unfloored inside this loop; confirm intended.
peak <- new_data %>%
filter(prob_fit == max(prob_fit)) #%>%
peakQ <- max(peak$Q)
## min_limit computed but not used below -- presumably diagnostic; TODO confirm
min_limit <- filter(new_data, vel_m >0)
min_limit <- min(min_limit$Q)
## Main channel curves
## find roots for each probability
## Q values where the suitability curve crosses the 0.1 probability threshold
newx1a <- RootLinearInterpolant(new_data$Q, new_data$prob_fit, 0.1)
## If more than 4 crossings were found, keep only the outermost pair
## (first and last root), matching the 0.2/0.3 handling below.
if (length(newx1a) > 4) {
  ## BUG FIX: original read `newx1aR[length(newx1a)]` -- `newx1aR` is an
  ## undefined object, so this branch errored whenever > 4 roots occurred.
  newx1a <- c(newx1a[1], newx1a[length(newx1a)])
}
## roots for the 0.2 threshold; keep outermost pair if > 4 crossings
newx2a <- RootLinearInterpolant(new_data$Q, new_data$prob_fit, 0.2)
if(length(newx2a) > 4) {
newx2a <- c(newx2a[1], newx2a[length(newx2a)])
} else {
newx2a <- newx2a
}
newx3a <- RootLinearInterpolant(new_data$Q, new_data$prob_fit, 0.3)
## curve never drops below 0.3: threshold is met from the lowest Q
if(min(new_data$prob_fit)>0.3) {
newx3a <- min(new_data$Q)
} else {
newx3a <- newx3a
}
if(length(newx3a) > 4) {
newx3a <- c(newx3a[1], newx3a[length(newx3a)])
} else {
newx3a <- newx3a
}
## MAKE DF OF Q LIMITS (missing roots become NA)
limits[,p] <- c(newx1a[1], newx1a[2],newx1a[3], newx1a[4],
newx2a[1], newx2a[2],newx2a[3], newx2a[4],
newx3a[1], newx3a[2],newx3a[3],newx3a[4])
# create year_month column
new_datax <- new_data %>% unite(month_year, c(water_year,month), sep="-", remove=F)
# dataframe for stats -----------------------------------------------------
## define critical period or season for adult as all year is critical
winter <- c(1,2,3,4,11,12) ## winter months
summer <- c(5:10) ## summer months
new_datax <- new_datax %>%
mutate(season = ifelse(month %in% winter, "winter", "summer") )
## define equation for roots: expression_Q() builds a logical expression on Q;
## substitute() binds its `limit` symbol to the relevant root vector so
## eval() below tests each row's Q against the threshold bounds
low_thresh <- expression_Q(newx1a, peakQ)
low_thresh <-as.expression(do.call("substitute", list(low_thresh[[1]], list(limit = as.name("newx1a")))))
med_thresh <- expression_Q(newx2a, peakQ)
med_thresh <-as.expression(do.call("substitute", list(med_thresh[[1]], list(limit = as.name("newx2a")))))
high_thresh <- expression_Q(newx3a, peakQ)
high_thresh <-as.expression(do.call("substitute", list(high_thresh[[1]], list(limit = as.name("newx3a")))))
###### calculate amount of time: % of hourly records meeting each threshold,
###### annually and per season within each water year
time_stats <- new_datax %>%
dplyr::group_by(water_year) %>%
dplyr::mutate(Low = sum(eval(low_thresh))/length(DateTime)*100) %>%
dplyr::mutate(Medium = sum(eval(med_thresh))/length(DateTime)*100) %>%
dplyr::mutate(High = sum(eval(high_thresh))/length(DateTime)*100) %>%
ungroup() %>%
dplyr::group_by(water_year, season) %>%
dplyr::mutate(Low.Seasonal = sum(eval(low_thresh))/length(DateTime)*100) %>%
dplyr::mutate(Medium.Seasonal = sum(eval(med_thresh))/length(DateTime)*100) %>%
dplyr::mutate(High.Seasonal = sum(eval(high_thresh))/length(DateTime)*100) %>%
distinct(water_year, Low , Medium , High , Low.Seasonal, Medium.Seasonal, High.Seasonal) %>%
mutate(position= paste(PositionName), Node = NodeName)
time_statsx <- rbind(time_statsx, time_stats)
### count days per month: rleid() numbers each consecutive run of TRUE/FALSE,
### row_number() within a run counts consecutive hours meeting the threshold
new_datax <- new_datax %>%
ungroup() %>%
group_by(month, day, water_year, ID01 = data.table::rleid(eval(low_thresh))) %>%
mutate(Low = if_else(eval(low_thresh), row_number(), 0L)) %>%
ungroup() %>%
group_by(month, day, water_year, ID02 = data.table::rleid(eval(med_thresh))) %>%
mutate(Medium = if_else(eval(med_thresh), row_number(), 0L)) %>%
ungroup() %>%
group_by(month, day, water_year, ID03 = data.table::rleid(eval(high_thresh))) %>%
mutate(High = if_else(eval(high_thresh), row_number(), 0L)) %>%
mutate(position= paste(PositionName)) #%>%
# select(Q, month, water_year, day, ID01, Low, ID02, Medium, ID03, High, position, DateTime, node)
days_data <- rbind(days_data, new_datax)
} ## end 2nd loop
## limits
## note that 0.1 upper/lower limit is max/min Q to adhere to 0.1 bound
limits <- limits %>%
mutate(Species ="SAS", Life_Stage = "Adult", Hydraulic = "Velocity", Node = NodeName)
write.csv(limits, paste("output_data/F1_",NodeName,"_SAS_adult_velocity_Q_limits.csv", sep=""))
## plot thresholds
## NOTE(review): filename says "_adult_depth_..." although this is the
## Velocity section and figure folder -- likely copy-paste slip; confirm
## before renaming (downstream scripts may expect the current name).
file_name = paste("figures/Application_curves/Velocity/", NodeName, "_adult_depth_prob_Q_thresholds.png", sep ="")
png(file_name, width = 500, height = 600)
ggplot(all_data, aes(x = Q, y=prob_fit)) +
geom_line(aes(group = variable, lty = variable)) +
scale_linetype_manual(values= c("dotted", "solid", "dashed"))+
# name="Cross\nSection\nPosition",
# breaks=c("depth_cm_LOB", "depth_cm_MC", "depth_cm_ROB"),
# labels = c("LOB", "MC", "ROB")) +
facet_wrap(~variable, scales="free_x", nrow=3, labeller=labeller(variable = labels)) +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.1, x=limits[1,2]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.1, x=limits[2,2]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.1, x=limits[3,2]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.1, x=limits[4,2]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.2, x=limits[5,2]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.2, x=limits[6,2]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.2, x=limits[7,2]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.2, x=limits[8,2]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.3, x=limits[9,2]), color="blue") +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.3, x=limits[10,2]), color="blue") +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.3, x=limits[11,2]), color="blue") +
geom_point(data = subset(all_data, variable =="vel_m_MC"), aes(y=0.3, x=limits[12,2]), color="blue") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.1, x=limits[1,1]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.1, x=limits[2,1]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.1, x=limits[3,1]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.1, x=limits[4,1]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.2, x=limits[5,1]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.2, x=limits[6,1]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.2, x=limits[7,1]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.2, x=limits[8,1]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.3, x=limits[9,1]), color="blue") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.3, x=limits[10,1]), color="blue") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.3, x=limits[11,1]), color="blue") +
geom_point(data = subset(all_data, variable =="vel_m_LOB"), aes(y=0.3, x=limits[12,1]), color="blue") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.1, x=limits[1,3]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.1, x=limits[2,3]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.1, x=limits[3,3]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.1, x=limits[4,3]), color="green") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.2, x=limits[5,3]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.2, x=limits[6,3]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.2, x=limits[7,3]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.2, x=limits[8,3]), color="red") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.3, x=limits[9,3]), color="blue") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.3, x=limits[10,3]), color="blue") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.3, x=limits[11,3]), color="blue") +
geom_point(data = subset(all_data, variable =="vel_m_ROB"), aes(y=0.3, x=limits[12,3]), color="blue") +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "none") +
labs(title = paste(NodeName, ": Adult/Velocity: Probability ~ Q", sep=""),
y = "Probability",
x = "Q (cfs)") #+ theme_bw(base_size = 15)
dev.off()
## percentage time
melt_time<-reshape2::melt(time_statsx, id=c("season", "position", "water_year", "Node"))
melt_time <- melt_time %>%
rename( Probability_Threshold = variable) %>%
mutate(Species ="SAS", Life_Stage = "Adult", Hydraulic = "Velocity", Node = NodeName)
write.csv(melt_time, paste("output_data/F1_", NodeName, "_SAS_adult_velocity_time_stats.csv", sep=""))
### days per month
days_data <- select(days_data, c(Q, month, water_year, day, ID01, Low, ID02, Medium, ID03, High, position, DateTime, node) )# all probs
melt_data<-reshape2::melt(days_data, id=c("ID01", "ID02", "ID03", "day", "month", "water_year", "Q", "position", "node"))
melt_data <- rename(melt_data, Probability_Threshold = variable,
consec_hours = value)
## count how many full days i.e. 24 hours
total_days01 <- melt_data %>%
filter(Probability_Threshold == "Low") %>%
group_by(ID01, day, month, water_year, position) %>%
summarise(n_hours = max(consec_hours)) %>%
mutate(n_days_low = ifelse(n_hours >= 24, 1, 0)) # %>%
## count the number of days in each month
total_days_per_month01 <- total_days01 %>%
group_by(month, water_year, position) %>%
summarise(days_per_month_low = sum(n_days_low))
total_days02 <- melt_data %>%
filter(Probability_Threshold == "Medium") %>%
group_by(ID02, day, month, water_year, position) %>%
summarise(n_hours = max(consec_hours)) %>%
mutate(n_days_medium = ifelse(n_hours >= 24, 1, 0)) # %>%
total_days_per_month02 <- total_days02 %>%
group_by(month, water_year, position) %>%
summarise(days_per_month_medium = sum(n_days_medium))
# total_days_per_month02
total_days03 <- melt_data %>%
filter(Probability_Threshold == "High") %>%
group_by(ID03, day, month, water_year, position) %>%
summarise(n_hours = max(consec_hours)) %>%
mutate(n_days_high = ifelse(n_hours >= 24, 1, 0)) # %>%
total_days_per_month03 <- total_days03 %>%
group_by(month, water_year, position) %>%
summarise(days_per_month_high = sum(n_days_high))
## combine all thresholds
total_days <- cbind( total_days_per_month01,total_days_per_month02[,4], total_days_per_month03[,4])
# # create year_month column
total_days <- ungroup(total_days) %>%
unite(month_year, water_year:month, sep="-", remove=F) %>%
mutate(Node= paste(NodeName)) #%>%
## convert month year to date format
total_days$month_year <- zoo::as.yearmon(total_days$month_year)
total_days$month_year <- as.Date(total_days$month_year)
## change names of columns
total_days <- rename(total_days, Low = days_per_month_low, Medium = days_per_month_medium, High = days_per_month_high)
## define seasons
winter <- c(1,2,3,4,11,12) ## winter months
summer <- c(5:10) ## summer months
total_days <- total_days %>%
mutate(season = ifelse(month %in% winter, "winter", "summer") )
# ## melt data
melt_days<-reshape2::melt(total_days, id=c("month_year", "water_year", "month", "season", "position", "Node"))
melt_days <- melt_days %>%
rename(Probability_Threshold = variable, n_days = value) %>%
mutate(Species ="SAS", Life_Stage = "Adult", Hydraulic = "Velocity")
## save df
write.csv(melt_days, paste("output_data/F1_", NodeName, "_SAS_adult_velocity_total_days_long.csv", sep="") )
} ## end 1st loop
|
# Shiny UI definition: admin tab for inserting/updating database records.
# UI labels are in Portuguese (user-facing strings, left as-is).
# NOTE(review): the closing parenthesis of this tabItem() call is on the
# following line of the file.
tabItem(
tabName = 'data_update',
# --- Trade-data ingestion: date range, trigger button, HTML log output ---
fluidRow(
column(12, h3('Inserção de dados de negociação'))
),
# Default period is the previous business day (add.bizdays from bizdays pkg).
fluidRow(
column(6, dateRangeInput("dt_range", "Período para inserção", start=add.bizdays(dates=Sys.Date(), n=-1),
end=add.bizdays(dates=Sys.Date(), n=-1)))
),
fluidRow(
column(4, actionButton("run_update_neg", "Baixar arquivos e atualizar banco"))
),
fluidRow(
column(6, htmlOutput('execution_log_neg'))
),
# --- Company-data ingestion: trigger button + log output ---
fluidRow(
column(12, h3('Inserção de dados de empresas'))
),
fluidRow(
column(4, actionButton("run_update_corp", "Obter dados e atualizar banco"))
),
fluidRow(
column(6, htmlOutput('execution_log_corp'))
),
# --- Data correction: rename a company (old name -> new name) ---
fluidRow(
column(12, h3('Alteração de dados'))
),
fluidRow(
column(3, textInput("from_cpy", "Nome da empresa a ser alterada")),
column(3, textInput("to_cpy", "Nome novo")),
column(3, br(), actionButton("run_update_cpy", "Alterar empresa"))
),
fluidRow(
column(6, htmlOutput('execution_log_updt'))
)
) | /tabs/data_update.R | no_license | ogaw4/labbd_fase3 | R | false | false | 1,048 | r |
# Shiny UI definition: admin tab for inserting/updating database records
# (duplicate copy of the tabItem above; kept byte-identical apart from comments).
tabItem(
tabName = 'data_update',
# --- Trade-data ingestion: date range, trigger button, HTML log output ---
fluidRow(
column(12, h3('Inserção de dados de negociação'))
),
# Default period is the previous business day (add.bizdays from bizdays pkg).
fluidRow(
column(6, dateRangeInput("dt_range", "Período para inserção", start=add.bizdays(dates=Sys.Date(), n=-1),
end=add.bizdays(dates=Sys.Date(), n=-1)))
),
fluidRow(
column(4, actionButton("run_update_neg", "Baixar arquivos e atualizar banco"))
),
fluidRow(
column(6, htmlOutput('execution_log_neg'))
),
# --- Company-data ingestion: trigger button + log output ---
fluidRow(
column(12, h3('Inserção de dados de empresas'))
),
fluidRow(
column(4, actionButton("run_update_corp", "Obter dados e atualizar banco"))
),
fluidRow(
column(6, htmlOutput('execution_log_corp'))
),
# --- Data correction: rename a company (old name -> new name) ---
fluidRow(
column(12, h3('Alteração de dados'))
),
fluidRow(
column(3, textInput("from_cpy", "Nome da empresa a ser alterada")),
column(3, textInput("to_cpy", "Nome novo")),
column(3, br(), actionButton("run_update_cpy", "Alterar empresa"))
),
fluidRow(
column(6, htmlOutput('execution_log_updt'))
)
) |
# Exploratory data analysis : nature of covariates and distribution of response, presence of outliers
# or missing values, range of variables.
# Loads precomputed country-level COVID series from RDS files; later code
# indexes them by country name (e.g. DeathsByCountry["Kuwait",]), so each is
# a matrix-like object with one row per country.
source("main.R") # get the function plot.Country
library(gam) # not default lib in my current version of R
library(ggplot2)
library(dyn)
DeathsByCountry <- unlist(readRDS("DeathsByCountry.rds")[[1]])
CasesByCountry <- unlist(readRDS("CasesByCountry.rds")[[1]])
CountryPop <- unlist(readRDS("CountryPop.rds")[[1]])
CountryNames <- readRDS("CountryNames.rds")
# Explore some patterns in Asian, in specific in Kuwait,Saudi_Arabia and United_Arab_Emirates
# Continent-level helpers (deaths.cases.Continent, plot.all.countries.Continent,
# dist.deaths.cases.Continent) come from main.R sourced above.
#Europe
par(mar=c(1,1,1,1))
deaths.cases.Continent("Europe")
par(mfrow=c(1,3))
plot.Country("Kuwait",names=CountryNames, deaths=DeathsByCountry, cases=CasesByCountry, pop=CountryPop,plot=T,plot.cumul = T,xmin=50)
plot.Country("Saudi_Arabia",names=CountryNames, deaths=DeathsByCountry, cases=CasesByCountry, pop=CountryPop, plot=T,plot.cumul=T,xmin=50)
plot.Country("United_Arab_Emirates", names=CountryNames, deaths=DeathsByCountry, cases=CasesByCountry, pop=CountryPop, plot=T,plot.cumul=T,xmin=50)
#plots Asia
plot.all.countries.Continent("Asia")
dist.deaths.cases.Continent("Asia")
### Kuwait (original section header said "UAE" — fixed; the code below is Kuwait)
# Distributions of daily deaths/cases, raw and log(1+x), plus outlier check.
kuw_d <- DeathsByCountry["Kuwait",]
kuw_c<- CasesByCountry["Kuwait",]
par(mfrow=c(1,2))
hist(kuw_c,main = "Kuwait cases density",breaks=15,xlab = "New cases per day")
abline(v = mean(kuw_c), col = "blue", lwd = 2)
hist(kuw_d,main="Kuwait deaths densitiy",breaks = 15,xlab="Deaths per day")
abline(v = mean(kuw_d), col = "blue", lwd = 2)
par(mfrow=c(1,2))
boxplot(kuw_c,main= "Kuwait",ylab="Cases per day")
boxplot(kuw_d,main= "Kuwait",ylab="Death per day")
# log(1+x) transform so zero-count days remain finite.
hist(log(1+kuw_c),main = "Kuwait log cases density",breaks=15,xlab = "New cases per day(log)")
abline(v = mean(log(1+kuw_c)), col = "blue", lwd = 2)
hist(log(1+kuw_d),main="Kuwait log deaths densitiy",breaks = 15,xlab="Deaths per day(log)")
abline(v = mean(log(1+kuw_d)), col = "blue", lwd = 2)
par(mfrow=c(1,2))
boxplot(log(1+kuw_c),main= "Kuwait",ylab="Cases per day(log)")
boxplot(log(1+kuw_d),main= "Kuwait",ylab="Death per day(log)")
###Outliers, missing values
tail(sort(kuw_d))#8,9,9,9,9,10
tail(sort(kuw_c))#958,965,973,975,977,987
#range of variables
range(kuw_d) #range of deaths per day 0-10
range(kuw_c) #range of cases per day 0-987
### Saudi Arabia: distributions of daily deaths/cases, raw and log(1+x).
sau_d <- DeathsByCountry["Saudi_Arabia",]
sau_c<- CasesByCountry["Saudi_Arabia",]
par(mfrow=c(1,2))
hist(sau_c,main = "Saudi_Arabia cases density",breaks=15,xlab = "New cases per day")
abline(v = mean(sau_c), col = "blue", lwd = 2)
hist(sau_d,main="Saudi_Arabia deaths densitiy",breaks = 15,xlab="Deaths per day")
abline(v = mean(sau_d), col = "blue", lwd = 2)
par(mfrow=c(1,2))
# BUG FIX: the original plotted uae_c/uae_d here (copy-paste from the UAE
# section). Those objects are only defined further down the script, so a
# top-to-bottom run failed with "object 'uae_c' not found" — and would have
# shown the wrong country's data in any case. Use the Saudi vectors.
boxplot(sau_c,main= "Saudi_Arabia",ylab="Cases per day")
boxplot(sau_d,main= "Saudi_Arabia",ylab="Death per day")
# log(1+x) transform so zero-count days remain finite.
hist(log(1+sau_c),main = "Saudi_Arabia log cases density",breaks=15,xlab = "New cases per day(log)")
abline(v = mean(log(1+sau_c)), col = "blue", lwd = 2)
hist(log(1+sau_d),main="Saudi_Arabia log deaths densitiy",breaks = 15,xlab="Deaths per day(log)")
abline(v = mean(log(1+sau_d)), col = "blue", lwd = 2)
par(mfrow=c(1,2))
boxplot(log(1+sau_c),main= "Saudi_Arabia",ylab="Cases per day(log)")
boxplot(log(1+sau_d),main= "Saudi_Arabia",ylab="Death per day(log)")
###Outliers, missing values
tail(sort(sau_d))#13,14,16,18,21,23
tail(sort(sau_c))#2577,2598,2613,2628,2723
par(mfrow=c(1,3))
###range of variables
range(sau_d) #range of deaths per day 0-23
range(sau_c) #range of cases per day 0-2723
### United Arab Emirates: distributions of daily deaths/cases, raw and log(1+x).
uae_d <- DeathsByCountry["United_Arab_Emirates",]
uae_c<- CasesByCountry["United_Arab_Emirates",]
par(mfrow=c(1,2))
hist(uae_c,main = "UAE cases density",breaks=15,xlab = "New cases per day")
abline(v = mean(uae_c), col = "blue", lwd = 2)
hist(uae_d,main="UAE deaths densitiy",breaks = 15,xlab="Deaths per day")
abline(v = mean(uae_d), col = "blue", lwd = 2)
par(mfrow=c(1,2))
boxplot(uae_c,main= "UAE",ylab="Cases per day")
boxplot(uae_d,main= "UAE",ylab="Death per day")
# log(1+x) transform so zero-count days remain finite.
hist(log(1+uae_c),main = "UAE log cases density",breaks=15,xlab = "New cases per day(log)")
abline(v = mean(log(1+uae_c)), col = "blue", lwd = 2)
hist(log(1+uae_d),main="UAE log deaths densitiy",breaks = 15,xlab="Deaths per day(log)")
abline(v = mean(log(1+uae_d)), col = "blue", lwd = 2)
par(mfrow=c(1,2))
boxplot(log(1+uae_c),main= "UAE",ylab="Cases per day(log)")
boxplot(log(1+uae_d),main= "UAE",ylab="Death per day(log)")
###Outliers, missing values
tail(sort(uae_d))#9,9,9,9,10,11
tail(sort(uae_c))#828,862,882,900,903,943
par(mfrow=c(1,3))
plot.Country("United_Arab_Emirates",names=CountryNames, deaths=DeathsByCountry, cases=CasesByCountry, pop=CountryPop,plot=T,plot.cumul = T,xmin=50)
###range of variables
range(uae_d) #range of deaths per day 0-11
range(uae_c) #range of cases per day 0-943
| /archive/EDA-Mateo.R | no_license | baohien97/EPFL-ModReg | R | false | false | 4,792 | r | # Exploratory data analysis : nature of covariates and distribution of response, presence of outliers
# or missing values, range of variables.
# (Duplicate copy of the EDA script header; comments added, code unchanged.)
# Loads precomputed country-level COVID series from RDS files; later code
# indexes them by country name (e.g. DeathsByCountry["Kuwait",]).
source("main.R") # get the function plot.Country
library(gam) # not default lib in my current version of R
library(ggplot2)
library(dyn)
DeathsByCountry <- unlist(readRDS("DeathsByCountry.rds")[[1]])
CasesByCountry <- unlist(readRDS("CasesByCountry.rds")[[1]])
CountryPop <- unlist(readRDS("CountryPop.rds")[[1]])
CountryNames <- readRDS("CountryNames.rds")
# Explore some patterns in Asian, in specific in Kuwait,Saudi_Arabia and United_Arab_Emirates
#Europe
par(mar=c(1,1,1,1))
deaths.cases.Continent("Europe")
par(mfrow=c(1,3))
plot.Country("Kuwait",names=CountryNames, deaths=DeathsByCountry, cases=CasesByCountry, pop=CountryPop,plot=T,plot.cumul = T,xmin=50)
plot.Country("Saudi_Arabia",names=CountryNames, deaths=DeathsByCountry, cases=CasesByCountry, pop=CountryPop, plot=T,plot.cumul=T,xmin=50)
plot.Country("United_Arab_Emirates", names=CountryNames, deaths=DeathsByCountry, cases=CasesByCountry, pop=CountryPop, plot=T,plot.cumul=T,xmin=50)
#plots Asia
plot.all.countries.Continent("Asia")
dist.deaths.cases.Continent("Asia")
### Kuwait (original section header said "UAE" — fixed; the code below is Kuwait)
# Distributions of daily deaths/cases, raw and log(1+x), plus outlier check.
kuw_d <- DeathsByCountry["Kuwait",]
kuw_c<- CasesByCountry["Kuwait",]
par(mfrow=c(1,2))
hist(kuw_c,main = "Kuwait cases density",breaks=15,xlab = "New cases per day")
abline(v = mean(kuw_c), col = "blue", lwd = 2)
hist(kuw_d,main="Kuwait deaths densitiy",breaks = 15,xlab="Deaths per day")
abline(v = mean(kuw_d), col = "blue", lwd = 2)
par(mfrow=c(1,2))
boxplot(kuw_c,main= "Kuwait",ylab="Cases per day")
boxplot(kuw_d,main= "Kuwait",ylab="Death per day")
# log(1+x) transform so zero-count days remain finite.
hist(log(1+kuw_c),main = "Kuwait log cases density",breaks=15,xlab = "New cases per day(log)")
abline(v = mean(log(1+kuw_c)), col = "blue", lwd = 2)
hist(log(1+kuw_d),main="Kuwait log deaths densitiy",breaks = 15,xlab="Deaths per day(log)")
abline(v = mean(log(1+kuw_d)), col = "blue", lwd = 2)
par(mfrow=c(1,2))
boxplot(log(1+kuw_c),main= "Kuwait",ylab="Cases per day(log)")
boxplot(log(1+kuw_d),main= "Kuwait",ylab="Death per day(log)")
###Outliers, missing values
tail(sort(kuw_d))#8,9,9,9,9,10
tail(sort(kuw_c))#958,965,973,975,977,987
#range of variables
range(kuw_d) #range of deaths per day 0-10
range(kuw_c) #range of cases per day 0-987
### Saudi Arabia: distributions of daily deaths/cases, raw and log(1+x).
sau_d <- DeathsByCountry["Saudi_Arabia",]
sau_c<- CasesByCountry["Saudi_Arabia",]
par(mfrow=c(1,2))
hist(sau_c,main = "Saudi_Arabia cases density",breaks=15,xlab = "New cases per day")
abline(v = mean(sau_c), col = "blue", lwd = 2)
hist(sau_d,main="Saudi_Arabia deaths densitiy",breaks = 15,xlab="Deaths per day")
abline(v = mean(sau_d), col = "blue", lwd = 2)
par(mfrow=c(1,2))
# BUG FIX: the original plotted uae_c/uae_d here (copy-paste from the UAE
# section). Those objects are only defined further down the script, so a
# top-to-bottom run failed with "object 'uae_c' not found" — and would have
# shown the wrong country's data in any case. Use the Saudi vectors.
boxplot(sau_c,main= "Saudi_Arabia",ylab="Cases per day")
boxplot(sau_d,main= "Saudi_Arabia",ylab="Death per day")
# log(1+x) transform so zero-count days remain finite.
hist(log(1+sau_c),main = "Saudi_Arabia log cases density",breaks=15,xlab = "New cases per day(log)")
abline(v = mean(log(1+sau_c)), col = "blue", lwd = 2)
hist(log(1+sau_d),main="Saudi_Arabia log deaths densitiy",breaks = 15,xlab="Deaths per day(log)")
abline(v = mean(log(1+sau_d)), col = "blue", lwd = 2)
par(mfrow=c(1,2))
boxplot(log(1+sau_c),main= "Saudi_Arabia",ylab="Cases per day(log)")
boxplot(log(1+sau_d),main= "Saudi_Arabia",ylab="Death per day(log)")
###Outliers, missing values
tail(sort(sau_d))#13,14,16,18,21,23
tail(sort(sau_c))#2577,2598,2613,2628,2723
par(mfrow=c(1,3))
###range of variables
range(sau_d) #range of deaths per day 0-23
range(sau_c) #range of cases per day 0-2723
### United Arab Emirates: distributions of daily deaths/cases, raw and log(1+x).
uae_d <- DeathsByCountry["United_Arab_Emirates",]
uae_c<- CasesByCountry["United_Arab_Emirates",]
par(mfrow=c(1,2))
hist(uae_c,main = "UAE cases density",breaks=15,xlab = "New cases per day")
abline(v = mean(uae_c), col = "blue", lwd = 2)
hist(uae_d,main="UAE deaths densitiy",breaks = 15,xlab="Deaths per day")
abline(v = mean(uae_d), col = "blue", lwd = 2)
par(mfrow=c(1,2))
boxplot(uae_c,main= "UAE",ylab="Cases per day")
boxplot(uae_d,main= "UAE",ylab="Death per day")
# log(1+x) transform so zero-count days remain finite.
hist(log(1+uae_c),main = "UAE log cases density",breaks=15,xlab = "New cases per day(log)")
abline(v = mean(log(1+uae_c)), col = "blue", lwd = 2)
hist(log(1+uae_d),main="UAE log deaths densitiy",breaks = 15,xlab="Deaths per day(log)")
abline(v = mean(log(1+uae_d)), col = "blue", lwd = 2)
par(mfrow=c(1,2))
boxplot(log(1+uae_c),main= "UAE",ylab="Cases per day(log)")
boxplot(log(1+uae_d),main= "UAE",ylab="Death per day(log)")
###Outliers, missing values
tail(sort(uae_d))#9,9,9,9,10,11
tail(sort(uae_c))#828,862,882,900,903,943
par(mfrow=c(1,3))
plot.Country("United_Arab_Emirates",names=CountryNames, deaths=DeathsByCountry, cases=CasesByCountry, pop=CountryPop,plot=T,plot.cumul = T,xmin=50)
###range of variables
range(uae_d) #range of deaths per day 0-11
range(uae_c) #range of cases per day 0-943
|
Sys.setlocale(category = "LC_ALL", locale = "UTF-8")
library("plyr")
library("rvest")
library("dplyr")
library("ggplot2")
#### Load and format data ####
# Load the raw candidate-survey responses and drop exact duplicate rows.
data.org <- read.csv(file = "dk_ft15_politician_responses.csv", header = TRUE)
data <- unique(data.org)
## Recode the Danish Likert answers in the question columns (17-31) to a
## 5..1 numeric scale and convert each column to numeric, in a single pass.
## Replacement order matters and mirrors the original gsub chain.
for (col in 17:31) {
  answers <- data[, col]
  answers <- gsub("Helt enig", 5, answers)
  answers <- gsub("Delvist enig", 4, answers)
  answers <- gsub("Hverken enig eller uenig", 3, answers)
  answers <- gsub("Delvist uenig", 2, answers)
  answers <- gsub("Helt uenig", 1, answers)
  data[, col] <- as.numeric(answers)
}
#Removing the double Kristian Andersen
# data <- data %>% # A candidate, Kristian Andersen, has several entries, these are removed. NOTE: This removes one candidate
# group_by(name) %>%
# filter(row_number() == 1 ) %>% # Method: data is grouped on name variable, and groups with >1 name are discarded
# ungroup()
## Create mapping of response variables,
# Use this to copy into code: -c(name, party, storkreds, lokalkreds, age, is.male,
# title, location, elected, votes.pers, votes.all, valgt.nr,
# stedfor.nr, opstillet.i.kreds.nr, nomineret.i.kreds.nr,
# ran.last.election)
## Colour palette and display-name lookup for the parties, keyed by the party
## codes in the order they first appear in the data (used by all later plots).
party.levels <- unique(as.character(data$party))
colormapping <- setNames(
  c("red",
    "darkorchid4",
    "lightgreen",
    "hotpink",
    "cyan1",
    "grey",
    "yellow",
    "darkblue",
    "orange",
    "darkolivegreen4",
    "lightgrey"),
  party.levels
)
namemapping <- setNames(
  c("Socialdemokratiet",
    "Radikale",
    "Konservative",
    "SF",
    "Liberal Alliance",
    "Kristendemokraterne",
    "Dansk Folkeparti",
    "Venstre",
    "Enhedslisten",
    "Alternativet",
    "Uden for partierne"),
  party.levels
)
#### Data description ####
########## Mean responses
## -- Add mean response for each party, for each question -- ##
# Mean answer (1-5) per party for each of the 15 question columns;
# metadata columns are excluded from the summary.
party.means <- data %>%
filter(party != 1) %>%
group_by(party) %>%
summarize_each(funs(mean), -c(name, party, storkreds, lokalkreds, age, is.male,
title, location, elected, votes.pers, votes.all, valgt.nr,
stedfor.nr, opstillet.i.kreds.nr, nomineret.i.kreds.nr,
ran.last.election))
## --- Plot average response to each question, by party --- #
# Construct labels with question text to be plotted
labels <- data.frame(
question = names(party.means[2:16]),
position.y = 16:2+0.5, # position is based on the plot below
position.x = rep(3, 15) # position is based on the plot below
)
# Build plot
p <- ggplot(data = party.means) #initiate plot
#Loop over each question, and plot the party means
# NOTE(review): aes_string() is deprecated in current ggplot2; works here but
# should eventually move to aes(.data[[...]]).
for(i in 2:16){
p <- p +
geom_point(aes_string(
y = 18-i, # Split questions by y-coordinates for each question
x = paste("party.means$", names(party.means)[i], sep = ""), # Let party means be x-axis
fill = "party"
), colour = "black", alpha=0.8, shape = 21, size = 10 )
}
#Add questions
p <- p + geom_text(data = labels,
aes( y = position.y, x = position.x, label = question),
size = 3)
#Party colors
p <- p + scale_fill_manual ( values = colormapping )
#Titles and axis
p <- p +
theme_minimal() +
theme(axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.title.x = element_blank(),
panel.grid.minor=element_blank(),
legend.position="top") +
scale_y_continuous(breaks=seq(1, 16, 1)) +
scale_x_continuous(breaks=c(1,3,5),
labels=c("Highly disagree", "Neither agree nor disagree", "Highly agree"))+
ggtitle("Mean response to survey \n questions, by party")
p
## --- How close are parties to the middle? -------
#Calculate 'centerness' NOTE: Requires above code to have been run already, to create party.means
# Distance of each party's mean answers from the neutral midpoint (3),
# averaged over the 15 questions -> one "extremeness" score per party.
party.middle <- party.means
party.middle[,2:16] <- abs(party.middle[,2:16]-3) #Re-align around center (defining center = 0) and take absolutes
party.middle[,17] <- rowMeans(party.middle[,2:16]) #Compute averages
#simplify dataframe
# V17 is the auto-generated name of the rowMeans column added above.
party.middle <- party.middle %>%
select( party = party, mean.dist.from.center = V17) #Select only the two relevant variables
p <- ggplot(data = party.middle, aes( x = reorder(party, mean.dist.from.center),
y = mean.dist.from.center, fill = party)) +
geom_bar(stat = "identity",
color = "black"
) +
scale_fill_manual( values = colormapping) +
coord_flip() +
theme_minimal() +
ylab("Average distance from 'neither agree nor disagree',\n on 0-2 scale") +
xlab("")+
ggtitle("What parties have the most extreme opinions?")
p
#### Variance in responses -------------------------- NOTE: useless. Doesn't measure the right thing.
# (Author's own note above: this averages answers per party first, so the
# "std" below measures spread ACROSS questions, not across candidates.)
data.var <- data %>%
group_by(party) %>%
select(party, 17:31) %>%
summarize_each(
funs(mean)
)
#This calculates variance in responses by party (but it's a non-informative measure)
# Sample standard deviation of the 15 per-question party means (n - 1 = 14).
for (i in 1:nrow(data.var)){
data.var$party.std[i] <- sqrt(sum((data.var[i,2:16] - rep(mean(as.numeric(data.var[i,2:16])), 15))^2)/ (15 - 1) )
}
p <- ggplot( data = data.var, aes( x = reorder(party, party.std), y = party.std, fill = party) ) +
geom_bar(stat = "identity") +
scale_fill_manual(values = colormapping) +
coord_flip()+
theme_minimal() +
ylab("..") +
xlab("..")+
ggtitle("...")
p
# Bar chart of per-party response spread, parties ordered by spread.
# BUG FIX: the original wrote reorder(data.var, party.std), passing the whole
# data frame as the vector to reorder — that errors when the plot is rendered.
# The x aesthetic must reorder the `party` column, as in the plot above.
p <- ggplot(data = data.var, aes( x = reorder(party, party.std),
y = party.std, fill = party)) +
geom_bar(stat = "identity",
color = "black" ) +
scale_fill_manual( values = colormapping) +
coord_flip() +
theme_minimal() +
ylab("..") +
xlab("..")+
ggtitle("...")
p
### Principal Component Analysis ----
# PCA on the 15 numeric question columns; first five component scores are
# stored as new columns 32-36 of data.pc.
pc <- princomp(data[,17:31], cor=TRUE, scores=TRUE)
data.pc <- data
data.pc[32:36] <- pc$scores[,1:5]
#Pretty Plot#
# data.pc = filter(data.pc, party!="1") #Filter away candidates outside the parties
# Scatter of candidates on the first two principal components.
p <- ggplot(data = data.pc, aes(x = data.pc[,32], y = data.pc[,33] )) +
geom_point(aes(fill = party), colour = "black", alpha=0.8, shape = 21, size = 10) +
scale_fill_manual(values = colormapping) +
theme_minimal()
p
## Let's try and divide the questions into two groups of questions:
#redistribution and value-based policy questions
#Splitting the dataset
redist <- data %>% select (1:16,18:19,23:24,26,29,31)
value <- data %>% select (1:17, 20:22,25, 27:28,30)
##Do PCA analysis on both subsets and restore 5 first components
pc1 <- princomp(redist[,17:23], cor = T, scores = T)
redist[24:28] <- pc1$scores[,1:5]
pc2 <- princomp(value[,17:24], cor = T, scores = T)
value[25:29] <- pc2$scores[,1:5]
##Compute summary statistics on components
# NOTE(review): `loadings` is not a princomp() argument — it appears to be
# silently ignored here; presumably print(..., loadings = TRUE) on the summary
# was intended. Confirm before relying on the printed output.
summary(princomp(redist[,17:23], loadings = T ))
summary(princomp(value[,17:24], loadings = T ))
##Add the first component from each subset to original data in order to plot in same plot
data.pc[37] <- pc1$scores[,1]
data.pc[38] <- pc2$scores[,1]
##The PCA - using first component from each subset analysis
p <- ggplot(data.pc, aes(x = data.pc[,37], y=data.pc[,38])) +
geom_point(aes(fill = party), colour = "black", alpha=0.8, shape = 21, size = 10) +
scale_fill_manual(values = colormapping) +
theme_minimal()
p
#Faceted Party Plot#
data.pc = filter(data.pc) #Filter away candidates outside the parties
# Point size scales with sqrt(personal votes) so AREA is proportional to votes.
p <- ggplot(data = data.pc, aes(x = data.pc[,32], y = data.pc[,33], size = sqrt(votes.pers/pi))) +
geom_point(aes(fill = party), colour = "black",
alpha=0.8, shape = 21) +
scale_size_continuous( range = c(1,25) ) +
scale_fill_manual(values = colormapping) +
theme_minimal() +
theme(legend.position = "none") +
facet_wrap(~ party)
p
# library(ggfortify)
# autoplot(prcomp(data[,17:31]), loadings = TRUE, loadings.colour = 'blue',
#          loadings.label = TRUE, loadings.label.size = 3)
#### Decision tree analysis ####
# Classification tree predicting a candidate's party from their 15 answers.
library(rpart)
set.seed(1)
# separate into training and test data (2/3 train, 1/3 test)
train <- sample( x = 1:nrow(data), size = 2/3 * nrow(data), replace = FALSE)
data.train <- data[train, ]
data.train <- data.train[,c(2,17:31)]
# Danish short names for the question columns (kept identical for train/test/pred).
names(data.train) = c("party","uddannelse","forebyggelse","sundhed","velfærd","arb1","arb2","økonomi","trafik","ret","social","integration","eu","udvikling","miljø","kultur")
data.test <- data[-train,]
data.test <- data.test[,c(2,17:31)]
names(data.test) = c("party","uddannelse","forebyggelse","sundhed","velfærd","arb1","arb2","økonomi","trafik","ret","social","integration","eu","udvikling","miljø","kultur")
# Fit decision tree
model = rpart(party ~ ., data = data.train, method = "class")
partychoice = predict(model, newdata = data.test, type = "class")
# plot the model
library("rpart.plot")
prp(model, box.col = "lightblue", border.col = "darkblue", shadow.col = "lightgrey", split.cex = 0.7,split.font = 4, split.col = "darkblue", split.border.col = 9, split.shadow.col = "lightgrey", nn.col = "darkred")
# variable importance
v.importance <- data.frame(model$variable.importance)
# run the model on the whole dataset
data.pred <- data[,c(2,17:31)]
names(data.pred) <- c("party","uddannelse","forebyggelse","sundhed","velfærd","arb1","arb2","økonomi","trafik","ret","social","integration","eu","udvikling","miljø","kultur")
pred = data.frame(predict(model, newdata = data.pred, type = "class"))
data.pred <- cbind(data.pred, pred)
# homogen = 1 if the tree predicts the candidate's actual party (column 17 is
# the appended prediction column).
data.pred$homogen = ifelse(data.pred$party == data.pred[,17], 1,0 )
data.pred = mutate(data.pred, votes = data$votes.pers)
# how is the mean personal votes for "homogenious" candidates versus "non-homogenious"
homogenious <- data.pred %>%
group_by(homogen) %>%
summarise(meanvotes = mean(votes))
#### Distances between points #### ---------------------------
# Construct matrix of Euclidean distances between all candidates, in all dimensions
df.distance <- data[,17:31]
#Select only questions
rownames(df.distance) <- 1:nrow(df.distance) #Set names of rows
names(df.distance)[1:15] <- 1:15 #Simplify variable names
#Compute distance matrix
dist.eucl <- dist(df.distance) %>%
as.matrix() %>%
as.data.frame()
#Make a smaller matrix, containing only the distance to 30 nearest candidates, for each candidate
# NOTE(review): this grows a data.frame column-by-column inside the loop
# (quadratic copying); preallocating a matrix would be much faster.
cand.dist <- data.frame()
for (i in 1:ncol(dist.eucl)) {
cand.dist[1:30, i] <- sort(dist.eucl[,i])[1:30]
}
# Row 1 of cand.dist is always distance 0 (the candidate to itself), so row 2
# is the nearest OTHER candidate.
cand.dist.one <- t(cand.dist[2,])
#Average distance to five nearest candidates
nearest.five.mean <- rep(0, ncol(dist.eucl))
for (i in 1:ncol(dist.eucl)) {
nearest.five.mean[i] <- mean(cand.dist[2:6,i])
}
#Add distance measures to principal component dataframe
data.pc$nearest.cand <- as.numeric(cand.dist.one )
data.pc$nearest.five.mean <- nearest.five.mean
#Test plot of nearest candidates (note that distance is measured in many more dimensions than those plotted)
p <- ggplot(data = data.pc, aes(x = data.pc[,32], y = data.pc[,33] )) +
geom_point(aes(fill = nearest.cand), colour = "black", alpha=0.8, shape = 21, size = 10) +
scale_fill_continuous(low = "darkred", high = "green") +
theme_minimal()
p
#Test plot of mean distance to five nearest candidates (note that distance is measured in many more dimensions than those plotted)
p <- ggplot(data = data.pc, aes(x = data.pc[,32], y = data.pc[,33] )) +
geom_point(aes(fill = nearest.five.mean), colour = "black", alpha=0.8, shape = 21, size = 10) +
scale_fill_continuous(low = "darkred", high = "green") +
theme_minimal()
p
# THE MILLION DOLLAR PLOT (if it worked, but it doesn't)
# Regress log(personal votes) on crowdedness around the candidate.
p <- ggplot(data = filter(data.pc, votes.pers > 10 & nearest.five.mean >0), aes(x = nearest.five.mean, y = votes.pers )) +
geom_point() +
scale_y_log10() +
geom_smooth(method=lm, col = "red")+
# scale_fill_continuous(low = "darkred", high = "green") +
theme_minimal()
p
#### Agreement between candidates, Altinget definition #### --------------------------------------------
### Construct matrix of agreement between all candidates ###
# Agreement(a, b) = sum over 15 questions of (4 - |a_q - b_q|) / 60, i.e. 1 for
# identical answers, scaled down as answers diverge.
# Import and transpose responses
df.distance <- t(data[,17:31])
#Create empty matrix
cand.distance <- matrix(nrow = ncol(df.distance), ncol = ncol(df.distance))
#Fill out matrix
# NOTE(review): O(n^2) double loop; the matrix is symmetric, so half the work
# (and the per-row print) could be skipped.
for (k in 1:nrow(cand.distance)){
for (i in 1:ncol(cand.distance)) {
cand.distance[k,i] <- sum((-abs(df.distance[,k] - df.distance[,i])+4) / 60) #Use Altingets definition of Agreement (see below)
}
print(k)
}
rm(df.distance)
###Average agreement with five nearest candidates
#Create average 'agreement' with five closest candidate for each candidate
# Index [2:6] skips position 1, which is the candidate's agreement with itself.
agree.five.mean <- data.frame() #Empty frame
for (i in 1:ncol(cand.distance)) {
agree.five.mean[1, i] <- sort(cand.distance[,i], decreasing = TRUE)[2:6] %>% #Choose top 5 of each candidates agreement
mean() # Take the mean
}
agree.five.mean <- t(agree.five.mean) #transpose before merging with original data frame
### Test results in PCA plot
#Add distance measures to principal component dataframe
data.pc$agree.five.mean <- as.numeric(agree.five.mean)
### Plot
# Plot of mean agreement with five nearest candidates
p <- ggplot(data = data.pc, aes(x = data.pc[,32], y = data.pc[,33] )) +
geom_point(aes(fill = agree.five.mean), colour = "black", alpha=0.8, shape = 21, size = 10) +
scale_fill_continuous(low = "green", high = "red") +
theme(legend.position = "none") +
facet_wrap(~ party) +
theme_minimal()
p
# THE MILLION DOLLAR PLOT (if it worked, but it doesn't)
# - Regressing personal votes on average agreement with five nearest candidates
p <- ggplot(data = filter(data.pc, votes.pers > 10), aes(x = agree.five.mean, y = votes.pers )) +
geom_point() +
scale_y_log10() +
geom_smooth(method=lm, col = "red")+
theme_minimal()
p
##### Party center analysis -------------------------------------------------------
## Per-party mean answer ("party centre") for each of the 15 questions.
## BUG FIX: the original pipeline ended with a trailing `%>%` that piped the
## summarised data frame straight into the `for` loop below, so `centers` was
## never assigned the summary (the loop became the pipe's right-hand side).
## The pipe is now terminated before the loop runs.
centers <- data %>%
select(party, 17:31) %>%
group_by(party) %>%
summarize_each( funs(mean) )
## Agreement of every candidate with their own party centre, using the same
## Altinget agreement formula as the candidate-pair matrix above.
for (i in seq_len(nrow(data.pc))) {
par <- data.pc$party[i]  # candidate's party code (shadows graphics::par as a variable only)
data.pc$agree.party.mean[i] <- sum((-abs(data.pc[i,17:31] - filter(centers, party == par)[,2:16])+4) / 60)
print(i)
}
# Party-level "cohesion": mean candidate-to-own-party-centre agreement, in %.
# Depends on data.pc$agree.party.mean computed in the loop above.
party.centers <- data.pc %>%
group_by(party) %>%
summarize(
average.agreement = mean(agree.party.mean) * 100
) %>%
arrange(desc(average.agreement))
party.centers
p <- ggplot( data = party.centers, aes( x = reorder(party, average.agreement), y = average.agreement, fill = party) ) +
geom_bar(stat = "identity") +
scale_fill_manual(values = colormapping) +
coord_flip()+
theme_minimal() +
ylab("..") +
# ylim(60, 100)+
xlab("..")+
ggtitle("...")
p
#### Agreement with other candidates, full melted data set #### ---------------------------------------------------
# NOTE(review): removed a stray git merge-conflict marker ("<<<<<<< HEAD")
# that was left here and made the script unparseable.
### Goal: the dataset should look something like this #
# Name1               name2            party     lokalkreds  storkreds   agreement
# navn navnsen        esben lunde      venstre   xxx         xxxxx       88 %
# navn navnsen        lars l?kke       venstre   xxx         xxxxx       58 %
# navn navnsen        pia K            venstre   xxx         xxxxx       42 %
# .....
# .....
# .....
# esben lunde         navn navnsen     o         xxx         xxxxx       88 %
# esben lunde         ...
# esben lunde         ...
# esben lunde         ...
# Step 1: Add names, party, lokalkreds and storkreds to the dataframe with full distances
# Step 2: Melt the dataframe
# Step 3: Compute the distance for each candidate to the wanted other candidates (party, kreds, etc.)
# Step 4: Add distance measures as a single variable to the original dataset
### Step 1: Add names, party, lokalkreds and storkreds to the dataframe with full distances
# NOTE(review): View() opens an interactive viewer and should not be in a
# non-interactive script.
View(cand.distance)
cand.distance <- cbind(data[,c(1,2,3,4)], cand.distance)
# Work around the *Kristian Andersen* mistake: This should be checked, if Kristian Andersen is fixed.
#Add names to rows
# NOTE(review): hard-coded row indices (517/518/592/593) and the column range
# 5:728 below are fragile — they break if the input data changes size or order.
cand.distance[,1] <- as.character(cand.distance[,1])
cand.distance[517,1] <- "Kristian Andersen_K1"
cand.distance[518,1] <- "Kristian Andersen_K2"
cand.distance[592,1] <- "Kristian Andersen_V1"
cand.distance[593,1] <- "Kristian Andersen_V2"
cand.distance[,1] <- as.factor(cand.distance[,1])
cand.distance2 <- cand.distance
#Put names on columns as well
names(cand.distance)[5:728] <- as.character(cand.distance[,1])
#Load libraries
library(reshape2)
#Melt dataframe to obtain a 'long' version of the above distance matrix
melted.distance <- melt(data = cand.distance,
id.vars = c(1,2,3,4),
value.name = "agreement")
#Add candidate info to both 'sides' of the list (such that info is attached to both names in every row)
cand.info <- cand.distance[,1:4]
melted.distance <- left_join(melted.distance, cand.info, by = c("variable" = "name"))
rm(cand.info)
###Create distance measures
#Average agreement with three nearest same party candidates within storkreds
distance.measure <- melted.distance %>%
filter(
storkreds.x == storkreds.y & # Look only within same storkreds (for those with unknown lokalkreds)
party.x == party.y & # Keep only SAME-party pairs (original comment said "across parties", which was wrong)
name != variable) %>% # Technical: remove agreement with oneself
group_by(name) %>%
arrange(desc(agreement)) %>%
filter( 1:n() == 1 | 1:n() == 2 | 1:n() == 3) %>% #Select top three, with ties removed (always takes three)
summarize(
agree.three.mean.party.storkreds = mean(agreement)
)
agree.three.mean.party.storkreds <- distance.measure
#Average agreement with three nearest non-same party candidates within storkreds
distance.measure <- melted.distance %>%
filter(
storkreds.x == storkreds.y & # Look only within same storkreds (for those with unknown lokalkreds)
party.x != party.y & # Look only across parties
name != variable) %>% # Technical: remove agreement with oneself
group_by(name) %>%
arrange(desc(agreement)) %>%
filter( 1:n() == 1 | 1:n() == 2 | 1:n() == 3) %>% #Select top three, with ties removed (always takes three)
summarize(
agree.three.mean.oth.party.storkreds = mean(agreement)
)
agree.three.mean.oth.party.storkreds <- distance.measure
### Add to original dataframe
#Add distance measures to principal component dataframe
data.pc <- left_join(data.pc, agree.three.mean.party.storkreds)
data.pc <- left_join(data.pc, agree.three.mean.oth.party.storkreds)
### Plot: DISTANCE TO OWN PARTY
# Plot of mean agreement with five nearest candidates
p <- ggplot(data = data.pc, aes(x = data.pc[,32], y = data.pc[,33] )) +
geom_point(aes(fill = agree.three.mean.party.storkreds), colour = "black", alpha=0.8, shape = 21, size = 10) +
scale_fill_continuous(low = "green", high = "red") +
theme(legend.position = "none") +
#facet_wrap(~ party) +
theme_minimal()
p
# THE MILLION DOLLAR PLOT (if it worked, but it doesn't)
# - Regressing personal votes on average agreement with five nearest candidates
p <- ggplot(data = filter(data.pc, votes.pers > 10), aes(x = agree.three.mean.party.storkreds, y = votes.pers )) +
geom_point() +
scale_y_log10() +
geom_smooth(method=lm, col = "red")+
theme_minimal()
p
### Plot: DISTANCE TO OTHER PARTY
# Plot of mean agreement with five nearest candidates
p <- ggplot(data = data.pc, aes(x = data.pc[,32], y = data.pc[,33] )) +
geom_point(aes(fill = agree.three.mean.oth.party.storkreds), colour = "black", alpha=0.8, shape = 21, size = 10) +
scale_fill_continuous(low = "green", high = "red") +
theme(legend.position = "none") +
#facet_wrap(~ party) +
theme_minimal()
p
# THE MILLION DOLLAR PLOT (if it worked, but it doesn't)
# - Regressing personal votes on average agreement with five nearest candidates
p <- ggplot(data = filter(data.pc, votes.pers > 10), aes(x = agree.three.mean.oth.party.storkreds, y = votes.pers )) +
geom_point() +
scale_y_log10() +
geom_smooth(method=lm, col = "red")+
theme_minimal()
p
#### -----------------------------------
=======
### Goal: the dataset should look something like this #
# Name1 name2 party lokalkreds storkreds agreement
# navn navnsen esben lunde venstre xxx xxxxx 88 %
# navn navnsen lars l?kke venstre xxx xxxxx 58 %
# navn navnsen pia K venstre xxx xxxxx 42 %
# .....
# .....
# .....
# esben lunde navn navnsen o xxx xxxxx 88 %
# esben lunde ...
# esben lunde ...
# esben lunde ...
# Step 1: Add names, party, lokalkreds and storkreds to the dataframe with full distances
# Step 2: Melt the dataframe
# Step 3: Compute the distance for each candidate to the wanted other candidates (party, kreds, etc.)
# Step 4: Add distance measures as a single variable to the original dataset
### Step 1: Add names, party, lokalkreds and storkreds to the dataframe with full distances
View(cand.distance)
cand.distance <- cbind(data[,c(1,2,3,4)], cand.distance)
# Work around the *Kristian Andersen* mistake: This should be checked, if Kristian Andersen is fixed.
#Add names to rows
cand.distance[,1] <- as.character(cand.distance[,1])
cand.distance[517,1] <- "Kristian Andersen_K1"
cand.distance[518,1] <- "Kristian Andersen_K2"
cand.distance[592,1] <- "Kristian Andersen_V1"
cand.distance[593,1] <- "Kristian Andersen_V2"
cand.distance[,1] <- as.factor(cand.distance[,1])
cand.distance2 <- cand.distance
#Put names on columns as well
names(cand.distance)[5:728] <- as.character(cand.distance[,1])
#Load libraries
library(reshape2)
#Melt dataframe to obtain a 'long' version of the above distance matrix
melted.distance <- melt(data = cand.distance,
id.vars = c(1,2,3,4),
value.name = "agreement")
#Add candidate info to both 'sides' of the list (such that info is attached to both names in every row)
cand.info <- cand.distance[,1:4]
melted.distance <- left_join(melted.distance, cand.info, by = c("variable" = "name"))
rm(cand.info)
###Create distance measures
#Average agreement with three nearest same party candidates within storkreds
distance.measure <- melted.distance %>%
filter(
storkreds.x == storkreds.y & # Look only within same storkreds (for those with unknown lokalkreds)
party.x == party.y & # Look only across parties
name != variable) %>% # Technical: remove agreement with oneself
group_by(name) %>%
arrange(desc(agreement)) %>%
filter( 1:n() == 1 | 1:n() == 2 | 1:n() == 3) %>% #Select top three, with ties removed (always takes three)
summarize(
agree.three.mean.party.storkreds = mean(agreement)
)
agree.three.mean.party.storkreds <- distance.measure
#Average agreement with three nearest non-same party candidates within storkreds
distance.measure <- melted.distance %>%
filter(
storkreds.x == storkreds.y & # Look only within same storkreds (for those with unknown lokalkreds)
party.x != party.y & # Look only across parties
name != variable) %>% # Technical: remove agreement with oneself
group_by(name) %>%
arrange(desc(agreement)) %>%
filter( 1:n() == 1 | 1:n() == 2 | 1:n() == 3) %>% #Select top three, with ties removed (always takes three)
summarize(
agree.three.mean.oth.party.storkreds = mean(agreement)
)
agree.three.mean.oth.party.storkreds <- distance.measure
### Add to original dataframe
#Add distance measures to principal component dataframe
data.pc <- left_join(data.pc, agree.three.mean.party.storkreds)
data.pc <- left_join(data.pc, agree.three.mean.oth.party.storkreds)
### Plot: DISTANCE TO OWN PARTY
# Plot of mean agreement with five nearest candidates
data.pc.plot <- filter(data.pc, party != "1")
p <- ggplot(data = data.pc.plot, aes(x = data.pc.plot[,32], y = data.pc.plot[,33], size = sqrt(votes.pers/pi))) +
geom_point(aes(fill = agree.three.mean.party.storkreds), colour = "black", alpha=0.8, shape = 21) +
scale_size_continuous( range = c(1,25), labels = c("4,000", "15,000"), breaks = c(50, 100), name = "votes" ) +
scale_fill_continuous(low = "green", high = "red", name = "agree.mean") +
theme(legend.position = "none") +
# facet_wrap(~ party) +
xlab("First Component") +
ylab("Second Component") +
theme_minimal()
p
p <- ggplot(data = data.pc, aes(x = data.pc[,32], y = data.pc[,33], size = sqrt(votes.pers/pi))) +
geom_point(aes(fill = party), colour = "black",
alpha=0.8, shape = 21) +
scale_size_continuous( range = c(1,25) ) +
p
# THE MILLION DOLLAR PLOT (if it worked, but it doesn't)
# - Regressing personal votes on average agreement with five nearest candidates
p <- ggplot(data = filter(data.pc, votes.pers > 10), aes(x = agree.three.mean.party.storkreds, y = votes.pers )) +
geom_point() +
scale_y_log10() +
geom_smooth(method=lm, col = "red")+
theme_minimal()
p
### Plot: DISTANCE TO OTHER PARTY
# Plot of mean agreement with five nearest candidates
p <- ggplot(data = data.pc, aes(x = data.pc[,32], y = data.pc[,33] )) +
geom_point(aes(fill = agree.three.mean.oth.party.storkreds), colour = "black", alpha=0.8, shape = 21, size = 10) +
scale_fill_continuous(low = "green", high = "red") +
theme(legend.position = "none") +
#facet_wrap(~ party) +
theme_minimal()
p
# THE MILLION DOLLAR PLOT (if it worked, but it doesn't)
# - Regressing personal votes on average agreement with five nearest candidates
p <- ggplot(data = filter(data.pc, votes.pers > 10), aes(x = agree.three.mean.oth.party.storkreds, y = votes.pers )) +
geom_point() +
scale_y_log10() +
geom_smooth(method=lm, col = "red")+
theme_minimal()
p
#### -------- Regression analysis -------------------
names(reg.data)
reg.data <- data.pc
reg.data <- filter(reg.data, party != "1")
# K?r for enkelte partier, not?r estimat
# agree.three.mean, Signifikant for: a, b, k, (positiv alle)
# agree.three.oth.mean, signifikant for o (negativ),
lm2 <- lm(formula = log(votes.pers) ~
# agree.three.mean.party.storkreds +
# agree.three.mean.oth.party.storkreds +
# agree.three.mean.party.storkreds*party +
# nearest.cand +
# nearest.five.mean +
# agree.party.mean +
# agree.party.mean*party +
# party +
# opstillet.i.kreds.nr +
is.male +
ran.last.election+
age,
data = reg.data, na.action = "na.omit")
summary(lm2)
length(lm2$fitted.values)
library(stargazer)
stargazer(lm1, lm2, lm3)
### How many votes does it take to get elected?
av <- data.pc %>%
group_by(elected) %>%
filter(votes.pers < 2000) %>%
summarize(av = n() )
av
p <- ggplot(data = data.pc, aes( x = votes.pers, group = elected, fill = elected)) +
geom_density(alpha = 0.6) +
scale_x_log10( breaks = c(10, 100, 500, 1000, 2000, 5000, 10000,50000 )) +
scale_fill_discrete() +
xlab("Personal votes received") +
theme_minimal()
p
#### Description of the distance measure #### -----------
summary(data.pc$agree.three.mean.party.storkreds)
sqrt(var(data.pc$agree.three.mean.party.storkreds, na.rm = TRUE))
p <- ggplot(data = data.pc, aes(x = agree.three.mean.party.storkreds))+
stat_function(fun = dnorm, args = list(mean = 0.8586,
sd = 0.07812928)) + # This is crap code, but it works. Sorry.
geom_density(na.rm = T, fill = "darkgreen", alpha = 0.8) +
theme_minimal()
p
data.pc <- data.pc %>% ungroup()
sum(data.pc[,42][data.pc[,42] == 1], na.rm = T)
>>>>>>> origin/master
#### TO DO #####
# - Build distance algorithm
# - within parties
# - within storkreds
# - within lokalkreds
#
# - Match valgkredsdata with
# - latitude, or
# - median income
#
# - Fix
# - scales in facet wrapped plots: the horizontal axis is different for each plot
#
#### TRASH #####
# NOTE(review): an unresolved git merge conflict sat here; both sides were
# byte-identical, so a single copy is kept and the markers removed.
## Variance (standard deviation) of the responses to each question,
## across all candidates.
resp.var <- data[, 17:31] %>%
  var() %>%
  diag() %>%
  sqrt() %>%
  t()
rownames(resp.var) <- "Standard Deviation"
# Explanation of Altinget's scoring (translated from garbled Danish):
# http://www.altinget.dk/kandidater/ft15/information.aspx#.VmNPf7xlmRs
# The test's algorithm awards points based on the difference between a
# candidate's and a user's answer. Identical answers give 4 points (e.g.
# strongly agree / strongly agree); one step apart gives 3 points (e.g.
# strongly disagree / partly disagree). Answers at opposite ends of the scale
# (e.g. strongly agree / strongly disagree) give 0 points. Each question has
# a 1/20 weight, and the points are summed into the final percentage.
| /Exam Project/Unused files/Data Analysis.R | no_license | nayani-p/Assignment-2 | R | false | false | 31,666 | r | Sys.setlocale(category = "LC_ALL", locale = "UTF-8")
library("plyr")
library("rvest")
library("dplyr")
library("ggplot2")
#### Load and format data ####
data.org <- read.csv(file = "dk_ft15_politician_responses.csv", header = TRUE) # Raw dataset
data <- unique(data.org) # Working copy, with exact duplicate rows removed
## Map responses (question columns 17-31) to Likert-scale-style numerics 1-5.
## IMPROVEMENT: the original iterated over the same columns twice (once to
## recode, once to coerce); the two loops are merged into a single pass.
## Note: the gsub patterns do not overlap ("Helt enig" does not match inside
## "Helt uenig"), so the replacement order is safe.
for (i in 17:31) {
  data[, i] <- data[, i] %>%
    gsub(x = ., pattern = "Helt enig", replacement = 5) %>%
    gsub(x = ., pattern = "Delvist enig", replacement = 4) %>%
    gsub(x = ., pattern = "Hverken enig eller uenig", replacement = 3) %>%
    gsub(x = ., pattern = "Delvist uenig", replacement = 2) %>%
    gsub(x = ., pattern = "Helt uenig", replacement = 1) %>%
    as.numeric()  # non-matching responses become NA, as before
}
#Removing the double Kristian Andersen
# data <- data %>% # A candidate, Kristian Andersen, has several entries, these are removed. NOTE: This removes one candidate
# group_by(name) %>%
# filter(row_number() == 1 ) %>% # Method: data is grouped on name variable, and groups with >1 name are discarded
# ungroup()
## Create mapping of response variables,
# Use this to copy into code: -c(name, party, storkreds, lokalkreds, age, is.male,
# title, location, elected, votes.pers, votes.all, valgt.nr,
# stedfor.nr, opstillet.i.kreds.nr, nomineret.i.kreds.nr,
# ran.last.election)
## Colour used for each party in later plots. Built with setNames() so the
## vector is created and named in one expression; the names are the party
## factor levels in the order unique() returns them, which is what
## ggplot2's scale_*_manual() matches against.
colormapping <- setNames(
  c("red",
    "darkorchid4",
    "lightgreen",
    "hotpink",
    "cyan1",
    "grey",
    "yellow",
    "darkblue",
    "orange",
    "darkolivegreen4",
    "lightgrey"),
  unique(as.character(data$party))
)
## Human-readable party names, keyed by the same party codes, for labelling
## later plots.
namemapping <- setNames(
  c("Socialdemokratiet",
    "Radikale",
    "Konservative",
    "SF",
    "Liberal Alliance",
    "Kristendemokraterne",
    "Dansk Folkeparti",
    "Venstre",
    "Enhedslisten",
    "Alternativet",
    "Uden for partierne"),
  unique(as.character(data$party))
)
#### Data description ####
########## Mean responses
## -- Add mean response for each party, for each question -- ##
# Per-party mean of the 15 question columns; all metadata columns are
# excluded explicitly, and independents (party == 1) are dropped.
party.means <- data %>%
filter(party != 1) %>%
group_by(party) %>%
summarize_each(funs(mean), -c(name, party, storkreds, lokalkreds, age, is.male,
title, location, elected, votes.pers, votes.all, valgt.nr,
stedfor.nr, opstillet.i.kreds.nr, nomineret.i.kreds.nr,
ran.last.election))
## --- Plot average response to each question, by party --- #
# Construct labels with question text to be plotted
labels <- data.frame(
question = names(party.means[2:16]),
position.y = 16:2+0.5, # position is based on the plot below
position.x = rep(3, 15) # position is based on the plot below
)
# Build plot
p <- ggplot(data = party.means) #initiate plot
# Loop over each question and add one geom_point layer per question:
# question i is drawn at y = 18 - i, its party means on the x axis.
# NOTE(review): aes_string() is deprecated in ggplot2 >= 3.0; consider
# aes(x = .data[[col]]) if this script is modernised.
for(i in 2:16){
p <- p +
geom_point(aes_string(
y = 18-i, # Split questions by y-coordinates for each question
x = paste("party.means$", names(party.means)[i], sep = ""), # Let party means be x-axis
fill = "party"
), colour = "black", alpha=0.8, shape = 21, size = 10 )
}
#Add questions
p <- p + geom_text(data = labels,
aes( y = position.y, x = position.x, label = question),
size = 3)
#Party colors
p <- p + scale_fill_manual ( values = colormapping )
#Titles and axis
p <- p +
theme_minimal() +
theme(axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.title.x = element_blank(),
panel.grid.minor=element_blank(),
legend.position="top") +
scale_y_continuous(breaks=seq(1, 16, 1)) +
scale_x_continuous(breaks=c(1,3,5),
labels=c("Highly disagree", "Neither agree nor disagree", "Highly agree"))+
ggtitle("Mean response to survey \n questions, by party")
p
## --- How close are parties to the middle? -------
#Calculate 'centerness' NOTE: Requires above code to have been run already, to create party.means
party.middle <- party.means
# Distance of each mean answer from the neutral midpoint (3) on the 1-5
# scale, so transformed values lie in [0, 2].
party.middle[,2:16] <- abs(party.middle[,2:16]-3) #Re-align around center (defining center = 0) and take absolutes
party.middle[,17] <- rowMeans(party.middle[,2:16]) #Compute averages
#simplify dataframe
party.middle <- party.middle %>%
select( party = party, mean.dist.from.center = V17) #Select only the two relevant variables
# Sorted horizontal bar chart: how far each party sits from the midpoint.
p <- ggplot(data = party.middle, aes( x = reorder(party, mean.dist.from.center),
y = mean.dist.from.center, fill = party)) +
geom_bar(stat = "identity",
color = "black"
) +
scale_fill_manual( values = colormapping) +
coord_flip() +
theme_minimal() +
ylab("Average distance from 'neither agree nor disagree',\n on 0-2 scale") +
xlab("")+
ggtitle("What parties have the most extreme opinions?")
p
#### Variance in responses -------------------------- NOTE: useless. Doesn't measure the right thing.
# Per-party mean answer to each question (columns 17-31). The "variance"
# computed below is the spread of those 15 per-question means, not of the
# candidates' answers - hence the NOTE above.
data.var <- data %>%
group_by(party) %>%
select(party, 17:31) %>%
summarize_each(
funs(mean)
)
#This calculates variance in responses by party (but it's a non-informative measure)
# Hand-rolled sample standard deviation of the 15 question means per party;
# the formula sqrt(sum((x - mean(x))^2) / (n - 1)) is the textbook sd.
for (i in 1:nrow(data.var)){
data.var$party.std[i] <- sqrt(sum((data.var[i,2:16] - rep(mean(as.numeric(data.var[i,2:16])), 15))^2)/ (15 - 1) )
}
# Bar chart of the per-party standard deviation, sorted.
p <- ggplot( data = data.var, aes( x = reorder(party, party.std), y = party.std, fill = party) ) +
geom_bar(stat = "identity") +
scale_fill_manual(values = colormapping) +
coord_flip()+
theme_minimal() +
ylab("..") +
xlab("..")+
ggtitle("...")
p
# Bar chart of the per-party response standard deviation (black outline).
# BUG FIX: the original wrote reorder(data.var, party.std) - passing the
# whole data frame as the vector to reorder - which errors at plot time; the
# party column is the intended first argument (cf. the otherwise identical
# plot just above this one).
p <- ggplot(data = data.var, aes(x = reorder(party, party.std),
                                 y = party.std, fill = party)) +
  geom_bar(stat = "identity",
           color = "black") +
  scale_fill_manual(values = colormapping) +
  coord_flip() +
  theme_minimal() +
  ylab("..") +
  xlab("..") +
  ggtitle("...")
p
### Principal Component Analysis ----
# PCA on the 15 question columns using the correlation matrix; the first
# five component scores are stored as columns 32-36 of data.pc.
pc <- princomp(data[,17:31], cor=TRUE, scores=TRUE)
data.pc <- data
data.pc[32:36] <- pc$scores[,1:5]
#Pretty Plot#
# data.pc = filter(data.pc, party!="1") #Filter away candidates outside the parties
# Candidates in the plane of the first two components, coloured by party.
p <- ggplot(data = data.pc, aes(x = data.pc[,32], y = data.pc[,33] )) +
geom_point(aes(fill = party), colour = "black", alpha=0.8, shape = 21, size = 10) +
scale_fill_manual(values = colormapping) +
theme_minimal()
p
## Let's try and divide the questions into two groups of questions:
#redistribution and value-based policy questions
#Splitting the dataset
redist <- data %>% select (1:16,18:19,23:24,26,29,31)
value <- data %>% select (1:17, 20:22,25, 27:28,30)
##Do PCA analysis on both subsets and restore 5 first components
# NOTE(review): T is used for TRUE here; prefer the full word, since T can
# be reassigned.
pc1 <- princomp(redist[,17:23], cor = T, scores = T)
redist[24:28] <- pc1$scores[,1:5]
pc2 <- princomp(value[,17:24], cor = T, scores = T)
value[25:29] <- pc2$scores[,1:5]
##Compute summary statistics on components
summary(princomp(redist[,17:23], loadings = T ))
summary(princomp(value[,17:24], loadings = T ))
##Add the first component from each subset to original data in order to plot in same plot
data.pc[37] <- pc1$scores[,1]
data.pc[38] <- pc2$scores[,1]
##The PCA - using first component from each subset analysis
p <- ggplot(data.pc, aes(x = data.pc[,37], y=data.pc[,38])) +
geom_point(aes(fill = party), colour = "black", alpha=0.8, shape = 21, size = 10) +
scale_fill_manual(values = colormapping) +
theme_minimal()
p
#Faceted Party Plot#
# NOTE(review): filter(data.pc) has no condition, so this line is
# effectively a no-op; the comment suggests party != "1" was intended.
data.pc = filter(data.pc) #Filter away candidates outside the parties
p <- ggplot(data = data.pc, aes(x = data.pc[,32], y = data.pc[,33], size = sqrt(votes.pers/pi))) +
geom_point(aes(fill = party), colour = "black",
alpha=0.8, shape = 21) +
scale_size_continuous( range = c(1,25) ) +
scale_fill_manual(values = colormapping) +
theme_minimal() +
theme(legend.position = "none") +
facet_wrap(~ party)
p
# library(ggfortify)
# autoplot(prcomp(data[,17:31]), loadings = TRUE, loadings.colour = 'blue',
# loadings.label = TRUE, loadings.label.size = 3)
#### Decision tree analysis ####
# Classify party from the 15 question answers with a CART tree, then check
# how often a candidate's actual party matches the tree's prediction
# ("homogeneous" candidates) and compare mean personal votes between the
# matching and non-matching groups.
library(rpart)
set.seed(1)
# separate into training and test data (2/3 train, 1/3 test)
train <- sample( x = 1:nrow(data), size = 2/3 * nrow(data), replace = FALSE)
data.train <- data[train, ]
data.train <- data.train[,c(2,17:31)]
names(data.train) = c("party","uddannelse","forebyggelse","sundhed","velfærd","arb1","arb2","økonomi","trafik","ret","social","integration","eu","udvikling","miljø","kultur")
data.test <- data[-train,]
data.test <- data.test[,c(2,17:31)]
names(data.test) = c("party","uddannelse","forebyggelse","sundhed","velfærd","arb1","arb2","økonomi","trafik","ret","social","integration","eu","udvikling","miljø","kultur")
# Fit decision tree
model = rpart(party ~ ., data = data.train, method = "class")
partychoice = predict(model, newdata = data.test, type = "class")
# plot the model
library("rpart.plot")
prp(model, box.col = "lightblue", border.col = "darkblue", shadow.col = "lightgrey", split.cex = 0.7,split.font = 4, split.col = "darkblue", split.border.col = 9, split.shadow.col = "lightgrey", nn.col = "darkred")
# variable importance
v.importance <- data.frame(model$variable.importance)
# run the model on the whole dataset (note: includes the training rows)
data.pred <- data[,c(2,17:31)]
names(data.pred) <- c("party","uddannelse","forebyggelse","sundhed","velfærd","arb1","arb2","økonomi","trafik","ret","social","integration","eu","udvikling","miljø","kultur")
pred = data.frame(predict(model, newdata = data.pred, type = "class"))
data.pred <- cbind(data.pred, pred)
# homogen = 1 when the predicted party (column 17, the cbind-ed prediction)
# equals the actual party
data.pred$homogen = ifelse(data.pred$party == data.pred[,17], 1,0 )
data.pred = mutate(data.pred, votes = data$votes.pers)
# how is the mean personal votes for "homogenious" candidates versus "non-homogenious"
homogenious <- data.pred %>%
group_by(homogen) %>%
summarise(meanvotes = mean(votes))
#### Distances between points #### ---------------------------
# Construct matrix of Euclidean distances between all candidates, in all dimensions
df.distance <- data[,17:31]
#Select only questions
rownames(df.distance) <- 1:nrow(df.distance) #Set names of rows
names(df.distance)[1:15] <- 1:15 #Simplify variable names
#Compute distance matrix (full n x n, symmetric, zero diagonal)
dist.eucl <- dist(df.distance) %>%
as.matrix() %>%
as.data.frame()
#Make a smaller matrix, containing only the distance to 30 nearest candidates, for each candidate
# NOTE(review): cand.dist is grown column-by-column inside the loop; for a
# larger dataset, preallocating (or apply over columns) would be faster.
cand.dist <- data.frame()
for (i in 1:ncol(dist.eucl)) {
cand.dist[1:30, i] <- sort(dist.eucl[,i])[1:30]
}
# Row 1 of cand.dist is the candidate's distance to themselves (0), so row 2
# is the distance to the nearest OTHER candidate.
cand.dist.one <- t(cand.dist[2,])
#Average distance to five nearest candidates (rows 2:6, self excluded)
nearest.five.mean <- rep(0, ncol(dist.eucl))
for (i in 1:ncol(dist.eucl)) {
nearest.five.mean[i] <- mean(cand.dist[2:6,i])
}
#Add distance measures to principal component dataframe
data.pc$nearest.cand <- as.numeric(cand.dist.one )
data.pc$nearest.five.mean <- nearest.five.mean
#Test plot of nearest candidates (note that distance is measured in many more dimensions than those plotted)
p <- ggplot(data = data.pc, aes(x = data.pc[,32], y = data.pc[,33] )) +
geom_point(aes(fill = nearest.cand), colour = "black", alpha=0.8, shape = 21, size = 10) +
scale_fill_continuous(low = "darkred", high = "green") +
theme_minimal()
p
#Test plot of mean distance to five nearest candidates (note that distance is measured in many more dimensions than those plotted)
p <- ggplot(data = data.pc, aes(x = data.pc[,32], y = data.pc[,33] )) +
geom_point(aes(fill = nearest.five.mean), colour = "black", alpha=0.8, shape = 21, size = 10) +
scale_fill_continuous(low = "darkred", high = "green") +
theme_minimal()
p
# THE MILLION DOLLAR PLOT (if it worked, but it doesn't)
p <- ggplot(data = filter(data.pc, votes.pers > 10 & nearest.five.mean >0), aes(x = nearest.five.mean, y = votes.pers )) +
geom_point() +
scale_y_log10() +
geom_smooth(method=lm, col = "red")+
# scale_fill_continuous(low = "darkred", high = "green") +
theme_minimal()
p
#### Agreement between candidates, Altinget definition #### --------------------------------------------
### Construct matrix of agreement between all candidates ###
# Import and transpose responses (questions in rows, candidates in columns)
df.distance <- t(data[,17:31])
#Create empty matrix
cand.distance <- matrix(nrow = ncol(df.distance), ncol = ncol(df.distance))
#Fill out matrix
# NOTE(review): the double loop computes both triangles of a symmetric
# matrix; computing one triangle and mirroring would halve the work.
for (k in 1:nrow(cand.distance)){
for (i in 1:ncol(cand.distance)) {
cand.distance[k,i] <- sum((-abs(df.distance[,k] - df.distance[,i])+4) / 60) #Use Altingets definition of Agreement (see below)
}
print(k)
}
rm(df.distance)
###Average agreement with five nearest candidates
#Create average 'agreement' with five closest candidate for each candidate
agree.five.mean <- data.frame() #Empty frame
# Element [1] of the sorted column is the candidate's agreement with
# themselves (always 1), so entries 2:6 are the five closest OTHERS.
for (i in 1:ncol(cand.distance)) {
agree.five.mean[1, i] <- sort(cand.distance[,i], decreasing = TRUE)[2:6] %>% #Choose top 5 of each candidates agreement
mean() # Take the mean
}
agree.five.mean <- t(agree.five.mean) #transpose before merging with original data frame
### Test results in PCA plot
#Add distance measures to principal component dataframe
data.pc$agree.five.mean <- as.numeric(agree.five.mean)
### Plot
# Plot of mean agreement with five nearest candidates, faceted by party
p <- ggplot(data = data.pc, aes(x = data.pc[,32], y = data.pc[,33] )) +
geom_point(aes(fill = agree.five.mean), colour = "black", alpha=0.8, shape = 21, size = 10) +
scale_fill_continuous(low = "green", high = "red") +
theme(legend.position = "none") +
facet_wrap(~ party) +
theme_minimal()
p
# THE MILLION DOLLAR PLOT (if it worked, but it doesn't)
# - Regressing personal votes on average agreement with five nearest candidates
p <- ggplot(data = filter(data.pc, votes.pers > 10), aes(x = agree.five.mean, y = votes.pers )) +
geom_point() +
scale_y_log10() +
geom_smooth(method=lm, col = "red")+
theme_minimal()
p
##### Party center analysis -------------------------------------------------------
# Party "centers": the per-party mean response to each of the 15 questions.
# BUG FIX: the original pipeline ended in `%>%` and piped straight into the
# `for` loop below, which is a parse error; the stray pipe is removed.
centers <- data %>%
  select(party, 17:31) %>%
  group_by(party) %>%
  summarize_each( funs(mean) )  # NOTE(review): deprecated in modern dplyr (use across()); kept for consistency with the rest of the script
# Agreement of each candidate with their own party's center, using the
# Altinget scoring: 4 points for an identical answer, minus one per step of
# difference, each question weighted 1/60 so a perfect match sums to 1.
for (i in seq_len(nrow(data.pc))) {
  par <- data.pc$party[i]
  data.pc$agree.party.mean[i] <- sum((-abs(data.pc[i, 17:31] - filter(centers, party == par)[, 2:16]) + 4) / 60)
  print(i)  # progress indicator for the row-wise loop
}
# Average within-party agreement as a percentage, sorted descending.
party.centers <- data.pc %>%
  group_by(party) %>%
  summarize(
    average.agreement = mean(agree.party.mean) * 100
  ) %>%
  arrange(desc(average.agreement))
party.centers
# Sorted horizontal bars: average agreement with the own party's center, per
# party (axis labels and title are placeholders in the original; kept as-is).
p <- ggplot(
  data = party.centers,
  aes(x = reorder(party, average.agreement), y = average.agreement, fill = party)
) +
  geom_bar(stat = "identity") +
  scale_fill_manual(values = colormapping) +
  coord_flip() +
  theme_minimal() +
  ylab("..") +
  # ylim(60, 100)+
  xlab("..") +
  ggtitle("...")
p
#### Agreement with other candidates, full melted data set #### ---------------------------------------------------
<<<<<<< HEAD
### Goal: the dataset should look something like this #
# Name1 name2 party lokalkreds storkreds agreement
# navn navnsen esben lunde venstre xxx xxxxx 88 %
# navn navnsen lars l?kke venstre xxx xxxxx 58 %
# navn navnsen pia K venstre xxx xxxxx 42 %
# .....
# .....
# .....
# esben lunde navn navnsen o xxx xxxxx 88 %
# esben lunde ...
# esben lunde ...
# esben lunde ...
# Step 1: Add names, party, lokalkreds and storkreds to the dataframe with full distances
# Step 2: Melt the dataframe
# Step 3: Compute the distance for each candidate to the wanted other candidates (party, kreds, etc.)
# Step 4: Add distance measures as a single variable to the original dataset
### Step 1: Add names, party, lokalkreds and storkreds to the dataframe with full distances
View(cand.distance)
cand.distance <- cbind(data[,c(1,2,3,4)], cand.distance)
# Work around the *Kristian Andersen* mistake: This should be checked, if Kristian Andersen is fixed.
#Add names to rows
cand.distance[,1] <- as.character(cand.distance[,1])
cand.distance[517,1] <- "Kristian Andersen_K1"
cand.distance[518,1] <- "Kristian Andersen_K2"
cand.distance[592,1] <- "Kristian Andersen_V1"
cand.distance[593,1] <- "Kristian Andersen_V2"
cand.distance[,1] <- as.factor(cand.distance[,1])
cand.distance2 <- cand.distance
#Put names on columns as well
names(cand.distance)[5:728] <- as.character(cand.distance[,1])
#Load libraries
library(reshape2)
#Melt dataframe to obtain a 'long' version of the above distance matrix
melted.distance <- melt(data = cand.distance,
id.vars = c(1,2,3,4),
value.name = "agreement")
#Add candidate info to both 'sides' of the list (such that info is attached to both names in every row)
cand.info <- cand.distance[,1:4]
melted.distance <- left_join(melted.distance, cand.info, by = c("variable" = "name"))
rm(cand.info)
###Create distance measures
#Average agreement with three nearest same party candidates within storkreds
distance.measure <- melted.distance %>%
filter(
storkreds.x == storkreds.y & # Look only within same storkreds (for those with unknown lokalkreds)
party.x == party.y & # Look only across parties
name != variable) %>% # Technical: remove agreement with oneself
group_by(name) %>%
arrange(desc(agreement)) %>%
filter( 1:n() == 1 | 1:n() == 2 | 1:n() == 3) %>% #Select top three, with ties removed (always takes three)
summarize(
agree.three.mean.party.storkreds = mean(agreement)
)
agree.three.mean.party.storkreds <- distance.measure
#Average agreement with three nearest non-same party candidates within storkreds
distance.measure <- melted.distance %>%
filter(
storkreds.x == storkreds.y & # Look only within same storkreds (for those with unknown lokalkreds)
party.x != party.y & # Look only across parties
name != variable) %>% # Technical: remove agreement with oneself
group_by(name) %>%
arrange(desc(agreement)) %>%
filter( 1:n() == 1 | 1:n() == 2 | 1:n() == 3) %>% #Select top three, with ties removed (always takes three)
summarize(
agree.three.mean.oth.party.storkreds = mean(agreement)
)
agree.three.mean.oth.party.storkreds <- distance.measure
### Add to original dataframe
#Add distance measures to principal component dataframe
data.pc <- left_join(data.pc, agree.three.mean.party.storkreds)
data.pc <- left_join(data.pc, agree.three.mean.oth.party.storkreds)
### Plot: DISTANCE TO OWN PARTY
# Plot of mean agreement with five nearest candidates
p <- ggplot(data = data.pc, aes(x = data.pc[,32], y = data.pc[,33] )) +
geom_point(aes(fill = agree.three.mean.party.storkreds), colour = "black", alpha=0.8, shape = 21, size = 10) +
scale_fill_continuous(low = "green", high = "red") +
theme(legend.position = "none") +
#facet_wrap(~ party) +
theme_minimal()
p
# THE MILLION DOLLAR PLOT (if it worked, but it doesn't)
# - Regressing personal votes on average agreement with five nearest candidates
p <- ggplot(data = filter(data.pc, votes.pers > 10), aes(x = agree.three.mean.party.storkreds, y = votes.pers )) +
geom_point() +
scale_y_log10() +
geom_smooth(method=lm, col = "red")+
theme_minimal()
p
### Plot: DISTANCE TO OTHER PARTY
# Plot of mean agreement with five nearest candidates
p <- ggplot(data = data.pc, aes(x = data.pc[,32], y = data.pc[,33] )) +
geom_point(aes(fill = agree.three.mean.oth.party.storkreds), colour = "black", alpha=0.8, shape = 21, size = 10) +
scale_fill_continuous(low = "green", high = "red") +
theme(legend.position = "none") +
#facet_wrap(~ party) +
theme_minimal()
p
# THE MILLION DOLLAR PLOT (if it worked, but it doesn't)
# - Regressing personal votes on average agreement with five nearest candidates
p <- ggplot(data = filter(data.pc, votes.pers > 10), aes(x = agree.three.mean.oth.party.storkreds, y = votes.pers )) +
geom_point() +
scale_y_log10() +
geom_smooth(method=lm, col = "red")+
theme_minimal()
p
#### -----------------------------------
=======
### Goal: the dataset should look something like this #
# Name1 name2 party lokalkreds storkreds agreement
# navn navnsen esben lunde venstre xxx xxxxx 88 %
# navn navnsen lars l?kke venstre xxx xxxxx 58 %
# navn navnsen pia K venstre xxx xxxxx 42 %
# .....
# .....
# .....
# esben lunde navn navnsen o xxx xxxxx 88 %
# esben lunde ...
# esben lunde ...
# esben lunde ...
# Step 1: Add names, party, lokalkreds and storkreds to the dataframe with full distances
# Step 2: Melt the dataframe
# Step 3: Compute the distance for each candidate to the wanted other candidates (party, kreds, etc.)
# Step 4: Add distance measures as a single variable to the original dataset
### Step 1: Add names, party, lokalkreds and storkreds to the dataframe with full distances
# NOTE(review): View() opens an interactive viewer; harmless in a script but only
# useful when run interactively.
View(cand.distance)
# Prepend the first four candidate-info columns (name, party, lokalkreds,
# storkreds — presumably; verify against `data`) to the distance matrix.
cand.distance <- cbind(data[,c(1,2,3,4)], cand.distance)
# Work around the *Kristian Andersen* mistake: This should be checked, if Kristian Andersen is fixed.
#Add names to rows
# Hard-coded row indices (517/518/592/593) disambiguate duplicate names;
# fragile — breaks if the underlying data order changes.
cand.distance[,1] <- as.character(cand.distance[,1])
cand.distance[517,1] <- "Kristian Andersen_K1"
cand.distance[518,1] <- "Kristian Andersen_K2"
cand.distance[592,1] <- "Kristian Andersen_V1"
cand.distance[593,1] <- "Kristian Andersen_V2"
cand.distance[,1] <- as.factor(cand.distance[,1])
cand.distance2 <- cand.distance
#Put names on columns as well
# Columns 5:728 are the 724 pairwise-distance columns; named after the candidates.
names(cand.distance)[5:728] <- as.character(cand.distance[,1])
#Load libraries
library(reshape2)
#Melt dataframe to obtain a 'long' version of the above distance matrix
melted.distance <- melt(data = cand.distance,
id.vars = c(1,2,3,4),
value.name = "agreement")
#Add candidate info to both 'sides' of the list (such that info is attached to both names in every row)
# After the join, duplicated columns get .x (row candidate) / .y (column candidate) suffixes.
cand.info <- cand.distance[,1:4]
melted.distance <- left_join(melted.distance, cand.info, by = c("variable" = "name"))
rm(cand.info)
###Create distance measures
#Average agreement with three nearest SAME-party candidates within storkreds.
# Result: one row per candidate with the mean of their top-3 agreement scores.
# (The old inline comment said "across parties", which contradicted the filter.)
distance.measure <- melted.distance %>%
filter(
storkreds.x == storkreds.y & # Look only within same storkreds (for those with unknown lokalkreds)
party.x == party.y & # Look only within the SAME party
name != variable) %>% # Technical: remove agreement with oneself
group_by(name) %>%
arrange(desc(agreement)) %>% # global sort; leaves each group in descending agreement order
filter(row_number() <= 3) %>% #Select top three, with ties removed (always takes three)
summarize(
agree.three.mean.party.storkreds = mean(agreement)
)
agree.three.mean.party.storkreds <- distance.measure
#Average agreement with three nearest non-same party candidates within storkreds.
# Same pipeline as above but restricted to candidates from OTHER parties.
distance.measure <- melted.distance %>%
filter(
storkreds.x == storkreds.y & # Look only within same storkreds (for those with unknown lokalkreds)
party.x != party.y & # Look only across parties
name != variable) %>% # Technical: remove agreement with oneself
group_by(name) %>%
arrange(desc(agreement)) %>% # global sort; leaves each group in descending agreement order
filter(row_number() <= 3) %>% #Select top three, with ties removed (always takes three)
summarize(
agree.three.mean.oth.party.storkreds = mean(agreement)
)
agree.three.mean.oth.party.storkreds <- distance.measure
### Add to original dataframe
#Add distance measures to principal component dataframe
# left_join() with no `by` joins on all shared columns (here: name) with a message.
data.pc <- left_join(data.pc, agree.three.mean.party.storkreds)
data.pc <- left_join(data.pc, agree.three.mean.oth.party.storkreds)
### Plot: DISTANCE TO OWN PARTY
# Plot of mean agreement with five nearest candidates
# party "1" excluded; point area scaled to personal votes (sqrt so AREA ~ votes).
data.pc.plot <- filter(data.pc, party != "1")
p <- ggplot(data = data.pc.plot, aes(x = data.pc.plot[,32], y = data.pc.plot[,33], size = sqrt(votes.pers/pi))) +
geom_point(aes(fill = agree.three.mean.party.storkreds), colour = "black", alpha=0.8, shape = 21) +
scale_size_continuous( range = c(1,25), labels = c("4,000", "15,000"), breaks = c(50, 100), name = "votes" ) +
scale_fill_continuous(low = "green", high = "red", name = "agree.mean") +
theme(legend.position = "none") +
# facet_wrap(~ party) +
xlab("First Component") +
ylab("Second Component") +
theme_minimal()
p
# PCA scatter with points coloured by party and sized by personal votes.
# FIX: the original chain ended with "scale_size_continuous(...) +" followed by
# "p", which evaluated as `... + p` — adding the previous ggplot object to the
# new one, an error. The stray trailing "+" is removed so the plot is assigned
# and then printed.
p <- ggplot(data = data.pc, aes(x = data.pc[,32], y = data.pc[,33], size = sqrt(votes.pers/pi))) +
geom_point(aes(fill = party), colour = "black",
alpha=0.8, shape = 21) +
scale_size_continuous( range = c(1,25) )
p
# THE MILLION DOLLAR PLOT (if it worked, but it doesn't)
# - Regressing personal votes on average agreement with five nearest candidates
# Log-scale personal votes vs. same-party agreement, with linear trend line.
p <- ggplot(data = filter(data.pc, votes.pers > 10), aes(x = agree.three.mean.party.storkreds, y = votes.pers )) +
geom_point() +
scale_y_log10() +
geom_smooth(method=lm, col = "red")+
theme_minimal()
p
### Plot: DISTANCE TO OTHER PARTY
# Plot of mean agreement with five nearest candidates
# PCA scatter coloured by agreement with other-party neighbours.
p <- ggplot(data = data.pc, aes(x = data.pc[,32], y = data.pc[,33] )) +
geom_point(aes(fill = agree.three.mean.oth.party.storkreds), colour = "black", alpha=0.8, shape = 21, size = 10) +
scale_fill_continuous(low = "green", high = "red") +
theme(legend.position = "none") +
#facet_wrap(~ party) +
theme_minimal()
p
# THE MILLION DOLLAR PLOT (if it worked, but it doesn't)
# - Regressing personal votes on average agreement with five nearest candidates
# Same regression plot against OTHER-party agreement.
p <- ggplot(data = filter(data.pc, votes.pers > 10), aes(x = agree.three.mean.oth.party.storkreds, y = votes.pers )) +
geom_point() +
scale_y_log10() +
geom_smooth(method=lm, col = "red")+
theme_minimal()
p
#### -------- Regression analysis -------------------
# Build the regression data from the PCA data frame, dropping party "1".
# FIX: names(reg.data) was originally called BEFORE reg.data was created,
# which errors on a fresh session (or inspects a stale object). Moved below.
reg.data <- data.pc
reg.data <- filter(reg.data, party != "1")
names(reg.data) # inspect the available predictors
# Run for individual parties, note the estimate (translated from Danish)
# agree.three.mean: significant for a, b, k (all positive)
# agree.three.oth.mean: significant for o (negative)
lm2 <- lm(formula = log(votes.pers) ~
# agree.three.mean.party.storkreds +
# agree.three.mean.oth.party.storkreds +
# agree.three.mean.party.storkreds*party +
# nearest.cand +
# nearest.five.mean +
# agree.party.mean +
# agree.party.mean*party +
# party +
# opstillet.i.kreds.nr +
is.male +
ran.last.election+
age,
data = reg.data, na.action = "na.omit")
summary(lm2)
length(lm2$fitted.values) # number of observations actually used after NA omission
library(stargazer)
# NOTE(review): lm1 and lm3 are not defined anywhere in this script — this call
# only works if they were fitted earlier in the session. Verify before running.
stargazer(lm1, lm2, lm3)
### How many votes does it take to get elected?
# Count of candidates with fewer than 2000 personal votes, by elected status.
av <- data.pc %>%
group_by(elected) %>%
filter(votes.pers < 2000) %>%
summarize(av = n() )
av
# Density of personal votes (log x-axis), split by elected status.
p <- ggplot(data = data.pc, aes( x = votes.pers, group = elected, fill = elected)) +
geom_density(alpha = 0.6) +
scale_x_log10( breaks = c(10, 100, 500, 1000, 2000, 5000, 10000,50000 )) +
scale_fill_discrete() +
xlab("Personal votes received") +
theme_minimal()
p
#### Description of the distance measure #### -----------
summary(data.pc$agree.three.mean.party.storkreds)
# Mean and standard deviation of the distance measure, computed from the data.
# FIX: the normal overlay below previously used hand-typed constants
# (mean = 0.8586, sd = 0.07812928); they are now derived directly, so the
# curve stays correct if the data changes.
dist.mean <- mean(data.pc$agree.three.mean.party.storkreds, na.rm = TRUE)
dist.sd <- sqrt(var(data.pc$agree.three.mean.party.storkreds, na.rm = TRUE))
dist.sd
# Empirical density with a matching normal curve overlaid.
p <- ggplot(data = data.pc, aes(x = agree.three.mean.party.storkreds))+
stat_function(fun = dnorm, args = list(mean = dist.mean,
sd = dist.sd)) +
geom_density(na.rm = TRUE, fill = "darkgreen", alpha = 0.8) + # TRUE, not T (T is reassignable)
theme_minimal()
p
data.pc <- data.pc %>% ungroup()
# Count of 1s in column 42 — presumably a 0/1 indicator; verify which column this is.
sum(data.pc[,42][data.pc[,42] == 1], na.rm = TRUE)
>>>>>>> origin/master
#### TO DO #####
# - Build distance algorithm
# - within parties
# - within storkreds
# - within lokalkreds
#
# - Match valgkredsdata with
# - latitude, or
# - median income
#
# - Fix
# - scales in facet wrapped plots: the horizontal axis is different for each plot
#
#### TRASH #####
<<<<<<< HEAD
## Variance in responses
# Per-question standard deviation of the answer columns (17:31):
# sqrt of the diagonal of the covariance matrix, transposed into a single
# labelled row for display.
resp.var <- t(sqrt(diag(var(data[, 17:31]))))
rownames(resp.var) <- "Standard Deviation"
#Explanation
# http://www.altinget.dk/kandidater/ft15/information.aspx#.VmNPf7xlmRs
# Testens algoritme virker sådan, at der gives point på baggrund af forskellen mellem en kandidat
=======
## Variance in responses
# NOTE(review): duplicate of the resp.var computation above — this copy sits on
# the other side of an unresolved merge conflict. Per-question SDs of columns 17:31.
resp.var <- data[,17:31] %>%
var() %>%
diag() %>%
sqrt() %>%
t()
rownames(resp.var) <- "Standard Deviation"
#Explanation
# http://www.altinget.dk/kandidater/ft15/information.aspx#.VmNPf7xlmRs
# Testens algoritme virker sådan, at der gives point på baggrund af forskellen mellem en kandidat
>>>>>>> origin/master
# og en brugers besvarelse. Et ens svar giver 4 point (f.eks. helt enig og helt enig), et trin ved
# siden af giver 3 point (f.eks. helt uenig og delvist uenig). Man får 0 point for svar i hver sin
# ende i skalaen (f.eks. helt enig og helt uenig). Hvert spørgsmål har en 1/20 vægt, og antallet af
# point bliver summeret til den endelige procentsats.
|
# Started 4/21/2021
# Initial Visualizations for Campus Weather Data
library(dplyr)
library(ggplot2)
# Reading in latest versions of data
# CHANGE THE VERSION IN THE USER INPUTS ALL FILE TO MOST RECENT IN THE FOLDER
# NOTE(review): DirFinal and user are not defined in this script — presumably
# set by an earlier setup script; verify before running standalone.
# colClasses = c("NULL", ...) drops the first (row-number) column of each CSV.
UserInputsAll <- read.csv(paste0(DirFinal[user], "/UserInputsAllv6.csv"), colClasses = c("NULL", rep(NA,7)))
# Data-file names are suffixed with the version stored in the last row of UserInputsAll.
MeterData <- read.csv(paste0(DirFinal[user], "/MeterData", UserInputsAll[nrow(UserInputsAll), 1], ".csv"), colClasses = c("NULL", rep(NA, 31)))
MeterUnits <- read.csv(paste0(DirFinal[user], "/MeterUnits", UserInputsAll[nrow(UserInputsAll), 1], ".csv"))
NAcount <- read.csv(paste0(DirFinal[user], "/NACount", UserInputsAll[nrow(UserInputsAll), 1], ".csv"), colClasses = c("NULL", rep(NA,5)))
NAcount$Date <- as.Date(NAcount$Date)
TomstSData <- read.csv(paste0(DirFinal[user], "/TomstSData", UserInputsAll[nrow(UserInputsAll), 1], ".csv"), colClasses = c("NULL", rep(NA, 15)))
Tomst5mData <- read.csv(paste0(DirFinal[user], "/Tomst5mData", UserInputsAll[nrow(UserInputsAll), 1], ".csv"), colClasses = c("NULL", rep(NA, 12)))
Tomst25mData <- read.csv(paste0(DirFinal[user], "/Tomst25mData", UserInputsAll[nrow(UserInputsAll), 1], ".csv"), colClasses = c("NULL", rep(NA, 12)))
# METER 2021
MeterData21 <- MeterData[MeterData$Year == 2021, ]
# Air temperature time series from the METER sensor.
ggplot(MeterData21, aes(x = DecYear, y = AirTemp))+
geom_line(col = "Firebrick4")+
labs(title = "Air Temperature in 2021", subtitle = "Data from METER Sensor",
y = "Temperature (˚C)", x = "Decimal Year")+
theme_classic()
# TOMST 2021
Tomst5m21 <- Tomst5mData[Tomst5mData$Year == 2021, ]
# Rename columns to match the METER naming (AirTemp, DecYear, ...).
colnames(Tomst5m21) <- c("Date", "TZ", "AirTemp","Shake", "Error","TempFlag",
"Date_Format", "DOY","Year", "Hour", "Minute", "DecYear")
ggplot(Tomst5m21, aes(x = DecYear, y = AirTemp))+
geom_line(col = "Deepskyblue3")+
labs(title = "Air Temperature in 2021", subtitle = "Data from TOMST Sensor",
y = "Temperature (˚C)", x = "Decimal Year")+
theme_classic()
# Both on same plot
# Base-graphics overlay: METER as the base line, TOMST drawn on top.
plot(MeterData21$DecYear, MeterData21$AirTemp,
type = "l",
lwd = 2,
col = "tomato3",
xlab = "Decimal Year",
ylab = "Temperature (Celsius)",
main = "Air Temperature in Clinton, NY in 2021",
sub = "Data collected from Hamilton College weather station")
# alpha() here comes from ggplot2 (scales) — gives a semi-transparent line colour.
lines(Tomst5m21$DecYear, Tomst5m21$AirTemp,
lwd = 2,
col = alpha("skyblue", 0.5))
legend("topleft", c("METER Data", "TOMST Data"), col = c("tomato3","skyblue"), lwd = 2, bty="n")
# Looking at weird METER data behavior
# Zoom into a short window (DecYear 2021.16-2021.19) to inspect an anomaly.
MeterDataSub <- MeterData[MeterData$DecYear>2021.16 & MeterData$DecYear<2021.19, ]
TomstDataSub <- Tomst5mData[Tomst5mData$DecYear>2021.16 & Tomst5mData$DecYear<2021.19, ]
# Both on same plot
plot(MeterDataSub$DecYear, MeterDataSub$AirTemp,
type = "l",
lwd = 2,
col = "tomato3",
xlab = "Decimal Year",
ylab = "Temperature (Celsius)",
main = "Air Temperature in Clinton, NY in 2021",
sub = "Data collected from Hamilton College weather station")
# NOTE(review): the subset keeps the original TOMST column names, so the
# temperature column is Temp1 here (not AirTemp as in the renamed Tomst5m21).
lines(TomstDataSub$DecYear, TomstDataSub$Temp1,
lwd = 2,
col = alpha("skyblue", 0.5))
legend("topleft", c("METER Data", "TOMST Data"), col = c("tomato3","skyblue"), lwd = 2, bty="n")
# Plot of solar radiation
plot(MeterData21$DecYear, MeterData21$SolRad,
type = "l",
lwd = 2,
col = "tomato3",
xlab = "Decimal Year",
ylab = "Solar Radiation (W/m^2)",
main = "Solar Radiation in Clinton, NY in 2021",
sub = "Data collected from Hamilton College weather station")
# ggplot version
ggplot(MeterData21, aes(x = DecYear, y = SolRad))+
geom_line(col = "Deepskyblue3")+
labs(title = "Solar Radiation in 2021", subtitle = "Data from METER Sensor",
y = "Solar Radiation (W/m^2)", x = "Decimal Year")+
theme_classic()
# Plot of precipitation
# FIX: the y-axis label and title were copy-pasted from the solar-radiation plot
# ("Solar Radiation (mm)" / "Solar Radiation in Clinton...") even though the
# variable plotted is Precip. Labels corrected to Precipitation.
plot(MeterData21$DecYear, MeterData21$Precip,
type = "h",
lwd = 3,
col = "tomato3",
xlab = "Decimal Year",
ylab = "Precipitation (mm)",
main = "Precipitation in Clinton, NY in 2021",
sub = "Data collected from Hamilton College weather station")
# Precipitation as daily columns (ggplot version, x = day of year).
ggplot(MeterData21, aes(x = DOY, y = Precip))+
geom_col(col = "Deepskyblue3", fill = "Deepskyblue3")+
labs(title = "Precipitation in 2021", subtitle = "Data from METER Sensor",
y = "Precipitation (mm)", x = "Day of Year")+
theme_classic()
| /Analysis.R | no_license | rachelpikeee/Campus_Weather | R | false | false | 4,322 | r | # Started 4/21/2021
# Initial Visualizations for Campus Weather Data
# NOTE(review): this whole script appears twice in the dump; this is the second copy.
library(dplyr)
library(ggplot2)
# Reading in latest versions of data
# CHANGE THE VERSION IN THE USER INPUTS ALL FILE TO MOST RECENT IN THE FOLDER
# DirFinal and user are assumed to be defined by an earlier setup script — TODO confirm.
UserInputsAll <- read.csv(paste0(DirFinal[user], "/UserInputsAllv6.csv"), colClasses = c("NULL", rep(NA,7)))
MeterData <- read.csv(paste0(DirFinal[user], "/MeterData", UserInputsAll[nrow(UserInputsAll), 1], ".csv"), colClasses = c("NULL", rep(NA, 31)))
MeterUnits <- read.csv(paste0(DirFinal[user], "/MeterUnits", UserInputsAll[nrow(UserInputsAll), 1], ".csv"))
NAcount <- read.csv(paste0(DirFinal[user], "/NACount", UserInputsAll[nrow(UserInputsAll), 1], ".csv"), colClasses = c("NULL", rep(NA,5)))
NAcount$Date <- as.Date(NAcount$Date)
TomstSData <- read.csv(paste0(DirFinal[user], "/TomstSData", UserInputsAll[nrow(UserInputsAll), 1], ".csv"), colClasses = c("NULL", rep(NA, 15)))
Tomst5mData <- read.csv(paste0(DirFinal[user], "/Tomst5mData", UserInputsAll[nrow(UserInputsAll), 1], ".csv"), colClasses = c("NULL", rep(NA, 12)))
Tomst25mData <- read.csv(paste0(DirFinal[user], "/Tomst25mData", UserInputsAll[nrow(UserInputsAll), 1], ".csv"), colClasses = c("NULL", rep(NA, 12)))
# METER 2021
MeterData21 <- MeterData[MeterData$Year == 2021, ]
# Air temperature time series from the METER sensor.
ggplot(MeterData21, aes(x = DecYear, y = AirTemp))+
geom_line(col = "Firebrick4")+
labs(title = "Air Temperature in 2021", subtitle = "Data from METER Sensor",
y = "Temperature (˚C)", x = "Decimal Year")+
theme_classic()
# TOMST 2021
Tomst5m21 <- Tomst5mData[Tomst5mData$Year == 2021, ]
# Rename columns to match the METER naming (AirTemp, DecYear, ...).
colnames(Tomst5m21) <- c("Date", "TZ", "AirTemp","Shake", "Error","TempFlag",
"Date_Format", "DOY","Year", "Hour", "Minute", "DecYear")
ggplot(Tomst5m21, aes(x = DecYear, y = AirTemp))+
geom_line(col = "Deepskyblue3")+
labs(title = "Air Temperature in 2021", subtitle = "Data from TOMST Sensor",
y = "Temperature (˚C)", x = "Decimal Year")+
theme_classic()
# Both on same plot
# Base-graphics overlay: METER as the base line, TOMST drawn on top.
plot(MeterData21$DecYear, MeterData21$AirTemp,
type = "l",
lwd = 2,
col = "tomato3",
xlab = "Decimal Year",
ylab = "Temperature (Celsius)",
main = "Air Temperature in Clinton, NY in 2021",
sub = "Data collected from Hamilton College weather station")
lines(Tomst5m21$DecYear, Tomst5m21$AirTemp,
lwd = 2,
col = alpha("skyblue", 0.5))
legend("topleft", c("METER Data", "TOMST Data"), col = c("tomato3","skyblue"), lwd = 2, bty="n")
# Looking at weird METER data behavior
# Zoom into a short window (DecYear 2021.16-2021.19) to inspect an anomaly.
MeterDataSub <- MeterData[MeterData$DecYear>2021.16 & MeterData$DecYear<2021.19, ]
TomstDataSub <- Tomst5mData[Tomst5mData$DecYear>2021.16 & Tomst5mData$DecYear<2021.19, ]
# Both on same plot
plot(MeterDataSub$DecYear, MeterDataSub$AirTemp,
type = "l",
lwd = 2,
col = "tomato3",
xlab = "Decimal Year",
ylab = "Temperature (Celsius)",
main = "Air Temperature in Clinton, NY in 2021",
sub = "Data collected from Hamilton College weather station")
# The subset keeps the original TOMST column names, hence Temp1 here.
lines(TomstDataSub$DecYear, TomstDataSub$Temp1,
lwd = 2,
col = alpha("skyblue", 0.5))
legend("topleft", c("METER Data", "TOMST Data"), col = c("tomato3","skyblue"), lwd = 2, bty="n")
# Plot of solar radiation
plot(MeterData21$DecYear, MeterData21$SolRad,
type = "l",
lwd = 2,
col = "tomato3",
xlab = "Decimal Year",
ylab = "Solar Radiation (W/m^2)",
main = "Solar Radiation in Clinton, NY in 2021",
sub = "Data collected from Hamilton College weather station")
# ggplot version
ggplot(MeterData21, aes(x = DecYear, y = SolRad))+
geom_line(col = "Deepskyblue3")+
labs(title = "Solar Radiation in 2021", subtitle = "Data from METER Sensor",
y = "Solar Radiation (W/m^2)", x = "Decimal Year")+
theme_classic()
# Plot of precipitation
# FIX: labels were copy-pasted from the solar-radiation plot although the
# variable plotted is Precip. Corrected to Precipitation.
plot(MeterData21$DecYear, MeterData21$Precip,
type = "h",
lwd = 3,
col = "tomato3",
xlab = "Decimal Year",
ylab = "Precipitation (mm)",
main = "Precipitation in Clinton, NY in 2021",
sub = "Data collected from Hamilton College weather station")
# Precipitation as daily columns (ggplot version, x = day of year).
ggplot(MeterData21, aes(x = DOY, y = Precip))+
geom_col(col = "Deepskyblue3", fill = "Deepskyblue3")+
labs(title = "Precipitation in 2021", subtitle = "Data from METER Sensor",
y = "Precipitation (mm)", x = "Day of Year")+
theme_classic()
|
% TODO File path/AT.beam.par.technical.to.physical.Rd
\name{AT.beam.par.technical.to.physical}
\alias{AT.beam.par.technical.to.physical}
\title{AT.beam.par.technical.to.physical}
\description{Converts technical, accelerator parameters of a symmetric, double
lateral Gaussian shape beam, i.e.
total number of particles and FWHM to
physical beam parameters, i.e.
central (=peak) fluence and width (= 1 standard deviation)
}
\usage{AT.beam.par.technical.to.physical(N, FWHM.mm)
}
\arguments{
\item{N}{ absolute particle numbers (array of size n).}
\item{FWHM.mm}{ FWHMs (in mm) (array of size n).}
}
\value{
% TODO proper return definition of lists!!! ADD
% NUMBER_OF_FIELD_COMPONENT_DESCRIBTION AGAIN!!!)
\item{fluence.cm2}{ resulting fluence in beam center (array of size n)}
\item{sigma.cm}{ resulting beam width stdev (array of size n)}
}
\seealso{
View the C source code here:
\url{http://sourceforge.net/apps/trac/libamtrack/browser/tags/0.6.3/src/AT_PhysicsRoutines.c#L443}
}
\examples{
# Get peak dose of a 142.66 MeV protons in Alox
# from technical beam parameters
peak.fluence.cm2 <- AT.beam.par.technical.to.physical( N = 3.2e8,
FWHM.mm = 15.2)[1]
AT.dose.Gy.from.fluence.cm2( E.MeV.u = 142.66,
particle.no =
AT.particle.no.from.particle.name("1H"),
material.no =
AT.material.no.from.material.name("Aluminum Oxide"),
fluence.cm2 = peak.fluence.cm2,
stopping.power.source.no = 2)
}
| /man/AT.beam.par.technical.to.physical.Rd | no_license | cran/libamtrack | R | false | false | 1,591 | rd | % TODO File path/AT.beam.par.technical.to.physical.Rd
\name{AT.beam.par.technical.to.physical}
\alias{AT.beam.par.technical.to.physical}
\title{AT.beam.par.technical.to.physical}
\description{Converts technical, accelerator parameters of a symmetric, double
lateral Gaussian shape beam, i.e.
total number of particles and FWHM to
physical beam parameters, i.e.
central (=peak) fluence and width (= 1 standard deviation)
}
\usage{AT.beam.par.technical.to.physical(N, FWHM.mm)
}
\arguments{
\item{N}{ absolute particle numbers (array of size n).}
\item{FWHM.mm}{ FWHMs (in mm) (array of size n).}
}
\value{
% TODO proper return definition of lists!!! ADD
% NUMBER_OF_FIELD_COMPONENT_DESCRIBTION AGAIN!!!)
\item{fluence.cm2}{ resulting fluence in beam center (array of size n)}
\item{sigma.cm}{ resulting beam width stdev (array of size n)}
}
\seealso{
View the C source code here:
\url{http://sourceforge.net/apps/trac/libamtrack/browser/tags/0.6.3/src/AT_PhysicsRoutines.c#L443}
}
\examples{
# Get peak dose of a 142.66 MeV protons in Alox
# from technical beam parameters
peak.fluence.cm2 <- AT.beam.par.technical.to.physical( N = 3.2e8,
FWHM.mm = 15.2)[1]
AT.dose.Gy.from.fluence.cm2( E.MeV.u = 142.66,
particle.no =
AT.particle.no.from.particle.name("1H"),
material.no =
AT.material.no.from.material.name("Aluminum Oxide"),
fluence.cm2 = peak.fluence.cm2,
stopping.power.source.no = 2)
}
|
# 04.03.18
# @author Christoph Schmidt <schmidtchristoph@@users.noreply.github.com>
library(testthat)
context("deleteField")
# Verifies that deleteField() reports the right deleted line numbers and that
# the trailing comma on the line preceding a deleted field is handled.
# NOTE: the steps are order-dependent — each deleteField() call writes a new
# *_pr.bib file that later steps read.
test_that("correct lines are deleted - pt. 1", {
filePath <- system.file("testdata/test.bib", package = "bibDelete")
r <- deleteField(filePath, "annote", verbose = TRUE)
expect_equal(r$linesDel, 35)
filePath2 <- system.file("testdata/test_pr.bib", package = "bibDelete")
f <- readLines(filePath2)
expect_true( stringr::str_sub(f[34], -1L, -1L)!="," )
r <- deleteField(filePath, "month", verbose = TRUE)
expect_equal(r$linesDel, c(12, 23, 34))
filePath2 <- system.file("testdata/test_pr.bib", package = "bibDelete")
f <- readLines(filePath2)
expect_true( stringr::str_sub(f[11], -1L, -1L)!="," )
expect_true( stringr::str_sub(f[21], -1L, -1L)!="," )
expect_true( stringr::str_sub(f[31], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[32], -1L, -1L)!="," )
r <- deleteField(filePath, "month", verbose = TRUE) # month + annote field removed
filePath2 <- system.file("testdata/test_pr.bib", package = "bibDelete")
r <- deleteField(filePath2, "annote", verbose = TRUE)
expect_equal(r$linesDel, c(32))
filePath3 <- system.file("testdata/test_pr_pr.bib", package = "bibDelete") # month + annote field removed
f <- readLines(filePath3)
expect_true( stringr::str_sub(f[11], -1L, -1L)!="," )
expect_true( stringr::str_sub(f[21], -1L, -1L)!="," )
expect_true( stringr::str_sub(f[31], -1L, -1L)!="," )
r <- deleteField(filePath, "annote", verbose = TRUE) # annote + month field removed
filePath2 <- system.file("testdata/test_pr.bib", package = "bibDelete")
r <- deleteField(filePath2, "month", verbose = TRUE)
expect_equal(r$linesDel, c(12, 23, 34))
filePath3 <- system.file("testdata/test_pr_pr.bib", package = "bibDelete") # annote + month field removed
f <- readLines(filePath3)
expect_true( stringr::str_sub(f[11], -1L, -1L)!="," )
expect_true( stringr::str_sub(f[21], -1L, -1L)!="," )
expect_true( stringr::str_sub(f[31], -1L, -1L)!="," )
file.remove(filePath2, filePath3)
})
# Same checks as pt. 1, but on test2.bib whose annote fields span multiple
# lines — all lines of a multi-line field must be reported as deleted.
test_that("correct lines are deleted - pt. 2", {
filePath <- system.file("testdata/test2.bib", package = "bibDelete")
r <- deleteField(filePath, "annote", verbose = TRUE)
expect_equal(r$linesDel, c(9, 10, 11, 22, 23, 24, 25, 26, 27, 28))
filePath2 <- system.file("testdata/test2_pr.bib", package = "bibDelete")
f <- readLines(filePath2)
expect_true( stringr::str_sub(f[8], -1L, -1L)!="," )
expect_true( stringr::str_sub(f[18], -1L, -1L)!="," )
r <- deleteField(filePath, "month", verbose = TRUE)
expect_equal(r$linesDel, c(8, 21))
filePath2 <- system.file("testdata/test2_pr.bib", package = "bibDelete")
f <- readLines(filePath2)
expect_true( stringr::str_sub(f[7], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[10], -1L, -1L)!="," )
expect_true( stringr::str_sub(f[19], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[26], -1L, -1L)!="," )
r <- deleteField(filePath, "month", verbose = TRUE) # month + annote field removed
filePath2 <- system.file("testdata/test2_pr.bib", package = "bibDelete")
r <- deleteField(filePath2, "annote", verbose = TRUE)
expect_equal(r$linesDel, c(8, 9, 10, 20, 21, 22, 23, 24, 25, 26))
filePath3 <- system.file("testdata/test2_pr_pr.bib", package = "bibDelete") # month + annote field removed
f <- readLines(filePath3)
expect_true( stringr::str_sub(f[6], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[7], -1L, -1L)!="," )
expect_true( stringr::str_sub(f[15], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[16], -1L, -1L)!="," )
r <- deleteField(filePath, "annote", verbose = TRUE) # annote + month field removed
filePath2 <- system.file("testdata/test2_pr.bib", package = "bibDelete")
r <- deleteField(filePath2, "month", verbose = TRUE)
expect_equal(r$linesDel, c(8, 18))
filePath3 <- system.file("testdata/test2_pr_pr.bib", package = "bibDelete") # annote + month field removed
f <- readLines(filePath3)
expect_true( stringr::str_sub(f[6], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[7], -1L, -1L)!="," )
expect_true( stringr::str_sub(f[15], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[16], -1L, -1L)!="," )
file.remove(filePath2, filePath3)
})
# Exercises deleteField() on test3.bib, a deliberately non-standard bib file
# (duplicate fields, multiple closing delimiters, a custom "test" field type).
# Files are copied into the working directory because the intermediate
# *_pr.bib / *_pr_pr.bib outputs are created and re-read there.
test_that("correct lines are deleted - pt. 3", {
filePath <- system.file("testdata/test3.bib", package = "bibDelete")
file.copy(filePath, "test3.bib")
r <- deleteField("test3.bib", "annote", verbose = TRUE)
expect_equal(r$linesDel, c(11, 12, 13, 14, 15, 16, 17))
f <- readLines("test3_pr.bib")
expect_true( stringr::str_sub(f[10], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[11], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[12], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[13], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[14], -1L, -1L)=="," ) # test3.bib is not standard conform: normally there should be just a single delimiter "}"
r <- deleteField("test3.bib", "month", verbose = TRUE)
expect_equal(r$linesDel, c(8, 9, 10, 19, 20, 21))
f <- readLines("test3_pr.bib")
expect_true( stringr::str_sub(f[7], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[13], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[15], -1L, -1L)!="," )
r <- deleteField("test3.bib", "month", verbose = TRUE) # month + annote field removed
r <- deleteField("test3_pr.bib", "annote", verbose = TRUE)
expect_equal(r$linesDel, c(8, 9, 10, 11, 12, 13, 14))
f <- readLines("test3_pr_pr.bib") # month + annote field removed
expect_true( stringr::str_sub(f[7], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[8], -1L, -1L)!="," )
r <- deleteField("test3.bib", "annote", verbose = TRUE) # annote + month field removed
expect_equal(r$linesDel, 11:17)
r2 <- deleteField("test3_pr.bib", "month", verbose = TRUE)
expect_equal(r2$linesDel, c(8, 9, 10, 11, 12, 13, 14))
f <- readLines("test3_pr_pr.bib") # annote + month field removed
expect_true( stringr::str_sub(f[1], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[2], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[3], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[4], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[5], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[6], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[7], -1L, -1L)=="," ) # a very rare bug in 'removeCommaOnLineBeforeSearchedField()', only caused by severely non-standard bib entries (i.e. two "month" fields running over multiple lines each, separated by a non-standard "test" field type); this bug can most likely only be fixed in a general way be running over entire file, line by line and finding start and end of each bib entry; then checking the last fields ending of each bib entry for a remaining comma
expect_true( stringr::str_sub(f[8], -1L, -1L)=="}" )
r <- deleteField("test3.bib", "annote", verbose = TRUE) # annote + month field removed + taking care of custom field type definition
expect_equal(r$linesDel, 11:17)
r2 <- deleteField("test3_pr.bib", "month", verbose = TRUE, addCustomField = "test")
expect_equal(r2$linesDel, c(8, 9, 10, 12, 13, 14))
f <- readLines("test3_pr_pr.bib") # annote + month field removed
expect_true( stringr::str_sub(f[1], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[2], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[3], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[4], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[5], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[6], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[7], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[8], -1L, -1L)!="," )
expect_true( stringr::str_sub(f[9], -1L, -1L)=="}" )
r <- deleteField("test3.bib", "journal", verbose = TRUE)
expect_equal(r$linesDel, 4)
f <- readLines("test3_pr.bib")
expect_true( stringr::str_sub(f[3], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[4], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[5], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[20], -1L, -1L)=="," ) # should not be the case, but 'test3.bib' is not standard conform and the last field type ('month') wasn't requested to be deleted
file.remove("test3.bib", "test3_pr.bib", "test3_pr_pr.bib")
})
# Full-file golden tests: after deleting a field, the produced *_pr.bib must
# match the expected file content line for line. Chained deletions
# (annote -> month -> author) exercise repeated processing of the output file.
test_that("correct output file is generated", {
filePath <- system.file("testdata/test.bib", package = "bibDelete")
file.copy(filePath, "test.bib")
deleteField("test.bib", "annote")
f <- readLines("test_pr.bib")
f_expect <- c("%% Created using Papers on Thu, 11 Aug 2016.", "%% http://papersapp.com/papers/",
"", "@article{Estrada:2010ka,", "author = {Estrada, Ernesto},",
"title = {{Quantifying network heterogeneity}},", "journal = {Physical Review E},",
"year = {2010},", "volume = {82},", "number = {6},", "pages = {066102},",
"month = dec", "}", "", "@article{Freeman:1977kx,", "author = {Freeman, Linton C},",
"title = {{A set of measures of centrality based on betweenness}},",
"journal = {Sociometry},", "year = {1977},", "volume = {40},",
"number = {1},", "pages = {35},", "month = mar", "}", "", "@article{Krzywinski:2012jj,",
"author = {Krzywinski, Martin and Birol, Inanc and Jones, Steven J M and Marra, Marco A},",
"title = {{Hive plots-rational approach to visualizing networks}},",
"journal = {Briefings in Bioinformatics},", "year = {2012},",
"volume = {13},", "number = {5},", "pages = {627--644},", "month = sep",
"}")
expect_equal(f, f_expect)
filePath <- system.file("testdata/test.bib", package = "bibDelete")
file.copy(filePath, "test.bib")
deleteField("test.bib", "annote")
deleteField("test_pr.bib", "month")
deleteField("test_pr_pr.bib", "author")
f <- readLines("test_pr_pr_pr.bib")
f_expect <- c("%% Created using Papers on Thu, 11 Aug 2016.", "%% http://papersapp.com/papers/",
"", "@article{Estrada:2010ka,", "title = {{Quantifying network heterogeneity}},",
"journal = {Physical Review E},", "year = {2010},", "volume = {82},",
"number = {6},", "pages = {066102}", "}", "", "@article{Freeman:1977kx,",
"title = {{A set of measures of centrality based on betweenness}},",
"journal = {Sociometry},", "year = {1977},", "volume = {40},",
"number = {1},", "pages = {35}", "}", "", "@article{Krzywinski:2012jj,",
"title = {{Hive plots-rational approach to visualizing networks}},",
"journal = {Briefings in Bioinformatics},", "year = {2012},",
"volume = {13},", "number = {5},", "pages = {627--644}", "}")
expect_equal(f, f_expect)
file.remove(c("test.bib", "test_pr.bib", "test_pr_pr.bib", "test_pr_pr_pr.bib"))
filePath <- system.file("testdata/test.bib", package = "bibDelete")
file.copy(filePath, "test.bib")
deleteField("test.bib", "month")
f <- readLines("test_pr.bib")
f_expect <- c("%% Created using Papers on Thu, 11 Aug 2016.", "%% http://papersapp.com/papers/",
"", "@article{Estrada:2010ka,", "author = {Estrada, Ernesto},",
"title = {{Quantifying network heterogeneity}},", "journal = {Physical Review E},",
"year = {2010},", "volume = {82},", "number = {6},", "pages = {066102}",
"}", "", "@article{Freeman:1977kx,", "author = {Freeman, Linton C},",
"title = {{A set of measures of centrality based on betweenness}},",
"journal = {Sociometry},", "year = {1977},", "volume = {40},",
"number = {1},", "pages = {35}", "}", "", "@article{Krzywinski:2012jj,",
"author = {Krzywinski, Martin and Birol, Inanc and Jones, Steven J M and Marra, Marco A},",
"title = {{Hive plots-rational approach to visualizing networks}},",
"journal = {Briefings in Bioinformatics},", "year = {2012},",
"volume = {13},", "number = {5},", "pages = {627--644},", "annote = {{\\#} hive plots provide visual signatures of large networks}",
"}")
expect_equal(f, f_expect)
filePath <- system.file("testdata/test.bib", package = "bibDelete")
file.copy(filePath, "test.bib")
deleteField("test.bib", "title")
f <- readLines("test_pr.bib")
f_expect <- c("%% Created using Papers on Thu, 11 Aug 2016.", "%% http://papersapp.com/papers/",
"", "@article{Estrada:2010ka,", "author = {Estrada, Ernesto},",
"journal = {Physical Review E},", "year = {2010},", "volume = {82},",
"number = {6},", "pages = {066102},", "month = dec", "}", "",
"@article{Freeman:1977kx,", "author = {Freeman, Linton C},",
"journal = {Sociometry},", "year = {1977},", "volume = {40},",
"number = {1},", "pages = {35},", "month = mar", "}", "", "@article{Krzywinski:2012jj,",
"author = {Krzywinski, Martin and Birol, Inanc and Jones, Steven J M and Marra, Marco A},",
"journal = {Briefings in Bioinformatics},", "year = {2012},",
"volume = {13},", "number = {5},", "pages = {627--644},", "month = sep,",
"annote = {{\\#} hive plots provide visual signatures of large networks}",
"}")
expect_equal(f, f_expect)
file.remove(c("test.bib", "test_pr.bib"))
})
test_that("correct output file is generated--pt2", {
# Scenario 1: deleting "annote" must drop the multi-line annote blocks and
# strip the trailing comma from each entry's new last field.
filePath <- system.file("testdata/test2.bib", package = "bibDelete")
file.copy(filePath, "test2.bib")
deleteField("test2.bib", "annote")
f <- readLines("test2_pr.bib")
f_expect <- c("@article{Lancichinetti:2012kx,", "author = {Lancichinetti, Andrea and Fortunato, Santo},",
"title = {{Consensus clustering in complex networks}},", "journal = {Scientific Reports},",
"year = {2012},", "volume = {2},", "pages = {336},", "month = mar",
"}", "", "@article{Peel:2014ul,", "author = {Peel, Leto and Clauset, Aaron},",
"title = {{Detecting change points in the large-scale structure of evolving networks}},",
"journal = {arXiv},", "year = {2014},", "eprint = {1403.0989},",
"eprinttype = {arxiv},", "month = mar", "}")
expect_equal(f, f_expect)
# Scenario 2: chained deletions (annote, then month, then author); each pass
# appends "_pr" to the output file name.
filePath <- system.file("testdata/test2.bib", package = "bibDelete")
file.copy(filePath, "test2.bib")
deleteField("test2.bib", "annote")
deleteField("test2_pr.bib", "month")
deleteField("test2_pr_pr.bib", "author")
f <- readLines("test2_pr_pr_pr.bib")
f_expect <- c("@article{Lancichinetti:2012kx,", "title = {{Consensus clustering in complex networks}},",
"journal = {Scientific Reports},", "year = {2012},", "volume = {2},",
"pages = {336}", "}", "", "@article{Peel:2014ul,", "title = {{Detecting change points in the large-scale structure of evolving networks}},",
"journal = {arXiv},", "year = {2014},", "eprint = {1403.0989},",
"eprinttype = {arxiv}", "}")
expect_equal(f, f_expect)
file.remove(c("test2.bib", "test2_pr.bib", "test2_pr_pr.bib", "test2_pr_pr_pr.bib"))
# Scenario 3: deleting "month" keeps the multi-line annote blocks intact.
filePath <- system.file("testdata/test2.bib", package = "bibDelete")
file.copy(filePath, "test2.bib")
deleteField("test2.bib", "month")
f <- readLines("test2_pr.bib")
f_expect <- c("@article{Lancichinetti:2012kx,", "author = {Lancichinetti, Andrea and Fortunato, Santo},",
"title = {{Consensus clustering in complex networks}},", "journal = {Scientific Reports},",
"year = {2012},", "volume = {2},", "pages = {336},", "annote = {{\\#} module detection algorithms might be dependent on random seeds",
"", "{\\#} nr of runs r = nr of partitions used for the consensus matrix}",
"}", "", "@article{Peel:2014ul,", "author = {Peel, Leto and Clauset, Aaron},",
"title = {{Detecting change points in the large-scale structure of evolving networks}},",
"journal = {arXiv},", "year = {2014},", "eprint = {1403.0989},",
"eprinttype = {arxiv},", "annote = {{\\#} see conference proceeding papers3://publication/uuid/739AD14E-73B1-4A87-8A5B-6DBD847D0F47",
"", "{\\#} corresponding Python code at http://gdriv.es/letopeel/code.html",
"", "{\\#} we found that changes associated with two communities merging or with one of several communities losing its internal connections ({\\textquotedblleft}fragmentation{\\textquotedblright}) were more difficult to accurately detect than those associated with one community splitting in two or with many singletons connecting to form a new community ({\\textquotedblleft}formation{\\textquotedblright})",
"", "{\\#} change-point methods based on network measures like the mean degree, clustering coeffi- cient, or mean geodesic path length performed poorly, yielding high false negative rates even for large structural changes}",
"}")
expect_equal(f, f_expect)
# Scenario 4: deleting "title" leaves every other field (incl. annote) as-is.
filePath <- system.file("testdata/test2.bib", package = "bibDelete")
file.copy(filePath, "test2.bib")
deleteField("test2.bib", "title")
f <- readLines("test2_pr.bib")
f_expect <- c("@article{Lancichinetti:2012kx,", "author = {Lancichinetti, Andrea and Fortunato, Santo},",
"journal = {Scientific Reports},", "year = {2012},", "volume = {2},",
"pages = {336},", "month = mar,", "annote = {{\\#} module detection algorithms might be dependent on random seeds",
"", "{\\#} nr of runs r = nr of partitions used for the consensus matrix}",
"}", "", "@article{Peel:2014ul,", "author = {Peel, Leto and Clauset, Aaron},",
"journal = {arXiv},", "year = {2014},", "eprint = {1403.0989},",
"eprinttype = {arxiv},", "month = mar,", "annote = {{\\#} see conference proceeding papers3://publication/uuid/739AD14E-73B1-4A87-8A5B-6DBD847D0F47",
"", "{\\#} corresponding Python code at http://gdriv.es/letopeel/code.html",
"", "{\\#} we found that changes associated with two communities merging or with one of several communities losing its internal connections ({\\textquotedblleft}fragmentation{\\textquotedblright}) were more difficult to accurately detect than those associated with one community splitting in two or with many singletons connecting to form a new community ({\\textquotedblleft}formation{\\textquotedblright})",
"", "{\\#} change-point methods based on network measures like the mean degree, clustering coeffi- cient, or mean geodesic path length performed poorly, yielding high false negative rates even for large structural changes}",
"}")
expect_equal(f, f_expect)
file.remove(c("test2.bib", "test2_pr.bib"))
})
test_that("correct output file is generated--pt3", {
# test3.bib is deliberately non-standard (repeated "month" fields spanning
# several lines, plus a custom "test" field); overwrite = TRUE because the
# working copy may linger from an earlier run.
filePath <- system.file("testdata/test3.bib", package = "bibDelete")
file.copy(filePath, "test3.bib", overwrite = TRUE)
deleteField("test3.bib", "annote")
f <- readLines("test3_pr.bib")
f_expect <- c("@article{Peel:2014ul,", "author = {Peel, Leto and Clauset, Aaron},",
"title = {{Detecting change points in the large-scale structure of evolving networks}},",
"journal = {arXiv},", "year = {2014},", "eprint = {1403.0989},",
"eprinttype = {arxiv},", "month = mar", "dec", "nov,", "test = {testfield},",
"month = mar,", "dec,", "nov,", "}")
expect_equal(f, f_expect)
# Chained deletions with addCustomField = "test" so the parser treats the
# custom "test" field as a known field type.
filePath <- system.file("testdata/test3.bib", package = "bibDelete")
file.copy(filePath, "test3.bib", overwrite = TRUE)
deleteField("test3.bib", "annote", addCustomField = "test")
deleteField("test3_pr.bib", "month", addCustomField = "test")
deleteField("test3_pr_pr.bib", "author", addCustomField = "test")
f <- readLines("test3_pr_pr_pr.bib")
f_expect <- c("@article{Peel:2014ul,", "title = {{Detecting change points in the large-scale structure of evolving networks}},",
"journal = {arXiv},", "year = {2014},", "eprint = {1403.0989},",
"eprinttype = {arxiv},", "test = {testfield}", "}")
expect_equal(f, f_expect)
file.remove(c("test3.bib", "test3_pr.bib", "test3_pr_pr.bib", "test3_pr_pr_pr.bib"))
# Deleting "month" removes both multi-line month blocks.
filePath <- system.file("testdata/test3.bib", package = "bibDelete")
file.copy(filePath, "test3.bib", overwrite = TRUE)
deleteField("test3.bib", "month")
f <- readLines("test3_pr.bib")
f_expect <- c("@article{Peel:2014ul,", "author = {Peel, Leto and Clauset, Aaron},",
"title = {{Detecting change points in the large-scale structure of evolving networks}},",
"journal = {arXiv},", "year = {2014},", "eprint = {1403.0989},",
"eprinttype = {arxiv},", "annote = {{\\#} see conference proceeding papers3://publication/uuid/739AD14E-73B1-4A87-8A5B-6DBD847D0F47",
"here, it goes on,", "", "{\\#} corresponding Python code at http://gdriv.es/letopeel/code.html",
"", "{\\#} change-point methods based on network measures like the mean degree, clustering coeffi- cient, or mean geodesic path length performed poorly, yielding high false negative rates even for large structural changes},",
"", "test = {testfield}", "}")
expect_equal(f, f_expect)
# Deleting "title" leaves the malformed month/test fields untouched.
filePath <- system.file("testdata/test3.bib", package = "bibDelete")
file.copy(filePath, "test3.bib", overwrite = TRUE)
deleteField("test3.bib", "title")
f <- readLines("test3_pr.bib")
f_expect <- c("@article{Peel:2014ul,", "author = {Peel, Leto and Clauset, Aaron},",
"journal = {arXiv},", "year = {2014},", "eprint = {1403.0989},",
"eprinttype = {arxiv},", "month = mar", "dec", "nov,", "annote = {{\\#} see conference proceeding papers3://publication/uuid/739AD14E-73B1-4A87-8A5B-6DBD847D0F47",
"here, it goes on,", "", "{\\#} corresponding Python code at http://gdriv.es/letopeel/code.html",
"", "{\\#} change-point methods based on network measures like the mean degree, clustering coeffi- cient, or mean geodesic path length performed poorly, yielding high false negative rates even for large structural changes},",
"", "test = {testfield},", "month = mar,", "dec,", "nov,", "}")
expect_equal(f, f_expect)
file.remove(c("test3.bib", "test3_pr.bib"))
})
test_that("correct output file is generated--pt4", {
# Single-entry fixture: deleting "annote" must leave a clean, comma-correct
# entry.
filePath <- system.file("testdata/test4.bib", package = "bibDelete")
file.copy(filePath, "test4.bib", overwrite = TRUE)
deleteField("test4.bib", "annote")
f <- readLines("test4_pr.bib")
f_expect <- c("@article{Nuzzo:2014bp,", "author = {Nuzzo, Regina},", "title = {{Statistical errors}},",
"journal = {Nature},", "year = {2014},", "volume = {506},", "number = {7487},",
"pages = {150--152}", "}")
expect_equal(f, f_expect)
# Chained deletions: annote, then author, then year.
filePath <- system.file("testdata/test4.bib", package = "bibDelete")
file.copy(filePath, "test4.bib", overwrite = TRUE)
deleteField("test4.bib", "annote")
deleteField("test4_pr.bib", "author")
deleteField("test4_pr_pr.bib", "year")
f <- readLines("test4_pr_pr_pr.bib")
f_expect <- c("@article{Nuzzo:2014bp,", "title = {{Statistical errors}},",
"journal = {Nature},", "volume = {506},", "number = {7487},",
"pages = {150--152}", "}")
expect_equal(f, f_expect)
# NOTE(review): "test4_original.bib" is removed here but is not created in
# this test — presumably a side product of deleteField(); verify.
file.remove(c("test4_original.bib", "test4.bib", "test4_pr.bib", "test4_pr_pr.bib", "test4_pr_pr_pr.bib"))
})
| /tests/testthat/test_deleteField.R | permissive | schmidtchristoph/bibDelete | R | false | false | 24,561 | r | # 04.03.18
# @author Christoph Schmidt <schmidtchristoph@@users.noreply.github.com>
library(testthat)
context("deleteField")
test_that("correct lines are deleted - pt. 1", {
# deleteField(..., verbose = TRUE) returns the 1-based line numbers it
# deleted in r$linesDel; test.bib has a single "annote" field on line 35.
filePath <- system.file("testdata/test.bib", package = "bibDelete")
r <- deleteField(filePath, "annote", verbose = TRUE)
expect_equal(r$linesDel, 35)
# After deleting an entry's last field, the new last field must not end
# with a trailing comma.
filePath2 <- system.file("testdata/test_pr.bib", package = "bibDelete")
f <- readLines(filePath2)
expect_true( stringr::str_sub(f[34], -1L, -1L)!="," )
# "month" appears once per entry, on lines 12, 23 and 34.
r <- deleteField(filePath, "month", verbose = TRUE)
expect_equal(r$linesDel, c(12, 23, 34))
filePath2 <- system.file("testdata/test_pr.bib", package = "bibDelete")
f <- readLines(filePath2)
expect_true( stringr::str_sub(f[11], -1L, -1L)!="," )
expect_true( stringr::str_sub(f[21], -1L, -1L)!="," )
expect_true( stringr::str_sub(f[31], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[32], -1L, -1L)!="," )
r <- deleteField(filePath, "month", verbose = TRUE) # month + annote field removed
filePath2 <- system.file("testdata/test_pr.bib", package = "bibDelete")
r <- deleteField(filePath2, "annote", verbose = TRUE)
expect_equal(r$linesDel, c(32))
filePath3 <- system.file("testdata/test_pr_pr.bib", package = "bibDelete") # month + annote field removed
f <- readLines(filePath3)
expect_true( stringr::str_sub(f[11], -1L, -1L)!="," )
expect_true( stringr::str_sub(f[21], -1L, -1L)!="," )
expect_true( stringr::str_sub(f[31], -1L, -1L)!="," )
# Same pair of deletions in the opposite order must yield the same
# comma-correct result.
r <- deleteField(filePath, "annote", verbose = TRUE) # annote + month field removed
filePath2 <- system.file("testdata/test_pr.bib", package = "bibDelete")
r <- deleteField(filePath2, "month", verbose = TRUE)
expect_equal(r$linesDel, c(12, 23, 34))
filePath3 <- system.file("testdata/test_pr_pr.bib", package = "bibDelete") # annote + month field removed
f <- readLines(filePath3)
expect_true( stringr::str_sub(f[11], -1L, -1L)!="," )
expect_true( stringr::str_sub(f[21], -1L, -1L)!="," )
expect_true( stringr::str_sub(f[31], -1L, -1L)!="," )
file.remove(filePath2, filePath3)
})
test_that("correct lines are deleted - pt. 2", {
# test2.bib has multi-line "annote" fields, so a single deletion spans
# several consecutive line numbers.
filePath <- system.file("testdata/test2.bib", package = "bibDelete")
r <- deleteField(filePath, "annote", verbose = TRUE)
expect_equal(r$linesDel, c(9, 10, 11, 22, 23, 24, 25, 26, 27, 28))
filePath2 <- system.file("testdata/test2_pr.bib", package = "bibDelete")
f <- readLines(filePath2)
expect_true( stringr::str_sub(f[8], -1L, -1L)!="," )
expect_true( stringr::str_sub(f[18], -1L, -1L)!="," )
# "month" is a single-line field here (lines 8 and 21).
r <- deleteField(filePath, "month", verbose = TRUE)
expect_equal(r$linesDel, c(8, 21))
filePath2 <- system.file("testdata/test2_pr.bib", package = "bibDelete")
f <- readLines(filePath2)
expect_true( stringr::str_sub(f[7], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[10], -1L, -1L)!="," )
expect_true( stringr::str_sub(f[19], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[26], -1L, -1L)!="," )
r <- deleteField(filePath, "month", verbose = TRUE) # month + annote field removed
filePath2 <- system.file("testdata/test2_pr.bib", package = "bibDelete")
r <- deleteField(filePath2, "annote", verbose = TRUE)
expect_equal(r$linesDel, c(8, 9, 10, 20, 21, 22, 23, 24, 25, 26))
filePath3 <- system.file("testdata/test2_pr_pr.bib", package = "bibDelete") # month + annote field removed
f <- readLines(filePath3)
expect_true( stringr::str_sub(f[6], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[7], -1L, -1L)!="," )
expect_true( stringr::str_sub(f[15], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[16], -1L, -1L)!="," )
# Opposite deletion order must produce the same comma-correct output.
r <- deleteField(filePath, "annote", verbose = TRUE) # annote + month field removed
filePath2 <- system.file("testdata/test2_pr.bib", package = "bibDelete")
r <- deleteField(filePath2, "month", verbose = TRUE)
expect_equal(r$linesDel, c(8, 18))
filePath3 <- system.file("testdata/test2_pr_pr.bib", package = "bibDelete") # annote + month field removed
f <- readLines(filePath3)
expect_true( stringr::str_sub(f[6], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[7], -1L, -1L)!="," )
expect_true( stringr::str_sub(f[15], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[16], -1L, -1L)!="," )
file.remove(filePath2, filePath3)
})
test_that("correct lines are deleted - pt. 3", {
# test3.bib is intentionally NOT standard-conform (duplicated multi-line
# "month" fields and a custom "test" field), exercising edge cases of the
# comma-repair logic.
filePath <- system.file("testdata/test3.bib", package = "bibDelete")
file.copy(filePath, "test3.bib")
r <- deleteField("test3.bib", "annote", verbose = TRUE)
expect_equal(r$linesDel, c(11, 12, 13, 14, 15, 16, 17))
f <- readLines("test3_pr.bib")
expect_true( stringr::str_sub(f[10], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[11], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[12], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[13], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[14], -1L, -1L)=="," ) # test3.bib is not standard conform: normally there should be just a single delimiter "}"
r <- deleteField("test3.bib", "month", verbose = TRUE)
expect_equal(r$linesDel, c(8, 9, 10, 19, 20, 21))
f <- readLines("test3_pr.bib")
expect_true( stringr::str_sub(f[7], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[13], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[15], -1L, -1L)!="," )
r <- deleteField("test3.bib", "month", verbose = TRUE) # month + annote field removed
r <- deleteField("test3_pr.bib", "annote", verbose = TRUE)
expect_equal(r$linesDel, c(8, 9, 10, 11, 12, 13, 14))
f <- readLines("test3_pr_pr.bib") # month + annote field removed
expect_true( stringr::str_sub(f[7], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[8], -1L, -1L)!="," )
r <- deleteField("test3.bib", "annote", verbose = TRUE) # annote + month field removed
expect_equal(r$linesDel, 11:17)
r2 <- deleteField("test3_pr.bib", "month", verbose = TRUE)
expect_equal(r2$linesDel, c(8, 9, 10, 11, 12, 13, 14))
f <- readLines("test3_pr_pr.bib") # annote + month field removed
expect_true( stringr::str_sub(f[1], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[2], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[3], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[4], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[5], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[6], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[7], -1L, -1L)=="," ) # a very rare bug in 'removeCommaOnLineBeforeSearchedField()', only caused by severely non-standard bib entries (i.e. two "month" fields running over multiple lines each, separated by a non-standard "test" field type); this bug can most likely only be fixed in a general way be running over entire file, line by line and finding start and end of each bib entry; then checking the last fields ending of each bib entry for a remaining comma
expect_true( stringr::str_sub(f[8], -1L, -1L)=="}" )
# With addCustomField = "test" the custom field is recognized and the
# trailing comma is repaired correctly.
r <- deleteField("test3.bib", "annote", verbose = TRUE) # annote + month field removed + taking care of custom field type definition
expect_equal(r$linesDel, 11:17)
r2 <- deleteField("test3_pr.bib", "month", verbose = TRUE, addCustomField = "test")
expect_equal(r2$linesDel, c(8, 9, 10, 12, 13, 14))
f <- readLines("test3_pr_pr.bib") # annote + month field removed
expect_true( stringr::str_sub(f[1], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[2], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[3], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[4], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[5], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[6], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[7], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[8], -1L, -1L)!="," )
expect_true( stringr::str_sub(f[9], -1L, -1L)=="}" )
r <- deleteField("test3.bib", "journal", verbose = TRUE)
expect_equal(r$linesDel, 4)
f <- readLines("test3_pr.bib")
expect_true( stringr::str_sub(f[3], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[4], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[5], -1L, -1L)=="," )
expect_true( stringr::str_sub(f[20], -1L, -1L)=="," ) # should not be the case, but 'test3.bib' is not standard conform and the last field type ('month') wasn't requested to be deleted
file.remove("test3.bib", "test3_pr.bib", "test3_pr_pr.bib")
})
test_that("correct output file is generated", {
# Scenario 1: deleting "annote" drops the annote line and repairs the
# trailing comma of the new last field.
filePath <- system.file("testdata/test.bib", package = "bibDelete")
file.copy(filePath, "test.bib")
deleteField("test.bib", "annote")
f <- readLines("test_pr.bib")
f_expect <- c("%% Created using Papers on Thu, 11 Aug 2016.", "%% http://papersapp.com/papers/",
"", "@article{Estrada:2010ka,", "author = {Estrada, Ernesto},",
"title = {{Quantifying network heterogeneity}},", "journal = {Physical Review E},",
"year = {2010},", "volume = {82},", "number = {6},", "pages = {066102},",
"month = dec", "}", "", "@article{Freeman:1977kx,", "author = {Freeman, Linton C},",
"title = {{A set of measures of centrality based on betweenness}},",
"journal = {Sociometry},", "year = {1977},", "volume = {40},",
"number = {1},", "pages = {35},", "month = mar", "}", "", "@article{Krzywinski:2012jj,",
"author = {Krzywinski, Martin and Birol, Inanc and Jones, Steven J M and Marra, Marco A},",
"title = {{Hive plots-rational approach to visualizing networks}},",
"journal = {Briefings in Bioinformatics},", "year = {2012},",
"volume = {13},", "number = {5},", "pages = {627--644},", "month = sep",
"}")
expect_equal(f, f_expect)
# Scenario 2: chained deletions (annote -> month -> author); each pass
# appends "_pr" to the output file name.
filePath <- system.file("testdata/test.bib", package = "bibDelete")
file.copy(filePath, "test.bib")
deleteField("test.bib", "annote")
deleteField("test_pr.bib", "month")
deleteField("test_pr_pr.bib", "author")
f <- readLines("test_pr_pr_pr.bib")
f_expect <- c("%% Created using Papers on Thu, 11 Aug 2016.", "%% http://papersapp.com/papers/",
"", "@article{Estrada:2010ka,", "title = {{Quantifying network heterogeneity}},",
"journal = {Physical Review E},", "year = {2010},", "volume = {82},",
"number = {6},", "pages = {066102}", "}", "", "@article{Freeman:1977kx,",
"title = {{A set of measures of centrality based on betweenness}},",
"journal = {Sociometry},", "year = {1977},", "volume = {40},",
"number = {1},", "pages = {35}", "}", "", "@article{Krzywinski:2012jj,",
"title = {{Hive plots-rational approach to visualizing networks}},",
"journal = {Briefings in Bioinformatics},", "year = {2012},",
"volume = {13},", "number = {5},", "pages = {627--644}", "}")
expect_equal(f, f_expect)
file.remove(c("test.bib", "test_pr.bib", "test_pr_pr.bib", "test_pr_pr_pr.bib"))
# Scenario 3: deleting "month" keeps the annote field intact.
filePath <- system.file("testdata/test.bib", package = "bibDelete")
file.copy(filePath, "test.bib")
deleteField("test.bib", "month")
f <- readLines("test_pr.bib")
f_expect <- c("%% Created using Papers on Thu, 11 Aug 2016.", "%% http://papersapp.com/papers/",
"", "@article{Estrada:2010ka,", "author = {Estrada, Ernesto},",
"title = {{Quantifying network heterogeneity}},", "journal = {Physical Review E},",
"year = {2010},", "volume = {82},", "number = {6},", "pages = {066102}",
"}", "", "@article{Freeman:1977kx,", "author = {Freeman, Linton C},",
"title = {{A set of measures of centrality based on betweenness}},",
"journal = {Sociometry},", "year = {1977},", "volume = {40},",
"number = {1},", "pages = {35}", "}", "", "@article{Krzywinski:2012jj,",
"author = {Krzywinski, Martin and Birol, Inanc and Jones, Steven J M and Marra, Marco A},",
"title = {{Hive plots-rational approach to visualizing networks}},",
"journal = {Briefings in Bioinformatics},", "year = {2012},",
"volume = {13},", "number = {5},", "pages = {627--644},", "annote = {{\\#} hive plots provide visual signatures of large networks}",
"}")
expect_equal(f, f_expect)
# Scenario 4: deleting "title" leaves all other fields unchanged.
filePath <- system.file("testdata/test.bib", package = "bibDelete")
file.copy(filePath, "test.bib")
deleteField("test.bib", "title")
f <- readLines("test_pr.bib")
f_expect <- c("%% Created using Papers on Thu, 11 Aug 2016.", "%% http://papersapp.com/papers/",
"", "@article{Estrada:2010ka,", "author = {Estrada, Ernesto},",
"journal = {Physical Review E},", "year = {2010},", "volume = {82},",
"number = {6},", "pages = {066102},", "month = dec", "}", "",
"@article{Freeman:1977kx,", "author = {Freeman, Linton C},",
"journal = {Sociometry},", "year = {1977},", "volume = {40},",
"number = {1},", "pages = {35},", "month = mar", "}", "", "@article{Krzywinski:2012jj,",
"author = {Krzywinski, Martin and Birol, Inanc and Jones, Steven J M and Marra, Marco A},",
"journal = {Briefings in Bioinformatics},", "year = {2012},",
"volume = {13},", "number = {5},", "pages = {627--644},", "month = sep,",
"annote = {{\\#} hive plots provide visual signatures of large networks}",
"}")
expect_equal(f, f_expect)
file.remove(c("test.bib", "test_pr.bib"))
})
test_that("correct output file is generated--pt2", {
# Scenario 1: deleting "annote" must drop the multi-line annote blocks and
# strip the trailing comma from each entry's new last field.
filePath <- system.file("testdata/test2.bib", package = "bibDelete")
file.copy(filePath, "test2.bib")
deleteField("test2.bib", "annote")
f <- readLines("test2_pr.bib")
f_expect <- c("@article{Lancichinetti:2012kx,", "author = {Lancichinetti, Andrea and Fortunato, Santo},",
"title = {{Consensus clustering in complex networks}},", "journal = {Scientific Reports},",
"year = {2012},", "volume = {2},", "pages = {336},", "month = mar",
"}", "", "@article{Peel:2014ul,", "author = {Peel, Leto and Clauset, Aaron},",
"title = {{Detecting change points in the large-scale structure of evolving networks}},",
"journal = {arXiv},", "year = {2014},", "eprint = {1403.0989},",
"eprinttype = {arxiv},", "month = mar", "}")
expect_equal(f, f_expect)
# Scenario 2: chained deletions (annote, then month, then author); each pass
# appends "_pr" to the output file name.
filePath <- system.file("testdata/test2.bib", package = "bibDelete")
file.copy(filePath, "test2.bib")
deleteField("test2.bib", "annote")
deleteField("test2_pr.bib", "month")
deleteField("test2_pr_pr.bib", "author")
f <- readLines("test2_pr_pr_pr.bib")
f_expect <- c("@article{Lancichinetti:2012kx,", "title = {{Consensus clustering in complex networks}},",
"journal = {Scientific Reports},", "year = {2012},", "volume = {2},",
"pages = {336}", "}", "", "@article{Peel:2014ul,", "title = {{Detecting change points in the large-scale structure of evolving networks}},",
"journal = {arXiv},", "year = {2014},", "eprint = {1403.0989},",
"eprinttype = {arxiv}", "}")
expect_equal(f, f_expect)
file.remove(c("test2.bib", "test2_pr.bib", "test2_pr_pr.bib", "test2_pr_pr_pr.bib"))
# Scenario 3: deleting "month" keeps the multi-line annote blocks intact.
filePath <- system.file("testdata/test2.bib", package = "bibDelete")
file.copy(filePath, "test2.bib")
deleteField("test2.bib", "month")
f <- readLines("test2_pr.bib")
f_expect <- c("@article{Lancichinetti:2012kx,", "author = {Lancichinetti, Andrea and Fortunato, Santo},",
"title = {{Consensus clustering in complex networks}},", "journal = {Scientific Reports},",
"year = {2012},", "volume = {2},", "pages = {336},", "annote = {{\\#} module detection algorithms might be dependent on random seeds",
"", "{\\#} nr of runs r = nr of partitions used for the consensus matrix}",
"}", "", "@article{Peel:2014ul,", "author = {Peel, Leto and Clauset, Aaron},",
"title = {{Detecting change points in the large-scale structure of evolving networks}},",
"journal = {arXiv},", "year = {2014},", "eprint = {1403.0989},",
"eprinttype = {arxiv},", "annote = {{\\#} see conference proceeding papers3://publication/uuid/739AD14E-73B1-4A87-8A5B-6DBD847D0F47",
"", "{\\#} corresponding Python code at http://gdriv.es/letopeel/code.html",
"", "{\\#} we found that changes associated with two communities merging or with one of several communities losing its internal connections ({\\textquotedblleft}fragmentation{\\textquotedblright}) were more difficult to accurately detect than those associated with one community splitting in two or with many singletons connecting to form a new community ({\\textquotedblleft}formation{\\textquotedblright})",
"", "{\\#} change-point methods based on network measures like the mean degree, clustering coeffi- cient, or mean geodesic path length performed poorly, yielding high false negative rates even for large structural changes}",
"}")
expect_equal(f, f_expect)
# Scenario 4: deleting "title" leaves every other field (incl. annote) as-is.
filePath <- system.file("testdata/test2.bib", package = "bibDelete")
file.copy(filePath, "test2.bib")
deleteField("test2.bib", "title")
f <- readLines("test2_pr.bib")
f_expect <- c("@article{Lancichinetti:2012kx,", "author = {Lancichinetti, Andrea and Fortunato, Santo},",
"journal = {Scientific Reports},", "year = {2012},", "volume = {2},",
"pages = {336},", "month = mar,", "annote = {{\\#} module detection algorithms might be dependent on random seeds",
"", "{\\#} nr of runs r = nr of partitions used for the consensus matrix}",
"}", "", "@article{Peel:2014ul,", "author = {Peel, Leto and Clauset, Aaron},",
"journal = {arXiv},", "year = {2014},", "eprint = {1403.0989},",
"eprinttype = {arxiv},", "month = mar,", "annote = {{\\#} see conference proceeding papers3://publication/uuid/739AD14E-73B1-4A87-8A5B-6DBD847D0F47",
"", "{\\#} corresponding Python code at http://gdriv.es/letopeel/code.html",
"", "{\\#} we found that changes associated with two communities merging or with one of several communities losing its internal connections ({\\textquotedblleft}fragmentation{\\textquotedblright}) were more difficult to accurately detect than those associated with one community splitting in two or with many singletons connecting to form a new community ({\\textquotedblleft}formation{\\textquotedblright})",
"", "{\\#} change-point methods based on network measures like the mean degree, clustering coeffi- cient, or mean geodesic path length performed poorly, yielding high false negative rates even for large structural changes}",
"}")
expect_equal(f, f_expect)
file.remove(c("test2.bib", "test2_pr.bib"))
})
test_that("correct output file is generated--pt3", {
# test3.bib is deliberately non-standard (repeated "month" fields spanning
# several lines, plus a custom "test" field); overwrite = TRUE because the
# working copy may linger from an earlier run.
filePath <- system.file("testdata/test3.bib", package = "bibDelete")
file.copy(filePath, "test3.bib", overwrite = TRUE)
deleteField("test3.bib", "annote")
f <- readLines("test3_pr.bib")
f_expect <- c("@article{Peel:2014ul,", "author = {Peel, Leto and Clauset, Aaron},",
"title = {{Detecting change points in the large-scale structure of evolving networks}},",
"journal = {arXiv},", "year = {2014},", "eprint = {1403.0989},",
"eprinttype = {arxiv},", "month = mar", "dec", "nov,", "test = {testfield},",
"month = mar,", "dec,", "nov,", "}")
expect_equal(f, f_expect)
# Chained deletions with addCustomField = "test" so the parser treats the
# custom "test" field as a known field type.
filePath <- system.file("testdata/test3.bib", package = "bibDelete")
file.copy(filePath, "test3.bib", overwrite = TRUE)
deleteField("test3.bib", "annote", addCustomField = "test")
deleteField("test3_pr.bib", "month", addCustomField = "test")
deleteField("test3_pr_pr.bib", "author", addCustomField = "test")
f <- readLines("test3_pr_pr_pr.bib")
f_expect <- c("@article{Peel:2014ul,", "title = {{Detecting change points in the large-scale structure of evolving networks}},",
"journal = {arXiv},", "year = {2014},", "eprint = {1403.0989},",
"eprinttype = {arxiv},", "test = {testfield}", "}")
expect_equal(f, f_expect)
file.remove(c("test3.bib", "test3_pr.bib", "test3_pr_pr.bib", "test3_pr_pr_pr.bib"))
# Deleting "month" removes both multi-line month blocks.
filePath <- system.file("testdata/test3.bib", package = "bibDelete")
file.copy(filePath, "test3.bib", overwrite = TRUE)
deleteField("test3.bib", "month")
f <- readLines("test3_pr.bib")
f_expect <- c("@article{Peel:2014ul,", "author = {Peel, Leto and Clauset, Aaron},",
"title = {{Detecting change points in the large-scale structure of evolving networks}},",
"journal = {arXiv},", "year = {2014},", "eprint = {1403.0989},",
"eprinttype = {arxiv},", "annote = {{\\#} see conference proceeding papers3://publication/uuid/739AD14E-73B1-4A87-8A5B-6DBD847D0F47",
"here, it goes on,", "", "{\\#} corresponding Python code at http://gdriv.es/letopeel/code.html",
"", "{\\#} change-point methods based on network measures like the mean degree, clustering coeffi- cient, or mean geodesic path length performed poorly, yielding high false negative rates even for large structural changes},",
"", "test = {testfield}", "}")
expect_equal(f, f_expect)
# Deleting "title" leaves the malformed month/test fields untouched.
filePath <- system.file("testdata/test3.bib", package = "bibDelete")
file.copy(filePath, "test3.bib", overwrite = TRUE)
deleteField("test3.bib", "title")
f <- readLines("test3_pr.bib")
f_expect <- c("@article{Peel:2014ul,", "author = {Peel, Leto and Clauset, Aaron},",
"journal = {arXiv},", "year = {2014},", "eprint = {1403.0989},",
"eprinttype = {arxiv},", "month = mar", "dec", "nov,", "annote = {{\\#} see conference proceeding papers3://publication/uuid/739AD14E-73B1-4A87-8A5B-6DBD847D0F47",
"here, it goes on,", "", "{\\#} corresponding Python code at http://gdriv.es/letopeel/code.html",
"", "{\\#} change-point methods based on network measures like the mean degree, clustering coeffi- cient, or mean geodesic path length performed poorly, yielding high false negative rates even for large structural changes},",
"", "test = {testfield},", "month = mar,", "dec,", "nov,", "}")
expect_equal(f, f_expect)
file.remove(c("test3.bib", "test3_pr.bib"))
})
test_that("correct output file is generated--pt4", {
# Single-entry fixture: deleting "annote" must leave a clean, comma-correct
# entry.
filePath <- system.file("testdata/test4.bib", package = "bibDelete")
file.copy(filePath, "test4.bib", overwrite = TRUE)
deleteField("test4.bib", "annote")
f <- readLines("test4_pr.bib")
f_expect <- c("@article{Nuzzo:2014bp,", "author = {Nuzzo, Regina},", "title = {{Statistical errors}},",
"journal = {Nature},", "year = {2014},", "volume = {506},", "number = {7487},",
"pages = {150--152}", "}")
expect_equal(f, f_expect)
# Chained deletions: annote, then author, then year.
filePath <- system.file("testdata/test4.bib", package = "bibDelete")
file.copy(filePath, "test4.bib", overwrite = TRUE)
deleteField("test4.bib", "annote")
deleteField("test4_pr.bib", "author")
deleteField("test4_pr_pr.bib", "year")
f <- readLines("test4_pr_pr_pr.bib")
f_expect <- c("@article{Nuzzo:2014bp,", "title = {{Statistical errors}},",
"journal = {Nature},", "volume = {506},", "number = {7487},",
"pages = {150--152}", "}")
expect_equal(f, f_expect)
# NOTE(review): "test4_original.bib" is removed here but is not created in
# this test — presumably a side product of deleteField(); verify.
file.remove(c("test4_original.bib", "test4.bib", "test4_pr.bib", "test4_pr_pr.bib", "test4_pr_pr_pr.bib"))
})
|
##' Merge Google Scholar citation counts into a publications table and
##' format the result for rendering in an R Markdown CV.
##'
##' Reads `data/<infile>` (relative to the project root via `here()`), joins
##' per-publication citation counts from `scholar_pubs` by `ID`, bolds the
##' given author name, italicizes journal names, and builds the columns the
##' CV template expects (section, title, subtitle, description_1/2, end,
##' aside).
##'
##' @title Clean and format publication data
##'
##' @param infile Name of a CSV file inside the project's `data/` directory;
##'   must contain an `ID` column matching `scholar_pubs$ID`, plus the
##'   author/journal/title/link/number/doi/date/aside columns used below.
##' @param scholar_pubs Data frame of Google Scholar records with at least
##'   `ID` and `cites` columns.
##' @param .author Author name passed to `clean_author()` for bolding.
##'   (NOTE(review): the original roxygen documented this as `author`, which
##'   did not match the actual argument name `.author`.)
clean_pubs <- function(infile, scholar_pubs, .author) {
data_pubs <- read_csv(file.path(here(),'data', infile))
just_the_citations <- scholar_pubs %>%
select(ID, cites)
left_join(data_pubs, just_the_citations, by = "ID") %>%
select(-ID) %>%
# set up for use in .Rmd doc
mutate(
# fill missing
cites = if_else(is.na(cites), 0, cites),
# format author values so its easier to see your name
author = clean_author(x = author, .author = .author, bold = TRUE),
# journal names should be italic
journal = paste0("*",journal,"*")
) %>%
transmute(
# label section
section = 'publications',
# give links if you got em.
title = if_else(
condition = is.na(link),
true = title,
false = as.character(glue("[{title}]({link})"))
),
# combine author <new line> journal, number for the .Rmd doc
subtitle = glue("{author} <br/> {journal}, {number}"),
# show citations for each paper
description_1 = glue("Citations: {cites}"),
description_2 = glue("DOI: {doi}"),
# Just show the year that paper was published
end = year(as.Date(date, format = '%m/%d/%Y')),
aside
)
}
| /R/clean_pubs.R | no_license | setison/curriculum-vitae-starter-kit | R | false | false | 1,405 | r | ##' .. content for \description{} (no empty lines) ..
##'
##' .. content for \details{} ..
##'
##' @title
##'
##' @param infile
##' @param scholar_pubs
##' @param author
clean_pubs <- function(infile, scholar_pubs, .author) {
data_pubs <- read_csv(file.path(here(),'data', infile))
just_the_citations <- scholar_pubs %>%
select(ID, cites)
left_join(data_pubs, just_the_citations, by = "ID") %>%
select(-ID) %>%
# set up for use in .Rmd doc
mutate(
# fill missing
cites = if_else(is.na(cites), 0, cites),
# format author values so its easier to see your name
author = clean_author(x = author, .author = .author, bold = TRUE),
# journal names should be italic
journal = paste0("*",journal,"*")
) %>%
transmute(
# label section
section = 'publications',
# give links if you got em.
title = if_else(
condition = is.na(link),
true = title,
false = as.character(glue("[{title}]({link})"))
),
# combine author <new line> journal, number for the .Rmd doc
subtitle = glue("{author} <br/> {journal}, {number}"),
# show citations for each paper
description_1 = glue("Citations: {cites}"),
description_2 = glue("DOI: {doi}"),
# Just show the year that paper was published
end = year(as.Date(date, format = '%m/%d/%Y')),
aside
)
}
|
# Setup for HSV colour-histogram feature extraction on the image set.
# One-time package installation (EBImage comes from Bioconductor); kept
# commented out so the script can be re-run without reinstalling.
# install.packages("e1071")
# install.packages("caret")
# source("http://bioconductor.org/biocLite.R")
# biocLite()
# biocLite("EBImage")
# install.packages("grDevices")
library(EBImage)
library(grDevices)
library(e1071)
library(caret)
# Set folder containing images
# dir_images <- "/Users/JPC/Documents/Columbia/2nd Semester/1. Applied Data Science/2. Homeworks/Project 3/images/"
# Set to project directory
# setwd("/Users/JPC/Documents/Columbia/2nd Semester/1. Applied Data Science/2. Homeworks/Project 3/cycle3cvd-team-6")
# NOTE(review): machine-specific absolute paths plus setwd() make this script
# non-portable; prefer relative paths or a project-root helper.
dir_images <- "/Users/yueyingteng/Downloads/images/"
setwd ("/Users/yueyingteng/Downloads/cycle3cvd-team-6/data")
### Extract HSV
# extract.features: compute a normalized 3-D HSV colour histogram for one image.
#   img - an EBImage Image object with three colour channels (RGB); pixel
#         values assumed to be in [0, 1] -- TODO confirm for this data set.
# Returns a numeric vector of length nH * nS * nV (10 * 6 * 6 = 360); each
# entry is the fraction of the image's pixels falling into one (H, S, V) bin.
extract.features <- function(img){
mat <- imageData(img)
# Convert 3d array of RGB to 2d matrix
mat_rgb <- mat
dim(mat_rgb) <- c(nrow(mat)*ncol(mat), 3)
# rgb2hsv() expects one colour per column of a 3-row matrix, hence t().
mat_hsv <- rgb2hsv(t(mat_rgb))
# Number of histogram bins per channel: hue, saturation, value.
nH <- 10
nS <- 6
nV <- 6
# Caution: determine the bins using all images! The bins should be consistent across all images.
# The following code is only used for demonstration on a single image.
hBin <- seq(0, 1, length.out=nH)
sBin <- seq(0, 1, length.out=nS)
# NOTE(review): the value bins only span [0, 0.005]; every pixel with V above
# 0.005 falls in the top bin. Presumably deliberate for this data -- confirm.
vBin <- seq(0, 0.005, length.out=nV)
# Cross-tabulate the per-pixel bin indices of the three channels; fixing the
# factor levels forces a full nH x nS x nV table even when bins are empty.
freq_hsv <- as.data.frame(table(factor(findInterval(mat_hsv[1,], hBin), levels=1:nH),
factor(findInterval(mat_hsv[2,], sBin), levels=1:nS),
factor(findInterval(mat_hsv[3,], vBin), levels=1:nV)))
hsv_feature <- as.numeric(freq_hsv$Freq)/(ncol(mat)*nrow(mat)) # normalization
return(hsv_feature)
}
## read image
## Build the 360-dimensional HSV feature matrix for every usable image.
## NOTE(review): `image_names` and `corrupt` come from the two commented-out
## lines below; they must be defined before this block runs.
##image_names <- list.files(dir_images)
##corrupt <- c(-4, -6, -8, -140, -152, -2237, -2246, -2247, -2253, -2265, -2274, -2283, -2293, -2299, -6903, -6909)
# Negative indices drop the known-corrupt images.
image_names <- image_names[corrupt]
# NOTE(review): binding a data frame to `names` shadows base::names for
# non-function lookups; the replacement calls below still work, but a less
# loaded variable name would be clearer.
names<-as.data.frame(image_names)
# labels <- read.csv("/Users/yueyingteng/Downloads/labels.csv",stringsAsFactors = F)
# obs<-dim(labels)[1]
# Preallocate: one row per image, 360 HSV histogram bins per row.
X <- array(rep(0, length(image_names)*360), dim = c(length(image_names), 360))
for (i in seq_along(image_names)) {
  # Bug fix: the original ran the feature extraction in `finally`, so a read
  # failure silently reused `img` from the previous iteration. Extraction now
  # happens only after a successful read; on error the failing index is
  # printed and the row is left all-zero.
  tryCatch({
    img <- readImage(paste0(dir_images, image_names[i]))
    X[i, ] <- extract.features(img)
  },
  error = function(err) print(i))
}
data_hsv <- as.data.frame(X)
# data_hsv<-as.data.frame(cbind(labels[,3],X))
# data_hsv<-unique(data_hsv)
# Bug fix: filename typo ("beseline") meant the load() below could never find
# the file this script had just saved.
save(data_hsv, file = "baseline feature.RData")
# data_hsv$V1<-as.factor(data_hsv$V1)
# load("/Users/yueyingteng/Downloads/cycle3cvd-team-6/output/feature_eval.RData")
# Data base for in class cross validation
names(data_hsv) <- paste0("base", seq_len(ncol(data_hsv)))
names(data_hsv)
load("/Users/yueyingteng/Downloads/cycle3cvd-team-6/data/baseline feature.RData")
new_features <- read.csv("/Users/yueyingteng/Downloads/cycle3cvd-team-6/data/new_features.csv", header = FALSE)
names(new_features) <- paste0("new", seq_len(ncol(new_features)))
names(new_features)
# NOTE(review): `feature_eval` is only defined if the commented-out load()
# above is executed first -- confirm before running this line.
feature_eval <- cbind(names, feature_eval)
save(feature_eval, file = "/Users/yueyingteng/Downloads/cycle3cvd-team-6/output/feature_eval.RData")
| /lib/features/base_features and binding.R | no_license | HolyZero/Cat-VS-Dog-Image-Classification | R | false | false | 2,961 | r | # install.packages("e1071")
# install.packages("caret")
# source("http://bioconductor.org/biocLite.R")
# biocLite()
# biocLite("EBImage")
# install.packages("grDevices")
library(EBImage)
library(grDevices)
library(e1071)
library(caret)
# Set folder containing images
# dir_images <- "/Users/JPC/Documents/Columbia/2nd Semester/1. Applied Data Science/2. Homeworks/Project 3/images/"
# Set to project directory
# setwd("/Users/JPC/Documents/Columbia/2nd Semester/1. Applied Data Science/2. Homeworks/Project 3/cycle3cvd-team-6")
dir_images <- "/Users/yueyingteng/Downloads/images/"
setwd ("/Users/yueyingteng/Downloads/cycle3cvd-team-6/data")
### Extract HSV
# extract.features: compute a normalized 3-D HSV colour histogram for one image.
#   img - an EBImage Image object with three colour channels (RGB); pixel
#         values assumed to be in [0, 1] -- TODO confirm for this data set.
# Returns a numeric vector of length nH * nS * nV (10 * 6 * 6 = 360); each
# entry is the fraction of the image's pixels falling into one (H, S, V) bin.
extract.features <- function(img){
mat <- imageData(img)
# Convert 3d array of RGB to 2d matrix
mat_rgb <- mat
dim(mat_rgb) <- c(nrow(mat)*ncol(mat), 3)
# rgb2hsv() expects one colour per column of a 3-row matrix, hence t().
mat_hsv <- rgb2hsv(t(mat_rgb))
# Number of histogram bins per channel: hue, saturation, value.
nH <- 10
nS <- 6
nV <- 6
# Caution: determine the bins using all images! The bins should be consistent across all images.
# The following code is only used for demonstration on a single image.
hBin <- seq(0, 1, length.out=nH)
sBin <- seq(0, 1, length.out=nS)
# NOTE(review): the value bins only span [0, 0.005]; every pixel with V above
# 0.005 falls in the top bin. Presumably deliberate for this data -- confirm.
vBin <- seq(0, 0.005, length.out=nV)
# Cross-tabulate the per-pixel bin indices of the three channels; fixing the
# factor levels forces a full nH x nS x nV table even when bins are empty.
freq_hsv <- as.data.frame(table(factor(findInterval(mat_hsv[1,], hBin), levels=1:nH),
factor(findInterval(mat_hsv[2,], sBin), levels=1:nS),
factor(findInterval(mat_hsv[3,], vBin), levels=1:nV)))
hsv_feature <- as.numeric(freq_hsv$Freq)/(ncol(mat)*nrow(mat)) # normalization
return(hsv_feature)
}
## read image
##image_names <- list.files(dir_images)
##corrupt <- c(-4, -6, -8, -140, -152, -2237, -2246, -2247, -2253, -2265, -2274, -2283, -2293, -2299, -6903, -6909)
image_names <- image_names[corrupt]
names<-as.data.frame(image_names)
# labels <- read.csv("/Users/yueyingteng/Downloads/labels.csv",stringsAsFactors = F)
# obs<-dim(labels)[1]
X <- array(rep(0,length(image_names)*360),dim=c(length(image_names),360))
for (i in 1:length(image_names)){
tryCatch({
img <- readImage(paste0(dir_images,image_names[i]))
},
error =function(err){print(i)},
finally = {X[i,] <- extract.features(img)})
}
data_hsv<-as.data.frame(X)
# data_hsv<-as.data.frame(cbind(labels[,3],X))
# data_hsv<-unique(data_hsv)
save(data_hsv,file="beseline feature.RData")
# data_hsv$V1<-as.factor(data_hsv$V1)
# load("/Users/yueyingteng/Downloads/cycle3cvd-team-6/output/feature_eval.RData")
# Data base for in class cross validation
names(data_hsv) <- paste0("base",seq(1:ncol(data_hsv)))
names(data_hsv)
load("/Users/yueyingteng/Downloads/cycle3cvd-team-6/data/baseline feature.RData")
new_features <- read.csv("/Users/yueyingteng/Downloads/cycle3cvd-team-6/data/new_features.csv", header=F)
names(new_features) <- paste0("new",seq(1:ncol(new_features)))
names(new_features)
feature_eval <- cbind(names,feature_eval)
save(feature_eval,file = "/Users/yueyingteng/Downloads/cycle3cvd-team-6/output/feature_eval.RData")
|
# Korea Welfare Panel Study (KoWePS) 2015: relationship between age and income.
# Loads the SPSS file, renames coded survey columns, cleans the birth-year
# field, derives age, and plots mean monthly income by age.
install.packages("foreign") # install the foreign package
library(foreign) # to load SPSS files
library(dplyr) # data preprocessing
library(ggplot2) # visualization
library(readxl) # to read Excel files
setwd("C://easy_r")
raw_welfare<-read.spss(file='Koweps_hpc10_2015_beta1.sav',
to.data.frame=T)
# Work on a copy so the raw import stays untouched.
welfare<-raw_welfare
head(welfare)
tail(welfare)
# Rename the survey's coded column names to descriptive ones.
welfare<-rename(welfare,
sex=h10_g3,
birth=h10_g4,
marriage=h10_g10,
religion=h10_g11,
income=p1002_8aq1,
code_job=h10_eco9,
code_region=h10_reg7)
class(welfare$birth)
summary(welfare$birth)
qplot(welfare$birth)
summary(welfare$birth)
table(is.na(welfare$birth))
# 9999 is the survey's missing-value code for birth year; recode it to NA.
welfare$birth<-ifelse(welfare$birth==9999,NA,welfare$birth)
table(is.na(welfare$birth))
# Korean-style age as of 2018 (year difference plus one).
welfare$age<-2018-welfare$birth+1
summary(welfare$age)
qplot(welfare$age)
# Mean income per age, excluding respondents with missing income.
age_income<-welfare %>%
filter(!is.na(income)) %>%
group_by(age) %>%
summarise(mean_income=mean(income))
head(age_income)
ggplot(data=age_income,aes(x=age,y=mean_income))+geom_line()
| /r/180220/교재9장_3_나이와 월급의 관계.R | no_license | Young-sun-git/bigdata-web | R | false | false | 1,156 | r | install.packages("foreign") # foreign 패키지 설치
# Korea Welfare Panel Study (KoWePS) 2015: relationship between age and income.
library(foreign) # to load SPSS files
library(dplyr) # data preprocessing
library(ggplot2) # visualization
library(readxl) # to read Excel files
setwd("C://easy_r")
raw_welfare<-read.spss(file='Koweps_hpc10_2015_beta1.sav',
to.data.frame=T)
# Work on a copy so the raw import stays untouched.
welfare<-raw_welfare
head(welfare)
tail(welfare)
# Rename the survey's coded column names to descriptive ones.
welfare<-rename(welfare,
sex=h10_g3,
birth=h10_g4,
marriage=h10_g10,
religion=h10_g11,
income=p1002_8aq1,
code_job=h10_eco9,
code_region=h10_reg7)
class(welfare$birth)
summary(welfare$birth)
qplot(welfare$birth)
summary(welfare$birth)
table(is.na(welfare$birth))
# 9999 is the survey's missing-value code for birth year; recode it to NA.
welfare$birth<-ifelse(welfare$birth==9999,NA,welfare$birth)
table(is.na(welfare$birth))
# Korean-style age as of 2018 (year difference plus one).
welfare$age<-2018-welfare$birth+1
summary(welfare$age)
qplot(welfare$age)
# Mean income per age, excluding respondents with missing income.
age_income<-welfare %>%
filter(!is.na(income)) %>%
group_by(age) %>%
summarise(mean_income=mean(income))
head(age_income)
ggplot(data=age_income,aes(x=age,y=mean_income))+geom_line()
|
# CC0
#To the extent possible under law, the author(s) have dedicated all copyright
#and related and neighboring rights to this software to the public domain
#worldwide. This software is distributed without any warranty.
# For a copy of the CC0 Public Domain Dedication see,
# <http://creativecommons.org/publicdomain/zero/1.0/>.
# plot the phylogeny
# NOTE(review): rm(list=ls()) wipes the entire workspace; acceptable for a
# standalone example script, hostile when sourced interactively.
rm(list=ls())
# Provides paint_phy() and, presumably, the ape.phy / ape.dat objects used
# below -- TODO confirm against method2_tools.R.
source("method2_tools.R")
# Colour tree edges for two groups, each delimited by a pair of tip taxa
# (assumed to span the clade between the two named species -- verify against
# paint_phy()'s definition).
myphy <- paint_phy(ape.phy, ape.dat, list(c("Bolbometopon_muricatum", "Sparisoma_radians"), c("Chlorurus_sordidus", "Hipposcarus_longiceps")))
# Render the tree as an unlabelled fan layout into phylo.pdf.
cairo_pdf(file="phylo.pdf")
plot(myphy$phy, edge.color=myphy$colors, type="fan", show.tip.label=FALSE, edge.width=2)
dev.off()
| /inst/examples/misc_examples/treeplot.R | permissive | cboettig/wrightscape | R | false | false | 671 | r | # CC0
#To the extent possible under law, the author(s) have dedicated all copyright
#and related and neighboring rights to this software to the public domain
#worldwide. This software is distributed without any warranty.
# For a copy of the CC0 Public Domain Dedication see,
# <http://creativecommons.org/publicdomain/zero/1.0/>.
# plot the phylogeny
# NOTE(review): rm(list=ls()) wipes the entire workspace.
rm(list=ls())
# Provides paint_phy() and, presumably, ape.phy / ape.dat -- TODO confirm.
source("method2_tools.R")
# Colour tree edges for two groups, each delimited by a pair of tip taxa.
myphy <- paint_phy(ape.phy, ape.dat, list(c("Bolbometopon_muricatum", "Sparisoma_radians"), c("Chlorurus_sordidus", "Hipposcarus_longiceps")))
# Render the tree as an unlabelled fan layout into phylo.pdf.
cairo_pdf(file="phylo.pdf")
plot(myphy$phy, edge.color=myphy$colors, type="fan", show.tip.label=FALSE, edge.width=2)
dev.off()
|
# Plot window on the x axis: log(predator weight / prey weight).
first.x.on.plot<-1
last.x.on.plot<-8
# Species numbers to include; the second assignment deliberately overrides
# the first (toggle by commenting one of them out).
incl.sp<-c(17,18,20) # species number to be included.
incl.sp<-c(1) # species number to be included.
palette("default") # good for colorful plots
#palette(gray(seq(0,.9,len=6))) # gray scale for papers, use len =500 to get black only
# Step width used when evaluating the size-preference densities.
by.den<-0.01
# Alternative scenario definitions kept for reference; this block is disabled.
# Fix: use the FALSE constant rather than `F`, which is an ordinary binding
# that callers can reassign.
if (FALSE) {
dirs<- c('Baltic_logn_logn','Baltic_beta_logn' ) # directories with output to compare
labels<-c('lognorm size','beta size') # labes for each scenario (directory with data)
dirs<- c('Baltic_logn_logn','Baltic_logn_diri','Baltic_beta_logn','Baltic_beta_diri') # directories with output to compare
labels<-c('lognorm size, lognorm','lognorm size, Dirichlet','Beta size, lognorm','Beta size, Dirichlet' ) # labes for each scenario (directory with data)
dirs<- c('NS_4_7_Mac_beta5_diri','NS_4_7_Mac_beta5_logn',"NS_4_7_MAC_logn_diri_limit" ,"NS_4_7_MAC_logn_logn",'NS_4_7_Mac_beta6_logn','NS_4_7_Mac_beta6_diri' ) # directories with output to compare
labels<-c('beta size, Dirichlet','beta size, lognormal','lognorm size, Dirichlet','lognorm size, lognorm','beta unimodal, logn','beta unimodal, diri' ) # labes for each scenario (directory with data)
dirs<- c("NS_4_7_MAC_no_adj", "NS_4_7_MAC", "NS_4_7_MAC_free" , "NS_4_7_MAC_100" ,"NS_4_7_MAC_test" ) # directories with output to compare
labels<-c('a) no adjustment', 'b) adjusted input','bb) estimate L50 and SR', 'd) L50 fixed at 100 mm','e) test') # labes for each scenario (directory with data)
dirs<- c("NS_4_7_MAC_81dist_size","NS_4_7_MAC_81dist_size_fixed","NS_4_7_MAC_81dist_size_mesh","NS_4_7_MAC_91dist_size_mesh") # directories with output to compare
labels<-c("log-normal","log-normal, fixed parameters","log-normal, mesh","log-normal, 91 mesh") # labes for each scenario (directory with data)
dirs<- c("NS_4_7_MAC_81dist","NS_4_7_MAC_81dist_size","NS_4_7_MAC_81dist_size_fixed","NS_4_7_MAC_81dist_size_mesh") # directories with output to compare
labels<-c("a) uniform size selction","b) size selection, free parameters","c) size selction, fixed parameters","d) b) and mesh selction for ALK") # labes for each scenario (directory with data)
dirs<- c("NS_paper_size","NS_paper_size_fixed","NS_paper_size_mesh") # directories with output to compare
labels<-c("log-normal","log-normal, fixed parameters","log-normal, mesh selection") # labes for each scenario (directory with data)
}
######################
# Fail fast if any scenario directory is missing its SMS configuration file.
# NOTE(review): `root`, `dirs`, `labels`, `nsp`, `first.VPA`, `sp.names` and
# Init.function() come from the surrounding SMS R environment -- confirm they
# are defined before this block runs.
for (dir in dirs) {
if ( file.access(file.path(root,dir,"sms.dat"), mode = 0)!=0) stop(paste('Directory',dir,'does not exist'))
}
Init.function() # get SMS.contol object including sp.names
# Read size_pref.out from every scenario directory and stack the rows,
# tagging each block with its human-readable scenario label.
a<-0
for (dir in dirs) {
file<-file.path(root,dir,'size_pref.out')
size<-read.table(file,comment.char = "#",header=T)
a<-a+1
size<-data.frame(size,dirs=labels[a])
if (dir==dirs[1]) {sizes<-size; npr<-dim(size)[1];} else sizes<-rbind(sizes,size)
# Predator-specific min/max prey-size ratios are read once, from the first
# scenario only (assumed identical across scenarios -- TODO confirm).
if (dir==dirs[1]) {
file<-file.path(root,dir,'min_max_size_pref.out')
mm<-scan(file)
# First half of mm holds the minima, second half the maxima; one row per
# predator, one column per VPA species.
min.size<-matrix(data=mm,ncol=nsp-first.VPA+1,nrow=npr,byrow=TRUE)
dimnames(min.size)[2]<-list(sp.names[first.VPA:nsp])
dimnames(min.size)[1]<-list(sp.names[1:npr])
max.size<-matrix(data=mm[(1+length(min.size)):(2*length(min.size))],ncol=nsp-first.VPA+1,nrow=npr,byrow=TRUE)
dimnames(max.size)<-dimnames(min.size)
# Collapse to one overall min/max per predator.
min.size<-apply(min.size,1,min)
max.size<-apply(max.size,1,max)
range.size<-min.size # copy structure
}
}
# Keep only the size-selection models handled below and the requested species.
sizes<-subset(sizes,size.model %in% c(1,3,5,6) & species.n %in% incl.sp)
# Evaluate each fitted size-preference curve on a grid of log predator/prey
# weight ratios and accumulate the plotting data in `ab`.
for (a in (1:dim(sizes)[1])) {
ratio<-sizes[a,"size.ratio"]
vars<-sizes[a,"size.var"]
# var.right<-sizes[a,"size.var.right"]
model<- sizes[a,"size.model"]
species<-sizes[a,"species.n"]
dirss<-sizes[a,"dirs"]
if (model==1) {
# Model 1: Gaussian-shaped preference in log ratio, centred at `ratio`
# with variance `vars`, clipped to the species' observed log-size range.
xx<-seq(first.x.on.plot,last.x.on.plot,by=by.den)
len=length(xx)
b<-data.frame(x=xx,y=exp(-(xx-ratio)^2/(2.0*vars)),Species=rep(sp.names[species],len),dirs=rep(dirss,len))
b<-subset(b,x>=log(min.size[species]) & x<=log(max.size[species]))
}
else if (model==3) { # Gamma
# Model 3: gamma density with shape=`ratio` and scale=`vars`.
xx<-seq(first.x.on.plot,last.x.on.plot,by=by.den)
len=length(xx)
b<-data.frame(x=xx,y=dgamma(xx,shape=ratio,scale=vars),Species=rep(sp.names[species],len),dirs=rep(dirss,len))
b<-subset(b,x>=log(min.size[species]) & x<=log(max.size[species]))
}
else if (model==5 | model==6) {
# Models 5/6: beta density on [0,1], rescaled onto the species' observed
# log size-ratio range; endpoints are nudged to avoid the beta bounds.
min.s=log(min.size[species]);
max.s=log(max.size[species]);
# adjust to avoid outer bounds in beta distribution [0;1]
range.size[species]= 1.001*(max.s-min.s);
min.s= 0.999*min.s;
# range.size[species]=max.s-min.s;
xx<-seq(0,1,by=by.den/10)
len=length(xx)
yy<-dbeta(xx,ratio,vars)
xx<-min.s+range.size[species]*xx
b<-data.frame(x=xx,y=yy,Species=rep(sp.names[species],len),dirs=rep(dirss,len))
b<-subset(b,x>=min.s & x<=max.s)
}
# Append this curve's points to the accumulated plot data.
if (a==1) ab<-rbind(b) else ab<-rbind(ab,b)
}
print(xyplot(y~x|Species*dirs,data=subset(ab,y<2.5),type='l',lwd=2,col=1,transparent=F,
layout=c(2,3),
xlab='log(predator weight / prey weight)',ylab='Size preference'))
####
nox<-2; noy<-3;
#cleanup()
newplot(dev="screen",nox,noy)
by(ab,list(ab$Species),function(x) {
#plot(x$x,y$y,
a<-subset(x,dirs==dirs[1])
plot(a$x,a$y,type='l',col=1,xlab="log(predator weight / prey weight)",ylab="size preference",
xlim=c(first.x.on.plot,last.x.on.plot),ylim=c(0,1),main=a[1,'Species'] )
for (i in (2:length(dirs))) {
a<-subset(x,dirs==labels[i])
lines(a$x,a$y,type='l',col=i,lty=i,lwd=2)
}
})
#for the paper;
#cleanup()
trellis.device(device = "windows",
color = F, width=9, height=17,pointsize = 12,
new = TRUE, retain = FALSE)
print( xyplot(y~x|Species,group=dirs, data=ab,type='a',lwd=2,lty=c(9,1,2),
layout=c(1,3), xlab='log(predator weight / prey weight)',ylab='Size preference',
strip = strip.custom( bg='white'),par.strip.text=list(cex=1, lines=1.7),
scales = list(x = list( cex=1), y= list(cex=1),alternating = 1)))
| /SMS_r_prog/r_prog_less_frequently_used/compare_runs_prey_size_selection.r | permissive | ices-eg/wg_WGSAM | R | false | false | 6,093 | r |
first.x.on.plot<-1
last.x.on.plot<-8
incl.sp<-c(17,18,20) # species number to be included.
incl.sp<-c(1) # species number to be included.
palette("default") # good for clolorfull plots
#palette(gray(seq(0,.9,len=6))) # gray scale for papers, use len =500 to get black only
by.den<-0.01
if (F) {
dirs<- c('Baltic_logn_logn','Baltic_beta_logn' ) # directories with output to compare
labels<-c('lognorm size','beta size') # labes for each scenario (directory with data)
dirs<- c('Baltic_logn_logn','Baltic_logn_diri','Baltic_beta_logn','Baltic_beta_diri') # directories with output to compare
labels<-c('lognorm size, lognorm','lognorm size, Dirichlet','Beta size, lognorm','Beta size, Dirichlet' ) # labes for each scenario (directory with data)
dirs<- c('NS_4_7_Mac_beta5_diri','NS_4_7_Mac_beta5_logn',"NS_4_7_MAC_logn_diri_limit" ,"NS_4_7_MAC_logn_logn",'NS_4_7_Mac_beta6_logn','NS_4_7_Mac_beta6_diri' ) # directories with output to compare
labels<-c('beta size, Dirichlet','beta size, lognormal','lognorm size, Dirichlet','lognorm size, lognorm','beta unimodal, logn','beta unimodal, diri' ) # labes for each scenario (directory with data)
dirs<- c("NS_4_7_MAC_no_adj", "NS_4_7_MAC", "NS_4_7_MAC_free" , "NS_4_7_MAC_100" ,"NS_4_7_MAC_test" ) # directories with output to compare
labels<-c('a) no adjustment', 'b) adjusted input','bb) estimate L50 and SR', 'd) L50 fixed at 100 mm','e) test') # labes for each scenario (directory with data)
dirs<- c("NS_4_7_MAC_81dist_size","NS_4_7_MAC_81dist_size_fixed","NS_4_7_MAC_81dist_size_mesh","NS_4_7_MAC_91dist_size_mesh") # directories with output to compare
labels<-c("log-normal","log-normal, fixed parameters","log-normal, mesh","log-normal, 91 mesh") # labes for each scenario (directory with data)
dirs<- c("NS_4_7_MAC_81dist","NS_4_7_MAC_81dist_size","NS_4_7_MAC_81dist_size_fixed","NS_4_7_MAC_81dist_size_mesh") # directories with output to compare
labels<-c("a) uniform size selction","b) size selection, free parameters","c) size selction, fixed parameters","d) b) and mesh selction for ALK") # labes for each scenario (directory with data)
dirs<- c("NS_paper_size","NS_paper_size_fixed","NS_paper_size_mesh") # directories with output to compare
labels<-c("log-normal","log-normal, fixed parameters","log-normal, mesh selection") # labes for each scenario (directory with data)
}
######################
for (dir in dirs) {
if ( file.access(file.path(root,dir,"sms.dat"), mode = 0)!=0) stop(paste('Directory',dir,'does not exist'))
}
Init.function() # get SMS.contol object including sp.names
a<-0
for (dir in dirs) {
file<-file.path(root,dir,'size_pref.out')
size<-read.table(file,comment.char = "#",header=T)
a<-a+1
size<-data.frame(size,dirs=labels[a])
if (dir==dirs[1]) {sizes<-size; npr<-dim(size)[1];} else sizes<-rbind(sizes,size)
if (dir==dirs[1]) {
file<-file.path(root,dir,'min_max_size_pref.out')
mm<-scan(file)
min.size<-matrix(data=mm,ncol=nsp-first.VPA+1,nrow=npr,byrow=TRUE)
dimnames(min.size)[2]<-list(sp.names[first.VPA:nsp])
dimnames(min.size)[1]<-list(sp.names[1:npr])
max.size<-matrix(data=mm[(1+length(min.size)):(2*length(min.size))],ncol=nsp-first.VPA+1,nrow=npr,byrow=TRUE)
dimnames(max.size)<-dimnames(min.size)
min.size<-apply(min.size,1,min)
max.size<-apply(max.size,1,max)
range.size<-min.size # copy structure
}
}
sizes<-subset(sizes,size.model %in% c(1,3,5,6) & species.n %in% incl.sp)
for (a in (1:dim(sizes)[1])) {
ratio<-sizes[a,"size.ratio"]
vars<-sizes[a,"size.var"]
# var.right<-sizes[a,"size.var.right"]
model<- sizes[a,"size.model"]
species<-sizes[a,"species.n"]
dirss<-sizes[a,"dirs"]
if (model==1) {
xx<-seq(first.x.on.plot,last.x.on.plot,by=by.den)
len=length(xx)
b<-data.frame(x=xx,y=exp(-(xx-ratio)^2/(2.0*vars)),Species=rep(sp.names[species],len),dirs=rep(dirss,len))
b<-subset(b,x>=log(min.size[species]) & x<=log(max.size[species]))
}
else if (model==3) { # Gamma
xx<-seq(first.x.on.plot,last.x.on.plot,by=by.den)
len=length(xx)
b<-data.frame(x=xx,y=dgamma(xx,shape=ratio,scale=vars),Species=rep(sp.names[species],len),dirs=rep(dirss,len))
b<-subset(b,x>=log(min.size[species]) & x<=log(max.size[species]))
}
else if (model==5 | model==6) {
min.s=log(min.size[species]);
max.s=log(max.size[species]);
# adjust to avoid outer bounds in beta distribution [0;1]
range.size[species]= 1.001*(max.s-min.s);
min.s= 0.999*min.s;
# range.size[species]=max.s-min.s;
xx<-seq(0,1,by=by.den/10)
len=length(xx)
yy<-dbeta(xx,ratio,vars)
xx<-min.s+range.size[species]*xx
b<-data.frame(x=xx,y=yy,Species=rep(sp.names[species],len),dirs=rep(dirss,len))
b<-subset(b,x>=min.s & x<=max.s)
}
if (a==1) ab<-rbind(b) else ab<-rbind(ab,b)
}
print(xyplot(y~x|Species*dirs,data=subset(ab,y<2.5),type='l',lwd=2,col=1,transparent=F,
layout=c(2,3),
xlab='log(predator weight / prey weight)',ylab='Size preference'))
####
nox<-2; noy<-3;
#cleanup()
newplot(dev="screen",nox,noy)
by(ab,list(ab$Species),function(x) {
#plot(x$x,y$y,
a<-subset(x,dirs==dirs[1])
plot(a$x,a$y,type='l',col=1,xlab="log(predator weight / prey weight)",ylab="size preference",
xlim=c(first.x.on.plot,last.x.on.plot),ylim=c(0,1),main=a[1,'Species'] )
for (i in (2:length(dirs))) {
a<-subset(x,dirs==labels[i])
lines(a$x,a$y,type='l',col=i,lty=i,lwd=2)
}
})
#for the paper;
#cleanup()
trellis.device(device = "windows",
color = F, width=9, height=17,pointsize = 12,
new = TRUE, retain = FALSE)
print( xyplot(y~x|Species,group=dirs, data=ab,type='a',lwd=2,lty=c(9,1,2),
layout=c(1,3), xlab='log(predator weight / prey weight)',ylab='Size preference',
strip = strip.custom( bg='white'),par.strip.text=list(cex=1, lines=1.7),
scales = list(x = list( cex=1), y= list(cex=1),alternating = 1)))
|
# pdf text extraction and tidying
library(pdftools)
library(tidyverse)
library(stringr)
txt <- pdf_text("http://goo.gl/wUXvjk")
txt %>% head(n = 1)
pattern <- "([0-9]{4} [M\\.|Mme|Mlle]{1}.*?, [né|neé]{1}.*?)\\."
# [digits]{matches exactly n = 4 times}
# escape '.' {matches exactly n = 1 times}
# any chr, match at least 0 times and at most one time
# né OR neé {matches exactly n = 1 times}
# any chr, match at least 0 times and at most one time
# escape '.'
# gsubfn package:
library(gsubfn) # similar to gsub, instead - usage function > replacement string
# uses matched text as input, emits replacement text from function run on it
?strapply # apply function over string(s), returns output of the function()
# pattern = ____ chr string of regex to be matched in any given chr vector
# Pull every "rank prefix name, born date" record out of the PDF text.
data <- unlist(gsubfn::strapply(txt, pattern = pattern))
?unlist() # given list structure, simplify to produce vector with all atomic components in 'x'
head(data, 5)
# Stringr
?matrix()
# One row per matched record; 7 columns: rank, prefix, last name, first name,
# birth day, month, year.
data_parsed <- matrix(NA_character_, length(data), 7)
# create matrix with row = length(data), column = 7
?boundary()
# Tokenise each record into words (list of character vectors).
data_words <- str_extract_all(data, boundary("word"))
words <- c("These are some words.", "homina homina homina")
str_count(words, boundary("word")) # 4 words
str_split(words, " ")[[1]] # split
str_split(words, " ")[[2]] # split
str_split(words, boundary("word"))[[1]] # split only "word"
str_split(words, boundary("word"))[[2]]
# data_parsed[, 1:7] <- t(sapply(data_words, head, n = 7))
data_parsed[, 1:4] <- t(sapply(data_words, head, n = 4)) # ranking, gender prefix, last name, first name
data_parsed[, 5:7] <- t(sapply(data_words, tail, n = 3)) # day, month, year
# or else include the ne and nee!
?t() # transpose of 'x', need to transpose or each word go into subsequent row of same column!
head(data_parsed)
# [,1] [,2] [,3] [,4] [,5] [,6] [,7]
# [1,] "0001" "Mme" "Beaumont" "Anne" "1" "septembre" "1993"
# [2,] "0002" "M" "Petitdemange" "Arthur" "15" "septembre" "1993"
# ~~VOILA~~
as.tibble(data_parsed) # for column vars names
library(purrr)
data_parsed %>% as_tibble() %>% mutate(birth_date = pmap(list(V5, V6, V7), function(d, m, y) {
paste(d, m, y, collapse = "")
}) %>% lubridate::dmy()
)
# NOT WORK because "months" in french... ??
data_parsed %>% as.tibble %>% dplyr::select(V6)
data_parsed <- data_parsed %>% as_tibble()
data_parsed %>% select(V6)
data_parsed %>% select(V6) %>% n_distinct() # 12 distinct for 12 months duh
data_parsed %>% select(V6) %>% distinct()
data_parsed$V6 <- as.factor(data_parsed$V6)
glimpse(data_parsed)
library(forcats)
levels(data_parsed$V6)
data_parsed$V6 <- data_parsed$V6 %>%
fct_recode("january" = "janvier",
"february" = "février",
"march" = "mars",
"april" = "avril",
"may" = "mai",
"june" = "juin",
"july" = "juillet",
"august" = "août",
"september" = "septembre",
"october" = "octobre",
"november" = "novembre",
"december" = "décembre"
)
?fct_recode
data_parsed_tidy <- as_tibble(data_parsed) %>%
transmute(
ranking = as.integer(V1),
is_male = (V2 == "M"),
family_name = V3,
first_name = V4,
birth_date = pmap(list(V5, V6, V7), function(d, m, y) {
paste(d, m, y, collapse = "")
}) %>% lubridate::dmy()
)
# Quick sanity checks on the parsed table.
head(data_parsed_tidy)
# Count of birth dates that failed to parse.
sum(is.na(data_parsed_tidy$birth_date))
# Bug fix: `complete()` was called with no arguments, which errors out
# (tidyr::complete() requires a data frame plus the columns to expand over).
# complete()
# Proportion of male students.
mean(data_parsed_tidy$is_male)
# 0.4345 43.5% is male!
library(scales)
data_parsed_tidy %>%
ggplot() +
geom_histogram(aes(birth_date), bins = 100) +
scale_y_continuous(breaks = pretty_breaks()) +
scale_x_date(breaks = pretty_breaks(n = 10))
# mutate(actual age?)
glimpse(data_parsed_tidy)
data_parsed_tidy$birth_date %>% as.character() %>% str_extract(pattern = "[0-9]{4}")
data_parsed_tidy <- data_parsed_tidy %>% mutate(birth_year = (birth_date %>% as.character() %>% str_extract(pattern = "[0-9]{4}")),
age = (2017 - as.numeric(birth_year)))
glimpse(data_parsed_tidy)
summary(data_parsed_tidy$age) # min. age = 20, max. age = 54 !
cummean(data_parsed_tidy$is_male) # proportion of males AT each new observation 0.00 as Rank 1 = Female!
mean(data_parsed_tidy$is_male) # 0.43 as above...
data_parsed_tidy %>%
mutate(prop_male = cummean(is_male)) %>%
ggplot() +
geom_line(aes(ranking, prop_male)) +
geom_hline(yintercept = mean(data_parsed_tidy$is_male), col = "orange", size = 1.1)
(data_parsed_tidy %>%
ggplot() +
geom_point(aes(ranking, birth_date, color = is_male)) +
aes(text = asPlotlyText(data_parsed_tidy))) %>%
plotly::ggplotly(tooltip = "text")
data_parsed_tidy %>%
ggplot(aes(ranking, birth_date)) +
geom_point() +
geom_smooth(method = 'gam', aes(color = is_male), lwd = 0.8)
?geom_smooth
| /med_school_pdf_data.r | no_license | Ryo-N7/Misc.ProjectsTutorials | R | false | false | 5,199 | r | # pdf text extraction and tidying
library(pdftools)
library(tidyverse)
library(stringr)
txt <- pdf_text("http://goo.gl/wUXvjk")
txt %>% head(n = 1)
pattern <- "([0-9]{4} [M\\.|Mme|Mlle]{1}.*?, [né|neé]{1}.*?)\\."
# [digits]{matches exactly n = 4 times}
# escape '.' {matches exactly n = 1 times}
# any chr, match at least 0 times and at most one time
# né OR neé {matches exactly n = 1 times}
# any chr, match at least 0 times and at most one time
# escape '.'
# gsubfn package:
library(gsubfn) # similar to gsub, instead - usage function > replacement string
# uses matched text as input, emits replacement text from function run on it
?strapply # apply function over string(s), treutnrs output of the function()
# pattern = ____ chr string of regex to be matched in any given chr vector
data <- unlist(gsubfn::strapply(txt, pattern = pattern))
?unlist() # given list structure, simplify to produce vector with all atomic components in 'x'
head(data, 5)
# Stringr
?matrix()
data_parsed <- matrix(NA_character_, length(data), 7)
# create matrix with row = length(data), column = 7
?boundary()
data_words <- str_extract_all(data, boundary("word"))
words <- c("These are some words.", "homina homina homina")
str_count(words, boundary("word")) # 4 words
str_split(words, " ")[[1]] # split
str_split(words, " ")[[2]] # split
str_split(words, boundary("word"))[[1]] # split only "word"
str_split(words, boundary("word"))[[2]]
# data_parsed[, 1:7] <- t(sapply(data_words, head, n = 7))
data_parsed[, 1:4] <- t(sapply(data_words, head, n = 4)) # ranking, gender prefix, last name, first name
data_parsed[, 5:7] <- t(sapply(data_words, tail, n = 3)) # day, month, year
# or else include the ne and nee!
?t() # trasnpose of 'x', need to transpose or each word go into subsequent row of same column!
head(data_parsed)
# [,1] [,2] [,3] [,4] [,5] [,6] [,7]
# [1,] "0001" "Mme" "Beaumont" "Anne" "1" "septembre" "1993"
# [2,] "0002" "M" "Petitdemange" "Arthur" "15" "septembre" "1993"
# ~~VOILA~~
as.tibble(data_parsed) # for column vars names
library(purrr)
data_parsed %>% as_tibble() %>% mutate(birth_date = pmap(list(V5, V6, V7), function(d, m, y) {
paste(d, m, y, collapse = "")
}) %>% lubridate::dmy()
)
# NOT WORK because "months" in french... ??
data_parsed %>% as.tibble %>% dplyr::select(V6)
data_parsed <- data_parsed %>% as_tibble()
data_parsed %>% select(V6)
data_parsed %>% select(V6) %>% n_distinct() # 12 distinct for 12 months duh
data_parsed %>% select(V6) %>% distinct()
data_parsed$V6 <- as.factor(data_parsed$V6)
glimpse(data_parsed)
library(forcats)
levels(data_parsed$V6)
data_parsed$V6 <- data_parsed$V6 %>%
fct_recode("january" = "janvier",
"february" = "février",
"march" = "mars",
"april" = "avril",
"may" = "mai",
"june" = "juin",
"july" = "juillet",
"august" = "août",
"september" = "septembre",
"october" = "octobre",
"november" = "novembre",
"december" = "décembre"
)
?fct_recode
data_parsed_tidy <- as_tibble(data_parsed) %>%
transmute(
ranking = as.integer(V1),
is_male = (V2 == "M"),
family_name = V3,
first_name = V4,
birth_date = pmap(list(V5, V6, V7), function(d, m, y) {
paste(d, m, y, collapse = "")
}) %>% lubridate::dmy()
)
head(data_parsed_tidy)
sum(is.na(data_parsed_tidy$birth_date))
complete()
mean(data_parsed_tidy$is_male)
# 0.4345 43.5% is male!
library(scales)
data_parsed_tidy %>%
ggplot() +
geom_histogram(aes(birth_date), bins = 100) +
scale_y_continuous(breaks = pretty_breaks()) +
scale_x_date(breaks = pretty_breaks(n = 10))
# mutate(actual age?)
glimpse(data_parsed_tidy)
data_parsed_tidy$birth_date %>% as.character() %>% str_extract(pattern = "[0-9]{4}")
data_parsed_tidy <- data_parsed_tidy %>% mutate(birth_year = (birth_date %>% as.character() %>% str_extract(pattern = "[0-9]{4}")),
age = (2017 - as.numeric(birth_year)))
glimpse(data_parsed_tidy)
summary(data_parsed_tidy$age) # min. age = 20, max. age = 54 !
cummean(data_parsed_tidy$is_male) # proportion of males AT each new observation 0.00 as Rank 1 = Female!
mean(data_parsed_tidy$is_male) # 0.43 as above...
data_parsed_tidy %>%
mutate(prop_male = cummean(is_male)) %>%
ggplot() +
geom_line(aes(ranking, prop_male)) +
geom_hline(yintercept = mean(data_parsed_tidy$is_male), col = "orange", size = 1.1)
(data_parsed_tidy %>%
ggplot() +
geom_point(aes(ranking, birth_date, color = is_male)) +
aes(text = asPlotlyText(data_parsed_tidy))) %>%
plotly::ggplotly(tooltip = "text")
data_parsed_tidy %>%
ggplot(aes(ranking, birth_date)) +
geom_point() +
geom_smooth(method = 'gam', aes(color = is_male), lwd = 0.8)
?geom_smooth
|
# Getting & Cleaning Data course project: merge the UCI HAR train/test sets,
# keep the mean()/std() measurements, attach readable labels, and write a
# tidy per-activity/per-subject average table.
library(dplyr)
# read train data
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
Y_train <- read.table("./UCI HAR Dataset/train/Y_train.txt")
Sub_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
# read test data
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
Y_test <- read.table("./UCI HAR Dataset/test/Y_test.txt")
Sub_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
# read data description
variable_names <- read.table("./UCI HAR Dataset/features.txt")
# read activity labels
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")
# 1. Merges the training and the test sets to create one data set.
X_total <- rbind(X_train, X_test)
Y_total <- rbind(Y_train, Y_test)
Sub_total <- rbind(Sub_train, Sub_test)
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# The escaped parentheses keep meanFreq()/angle() variables out of the match.
selected_var <- variable_names[grep("mean\\(\\)|std\\(\\)",variable_names[,2]),]
X_total <- X_total[,selected_var[,1]]
# 3. Uses descriptive activity names to name the activities in the data set
colnames(Y_total) <- "activity"
Y_total$activitylabel <- factor(Y_total$activity, labels = as.character(activity_labels[,2]))
activitylabel <- Y_total[,-1]
# 4. Appropriately labels the data set with descriptive variable names.
colnames(X_total) <- variable_names[selected_var[,1],2]
# 5. From the data set in step 4, creates a second, independent tidy data set with the average
# of each variable for each activity and each subject.
colnames(Sub_total) <- "subject"
total <- cbind(X_total, activitylabel, Sub_total)
# NOTE(review): summarize_each()/funs() are superseded in current dplyr; the
# modern equivalent is summarise(across(everything(), mean)).
total_mean <- total %>% group_by(activitylabel, subject) %>% summarize_each(funs(mean))
write.table(total_mean, file = "./UCI HAR Dataset/tidydata.txt", row.names = FALSE, col.names = TRUE)
| /run_analysis.R | no_license | AshishDayama/programming-assignment-4 | R | false | false | 1,828 | r |
# run_analysis.R -- builds a tidy summary of the UCI HAR data set.
# Expects the "UCI HAR Dataset" directory under the working directory and
# writes the result to "UCI HAR Dataset/tidydata.txt".
library(dplyr)
# read train data
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
Y_train <- read.table("./UCI HAR Dataset/train/Y_train.txt")
Sub_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
# read test data
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
Y_test <- read.table("./UCI HAR Dataset/test/Y_test.txt")
Sub_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
# read data description (feature names: column 1 = index, column 2 = name)
variable_names <- read.table("./UCI HAR Dataset/features.txt")
# read activity labels (activity id -> descriptive name)
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")
# 1. Merges the training and the test sets to create one data set.
X_total <- rbind(X_train, X_test)
Y_total <- rbind(Y_train, Y_test)
Sub_total <- rbind(Sub_train, Sub_test)
# 2. Extracts only the measurements on the mean and standard deviation for
#    each measurement. "mean()"/"std()" are matched with literal parentheses
#    so columns such as meanFreq() are excluded.
selected_var <- variable_names[grep("mean\\(\\)|std\\(\\)", variable_names[, 2]), ]
X_total <- X_total[, selected_var[, 1]]
# 3. Uses descriptive activity names to name the activities in the data set.
colnames(Y_total) <- "activity"
Y_total$activitylabel <- factor(Y_total$activity, labels = as.character(activity_labels[, 2]))
activitylabel <- Y_total[, -1]
# 4. Appropriately labels the data set with descriptive variable names.
colnames(X_total) <- variable_names[selected_var[, 1], 2]
# 5. From the data set in step 4, creates a second, independent tidy data set
#    with the average of each variable for each activity and each subject.
colnames(Sub_total) <- "subject"
total <- cbind(X_total, activitylabel, Sub_total)
# summarize_each()/funs() are deprecated; summarise(across(...)) is the
# supported equivalent and averages every non-grouping column.
total_mean <- total %>%
  group_by(activitylabel, subject) %>%
  summarise(across(everything(), mean), .groups = "drop")
write.table(total_mean, file = "./UCI HAR Dataset/tidydata.txt", row.names = FALSE, col.names = TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/caret.R
\name{fit_lssm_caret_wrapper}
\alias{fit_lssm_caret_wrapper}
\title{Wrapper around fit_lssm for use with caret::train}
\usage{
fit_lssm_caret_wrapper(x, y, param, ts_frequency = 1, verbose = FALSE, ...)
}
\arguments{
\item{x}{time series data to fit to}
\item{y}{ignored}
\item{param}{dataframe of one row of arguments to fit_lssm}
\item{...}{other arguments are ignored}
}
\value{
numeric vector of predictive medians with attributes:
\itemize{
\item family is a string with the parametric family, e.g. "norm"
\item other attributes are names of parameters for the parametric family
}
}
\description{
Wrapper around fit_lssm for use with caret::train
}
| /man/fit_lssm_caret_wrapper.Rd | permissive | reichlab/lssm | R | false | true | 743 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/caret.R
\name{fit_lssm_caret_wrapper}
\alias{fit_lssm_caret_wrapper}
\title{Wrapper around fit_lssm for use with caret::train}
\usage{
fit_lssm_caret_wrapper(x, y, param, ts_frequency = 1, verbose = FALSE, ...)
}
\arguments{
\item{x}{time series data to fit to}
\item{y}{ignored}
\item{param}{dataframe of one row of arguments to fit_lssm}
\item{...}{other arguments are ignored}
}
\value{
numeric vector of predictive medians with attributes:
\itemize{
\item family is a string with the parametric family, e.g. "norm"
\item other attributes are names of parameters for the parametric family
}
}
\description{
Wrapper around fit_lssm for use with caret::train
}
|
# Assert that evaluating `object` raises no error.
# testthat's expect_error(expr, NA, ...) means "expect zero errors"; any
# extra arguments in `...` are forwarded to expect_error().
# NOTE(review): `{{ object }}` here is plain nested braces (base R), not
# rlang embracing; it simply evaluates `object` lazily inside expect_error().
expect_no_error <- function(object, ...) {
  expect_error({{ object }}, NA, ...)
}
expect_no_warning <- function(object, ...) {
expect_warning({{ object }}, NA, ...)
} | /tests/testthat/helpers.R | no_license | roliveros-ramos/calibrar | R | false | false | 169 | r | expect_no_error <- function(object, ...) {
expect_error({{ object }}, NA, ...)
}
expect_no_warning <- function(object, ...) {
expect_warning({{ object }}, NA, ...)
} |
###############################
##########################
# Alternative diffusion curve
# Counterfactual: how quickly would local telephone exchanges have been
# installed if market access had no effect on the diffusion hazard?
Delay<- -0.00000693 #Effect on Market Access variable in Diffusion hazard
TownsHazardCons<-read.csv("C:\\Box\\Research\\Telephone\\project_telephone\\Data\\Stata\\TownsHazardCons.csv")
# Rescale each town's installation month by removing the market-access effect.
AltInstall<-TownsHazardCons$InstallMonth/exp(Delay*TownsHazardCons$MA_Post_Out_1880)
# Cumulative installations per month:
#   col 1 = month, col 2 = actual count, col 3 = counterfactual count,
#   col 4 = difference (actual - counterfactual).
Cumulative<-matrix(0,nrow=max(TownsHazardCons$InstallMonth),ncol=4)
Cumulative[,1]<-seq(1,max(TownsHazardCons$InstallMonth),1)
for (i in 1: dim(Cumulative)[1]){
  Cumulative[i,2]<-sum(TownsHazardCons$InstallMonth<=i)
  Cumulative[i,3]<-sum(AltInstall<=i)
}
Cumulative[,4]<-Cumulative[,2]-Cumulative[,3]
# Plot actual vs counterfactual diffusion, then the relative difference.
matplot(Cumulative[,2:3], type="l", ylab="Number of Local Exchanges", xlab="")
plot(Cumulative[,c(4)]/Cumulative[,2])
#################
################
#Quantification of shares of phone lines due to long distance phone calls
##Read in Data sets
Towns<-read.csv("C:\\Box\\Research\\Telephone\\project_telephone\\Data\\Towns.csv", header=TRUE)
MatInvDistTel<-read.csv("C:\\Box\\Research\\Telephone\\project_telephone\\Data\\MatInvDistTel.csv", header=TRUE, row.names = 1)
MatInvDistTel<-as.matrix(MatInvDistTel) #confirm data in matrix form
##remove Pfalz from analysis
# Drop rows (and the matching matrix rows/columns) for region 'PF'.
Main<-Towns$Region!='PF'
Towns<-Towns[Main==TRUE,]
MatInvDistTel<-MatInvDistTel[Main==TRUE,Main==TRUE]
#rescale population to make coefficients readable
Towns$Y1905<-Towns$Y1905/1000
Towns$Y1900<-Towns$Y1900/1000
Towns$Y1896<-Towns$Y1896/1000
#############################################
#
Effect<-0.305 #pull correct effect from spatial regression !!!!!!!!!
Shares<-(Effect*(MatInvDistTel%*%Towns$Lines1905))/Towns$Lines1905 | /Code/Quantification.R | no_license | ploeckl/project_telephone | R | false | false | 1,709 | r | ###############################
##########################
# Alternative diffusion curve
Delay<- -0.00000693 #Effect on Market Access variable in Diffusion hazard
TownsHazardCons<-read.csv("C:\\Box\\Research\\Telephone\\project_telephone\\Data\\Stata\\TownsHazardCons.csv")
AltInstall<-TownsHazardCons$InstallMonth/exp(Delay*TownsHazardCons$MA_Post_Out_1880)
Cumulative<-matrix(0,nrow=max(TownsHazardCons$InstallMonth),ncol=4)
Cumulative[,1]<-seq(1,max(TownsHazardCons$InstallMonth),1)
for (i in 1: dim(Cumulative)[1]){
Cumulative[i,2]<-sum(TownsHazardCons$InstallMonth<=i)
Cumulative[i,3]<-sum(AltInstall<=i)
}
Cumulative[,4]<-Cumulative[,2]-Cumulative[,3]
matplot(Cumulative[,2:3], type="l", ylab="Number of Local Exchanges", xlab="")
plot(Cumulative[,c(4)]/Cumulative[,2])
#################
################
#Quantification of shares of phone lines due to long distance phone calls
##Read in Data sets
Towns<-read.csv("C:\\Box\\Research\\Telephone\\project_telephone\\Data\\Towns.csv", header=TRUE)
MatInvDistTel<-read.csv("C:\\Box\\Research\\Telephone\\project_telephone\\Data\\MatInvDistTel.csv", header=TRUE, row.names = 1)
MatInvDistTel<-as.matrix(MatInvDistTel) #confirm data in matrix form
##remove Pfalz from analysis
Main<-Towns$Region!='PF'
Towns<-Towns[Main==TRUE,]
MatInvDistTel<-MatInvDistTel[Main==TRUE,Main==TRUE]
#rescale population to make coefficients readable
Towns$Y1905<-Towns$Y1905/1000
Towns$Y1900<-Towns$Y1900/1000
Towns$Y1896<-Towns$Y1896/1000
#############################################
#
Effect<-0.305 #pull correct effect from spatial regression !!!!!!!!!
Shares<-(Effect*(MatInvDistTel%*%Towns$Lines1905))/Towns$Lines1905 |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bigquery_objects.R
\name{BigtableColumn}
\alias{BigtableColumn}
\title{BigQuery API Objects
A data platform for customers to create, manage, share and query data.}
\usage{
BigtableColumn(encoding = NULL, fieldName = NULL, onlyReadLatest = NULL,
qualifierEncoded = NULL, qualifierString = NULL, type = NULL)
}
\arguments{
\item{encoding}{[Optional] The encoding of the values when the type is not STRING}
\item{fieldName}{[Optional] If the qualifier is not a valid BigQuery field identifier, a valid identifier must be provided as the column field name and is used as the field name in queries}
\item{onlyReadLatest}{[Optional] If this is set, only the latest version of value in this column are exposed}
\item{qualifierEncoded}{[Required] Qualifier of the column}
\item{qualifierString}{No description}
\item{type}{[Optional] The type to convert the value in cells of this column}
}
\value{
BigtableColumn object
}
\description{
Auto-generated code by googleAuthR::gar_create_api_objects
at 2016-09-03 22:57:35
filename: /Users/mark/dev/R/autoGoogleAPI/googlebigqueryv2.auto/R/bigquery_objects.R
api_json: api_json
}
\details{
Objects for use by the functions created by googleAuthR::gar_create_api_skeleton
BigtableColumn Object
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
| /googlebigqueryv2.auto/man/BigtableColumn.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 1,304 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bigquery_objects.R
\name{BigtableColumn}
\alias{BigtableColumn}
\title{BigQuery API Objects
A data platform for customers to create, manage, share and query data.}
\usage{
BigtableColumn(encoding = NULL, fieldName = NULL, onlyReadLatest = NULL,
qualifierEncoded = NULL, qualifierString = NULL, type = NULL)
}
\arguments{
\item{encoding}{[Optional] The encoding of the values when the type is not STRING}
\item{fieldName}{[Optional] If the qualifier is not a valid BigQuery field identifier, a valid identifier must be provided as the column field name and is used as the field name in queries}
\item{onlyReadLatest}{[Optional] If this is set, only the latest version of value in this column are exposed}
\item{qualifierEncoded}{[Required] Qualifier of the column}
\item{qualifierString}{No description}
\item{type}{[Optional] The type to convert the value in cells of this column}
}
\value{
BigtableColumn object
}
\description{
Auto-generated code by googleAuthR::gar_create_api_objects
at 2016-09-03 22:57:35
filename: /Users/mark/dev/R/autoGoogleAPI/googlebigqueryv2.auto/R/bigquery_objects.R
api_json: api_json
}
\details{
Objects for use by the functions created by googleAuthR::gar_create_api_skeleton
BigtableColumn Object
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
|
#' Run the rnalab Shiny App
#'
#' Launches the Shiny application bundled in the package's \code{rnalabApp}
#' directory.
#'
#' @export runRNAapp
#' @importFrom shiny runApp
runRNAapp <- function() {
  app_dir <- system.file("rnalabApp", package = "rnalab")
  shiny::runApp(app_dir)
}
| /rnalab.Rcheck/00_pkg_src/rnalab/R/runRNAapp.R | no_license | emilyd5077/rnalab | R | false | false | 174 | r | #' Run the rnalab Shiny App
#'
#' @export runRNAapp
#' @importFrom shiny runApp
runRNAapp = function(){
shiny::runApp(system.file('rnalabApp', package='rnalab'))
}
|
# Fit an elastic-net regression (alpha = 0.02, i.e. close to ridge) for the
# autonomic_ganglia training set, choosing lambda by 10-fold cross-validation
# on mean absolute error, and append the fitted path summary to a text file.
library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/AvgRank/autonomic_ganglia.csv",head=T,sep=",")
# Column 1 is the response; columns 4 onward are the predictors.
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fix the RNG so the CV fold assignment is reproducible.
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.02,family="gaussian",standardize=FALSE)
# Redirect console output to the results file (appending), print the fitted
# glmnet path, then restore normal console output.
sink('./autonomic_ganglia_012.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/AvgRank/autonomic_ganglia/autonomic_ganglia_012.R | no_license | esbgkannan/QSMART | R | false | false | 369 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/AvgRank/autonomic_ganglia.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.02,family="gaussian",standardize=FALSE)
sink('./autonomic_ganglia_012.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
## Create more meaningful column names
## Create more meaningful column names: expand the UCI HAR feature-name
## abbreviations into descriptive words. The substitutions are applied in
## order, exactly mirroring the original chain of gsub() calls.
renderColumnName <- function(column) {
  # pattern -> replacement pairs; order matters (e.g. "-" before "mean").
  replacements <- c(
    "-" = ".",                    # '-' separators become '.'
    "\\(\\)" = "",                # drop trailing '()'
    "mean" = "Mean",
    "^t" = "Timed",               # leading 't' (time domain)
    "^f" = "FTT",                 # leading 'f' (frequency domain)
    "std" = "StandardDeviation",
    "Acc" = "Accelerometer",
    "Gyro" = "Gyroscope",
    "Mag" = "Magnitude",
    "Jerk" = "JerkSignals",
    "Freq" = "Frequency"
  )
  out <- column
  for (pat in names(replacements)) {
    out <- gsub(pat, replacements[[pat]], out)
  }
  as.character(out)
}
| /renderColumnName.R | no_license | poco-irrilevante/RunAnalysis | R | false | false | 662 | r | ## Create more meaningful column names
## Create more meaningful column names: expand the UCI HAR feature-name
## abbreviations into descriptive words. The substitutions are applied in
## order, exactly mirroring the original chain of gsub() calls.
renderColumnName <- function(column) {
  # pattern -> replacement pairs; order matters (e.g. "-" before "mean").
  replacements <- c(
    "-" = ".",                    # '-' separators become '.'
    "\\(\\)" = "",                # drop trailing '()'
    "mean" = "Mean",
    "^t" = "Timed",               # leading 't' (time domain)
    "^f" = "FTT",                 # leading 'f' (frequency domain)
    "std" = "StandardDeviation",
    "Acc" = "Accelerometer",
    "Gyro" = "Gyroscope",
    "Mag" = "Magnitude",
    "Jerk" = "JerkSignals",
    "Freq" = "Frequency"
  )
  out <- column
  for (pat in names(replacements)) {
    out <- gsub(pat, replacements[[pat]], out)
  }
  as.character(out)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convert_counts.R
\name{wb_make_object}
\alias{wb_make_object}
\title{counts to waterbear object}
\usage{
wb_make_object(
counts_array,
gene_mapping,
control_guide_regex = "Non-",
bin_size_prior = NULL
)
}
\arguments{
\item{counts_array}{an array organized in the following dimension:}
\item{gene_mapping}{a data frame mapping of guide names to gene names. requires the following column names: (1) guide, (2) gene.}
\item{control_guide_regex}{a regular expression used to find/match control guides. default is 'Non-'.}
\item{bin_size_prior}{the expected mass in each bin. If NULL, defaults to uniform (e.g. c(0.25, 0.25, 0.25, 0.25)).}
}
\value{
a water bear object that inference can be performed on.
}
\description{
this function takes a count table and converts it to a water bear object
}
| /man/wb_make_object.Rd | permissive | pimentel/waterbear | R | false | true | 881 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convert_counts.R
\name{wb_make_object}
\alias{wb_make_object}
\title{counts to waterbear object}
\usage{
wb_make_object(
counts_array,
gene_mapping,
control_guide_regex = "Non-",
bin_size_prior = NULL
)
}
\arguments{
\item{counts_array}{an array organized in the following dimension:}
\item{gene_mapping}{a data frame mapping of guide names to gene names. requires the following column names: (1) guide, (2) gene.}
\item{control_guide_regex}{a regular expression used to find/match control guides. default is 'Non-'.}
\item{bin_size_prior}{the expected mass in each bin. If NULL, defaults to uniform (e.g. c(0.25, 0.25, 0.25, 0.25)).}
}
\value{
a water bear object that inference can be performed on.
}
\description{
this function takes a count table and converts it to a water bear object
}
|
rm(list = ls())
source("DataGen4.R")
library(lmtest)
library(ivpack)
# Monte Carlo comparison of five estimators across simulated data sets.
#
# datmat: a matrix-like container whose [i, j] cell holds one simulated data
# set, stored as a flat vector that reshapes to 1000 rows x 5 columns:
#   col 1                    y_values (outcome; fitted with a probit below,
#                            so presumably binary -- confirm in DataGen4.R)
#   col 2                    ypre (a second outcome variable)
#   col 3                    x (the endogenous regressor of interest)
#   cols 4..(3+instrument)   excluded instrument(s) z
#   next `exo` column(s)     included exogenous regressor(s) xo
# The true coefficient on x is 0.5: bias is measured as |estimate - 0.5| and
# coverage as whether the 95% CI contains 0.5.
#
# Returns a list with one row per column j of datmat:
#   results  - mean absolute bias for each of the 5 estimators
#   coverage - number of replications whose 95% CI covered 0.5
#   endo     - number of rejections of exogeneity of x (control-function test)
#
# NOTE(review): matrix(dat, ncol=5, nrow=1000) hard-codes 5 columns, so the
# exo/instrument arguments only behave correctly for exo = 1, instrument = 1.
regressions <- function(datmat, exo=1, instrument=1){
  # Estimate storage: r1..r5 hold the coefficient on x, one per replication i
  # (rows) and design j (columns), for each of the five estimators below.
  r1 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
  r2 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
  r3 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
  r4 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
  r5 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
  # Per-design summary: one row per design, one column per estimator.
  results <- matrix(0, nrow=ncol(datmat), ncol= 5)
  # CI-coverage indicators (1 = the 95% CI covered the true value 0.5).
  c1 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
  c2 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
  c3 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
  c4 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
  c5 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
  coverage <- matrix(0, nrow=ncol(datmat), ncol= 5)
  # Endogeneity-test rejection indicators and their per-design totals.
  e5 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
  endo <- matrix(0, nrow=ncol(datmat), ncol = 1)
  # test(): 1 if p-value s rejects at the 5% level, else 0.
  test <- function(s){
    if (s < .05){
      return(1)
    }
    else {
      return(0)
    }
  }
  for (j in 1:ncol(datmat)){
    for (i in 1:nrow(datmat)){
      # Unpack the stored vector back into the 1000 x 5 simulated data set.
      dat= unlist(datmat[i,j])
      dat = matrix(dat, ncol=5 ,nrow = 1000)
      y_values = dat[,1]
      ypre = dat[,2]
      x <- dat[, 3]
      ##Obtain IV z (excluded exogenous regressor)
      z <- dat[, (4):(3+instrument)]
      ##Obtain included exogenous regressor
      xo <- dat[, (4+instrument):(3+ instrument+exo)]
      # Estimator 1: OLS of ypre on x and xo; [2] extracts the slope on x.
      olspyre <- lm(ypre ~ x + xo)
      r1[i, j] <- olspyre$coefficients[2]
      # coeftest()[2, 2] is the standard error of the x coefficient.
      cols <- coeftest(olspyre)[2, 2]
      # cover(): 1 if the 95% Wald CI (estimate +/- 1.96*se) contains 0.5.
      cover <- function(estimate, se){
        upper <- estimate + 1.96*se
        lower <- estimate - 1.96*se
        if (.5 > lower & .5 < upper){
          return(1)}
        else{
          return(0)}
      }
      c1[i, j] <- cover(estimate= r1[i,j], se = cols)
      # Estimator 2: 2SLS of ypre on x (instrumented by z) and xo.
      ivpre <- ivreg(ypre~x+ xo, ~z + xo)
      r2[i,j] <- ivpre$coefficients[2]
      # NOTE(review): invisible() around an assignment has no effect here.
      invisible(ivse <- robust.se(ivpre)[2,2])
      c2[i, j] <- cover(estimate = r2[i,j], se=ivse)
      # Estimator 3: OLS (linear probability model) of y_values on x and xo.
      yvaldata = as.data.frame(cbind(y_values, x, xo))
      olsyval <- lm(y_values ~., data=yvaldata)
      r3[i, j] <- olsyval$coefficients[2]
      invisible(cols3 <- coeftest(olsyval)[2, 2])
      c3[i, j] <- cover(estimate = r3[i,j], se=cols3)
      # Estimator 4: probit of y_values on x and xo.
      # NOTE(review): the data frame built on the next line is never used;
      # the probit below fits on yvaldata.
      dat = as.data.frame(cbind(y_values, x,z,xo))
      probyval <- glm(y_values ~., family = binomial(link = "probit"), data = yvaldata)
      r4[i, j] <- probyval$coefficients[2]
      invisible(seprobit <- coeftest(probyval)[2,2])
      c4[i, j] <- cover(estimate = r4[i,j], se=seprobit)
      # Estimator 5: 2SLS of y_values on x (instrumented by z) and xo.
      ivyval <- ivreg(y_values~x+ xo, ~z + xo)
      r5[i, j] <- ivyval$coefficients[2]
      invisible(iv2se <- robust.se(ivyval)[2,2])
      c5[i,j] <- cover(estimate = r5[i,j], se=iv2se)
      ##Endogeneity
      # Control-function (Durbin-Wu-Hausman style) test: regress y_values on
      # x, xo and the first-stage residuals; [4,4] is the p-value on the
      # residual coefficient.
      firststage <- (lm(x~z+xo))$residuals
      secondstep <- lm(y_values~x+xo +firststage)
      s <- summary(secondstep)$coefficients[4,4]
      e5[i,j] <- test(s=s)
    }
    # Summarise design j: mean absolute deviation from the true value 0.5 ...
    results[j, 1] <- mean(abs(r1[, j]-0.5))
    results[j, 2] <- mean(abs(r2[, j]-0.5))
    results[j, 3] <- mean(abs(r3[, j]-0.5))
    results[j, 4] <- mean(abs(r4[, j]-0.5))
    results[j, 5] <- mean(abs(r5[, j]-0.5))
    # ... and counts of CI coverage and endogeneity-test rejections.
    coverage[j, 1] <- sum(c1[,j])
    coverage[j, 2] <- sum(c2[,j])
    coverage[j, 3] <- sum(c3[,j])
    coverage[j, 4] <- sum(c4[,j])
    coverage[j, 5] <- sum(c5[,j])
    endo[j,] = sum(e5[,j])
  }
  return(list(results =results, coverage=coverage, endo=endo ))
}
# Run the Monte Carlo study on data1, discarding console chatter from the
# fitting routines. NOTE(review): sink("NULL") redirects output to a file
# literally named "NULL" in the working directory; nullfile() (R >= 3.6)
# would avoid leaving that file behind.
sink("NULL")
mad1 <- regressions(datmat=data1)
sink()
# Echo the summaries (only visible when run interactively).
mad1$results
mad1$coverage
mad1$endo
# Move up to the project root so output lands in Data/.
setwd("..")
# Column 5 is the IV (2SLS) estimator applied to the observed outcome.
bias <- mad1$results[, 5]
coverage <- mad1$coverage[,5]
endogeneity <- mad1$endo
write.csv(bias, "Data/bias4.csv")
write.csv(coverage, "Data/coverage4.csv")
write.csv(endogeneity, "Data/endo4.csv")
# Columns 1-4 hold the remaining (auxiliary) estimators.
auxbias <- mad1$results[, 1:4]
auxcoverage <- mad1$coverage[,1:4]
# Bug fix: the bias and coverage matrices were previously written to each
# other's files (auxcoverage -> auxbias4.csv and auxbias -> auxcoverage4.csv).
write.csv(auxbias, "Data/auxbias4.csv")
write.csv(auxcoverage, "Data/auxcoverage4.csv")
write.csv(auxbias, "Data/auxcoverage4.csv") | /R/Regressions4.R | no_license | cdanko42/Simulations | R | false | false | 3,493 | r | rm(list = ls())
source("DataGen4.R")
library(lmtest)
library(ivpack)
regressions <- function(datmat, exo=1, instrument=1){
r1 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
r2 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
r3 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
r4 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
r5 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
results <- matrix(0, nrow=ncol(datmat), ncol= 5)
c1 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
c2 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
c3 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
c4 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
c5 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
coverage <- matrix(0, nrow=ncol(datmat), ncol= 5)
e5 <- matrix(0, nrow=nrow(datmat), ncol= ncol(datmat))
endo <- matrix(0, nrow=ncol(datmat), ncol = 1)
test <- function(s){
if (s < .05){
return(1)
}
else {
return(0)
}
}
for (j in 1:ncol(datmat)){
for (i in 1:nrow(datmat)){
dat= unlist(datmat[i,j])
dat = matrix(dat, ncol=5 ,nrow = 1000)
y_values = dat[,1]
ypre = dat[,2]
x <- dat[, 3]
##Obtain IV z (excluded exogenous regressor)
z <- dat[, (4):(3+instrument)]
##Obtain included exogenous regressor
xo <- dat[, (4+instrument):(3+ instrument+exo)]
olspyre <- lm(ypre ~ x + xo)
r1[i, j] <- olspyre$coefficients[2]
cols <- coeftest(olspyre)[2, 2]
cover <- function(estimate, se){
upper <- estimate + 1.96*se
lower <- estimate - 1.96*se
if (.5 > lower & .5 < upper){
return(1)}
else{
return(0)}
}
c1[i, j] <- cover(estimate= r1[i,j], se = cols)
ivpre <- ivreg(ypre~x+ xo, ~z + xo)
r2[i,j] <- ivpre$coefficients[2]
invisible(ivse <- robust.se(ivpre)[2,2])
c2[i, j] <- cover(estimate = r2[i,j], se=ivse)
yvaldata = as.data.frame(cbind(y_values, x, xo))
olsyval <- lm(y_values ~., data=yvaldata)
r3[i, j] <- olsyval$coefficients[2]
invisible(cols3 <- coeftest(olsyval)[2, 2])
c3[i, j] <- cover(estimate = r3[i,j], se=cols3)
dat = as.data.frame(cbind(y_values, x,z,xo))
probyval <- glm(y_values ~., family = binomial(link = "probit"), data = yvaldata)
r4[i, j] <- probyval$coefficients[2]
invisible(seprobit <- coeftest(probyval)[2,2])
c4[i, j] <- cover(estimate = r4[i,j], se=seprobit)
ivyval <- ivreg(y_values~x+ xo, ~z + xo)
r5[i, j] <- ivyval$coefficients[2]
invisible(iv2se <- robust.se(ivyval)[2,2])
c5[i,j] <- cover(estimate = r5[i,j], se=iv2se)
##Endogeneity
firststage <- (lm(x~z+xo))$residuals
secondstep <- lm(y_values~x+xo +firststage)
s <- summary(secondstep)$coefficients[4,4]
e5[i,j] <- test(s=s)
}
results[j, 1] <- mean(abs(r1[, j]-0.5))
results[j, 2] <- mean(abs(r2[, j]-0.5))
results[j, 3] <- mean(abs(r3[, j]-0.5))
results[j, 4] <- mean(abs(r4[, j]-0.5))
results[j, 5] <- mean(abs(r5[, j]-0.5))
coverage[j, 1] <- sum(c1[,j])
coverage[j, 2] <- sum(c2[,j])
coverage[j, 3] <- sum(c3[,j])
coverage[j, 4] <- sum(c4[,j])
coverage[j, 5] <- sum(c5[,j])
endo[j,] = sum(e5[,j])
}
return(list(results =results, coverage=coverage, endo=endo ))
}
sink("NULL")
mad1 <- regressions(datmat=data1)
sink()
mad1$results
mad1$coverage
mad1$endo
setwd("..")
bias <- mad1$results[, 5]
coverage <- mad1$coverage[,5]
endogeneity <- mad1$endo
write.csv(bias, "Data/bias4.csv")
write.csv(coverage, "Data/coverage4.csv")
write.csv(endogeneity, "Data/endo4.csv")
auxbias <- mad1$results[, 1:4]
auxcoverage <- mad1$coverage[,1:4]
write.csv(auxcoverage, "Data/auxbias4.csv")
write.csv(auxbias, "Data/auxcoverage4.csv") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidyxl_fmts.R
\name{fmt_alignment_horizontal}
\alias{fmt_alignment_horizontal}
\title{Add formatting information from the fmt_alignment_horizontal format object
This function uses the format object created by \code{xlsx_formats} along with `local_format_id` to create a vector representing cells' alignment_horizontal formatting.}
\usage{
fmt_alignment_horizontal(format_id_vec = local_format_id,
sheet_formats = formats)
}
\arguments{
\item{format_id_vec}{local format id vector}
\item{sheet_formats}{formats}
}
\description{
Add formatting information from the fmt_alignment_horizontal format object
This function uses the format object created by \code{xlsx_formats} along with `local_format_id` to create a vector representing cells' alignment_horizontal formatting.
}
| /man/fmt_alignment_horizontal.Rd | no_license | ianmoran11/unpivotr | R | false | true | 856 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidyxl_fmts.R
\name{fmt_alignment_horizontal}
\alias{fmt_alignment_horizontal}
\title{Add formatting information from the fmt_alignment_horizontal format object
This function uses the format object created by \code{xlsx_formats} along with `local_format_id`` to create a vector representing cells' alignment_horizontal formatting.}
\usage{
fmt_alignment_horizontal(format_id_vec = local_format_id,
sheet_formats = formats)
}
\arguments{
\item{format_id_vec}{local format id vector}
\item{sheet_formats}{formats}
}
\description{
Add formatting information from the fmt_alignment_horizontal format object
This function uses the format object created by \code{xlsx_formats} along with `local_format_id`` to create a vector representing cells' alignment_horizontal formatting.
}
|
## The functions create a special matrix, compute the inverse or recieve the inverse from the cache.
##This function creates a special "matrix" object that can cache its inverse.
## Create a special "matrix": a list of accessors around matrix `x` that can
## cache its inverse. set() replaces the matrix (and clears the cache),
## get() returns it, setsolve()/getsolve() write/read the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      # Replacing the matrix invalidates any previously cached inverse.
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    setsolve = function(solve) inv <<- solve,
    getsolve = function() inv
  )
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache. The matrix supplied is always assumed to be invertible
## Return the inverse of the special "matrix" produced by makeCacheMatrix().
## If an inverse is already cached (and the matrix has not changed), the
## cached value is returned; otherwise the inverse is computed with solve(),
## stored in the cache, and returned. The matrix is assumed invertible.
cachesolve <- function(x, ...) {
  cached <- x$getsolve()
  if (is.null(cached)) {
    # Cache miss: compute the inverse and remember it for next time.
    mat <- x$get()
    cached <- solve(mat, ...)
    x$setsolve(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | RainbowTyger/ProgrammingAssignment2 | R | false | false | 1,101 | r | ## The functions create a special matrix, compute the inverse or recieve the inverse from the cache.
##This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setsolve <- function(solve) m <<- solve
getsolve <- function() m
list(set = set, get = get,
setsolve = setsolve,
getsolve = getsolve)
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache. The matrix supplied is always assumed to be invertible
cachesolve <- function(x, ...) {
m <- x$getsolve()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setsolve(m)
m
}
|
###########################################################
# DOWNLOADS HDF DATA
# Usage:
# Rscript downloadData.R product=MOD09Q1 collection=005 begin=2000.02.01 end=2000.04.01 tileH=11:11 tileV=9:9 wait=1
###########################################################
## Parse "key=value" pairs from the command line.
argsep <- "="
keys <- vector(mode = "character", length = 0)
values <- vector(mode = "character", length = 0)
for (arg in commandArgs()) {
  # grepl(..., fixed = TRUE) tests for a literal "=". The previous
  # agrep(argsep, arg) == TRUE did approximate matching and produced a
  # zero-length if() condition (an error) for arguments without "=".
  if (grepl(argsep, arg, fixed = TRUE)) {
    pair <- unlist(strsplit(arg, argsep, fixed = TRUE))
    keys <- c(keys, pair[1])
    values <- c(values, pair[2])
  }
}
product <- values[which(keys == "product")]
begin <- values[which(keys == "begin")]
end <- values[which(keys == "end")]
tileH <- values[which(keys == "tileH")]
tileV <- values[which(keys == "tileV")]
collection <- values[which(keys == "collection")]
wait <- values[which(keys == "wait")]
# Expand a tile specification: "a:b" becomes the integer sequence a..b,
# a bare number becomes that single numeric value.
parseTileRange <- function(spec) {
  if (grepl(":", spec, fixed = TRUE)) {
    bounds <- as.numeric(unlist(strsplit(spec, ":", fixed = TRUE)))
    seq(from = bounds[1], to = bounds[2], by = 1)
  } else {
    as.numeric(spec)
  }
}
tileH <- parseTileRange(tileH)
tileV <- parseTileRange(tileV)
# Downloads data
library(MODIS)
#MODISoptions(localArcPath, outDirPath, pixelSize, outProj, resamplingType, dataFormat, gdalPath, MODISserverOrder, dlmethod, stubbornness, systemwide = FALSE, quiet = FALSE, save=TRUE, checkPackages=TRUE)
res <- getHdf(product = product, begin = begin, end = end, tileH = tileH, tileV = tileV, collection = collection, wait = wait, quiet = FALSE)
| /downloadData.R | permissive | edzer/amazonGreenUp2005 | R | false | false | 1,809 | r | ###########################################################
# DOWNLOADS HDF DATA
#Rscript downloadData.R product=MOD09Q1 collection=005 begin=2000.02.01 end=2000.04.01 tileH=11:11 tileV=9:9 wait=1
###########################################################
#Get arguments
argsep <- "="
keys <- vector(mode = "character", length = 0)
values <- vector(mode = "character", length = 0)
#commandArgs <- c("uno=1", "dos=2")
for (arg in commandArgs()){
if(agrep(argsep, arg) == TRUE){
pair <- unlist(strsplit(arg, argsep))
keys <- append(keys, pair[1], after = length(pair))
values <- append(values, pair[2], after = length(pair))
}
}
#cat("\n-----------------\n")
#matrix(data = cbind(keys, values), ncol = 2, byrow = FALSE)
#cat("\n-----------------\n")
product <- values[which(keys == "product")]
begin <- values[which(keys == "begin")]
end <- values[which(keys == "end")]
tileH <- values[which(keys == "tileH")]
tileV <- values[which(keys == "tileV")]
collection <- values[which(keys == "collection")]
wait <- values[which(keys == "wait")]
if(agrep(":", tileH) == TRUE){
pair <- unlist(strsplit(tileH, ":"))
tileH <- seq(from = as.numeric(pair[1]), to = as.numeric(pair[2]), by = 1)
}else{
tileH <- as.numeric(tileH)
}
if(agrep(":", tileV) == TRUE){
pair <- unlist(strsplit(tileV, ":"))
tileV <- seq(from = as.numeric(pair[1]), to = as.numeric(pair[2]), by = 1)
}else{
tileV <- as.numeric(tileV)
}
# Downloads data
library(MODIS)
#MODISoptions(localArcPath, outDirPath, pixelSize, outProj, resamplingType, dataFormat, gdalPath, MODISserverOrder, dlmethod, stubbornness, systemwide = FALSE, quiet = FALSE, save=TRUE, checkPackages=TRUE)
res <- getHdf(product = product, begin = begin, end = end, tileH = tileH, tileV = tileV, collection = collection, wait = wait, quiet = FALSE)
|
context("List folder contents")
# ---- nm_fun ----
nm_ <- nm_fun("TEST-drive-ls", NULL)
# ---- clean ----
if (CLEAN) {
drive_trash(c(
nm_("list-me"),
nm_("this-should-not-exist")
))
}
# ---- setup ----
if (SETUP) {
drive_mkdir(nm_("list-me"))
drive_upload(
system.file("DESCRIPTION"),
path = file.path(nm_("list-me"), nm_("DESCRIPTION"))
)
drive_upload(
R.home('doc/html/about.html'),
path = file.path(nm_("list-me"), nm_("about-html"))
)
}
# ---- tests ----
test_that("drive_ls() errors if file does not exist", {
skip_if_no_token()
skip_if_offline()
expect_error(
drive_ls(nm_("this-should-not-exist")),
"does not identify at least one"
)
})
test_that("drive_ls() outputs contents of folder", {
skip_if_no_token()
skip_if_offline()
## path
out <- drive_ls(nm_("list-me"))
expect_s3_class(out, "dribble")
expect_true(setequal(out$name, c(nm_("about-html"), nm_("DESCRIPTION"))))
## dribble
d <- drive_get(nm_("list-me"))
out2 <- drive_ls(d)
expect_identical(out[c("name", "id")], out2[c("name", "id")])
## id
out3 <- drive_ls(as_id(d$id))
expect_identical(out[c("name", "id")], out3[c("name", "id")])
})
test_that("drive_ls() passes ... through to drive_find()", {
skip_if_no_token()
skip_if_offline()
d <- drive_get(nm_("list-me"))
## does user-specified q get appended to vs clobbered?
## if so, only about-html is listed here
about <- drive_get(nm_("about-html"))
out <- drive_ls(d, q = "fullText contains 'portable'")
expect_identical(
about[c("name", "id")],
out[c("name", "id")]
)
## does a non-q query parameter get passed through?
## if so, files are listed in reverse alphabetical order here
out <- drive_ls(d, orderBy = "name desc")
expect_identical(
out$name,
c(nm_("DESCRIPTION"), nm_("about-html"))
)
})
| /tests/testthat/test-drive_ls.R | no_license | hturner/googledrive | R | false | false | 1,855 | r | context("List folder contents")
# ---- nm_fun ----
nm_ <- nm_fun("TEST-drive-ls", NULL)
# ---- clean ----
if (CLEAN) {
drive_trash(c(
nm_("list-me"),
nm_("this-should-not-exist")
))
}
# ---- setup ----
if (SETUP) {
drive_mkdir(nm_("list-me"))
drive_upload(
system.file("DESCRIPTION"),
path = file.path(nm_("list-me"), nm_("DESCRIPTION"))
)
drive_upload(
R.home('doc/html/about.html'),
path = file.path(nm_("list-me"), nm_("about-html"))
)
}
# ---- tests ----
test_that("drive_ls() errors if file does not exist", {
skip_if_no_token()
skip_if_offline()
expect_error(
drive_ls(nm_("this-should-not-exist")),
"does not identify at least one"
)
})
test_that("drive_ls() outputs contents of folder", {
skip_if_no_token()
skip_if_offline()
## path
out <- drive_ls(nm_("list-me"))
expect_s3_class(out, "dribble")
expect_true(setequal(out$name, c(nm_("about-html"), nm_("DESCRIPTION"))))
## dribble
d <- drive_get(nm_("list-me"))
out2 <- drive_ls(d)
expect_identical(out[c("name", "id")], out2[c("name", "id")])
## id
out3 <- drive_ls(as_id(d$id))
expect_identical(out[c("name", "id")], out3[c("name", "id")])
})
test_that("drive_ls() passes ... through to drive_find()", {
skip_if_no_token()
skip_if_offline()
d <- drive_get(nm_("list-me"))
## does user-specified q get appended to vs clobbered?
## if so, only about-html is listed here
about <- drive_get(nm_("about-html"))
out <- drive_ls(d, q = "fullText contains 'portable'")
expect_identical(
about[c("name", "id")],
out[c("name", "id")]
)
## does a non-q query parameter get passed through?
## if so, files are listed in reverse alphabetical order here
out <- drive_ls(d, orderBy = "name desc")
expect_identical(
out$name,
c(nm_("DESCRIPTION"), nm_("about-html"))
)
})
|
ggplot(set.df,aes(x=as.Date(ORDER_DATE,"%Y-%m-%d"),y=as.Date(SHIPPED_DATE,"%Y-%m-%d")))+geom_point(aes(color=as.factor(TITLE)),na.rm=TRUE) + facet_wrap(~CUSTOMER_STATE) | /02 Visualizations/recreate_plot_2.R | no_license | alexpearce92/DV_RProject1 | R | false | false | 168 | r | ggplot(set.df,aes(x=as.Date(ORDER_DATE,"%Y-%m-%d"),y=as.Date(SHIPPED_DATE,"%Y-%m-%d")))+geom_point(aes(color=as.factor(TITLE)),na.rm=TRUE) + facet_wrap(~CUSTOMER_STATE) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-httr.R
\name{VERB_n}
\alias{VERB_n}
\title{Generic implementation of HTTP methods with retries and authentication}
\usage{
VERB_n(VERB, n = 5)
}
\arguments{
\item{VERB}{function; an HTTP verb (e.g. GET, POST, etc.)}
\item{n}{integer; the number of retries}
}
\description{
Generic implementation of HTTP methods with retries and authentication
}
\note{
This function is meant to be used internally. Only use when debugging.
}
\keyword{internal}
| /man/VERB_n.Rd | no_license | muschellij2/squareupr | R | false | true | 530 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-httr.R
\name{VERB_n}
\alias{VERB_n}
\title{Generic implementation of HTTP methods with retries and authentication}
\usage{
VERB_n(VERB, n = 5)
}
\arguments{
\item{VERB}{function; an HTTP verb (e.g. GET, POST, etc.)}
\item{n}{integer; the number of retries}
}
\description{
Generic implementation of HTTP methods with retries and authentication
}
\note{
This function is meant to be used internally. Only use when debugging.
}
\keyword{internal}
|
# UI definition for the "Clustering" tab.
# Layout: an algorithm selector, algorithm-specific parameter panels that are
# shown/hidden via JavaScript conditionalPanel() expressions keyed on the
# numeric codes assigned in the "clustAlgo" selectInput, a Run button, and a
# visualization section.
# NOTE(review): `currreddim` and `clusterChoice` are assumed to be defined
# elsewhere in the app (e.g. global scope) -- confirm before reuse.
shinyPanelCluster <- fluidPage(
  tags$div(
    class = "container",
    h3("Clustering"),
    h5(tags$a(href = "https://compbiomed.github.io/sctk_docs/articles/clustering.html",
              "(help)", target = "_blank")),
    wellPanel(
      # CLUSTERING --> VISUALIZATION
      fluidRow(
        column(
          6,
          # Numeric codes 1-6: Scran SNN; 7-9: K-Means; 10-12: Seurat.
          # The conditionalPanels below switch on these ranges.
          selectInput("clustAlgo", "Select Algorithm",
                      list("Scran SNN" = c("walktrap" = 1, "louvain" = 2,
                                           "infomap" = 3, "fastGreedy" = 4,
                                           "labelProp" = 5, "leadingEigen" = 6),
                           "K-Means" = c("Hartigan-Wong" = 7, "Lloyd" = 8,
                                         "MacQueen" = 9),
                           "Seurat" = c("louvain" = 10, "multilevel" = 11,
                                        "SLM" = 12)),
          )
        )
      ),
      h4("Input Parameters:"),
      fluidRow(
        # Scran SNN ####
        conditionalPanel(
          "input.clustAlgo >=1 && input.clustAlgo <= 6",
          column(
            6,
            uiOutput("clustScranSNNMatUI"),
            uiOutput("clustScranSNNAltExpAssayUI")
          ),
          column(
            4,
            numericInput("clustScranSNNK", "K value:", 10, min = 1, step = 1),
          ),
          # Component count only applies when the input is not already a
          # reduced dimension.
          conditionalPanel(
            "input.clustScranSNNInType != 'ReducedDim'",
            column(
              4,
              numericInput("clustScranSNNd", "Number of Components:",
                           50, min = 2, step = 5)
            )
          ),
          column(
            4,
            selectInput("clustScranSNNType", "Edge Weight Type:",
                        c("rank", "number", "jaccard"), selected = "rank")
          )
        ),
        # K-Means ####
        conditionalPanel(
          "input.clustAlgo >= 7 && input.clustAlgo <= 9",
          column(
            6,
            selectInput("clustKMeansReddim", "Select A ReducedDim:", currreddim)
          ),
          column(6),
          column(
            12,
            helpText("A 'reducedDim' contains low-dimension representation of an assay.\n Dimension reduction has to be run in advance.")
          ),
          column(
            4,
            numericInput("clustKMeansN", "Number of Centers (Clusters):",
                         value = NULL),
          ),
          column(
            4,
            numericInput("clustKMeansNIter", "Max Number of Iterations:",
                         10, min = 2, step = 1)
          ),
          column(
            4,
            numericInput("clustKMeansNStart", "Number of Random Sets:",
                         1, min = 1, step = 1)
          )
        ),
        # Seurat ####
        conditionalPanel(
          "input.clustAlgo >= 10 && input.clustAlgo <= 12",
          column(
            6,
            selectInput("clustSeuratReddim", "Select A ReducedDim:", currreddim)
          ),
          column(6),
          column(
            12,
            helpText("A 'reducedDim' contains low-dimension representation of an assay.\n Dimension reduction has to be run in advance.")
          ),
          column(
            4,
            numericInput("clustSeuratDims", "How Many Dimensions to Use:", 10,
                         min = 2, step = 1),
          ),
          column(
            4,
            checkboxInput("clustSeuratGrpSgltn", "Group Singletons",
                          value = TRUE)
          ),
          column(
            4,
            numericInput("clustSeuratRes", "Resolution", 0.8, step = 0.05)
          )
        )
      ), # fluidRow ends here
      useShinyjs(),
      uiOutput("clustNameUI"),
      withBusyIndicatorUI(actionButton("clustRun", "Run"))
    ),
    h3("Visualization"),
    p("A cluster annotation needs to be specified, and a dimension reduction has to be provided.",
      style = "color:grey;"),
    panel(
      radioButtons("clustVisChoicesType", NULL,
                   c("Select from Current Results:" = 1,
                     "Select from All Present Annotation:" = 2),
                   selected = 1, inline = TRUE, ),
      conditionalPanel(
        "input.clustVisChoicesType == 1",
        selectInput("clustVisRes", NULL, "")
      ),
      conditionalPanel(
        "input.clustVisChoicesType == 2",
        selectInput("clustVisCol", NULL, clusterChoice)
      ),
      selectInput("clustVisReddim", "Use Reduction:", currreddim),
      withBusyIndicatorUI(actionButton("clustPlot", "Plot")),
      plotOutput("clustVisPlot")
    )
  )
)
| /inst/shiny/ui_03_2_cluster.R | permissive | vidyaap/singleCellTK | R | false | false | 4,599 | r | shinyPanelCluster <- fluidPage(
tags$div(
class = "container",
h3("Clustering"),
h5(tags$a(href = "https://compbiomed.github.io/sctk_docs/articles/clustering.html",
"(help)", target = "_blank")),
wellPanel(
# CLUSTERING --> VISUALIZATION
fluidRow(
column(
6,
selectInput("clustAlgo", "Select Algorithm",
list("Scran SNN" = c("walktrap" = 1, "louvain" = 2,
"infomap" = 3, "fastGreedy" = 4,
"labelProp" = 5, "leadingEigen" = 6),
"K-Means" = c("Hartigan-Wong" = 7, "Lloyd" = 8,
"MacQueen" = 9),
"Seurat" = c("louvain" = 10, "multilevel" = 11,
"SLM" = 12)),
)
)
),
h4("Input Parameters:"),
fluidRow(
# Scran SNN ####
conditionalPanel(
"input.clustAlgo >=1 && input.clustAlgo <= 6",
column(
6,
uiOutput("clustScranSNNMatUI"),
uiOutput("clustScranSNNAltExpAssayUI")
),
column(
4,
numericInput("clustScranSNNK", "K value:", 10, min = 1, step = 1),
),
conditionalPanel(
"input.clustScranSNNInType != 'ReducedDim'",
column(
4,
numericInput("clustScranSNNd", "Number of Components:",
50, min = 2, step = 5)
)
),
column(
4,
selectInput("clustScranSNNType", "Edge Weight Type:",
c("rank", "number", "jaccard"), selected = "rank")
)
),
# K-Means ####
conditionalPanel(
"input.clustAlgo >= 7 && input.clustAlgo <= 9",
column(
6,
selectInput("clustKMeansReddim", "Select A ReducedDim:", currreddim)
),
column(6),
column(
12,
helpText("A 'reducedDim' contains low-dimension representation of an assay.\n Dimension reduction has to be run in advance.")
),
column(
4,
numericInput("clustKMeansN", "Number of Centers (Clusters):",
value = NULL),
),
column(
4,
numericInput("clustKMeansNIter", "Max Number of Iterations:",
10, min = 2, step = 1)
),
column(
4,
numericInput("clustKMeansNStart", "Number of Random Sets:",
1, min = 1, step = 1)
)
),
# Seurat ####
conditionalPanel(
"input.clustAlgo >= 10 && input.clustAlgo <= 12",
column(
6,
selectInput("clustSeuratReddim", "Select A ReducedDim:", currreddim)
),
column(6),
column(
12,
helpText("A 'reducedDim' contains low-dimension representation of an assay.\n Dimension reduction has to be run in advance.")
),
column(
4,
numericInput("clustSeuratDims", "How Many Dimensions to Use:", 10,
min = 2, step = 1),
),
column(
4,
checkboxInput("clustSeuratGrpSgltn", "Group Singletons",
value = TRUE)
),
column(
4,
numericInput("clustSeuratRes", "Resolution", 0.8, step = 0.05)
)
)
), # fuildRow ends here
useShinyjs(),
uiOutput("clustNameUI"),
withBusyIndicatorUI(actionButton("clustRun", "Run"))
),
h3("Visualization"),
p("A cluster annotation needs to be specified, and a dimension reduction has to be provided.",
style = "color:grey;"),
panel(
radioButtons("clustVisChoicesType", NULL,
c("Select from Current Results:" = 1,
"Select from All Present Annotation:" = 2),
selected = 1, inline = TRUE, ),
conditionalPanel(
"input.clustVisChoicesType == 1",
selectInput("clustVisRes", NULL, "")
),
conditionalPanel(
"input.clustVisChoicesType == 2",
selectInput("clustVisCol", NULL, clusterChoice)
),
selectInput("clustVisReddim", "Use Reduction:", currreddim),
withBusyIndicatorUI(actionButton("clustPlot", "Plot")),
plotOutput("clustVisPlot")
)
)
)
|
# Fit a Maximum Likelihood Response Curve (MLRC) transfer-function model.
# y : data frame / matrix of species proportions (rows = samples, cols = taxa);
#     values must lie in [0, 1].
# x : numeric vector of the environmental variable to be modelled.
# check.data : sanity-check y for all-zero rows/columns before fitting.
# lean : if TRUE, omit bulky components (the training y) from the result.
# n.cut : minimum number of occurrences for a taxon to be fitted (passed on
#         to MLRC.fit).
# Returns an object of class "MLRC" holding the fitted coefficients, fitted
# values, the matched calls and the training x (plus y unless lean).
MLRC <- function(y, x, check.data=TRUE, lean=FALSE, n.cut=5, verbose=TRUE, ...)
{
  if (check.data) {
    if (any(apply(y, 1, sum) < 1.0E-8))
      stop(paste("Species data have zero abundances for the following rows:", paste(which(apply(y, 1, sum) < 1.0E-8), collapse=",")))
    if (any(apply(y, 2, sum) < 1.0E-8))
      stop(paste("Species data have zero abundances for the following columns:", paste(which(apply(y, 2, sum) < 1.0E-8), collapse=",")))
    # Scalar condition: use short-circuiting && rather than elementwise &.
    if (n.cut < 5 && any(apply(y > 0, 2, sum) < 5))
      warning("Trying to fit responses to some taxa with less than 5 occurrences - results may be unreliable")
  }
  # Scalar condition: use short-circuiting || rather than elementwise |.
  if (any(y > 1) || any(y < 0))
    stop("Species data must be proportions between 0 and 1")
  fit <- MLRC.fit(y=y, x=x, lean=lean, n.cut=n.cut, verbose=verbose, ...)
  # Apparent (same-data) estimates of x for the training samples.
  xHat <- predict.internal.MLRC(object=fit, y=y, lean=lean, ...)
  call.print <- match.call()
  # Unevaluated call recorded so cross-validation can refit the model later.
  call.fit <- as.call(list(quote(MLRC.fit), y=quote(y), x=quote(x), lean=FALSE))
  result <- c(fit, list(fitted.values=xHat, call.fit=call.fit, call.print=call.print, x=x))
  result$cv.summary <- list(cv.method="none")
  if (!lean)
    result$y <- y
  class(result) <- "MLRC"
  result
}
# Fit per-taxon species response curves by (quasi-)binomial logistic
# regression on x and x^2.
# y : matrix/data frame of species proportions (samples x taxa).
# x : numeric vector of the environmental variable.
# n.cut : taxa occurring in fewer than n.cut samples are skipped and get
#         NA coefficients.
# use.glm : if TRUE, fit with R's glm.fit; otherwise use the compiled
#           "MLRC_regress" routine in the rioja package.
# max.iter : iteration limit for the compiled fitter.
# Returns a list with an (n.taxa x 3) coefficient matrix (b0, b1, b2 of the
# quadratic logit response) and the mean of x (used later as a starting
# value for prediction).
MLRC.fit <- function(y, x, n.cut=2, use.glm = FALSE, max.iter=50, lean=FALSE, verbose=FALSE, ...)
{
  # Quadratic logistic fit for one taxon; e is the design matrix
  # cbind(1, x, x^2).
  glr <- function(x, e) {
    gfit <- glm.fit(e, x, family = quasibinomial(link=logit), ...)
    coef <- gfit$coefficients
    # A positive quadratic term gives a U-shaped (ecologically implausible)
    # response: refit without the squared term and force b2 to zero.
    if (coef[3] > 0) {
      gfit <- glm.fit(e[, 1:2], x, family = quasibinomial(link=logit), ...)
      coef <- c(gfit$coefficients, 0)
    }
    if (gfit$converged)
      return(coef)
    else
      return(c(NA, NA, NA))
  }
  # Taxa with fewer than n.cut occurrences are not fitted.
  skip <- colSums(y > 0) < n.cut
  if (use.glm) {
    # (historical glm()-based variant retained for reference)
    # glr <- function(x, e) {
    #   gfit <- glm(x ~ e + I(e^2), family = quasibinomial(link=logit), ...)
    #   if (gfit$converged)
    #     return(gfit$coefficients)
    #   else
    #     return(c(NA, NA, NA))
    # }
    lp <- cbind(rep(1, nrow(y)), x, x^2)
    beta <- apply(y[, !skip], 2, glr, e=lp)
    # Re-insert NA columns for skipped taxa so the coefficient matrix keeps
    # the column order of y, then transpose to taxa-by-coefficient.
    BETA <- matrix(NA, nrow = 3, ncol = ncol(y))
    BETA[, !skip] <- beta
    beta <- t(BETA)
    rownames(beta) <- colnames(y)
    colnames(beta) <- c("b0", "b1", "b2")
    return (list(coefficients=beta, meanX=mean(x, na.rm=TRUE)))
  } else {
    # Compiled maximum-likelihood fit (C code shipped with the rioja package).
    res <- .Call("MLRC_regress", as.matrix(y[, !skip]), as.matrix(x), as.integer(max.iter), as.integer(verbose), PACKAGE="rioja")
    beta <- matrix(res$Beta, ncol=3)
    # Expand results back to the full taxon set; skipped taxa stay NA.
    BETA <- matrix(NA, ncol = 3, nrow = ncol(y))
    BETA[!skip, ] <- beta
    # IBeta: per-taxon status/iteration info returned by the C routine
    # (presumably convergence flags -- TODO confirm against rioja C source).
    IBETA <- vector("integer", length=ncol(y))
    IBETA[] <- NA
    IBETA[!skip] <- res$IBeta
    rownames(BETA) <- colnames(y)
    colnames(BETA) <- c("b0", "b1", "b2")
    list(coefficients=BETA, meanX=mean(x, na.rm=TRUE), IBeta=IBETA, n.cut=n.cut)
  }
}
# Internal prediction workhorse: maximum-likelihood estimation of the
# environmental variable for each sample in y, given previously fitted
# response-curve coefficients.
# object : list produced by MLRC.fit (needs $coefficients and $meanX).
# y : matrix/data frame of species proportions.
# lean : if TRUE, skip the consistency check between y and the coefficients.
# Returns a one-column matrix of estimates, column named "MLRC", rows named
# after the samples.
predict.internal.MLRC <- function(object, y, lean=FALSE, verbose=FALSE, ...)
{
  if (!lean) {
    if (nrow(object$coefficients) != ncol(y))
      stop("Number of columns different in y, beta in predict.internal.MLRC")
  }
  # Optimisation is performed in compiled code (rioja's "MLRC_predict");
  # meanX is passed as the starting value for the search.
  xHat <- .Call("MLRC_predict", as.matrix(y), as.matrix(object$coefficients), as.double(object$meanX), as.integer(verbose), PACKAGE="rioja")
  xHat <- as.matrix(xHat, ncol=1)
  colnames(xHat) <- "MLRC"
  rownames(xHat) <- rownames(y)
  xHat
}
# S3 method: cross-validate a fitted MLRC model.
# cv.method : "loo", k-fold ("lgo"-style grouping via ngroups), bootstrap
#             (nboot replicates) or h-block (h.cutoff / h.dist) -- all of the
#             actual machinery lives in the shared internal .crossval().
crossval.MLRC <- function(object, cv.method="loo", verbose=TRUE, ngroups=10,
                          nboot=100, h.cutoff=0, h.dist=NULL, ...) {
  .crossval(object = object,
            cv.method = cv.method,
            verbose = verbose,
            ngroups = ngroups,
            nboot = nboot,
            h.cutoff = h.cutoff,
            h.dist = h.dist,
            ...)
}
# S3 predict method for MLRC models.
# newdata : optional matrix/data frame of species proportions to predict for;
#           values must lie in [0, 1]. If NULL, predictions for the training
#           set are returned by the internal workhorse.
# sse, nboot : request bootstrap sample-specific errors.
# match.data : match taxa in newdata to the training set by name.
# Delegates the actual work to the shared internal .predict().
predict.MLRC <- function(object, newdata=NULL, sse=FALSE, nboot=100, match.data=TRUE, verbose=TRUE, ...) {
  # Validate only when newdata is supplied; scalar context, so use
  # short-circuiting &&/|| rather than elementwise &/|.
  if (!is.null(newdata) && (any(newdata < 0) || any(newdata > 1))) {
    stop("newdata must be proportions between 0 and 1")
  }
  .predict(object=object, newdata=newdata, sse=sse, nboot=nboot, match.data=match.data, verbose=verbose, ...)
}
# S3 method: summarise model performance statistics (RMSE, R2, bias, ...)
# for an MLRC object via the shared internal helper .performance().
performance.MLRC <- function(object, ...) {
  .performance(object, ...)
}
# S3 print method: show the fitting method, the original call, the data
# dimensions, cross-validation settings and summary performance statistics.
print.MLRC <- function(x, ...)
{
  # Header: method name and the call that created the model.
  cat("\nMethod : Maximum Likelihood using Response Curves \nCall : ")
  cat(paste(deparse(x$call.print), "\n\n"))
  # Data dimensions (cat's default sep reproduces paste's spacing).
  cat("No. samples :", length(x$x), "\n")
  cat("No. species :", nrow(x$coefficients), "\n")
  .print.crossval(x)
  cat("\nPerformance:\n")
  .print.performance(x)
  cat("\n")
}
# S3 summary method: print the model summary, the fitted (and, if
# cross-validated, predicted) values, and the per-taxon response-curve
# coefficients.
# full : if TRUE print the complete tables; otherwise an abbreviated view
#        via the internal dot() helper.
summary.MLRC <- function(object, full=FALSE, ...)
{
  print(object, ...)
  if (object$cv.summary$cv.method == "none")
    fitted <- as.data.frame(object$fitted.values)
  else
    # Bug fix: combine fitted values and cross-validated predictions as two
    # columns. The original called
    # as.data.frame(object$fitted.values, object$predicted), which passes the
    # predictions as the `row.names` argument and silently drops them.
    fitted <- data.frame(object$fitted.values, object$predicted)
  cat("\nFitted values\n")
  if (full) {
    print(fitted)
    cat("\nSpecies coefficients\n")
    print(data.frame(object$coefficients))
  } else {
    print(dot(fitted))
    cat("\nSpecies coefficients\n")
    print(dot(data.frame(object$coefficients)))
  }
}
# S3 plot method: observed vs estimated values, or residuals.
# resid : plot residuals (observed - estimated) instead of estimates.
# xval  : use cross-validation predictions rather than apparent estimates
#         (requires that crossval() has been run on the model).
# add.ref : add a reference line (1:1 line, or zero line for residuals).
# add.smooth : add a lowess smoother through the points.
plot.MLRC <- function(x, resid=FALSE, xval=FALSE, xlab="", ylab="", ylim=NULL, xlim=NULL, add.ref=TRUE, add.smooth=FALSE, ...) {
  # Scalar condition -> short-circuiting &&.
  if (xval && x$cv.summary$cv.method == "none")
    stop("MLRC model does not have cross validation estimates")
  xx <- x$x
  if (resid) {
    if (xval) {
      # Bug fix: the cross-validated *residuals* were previously plotted as
      # x$predicted[, 1] (the predictions themselves, not the residuals).
      yy <- residuals(x, cv=TRUE)[, 1]
    } else {
      yy <- residuals(x)[, 1]
    }
  } else {
    if (xval) {
      yy <- x$predicted[, 1]
    } else {
      yy <- x$fitted.values[, 1]
    }
  }
  if (missing(ylim)) {
    if (resid) {
      ylim <- range(yy)
    } else {
      # Include observed values so the 1:1 line fits in the plot.
      ylim <- range(yy, x$x)
    }
  }
  if (missing(xlim))
    xlim <- range(xx, x$x)
  plot(xx, yy, ylim=ylim, xlim=xlim, xlab=xlab, ylab=ylab, las=1, ...)
  if (add.ref) {
    if (resid)
      abline(h=0, col="grey")
    else
      abline(0, 1, col="grey")
  }
  if (add.smooth) {
    lines(lowess(xx, yy), col="red")
  }
}
# S3 extractor: model-estimated values of the environmental variable for
# the training samples (one-column matrix named "MLRC").
fitted.MLRC <- function(object, ...) {
  # Exact-name extraction; avoids $'s partial matching.
  object[["fitted.values"]]
}
# S3 residuals method.
# cv = FALSE : apparent residuals (observed x minus fitted values).
# cv = TRUE  : cross-validation residuals stored by crossval(); errors if
#              the model has not been cross-validated.
residuals.MLRC <- function(object, cv=FALSE, ...) {
  if (cv) {
    if (object$cv.summary$cv.method == "none")
      stop("Object does not contain cross validation results")
    object$residuals.cv
  } else {
    object$x - object$fitted.values
  }
}
# S3 extractor: the fitted response-curve coefficient matrix
# (taxa x c("b0", "b1", "b2")).
coef.MLRC <- function(object, ...) {
  # Exact-name extraction; avoids $'s partial matching.
  object[["coefficients"]]
}
#predict.internal.MLRC <- function(object, y, lean=FALSE, ...)
#{
# y <- as.matrix(y)
# nnn <- nrow(y)
# xresp <- object$xSearch
# yresp <- object$resp
# nn <- length(xresp)
# p <- log(yresp)
# ppp <- log(1-yresp)
# LL.res <- as.matrix(p) %*% t(y) + as.matrix(ppp) %*% t(1.0-y)
# LL.res[is.na(LL.res)] <- -1.0E10
# xHat <- xresp[apply(LL.res, 2, order)[nn, ]]
# xHat <- as.matrix(xHat, ncol=1)
# colnames(xHat) <- "MLRC"
# rownames(xHat) <- rownames(y)
# xHat
#}
#MLRC.fit <- function(y, x, xSearch, lean=FALSE)
#{
# glr <- function(x, e, xSearch) {
# gfit <- glm(x ~ e + I(e^2), family = quasibinomial(link=logit))
# gfit$coefficients
# predict.glm(gfit, data.frame(e=xSearch), type="response")
# }
# resp <- apply(y, 2, glr, e=x, xSearch)
# result <- list(resp=resp, xSearch=xSearch)
#}
| /R/MLRC.r | no_license | nsj3/rioja | R | false | false | 6,919 | r | MLRC <- function(y, x, check.data=TRUE, lean=FALSE, n.cut=5, verbose=TRUE, ...)
{
if (check.data) {
if (any(apply(y, 1, sum) < 1.0E-8))
stop(paste("Species data have zero abundances for the following rows:", paste(which(apply(y, 1, sum) < 1.0E-8), collapse=",")))
if (any(apply(y, 2, sum) < 1.0E-8))
stop(paste("Species data have zero abundances for the following columns:", paste(which(apply(y, 2, sum) < 1.0E-8), collapse=",")))
if(n.cut < 5 & any(apply(y>0, 2, sum) < 5))
warning("Trying to fit responses to some taxa with less than 5 occurrences - results may be unreliable")
}
if (any(y>1) | any (y<0))
stop("Species data must be proportions between 0 and 1")
fit <- MLRC.fit(y=y, x=x, lean=lean, n.cut=n.cut, verbose=verbose, ...)
xHat <- predict.internal.MLRC(object=fit, y=y, lean=lean, ...)
call.print <- match.call()
call.fit <- as.call(list(quote(MLRC.fit), y=quote(y), x=quote(x), lean=FALSE))
result <- c(fit, list(fitted.values=xHat, call.fit=call.fit, call.print=call.print, x=x))
result$cv.summary <- list(cv.method="none")
if (!lean)
result$y <- y
class(result) <- "MLRC"
result
}
MLRC.fit <- function(y, x, n.cut=2, use.glm = FALSE, max.iter=50, lean=FALSE, verbose=FALSE, ...)
{
glr <- function(x, e) {
gfit <- glm.fit(e, x, family = quasibinomial(link=logit), ...)
coef <- gfit$coefficients
if (coef[3] > 0) {
gfit <- glm.fit(e[, 1:2], x, family = quasibinomial(link=logit), ...)
coef <- c(gfit$coefficients, 0)
}
if (gfit$converged)
return(coef)
else
return(c(NA, NA, NA))
}
skip <- colSums(y > 0) < n.cut
if (use.glm) {
# glr <- function(x, e) {
# gfit <- glm(x ~ e + I(e^2), family = quasibinomial(link=logit), ...)
# if (gfit$converged)
# return(gfit$coefficients)
# else
# return(c(NA, NA, NA))
# }
lp <- cbind(rep(1, nrow(y)), x, x^2)
beta <- apply(y[, !skip], 2, glr, e=lp)
BETA <- matrix(NA, nrow = 3, ncol = ncol(y))
BETA[, !skip] <- beta
beta <- t(BETA)
rownames(beta) <- colnames(y)
colnames(beta) <- c("b0", "b1", "b2")
return (list(coefficients=beta, meanX=mean(x, na.rm=TRUE)))
} else {
res <- .Call("MLRC_regress", as.matrix(y[, !skip]), as.matrix(x), as.integer(max.iter), as.integer(verbose), PACKAGE="rioja")
beta <- matrix(res$Beta, ncol=3)
BETA <- matrix(NA, ncol = 3, nrow = ncol(y))
BETA[!skip, ] <- beta
IBETA <- vector("integer", length=ncol(y))
IBETA[] <- NA
IBETA[!skip] <- res$IBeta
rownames(BETA) <- colnames(y)
colnames(BETA) <- c("b0", "b1", "b2")
list(coefficients=BETA, meanX=mean(x, na.rm=TRUE), IBeta=IBETA, n.cut=n.cut)
}
}
predict.internal.MLRC <- function(object, y, lean=FALSE, verbose=FALSE, ...)
{
coef <- object$coefficients
if (!lean) {
if (nrow(object$coefficients) != ncol(y))
stop("Number of columns different in y, beta in predict.internal.MLRC")
}
xHat <- .Call("MLRC_predict", as.matrix(y), as.matrix(object$coefficients), as.double(object$meanX), as.integer(verbose), PACKAGE="rioja")
xHat <- as.matrix(xHat, ncol=1)
colnames(xHat) <- "MLRC"
rownames(xHat) <- rownames(y)
xHat
}
crossval.MLRC <- function(object, cv.method="loo", verbose=TRUE, ngroups=10, nboot=100, h.cutoff=0, h.dist=NULL, ...) {
.crossval(object=object, cv.method=cv.method, verbose=verbose, ngroups=ngroups, nboot=nboot, h.cutoff=h.cutoff, h.dist=h.dist, ...)
}
predict.MLRC <- function(object, newdata=NULL, sse=FALSE, nboot=100, match.data=TRUE, verbose=TRUE, ...) {
if (!is.null(newdata))
if (any(newdata < 0) | any(newdata > 1))
stop("newdata must be proportions between 0 and 1")
.predict(object=object, newdata=newdata, sse=sse, nboot=nboot, match.data=match.data, verbose=verbose, ...)
}
performance.MLRC <- function(object, ...) {
.performance(object, ...)
}
print.MLRC <- function(x, ...)
{
cat("\n")
cat("Method : Maximum Likelihood using Response Curves \n")
cat("Call : ")
cat(paste(deparse(x$call.print), "\n\n"))
cat(paste("No. samples :", length(x$x), "\n"))
cat(paste("No. species :", nrow(x$coefficients), "\n"))
.print.crossval(x)
cat("\nPerformance:\n")
.print.performance(x)
cat("\n")
}
summary.MLRC <- function(object, full=FALSE, ...)
{
print(object, ...)
if (object$cv.summary$cv.method == "none")
fitted <- as.data.frame(object$fitted.values)
else
fitted <- as.data.frame(object$fitted.values, object$predicted)
cat("\nFitted values\n")
if (full) {
print(fitted)
cat("\nSpecies coefficients\n")
print(data.frame(object$coefficients))
} else {
print(dot(fitted))
cat("\nSpecies coefficients\n")
print(dot(data.frame(object$coefficients)))
}
}
plot.MLRC <- function(x, resid=FALSE, xval=FALSE, xlab="", ylab="", ylim=NULL, xlim=NULL, add.ref=TRUE, add.smooth=FALSE, ...) {
if (xval & x$cv.summary$cv.method=="none")
stop("MLRC model does not have cross validation estimates")
xx <- x$x
if (resid) {
if (xval) {
yy <- x$predicted[, 1]
} else {
yy <- residuals(x)[, 1]
}
} else {
if (xval) {
yy <- x$predicted[, 1]
} else {
yy <- x$fitted.values[, 1]
}
}
if (missing(ylim)) {
if (resid) {
ylim <- range(yy)
} else {
ylim <- range(yy, x$x)
}
}
if (missing(xlim))
xlim <- range(xx, x$x)
plot(xx, yy, ylim=ylim, xlim=xlim, xlab=xlab, ylab=ylab, las=1, ...)
if (add.ref) {
if (resid)
abline(h=0, col="grey")
else
abline(0,1, col="grey")
}
if (add.smooth) {
lines(lowess(xx, yy), col="red")
}
}
fitted.MLRC <- function(object, ...) {
object$fitted.values
}
residuals.MLRC <- function(object, cv=FALSE, ...) {
if (cv == FALSE)
return (object$x - object$fitted.values)
else {
if (object$cv.summary$cv.method == "none")
stop("Object does not contain cross validation results")
return (object$residuals.cv)
}
}
coef.MLRC <- function(object, ...) {
object$coefficients
}
#predict.internal.MLRC <- function(object, y, lean=FALSE, ...)
#{
# y <- as.matrix(y)
# nnn <- nrow(y)
# xresp <- object$xSearch
# yresp <- object$resp
# nn <- length(xresp)
# p <- log(yresp)
# ppp <- log(1-yresp)
# LL.res <- as.matrix(p) %*% t(y) + as.matrix(ppp) %*% t(1.0-y)
# LL.res[is.na(LL.res)] <- -1.0E10
# xHat <- xresp[apply(LL.res, 2, order)[nn, ]]
# xHat <- as.matrix(xHat, ncol=1)
# colnames(xHat) <- "MLRC"
# rownames(xHat) <- rownames(y)
# xHat
#}
#MLRC.fit <- function(y, x, xSearch, lean=FALSE)
#{
# glr <- function(x, e, xSearch) {
# gfit <- glm(x ~ e + I(e^2), family = quasibinomial(link=logit))
# gfit$coefficients
# predict.glm(gfit, data.frame(e=xSearch), type="response")
# }
# resp <- apply(y, 2, glr, e=x, xSearch)
# result <- list(resp=resp, xSearch=xSearch)
#}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subset.R
\name{subset.Mcomp}
\alias{subset.Mcomp}
\title{Subset of time series from the M Competitions
\code{subset.Mcomp} returns a subset of the time series data from the M
Competitions. Subsets can be for specific periods, or specific types of data
or both.}
\usage{
\method{subset}{Mcomp}(x, cond1, cond2, ...)
}
\arguments{
\item{x}{M-competition data or a subset of M-competition data}
\item{cond1}{Type or period of the data. Type is a character variable and
period could be character or numeric.}
\item{cond2}{Optional second condition specifying type or period of the
data, depending on \code{cond1}. If \code{cond1} denotes type then
\code{cond2} would denote period, but if \code{cond1} denotes period then
\code{cond2} would denote type.}
\item{...}{Other arguments.}
}
\value{
An object of class \code{Mcomp} consisting of the selected series.
}
\description{
Possible values for \code{cond1} and \code{cond2} denoting period are 1, 4,
12, 24, 52, 365, "yearly", "quarterly", "monthly", "hourly", "weekly", "daily" and "other".
}
\details{
If \code{cond1} or \code{cond2} equals 111, then the 111 series used in the
extended comparisons in the 1982 M-competition are selected.
Possible values for \code{cond1} and \code{cond2} denoting type are "macro",
"micro", "industry", "finance", "demographic", "allother", "macro1",
"macro2", "micro1", "micro2", "micro3". These correspond to the descriptions
used in the competitions. See the references for details.
Partial matching used for both conditions.
}
\examples{
library(seer)
data(M4)
M4.quarterly <- subset(M4,4)
M4.yearly.industry <- subset(M4,1,"industry")
}
\references{
Rob Hyndman (2018). Mcomp: Data from the M-Competitions. R package version 2.7. https://CRAN.R-project.org/package=Mcomp
}
\author{
Thiyanga Talagala (Thiyanga has done small tweaks to adopt the code to M4data, original authors
of the code are Muhammad Akram and Rob Hyndman)
}
\keyword{data}
| /man/subset.Mcomp.Rd | no_license | mohcinemadkour/seer | R | false | true | 2,017 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subset.R
\name{subset.Mcomp}
\alias{subset.Mcomp}
\title{Subset of time series from the M Competitions
\code{subset.Mcomp} returns a subset of the time series data from the M
Competitions. Subsets can be for specific periods, or specific types of data
or both.}
\usage{
\method{subset}{Mcomp}(x, cond1, cond2, ...)
}
\arguments{
\item{x}{M-competition data or a subset of M-competition data}
\item{cond1}{Type or period of the data. Type is a character variable and
period could be character or numeric.}
\item{cond2}{Optional second condition specifying type or period of the
data, depending on \code{cond1}. If \code{cond1} denotes type then
\code{cond2} would denote period, but if \code{cond1} denotes period then
\code{cond2} would denote type.}
\item{...}{Other arguments.}
}
\value{
An object of class \code{Mcomp} consisting of the selected series.
}
\description{
Possible values for \code{cond1} and \code{cond2} denoting period are 1, 4,
12, 24, 52, 365, "yearly", "quarterly", "monthly", "hourly", "weekly", "daily" and "other".
}
\details{
If \code{cond1} or \code{cond2} equals 111, then the 111 series used in the
extended comparisons in the 1982 M-competition are selected.
Possible values for \code{cond1} and \code{cond2} denoting type are "macro",
"micro", "industry", "finance", "demographic", "allother", "macro1",
"macro2", "micro1", "micro2", "micro3". These correspond to the descriptions
used in the competitions. See the references for details.
Partial matching used for both conditions.
}
\examples{
library(seer)
data(M4)
M4.quarterly <- subset(M4,4)
M4.yearly.industry <- subset(M4,1,"industry")
}
\references{
Rob Hyndman (2018). Mcomp: Data from the M-Competitions. R package version 2.7. https://CRAN.R-project.org/package=Mcomp
}
\author{
Thiyanga Talagala (Thiyanga has done small tweaks to adopt the code to M4data, original authors
of the code are Muhammad Akram and Rob Hyndman)
}
\keyword{data}
|
####Team BS#############
####17-01-2017##########
####Geoscripting########
####Lesson_7 Exercise###
#
# Finds the "greenest" (highest mean NDVI) Dutch municipality and province
# for January, August and the yearly average, using monthly MODIS NDVI
# composites and GADM administrative boundaries.
# NOTE(review): clearing the workspace inside a script is generally
# discouraged; kept to preserve the original behaviour.
rm(list=ls())
library(raster)
source("R_functions/Download_Brick.R")
# Download, unzip and brick the input data (monthly NDVI layers).
Neth <- Download_Brick("https://raw.githubusercontent.com/GeoScripting-WUR/VectorRaster/gh-pages/data/MODIS.zip")
# Convert the stored integer values to 'normal' NDVI (scale factor 0.0001).
Neth_NDVI <- 0.0001 * Neth
# Municipality (GADM level 2) boundaries for the Netherlands.
nlMunicipality <- getData('GADM',country='NLD', level=2)
# Reproject the boundaries into the raster's CRS so they can be overlaid.
nlMunicipality_proj <- spTransform(nlMunicipality, CRS(proj4string(Neth_NDVI)))
# Mask out everything outside the Netherlands.
NDVI_mask <- mask(Neth_NDVI, mask = nlMunicipality_proj)
###### HERE we find the maximum NDVI for every month ###
###### January ###
NDVI_Jan <- subset(NDVI_mask, 1)
# Mean NDVI per municipality (sp=TRUE returns a SpatialPolygonsDataFrame).
NDVI_Jan_Mun <- extract(NDVI_Jan, nlMunicipality_proj, fun=mean, na.rm=TRUE, sp=TRUE)
max_NDVI_JAN <- subset(NDVI_Jan_Mun$NAME_2, NDVI_Jan_Mun$January == (max(NDVI_Jan_Mun$January, na.rm = TRUE)))
max_NDVI_JAN
# Plot (red = low NDVI, green = high NDVI).
colfunc <- colorRampPalette(c("red", "green"))
spplot(NDVI_Jan_Mun, zcol = "January", col.regions= colfunc(30), main="NDVI in January")
###### August ###
NDVI_Aug <- subset(NDVI_mask, 8)
NDVI_Aug_Mun <- extract(NDVI_Aug, nlMunicipality_proj, fun=mean, na.rm=TRUE, sp=TRUE)
max_NDVI_Aug <- subset(NDVI_Aug_Mun$NAME_2, NDVI_Aug_Mun$August == (max(NDVI_Aug_Mun$August, na.rm = TRUE)))
max_NDVI_Aug
# Plot
spplot(NDVI_Aug_Mun, zcol = "August", col.regions= colfunc(30),main="NDVI in August")
##### Whole year ########
# Add a layer holding the per-cell mean over the monthly layers.
NDVI_mask$Average <- as.numeric(rowMeans(NDVI_mask[,], na.rm=TRUE))
NDVI_year_Mun <- extract(NDVI_mask$Average, nlMunicipality_proj, sp=TRUE, fun=mean, na.rm=TRUE)
max_NDVI_Year <- subset(NDVI_year_Mun$NAME_2, NDVI_year_Mun$Average == (max(NDVI_year_Mun$Average, na.rm = TRUE)))
max_NDVI_Year
# Plot
spplot(NDVI_year_Mun, zcol = "Average", col.regions= colfunc(30), main="NDVI for whole year")
# Conclusion
print(paste("For January the greenest municipality:",max_NDVI_JAN,"For August:",max_NDVI_Aug,"and for the whole year:",max_NDVI_Year))
#### NICE plot ####
# Highlight the three winning municipalities on the January NDVI map.
plot_mun_jan <- subset(nlMunicipality_proj, nlMunicipality_proj$NAME_2 == max_NDVI_JAN)
plot_mun_aug <- subset(nlMunicipality_proj, nlMunicipality_proj$NAME_2 == max_NDVI_Aug)
plot_mun_year <- subset(nlMunicipality_proj, nlMunicipality_proj$NAME_2 == max_NDVI_Year)
plot(NDVI_Jan, main="NDVI in the Netherlands", xlab= "m", ylab= "m")
lines(plot_mun_jan, col= "Red")
text(plot_mun_jan@bbox[1], plot_mun_jan@bbox[2], labels = paste(max_NDVI_JAN), pos=3, cex= 0.7, col="black")
lines(plot_mun_aug, col= "blue")
text(plot_mun_aug@bbox[1], plot_mun_aug@bbox[2], labels = paste(max_NDVI_Aug), pos=3, cex= 0.7, col="black")
lines(plot_mun_year, col= "black")
text(plot_mun_year@bbox[1], plot_mun_year@bbox[2], labels = paste(max_NDVI_Year), pos=3, cex= 0.7, col="black")
legend("bottomright", c("Max August","Max January", "Max year"),
       lty=c(1,1,1), # gives the legend appropriate symbols (lines)
       lwd=c(1,1,1),col=c("blue","red", "black")) # gives the legend lines the correct color and width
### PROVINCE #####
# Repeat the analysis at province level (GADM level 1).
nlProvince <- getData('GADM',country='NLD', level=1)
# Reproject to match the raster's CRS.
nlProvince_proj <- spTransform(nlProvince, CRS(proj4string(Neth_NDVI)))
###### FIND the province with the maximum NDVI for January ###
NDVI_Jan_Prov <- extract(NDVI_Jan, nlProvince_proj, fun=mean, na.rm=TRUE, sp=TRUE)
max_NDVI_Jan_Prov <- subset(NDVI_Jan_Prov$NAME_1, NDVI_Jan_Prov$January == (max(NDVI_Jan_Prov$January, na.rm = TRUE)))
###### FIND the province with the maximum NDVI for August ###
NDVI_Aug_Prov <- extract(NDVI_Aug, nlProvince_proj, fun=mean, na.rm=TRUE, sp=TRUE)
max_NDVI_Aug_Prov <- subset(NDVI_Aug_Prov$NAME_1, NDVI_Aug_Prov$August == (max(NDVI_Aug_Prov$August, na.rm = TRUE)))
##### Calculate NDVI for a whole year ########
NDVI_year_Prov <- extract(NDVI_mask$Average, nlProvince_proj, sp=TRUE, fun=mean, na.rm=TRUE)
max_NDVI_Year_Prov <- subset(NDVI_year_Prov$NAME_1, NDVI_year_Prov$Average == (max(NDVI_year_Prov$Average, na.rm = TRUE)))
print(paste("To conclude, in January",max_NDVI_Jan_Prov, "is the greenest in August", max_NDVI_Aug_Prov, "for the whole year also", max_NDVI_Year_Prov))
#source("R_functions/Max_Muni_month.R")
########### !!!! EXTRA !!!! #######
####### We tried to build a function that would do the calculation for a month of choice; we did not get it running in time. ####
##### The draft is kept below for feedback. ####
#Max_Muni_month = function(x, y, z)
#{
#  NDVI_month <- subset(y, x)
#  NDVI_month_muni <- extract(NDVI_month, z, fun=mean, na.rm=TRUE, sp=TRUE)
#
#  mymonths <- c("January","February","March",
#
#                "April","May","June",
#
#                "July","August","September",
#
#                "October","November","December")
#
#  month <- mymonths[x]
## give the name in the output #
#
#name_max_NDVI_muni <- subset(NDVI_month_muni$NAME_2, eval(parse(text=paste0("NDVI_month_muni$",month))) == max(eval(parse(text=paste0("NDVI_month_muni$",month))), na.rm = TRUE))
#plot
#colfunc <- colorRampPalette(c("red", "green"))
#spplot(NDVI_month_muni, zcol = "January", col.regions= colfunc(30))
#return(name_max_NDVI_muni)
# }
#NDVI_max_Jan <- Max_Muni_month(1, NDVI_mask, nlMunicipality_proj)
#NDVI_max_Jan
#NDVI_max_Aug <- Max_Muni_month(8, NDVI_mask, nlMunicipality_proj)
#NDVI_max_Aug
| /Lesson_7/main.R | no_license | stijnbeki/TeamBS_GeoScripting | R | false | false | 5,511 | r | ####Team BS#############
####17-01-2017##########
####Geoscripting########
####Lesson_7 Exercise###
rm(list=ls())
library(raster)
source("R_functions/Download_Brick.R")
#download, unzip and brick the InputData#
Neth <- Download_Brick("https://raw.githubusercontent.com/GeoScripting-WUR/VectorRaster/gh-pages/data/MODIS.zip")
#Convert to 'normal' NDVI values
Neth_NDVI = 0.0001* Neth
nlMunicipality <- getData('GADM',country='NLD', level=2)
#Get projection for both maps the same
nlMunicipality_proj <- spTransform(nlMunicipality, CRS(proj4string(Neth_NDVI)))
#Only select the area of the Netherlands
NDVI_mask <- mask(Neth_NDVI, mask = nlMunicipality_proj)
###### HERE we find the maximum NDVI for every month ###
###### January###
NDVI_Jan <- subset(NDVI_mask, 1)
NDVI_Jan_Mun <- extract(NDVI_Jan, nlMunicipality_proj, fun=mean, na.rm=TRUE, sp=T)
max_NDVI_JAN <- subset(NDVI_Jan_Mun$NAME_2, NDVI_Jan_Mun$January == (max(NDVI_Jan_Mun$January, na.rm = T)))
max_NDVI_JAN
#plot
colfunc <- colorRampPalette(c("red", "green"))
spplot(NDVI_Jan_Mun, zcol = "January", col.regions= colfunc(30), main="NDVI in January")
###### Augustus###
NDVI_Aug <- subset(NDVI_mask, 8)
# --- Greenest municipality per period (August / whole year) -----------------
# NOTE(review): extract(), spplot(), getData(), spTransform() and the objects
# NDVI_Aug, NDVI_Jan, NDVI_mask, nlMunicipality_proj, colfunc, max_NDVI_JAN
# are defined earlier in the script -- presumably raster/sp are loaded; confirm.
# sp=T makes extract() return a Spatial*DataFrame with one mean NDVI value
# per municipality polygon.
NDVI_Aug_Mun <- extract(NDVI_Aug, nlMunicipality_proj, fun=mean, na.rm=TRUE, sp=T)
# Name (NAME_2) of the municipality whose August mean NDVI is the maximum.
max_NDVI_Aug <- subset(NDVI_Aug_Mun$NAME_2, NDVI_Aug_Mun$August == (max(NDVI_Aug_Mun$August, na.rm = T)))
max_NDVI_Aug
# Choropleth of the per-municipality August means.
spplot(NDVI_Aug_Mun, zcol = "August", col.regions= colfunc(30),main="NDVI in August")
##### Whole year ########
# Cell-wise mean over all monthly layers = yearly average NDVI.
NDVI_mask$Average <- as.numeric(rowMeans(NDVI_mask[,], na.rm=T))
NDVI_year_Mun <- extract(NDVI_mask$Average, nlMunicipality_proj, sp=T, fun=mean, na.rm=TRUE)
# Municipality with the highest yearly average NDVI.
max_NDVI_Year <- subset(NDVI_year_Mun$NAME_2, NDVI_year_Mun$Average == (max(NDVI_year_Mun$Average, na.rm = T)))
max_NDVI_Year
# Choropleth of the yearly averages.
spplot(NDVI_year_Mun, zcol = "Average", col.regions= colfunc(30), main="NDVI for whole year")
# Conclusion (max_NDVI_JAN was computed earlier, outside this chunk).
print(paste("For January the greenest municipality:",max_NDVI_JAN,"For August:",max_NDVI_Aug,"and for the whole year:",max_NDVI_Year))
#### Nice summary plot ####
# Outline the winning municipality of each period on top of the January raster.
plot_mun_jan <- subset(nlMunicipality_proj, nlMunicipality_proj$NAME_2 == max_NDVI_JAN)
plot_mun_aug <- subset(nlMunicipality_proj, nlMunicipality_proj$NAME_2 == max_NDVI_Aug)
plot_mun_year <- subset(nlMunicipality_proj, nlMunicipality_proj$NAME_2 == max_NDVI_Year)
plot(NDVI_Jan, main="NDVI in the Netherlands", xlab= "m", ylab= "m")
lines(plot_mun_jan, col= "Red")
# Label each outlined polygon near its bounding-box corner.
text(plot_mun_jan@bbox[1], plot_mun_jan@bbox[2], labels = paste(max_NDVI_JAN), pos=3, cex= 0.7, col="black")
lines(plot_mun_aug, col= "blue")
text(plot_mun_aug@bbox[1], plot_mun_aug@bbox[2], labels = paste(max_NDVI_Aug), pos=3, cex= 0.7, col="black")
lines(plot_mun_year, col= "black")
text(plot_mun_year@bbox[1], plot_mun_year@bbox[2], labels = paste(max_NDVI_Year), pos=3, cex= 0.7, col="black")
legend("bottomright", c("Max August","Max January", "Max year"),
lty=c(1,1,1), # gives the legend appropriate symbols (lines)
lwd=c(1,1,1),col=c("blue","red", "black")) # gives the legend lines the correct color and width
### Province level #####
# Repeat the analysis at GADM level 1 (province boundaries).
nlProvince <- getData('GADM',country='NLD', level=1)
# Reproject the provinces to the NDVI raster's CRS so both align.
nlProvince_proj <- spTransform(nlProvince, CRS(proj4string(Neth_NDVI)))
###### Find the province with the maximum mean NDVI for January ###
NDVI_Jan_Prov <- extract(NDVI_Jan, nlProvince_proj, fun=mean, na.rm=TRUE, sp=T)
max_NDVI_Jan_Prov <- subset(NDVI_Jan_Prov$NAME_1, NDVI_Jan_Prov$January == (max(NDVI_Jan_Prov$January, na.rm = T)))
###### Find the province with the maximum mean NDVI for August ###
NDVI_Aug_Prov <- extract(NDVI_Aug, nlProvince_proj, fun=mean, na.rm=TRUE, sp=T)
max_NDVI_Aug_Prov <- subset(NDVI_Aug_Prov$NAME_1, NDVI_Aug_Prov$August == (max(NDVI_Aug_Prov$August, na.rm = T)))
##### Maximum yearly-average NDVI per province ########
NDVI_year_Prov <- extract(NDVI_mask$Average, nlProvince_proj, sp=T, fun=mean, na.rm=TRUE)
max_NDVI_Year_Prov <- subset(NDVI_year_Prov$NAME_1, NDVI_year_Prov$Average == (max(NDVI_year_Prov$Average, na.rm = T)))
print(paste("To conclude, in January",max_NDVI_Jan_Prov, "is the greenest in August", max_NDVI_Aug_Prov, "for the whole year also", max_NDVI_Year_Prov))
#source("R_functions/Max_Muni_month.R")
########### !!!! EXTRA !!!! #######
####### We tried to build a function that would do the calculation for a month of
####### choice. We didn't get this running in time. ####
##### The draft is kept below (as comments); feedback or tips welcome. ####
# NOTE(review): the draft relies on eval(parse(text=...)) to pick a column by
# name -- NDVI_month_muni[[month]] would be the safer idiom.
#Max_Muni_month = function(x, y, z)
#{
# NDVI_month <- subset(y, x)
# NDVI_month_muni <- extract(NDVI_month, z, fun=mean, na.rm=TRUE, sp=TRUE)
#
# mymonths <- c("January","February","March",
#
# "April","May","June",
#
# "July","August","September",
#
# "October","November","December")
#
# month <- mymonths[x]
## give the name in the output #
#
#name_max_NDVI_muni <- subset(NDVI_month_muni$NAME_2, eval(parse(text=paste0("NDVI_month_muni$",month))) == max(eval(parse(text=paste0("NDVI_month_muni$",month))), na.rm = TRUE))
# plot
#colfunc <- colorRampPalette(c("red", "green"))
#spplot(NDVI_month_muni, zcol = "January", col.regions= colfunc(30))
#return(name_max_NDVI_muni)
# }
#NDVI_max_Jan <- Max_Muni_month(1, NDVI_mask, nlMunicipality_proj)
#NDVI_max_Jan
#NDVI_max_Aug <- Max_Muni_month(8, NDVI_mask, nlMunicipality_proj)
#NDVI_max_Aug
|
library(LifeTables)
### Name: hmd.DA.mx
### Title: Model Life Table Discriminant Analysis
### Aliases: hmd.DA.mx
### Keywords: cluster misc
### ** Examples
# Load the model-life-table objects shipped with the package (provides flt.mx).
data(MLTobs)
# Column indices of seven test schedules with known family membership:
##  48 Belgium 1860-64 (known class = 1)
## 180 England 1925-29 (known class = 2)
## 207 Estonia 2005-09 (known class = 7)
## 266 France 1960-64  (known class = 3)
## 410 Japan 2000-04   (known class = 5)
## 607 Russia 1980-84  (known class = 6)
## 798 USA 2000-04     (known class = 4)
selected.cols <- c(48,180,207,266,410,607,798)
# Rows 3:10 of flt.mx hold the female mortality rates for ages 5-40.
rates.mat <- t(flt.mx[3:10, selected.cols])
ages <- seq(from = 5, to = 40, by = 5)
# Classify the whole test matrix at once.
fit.all <- hmd.DA.mx(data = rates.mat, age = ages, sex = "female")
fit.all$classification
# Classify a single schedule, supplied as a one-row matrix.
fit.one <- hmd.DA.mx(data = t(as.matrix(rates.mat[4, ])), age = ages, sex = "female")
fit.one$classification
| /data/genthat_extracted_code/LifeTables/examples/hmd.DA.mx.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 895 | r | library(LifeTables)
### Name: hmd.DA.mx
### Title: Model Life Table Discriminant Analysis
### Aliases: hmd.DA.mx
### Keywords: cluster misc
### ** Examples
# some test data
data(MLTobs)
##48 Belgium 1860-64 (known class = 1)
##180 England 1925-29 (known class = 2)
##207 Estonia 2005-09 (known class = 7)
##266 France 1960-64 (known class = 3)
##410 Japan 2000-04 (known class = 5)
##607 Russia 1980-84 (known class = 6)
##798 USA 2000-04 (known class = 4)
country.nums <- c(48,180,207,266,410,607,798)
test.mx <- t(flt.mx[3:10,country.nums]) # mortality rates for ages 5-40
test.age <- seq(5,40,5)
# classify the test data matrix
examp.out <- hmd.DA.mx(data=test.mx, age=test.age, sex="female")
examp.out$classification
# classify the test data single schedule as matrix
examp.out2 <- hmd.DA.mx(data=t(as.matrix(test.mx[4,])), age=test.age, sex="female")
examp.out2$classification
|
# 2019-10-29 funclibs for parsing genome annotations
#' Parse genome annotation
#'
#' parseGenomeAnnotation parses different types of genome annotations.
#'
#' Due to the complex GFF3/GTF/TxDB structure of different genome annotation files from different species,
#' this function may not be always applicable for any given file. You may need to check manually.
#' @usage parseGenomeAnnotation(anAnno)
#' @param anAnno can be a list of anno.rna/anno.need/anno.frame, or a .rda/.rdata/.gff3/.gff/.gtf file name, or a TxDb object.
#' @return a parsed genome annotation object, which is a list of three elements (anno.rna, anno.need, anno.frame) and can be used for annotatePAC().
#' @examples
#' ## Way1: Based on an annotation file in gff3 format, You can dowonload annotation from Ensemble Plants
#' #Prepare the annotation
#' #wget -c ftp://ftp.ensemblgenomes.org/pub/plants/release-44/gff3/arabidopsis_thaliana/Arabidopsis_thaliana.TAIR10.44.gff3.gz
#' #gunzip Arabidopsis_thaliana.TAIR10.44.gff3.gz
#' gff.path <- "/path/Arabidopsis_thaliana.TAIR10.44.gff3"
#' anno <- parseGenomeAnnotation(anAnno=gff.path)
#'
#' ##way2: load from a .rda file (already processed file)
#' anno <- parseGenomeAnnotation("anno.rda")
#'
#' ##Way3: Based on a TxDb object generated from BioMart.
#' # Parse Arabidopsis Txdb
#' library(TxDb.Athaliana.BioMart.plantsmart28)
#' anno <- parseGenomeAnnotation(TxDb.Athaliana.BioMart.plantsmart28)
#' # Parse mm10 Txdb
#' BiocManager::install("TxDb.Mmusculus.UCSC.mm10.ensGene")
#' library(TxDb.Mmusculus.UCSC.mm10.ensGene)
#' anno <- parseGenomeAnnotation(TxDb.Mmusculus.UCSC.mm10.ensGene)
#' @name parseGenomeAnnotation
#' @seealso [annotatePAC()] to annotate a PACdataset.
#' @family genome annotation functions
#' @export
parseGenomeAnnotation <- function(anAnno) {
  # inherits() instead of class(x)=='...': class() may return a vector, which
  # errors inside if() since R 4.2.
  if (inherits(anAnno, 'list')) {
    # An already-parsed annotation is passed through unchanged.
    if (sum(names(anAnno) %in% c('anno.rna', 'anno.need', 'anno.frame')) != 3)
      stop("anAnno is a list, but no anno.rna/anno.need/anno.frame!")
    return(anAnno)
  }
  if (is.character(anAnno)) {
    if (grepl('\\.rda|\\.rdata', tolower(anAnno))) {
      if (!file.exists(anAnno)) {
        stop("anAnno is .rda/.rdata but file not exists!")
      }
      # Load into a private environment and return the first object that looks
      # like a parsed annotation (a list with all three expected elements).
      a = new.env()
      load(anAnno, envir = a)
      for (v in ls(a)) {
        if (inherits(get(v, envir = a), 'list')) {
          if (!(AinB(c('anno.rna', 'anno.need', 'anno.frame'), names(get(v, envir = a))))) next
        } else {
          next
        }
        return(get(v, envir = a))
      }
      stop('No list(anno.rna, anno.need, anno.frame) in .rda file anAnno')
    } else if (grepl('\\.gff3|\\.gff|\\.gtf', tolower(anAnno))) {
      rt = parseGff(anAnno)
    } else {
      # BUGFIX: previously fell through with 'rt' undefined, producing the
      # cryptic "object 'rt' not found" error.
      stop("anAnno is a character string but not a .rda/.rdata/.gff3/.gff/.gtf file name!")
    }
    invisible(gc())
    return(rt)
  }  #~chr
  if (inherits(anAnno, 'TxDb')) {
    rt = parseTxdb(anAnno)
    invisible(gc())
    return(rt)
  }
  stop("anAnno must be a list, a file name, or a TxDb object!")
}
#' Parse TxDb genome annotation
#'
#' parseTxdb parses a genome annotation object of class TxDb into plain data frames.
#'
#' @usage parseTxdb(an.txdb)
#' @param an.txdb a TxDb object
#' @return a parsed genome annotation object, which is a list of three elements (anno.rna, anno.need, anno.frame) and can be used for annotatePAC().
#' @examples
#' library(TxDb.Athaliana.BioMart.plantsmart28)
#' txdbAnno <- parseTxdb(an.txdb=TxDb.Athaliana.BioMart.plantsmart28)
#' @name parseTxdb
#' @seealso [parseGff()] to parse a Gff file.
#' @family Genome annotation functions
#' @export
parseTxdb <- function (an.txdb) {
if(class(an.txdb)!='TxDb') stop("an.txdb not of class TxDb!")
# Each feature type is extracted via GenomicFeatures accessors and normalized
# into a common 11-column frame: seqnames/start/end/width/strand/type/ID/
# biotype/gene_id/Parent/transcript_id.
# -- genes: ID and gene_id are the gene id; genes have no Parent/transcript.
genes <- genes(an.txdb,columns=c("tx_type","gene_id"))
genes <- as.data.frame(genes)
genes <- data.frame(seqnames=as.character(genes$seqnames) ,start=as.integer(genes$start),
end=as.integer(genes$end),width=as.integer(genes$width),
strand=as.character(genes$strand),type="gene",
ID =as.character(genes$gene_id),biotype=as.character(genes$tx_type),
gene_id =as.character(genes$gene_id),Parent=NA,transcript_id=NA)
#setdiff(colnames(genes),colnames(tari))
# -- transcripts: type "RNA"; Parent is the gene id, ID/transcript_id the tx name.
rnas <- transcripts(an.txdb,columns=c("tx_name","tx_type","gene_id"))
rnas<- as.data.frame(rnas)
#test <- strsplit(as.character(rnas$gene_id) ,"\\s+")
#temp3 <- paste("",lapply(test,"[[",1),sep="");
#head(temp3)
rnas <- data.frame(seqnames=as.character(rnas$seqnames) ,start=as.integer(rnas$start),
end=as.integer(rnas$end),width=as.integer(rnas$width),
strand=as.character(rnas$strand),type="RNA",
ID =as.character(rnas$tx_name),biotype=as.character(rnas$tx_type),
gene_id =as.character(rnas$gene_id),Parent=as.character(rnas$gene_id),
transcript_id=as.character(rnas$tx_name))
# exons <- exons(an.txdb,columns=c("exon_name","tx_name","tx_type","gene_id"))
# exons <- as.data.frame(exons)
# head(exons)
# -- exons, grouped by transcript; group_name is the transcript name.
exons <- exonsBy(an.txdb,by=c("tx"),use.names=TRUE)
exons <- as.data.frame(exons)
exons <- data.frame(seqnames=as.character(exons$seqnames) ,start=as.integer(exons$start),
end=as.integer(exons$end),width=as.integer(exons$width),
strand=as.character(exons$strand),type="exon",
ID =as.character(exons$exon_name),biotype=NA,
gene_id =NA,Parent=as.character(exons$group_name),
transcript_id=as.character(exons$group_name))
# Fill gene_id/biotype from the parent transcript row.
index <- match(exons$Parent,rnas$transcript_id)
#which(is.na(index))
exons$gene_id <- rnas$Parent[index]
exons$biotype <- rnas$biotype[index]
#==================================
#CDS (same normalization as exons)
cdss <- cdsBy(an.txdb,by=c("tx"),use.names=TRUE)
cdss <- as.data.frame(cdss)
cdss <- data.frame(seqnames=as.character(cdss$seqnames) ,start=as.integer(cdss$start),
end=as.integer(cdss$end),width=as.integer(cdss$width),
strand=as.character(cdss$strand),type="CDS",
ID =as.character(cdss$cds_name),biotype=NA,
gene_id =NA,Parent=as.character(cdss$group_name),
transcript_id=as.character(cdss$group_name))
index <- match(cdss$Parent,rnas$transcript_id)
#which(is.na(index))
cdss$gene_id <- rnas$Parent[index]
cdss$biotype <- rnas$biotype[index]
#head(cdss)
#cdss <- cds(an.txdb,columns=c("cds_name","tx_name","tx_type","gene_id"))
#==================================
#introns (no own ID; located between exons of a transcript)
introns <- intronsByTranscript(an.txdb,use.names=TRUE)
introns <- as.data.frame(introns)
introns <- data.frame(seqnames=as.character(introns$seqnames) ,start=as.integer(introns$start),
end=as.integer(introns$end),width=as.integer(introns$width),
strand=as.character(introns$strand),type="intron",
ID =NA,biotype=NA,
gene_id =NA,Parent=as.character(introns$group_name),
transcript_id=as.character(introns$group_name))
index <- match(introns$Parent,rnas$transcript_id)
#which(is.na(index))
introns$gene_id <- rnas$Parent[index]
introns$biotype <- rnas$biotype[index]
#head(introns)
#===================================================
#five UTR
fiveUTRs <- fiveUTRsByTranscript(an.txdb,use.names=TRUE)
fiveUTRs <- as.data.frame(fiveUTRs)
fiveUTRs <- data.frame(seqnames=as.character(fiveUTRs$seqnames) ,start=as.integer(fiveUTRs$start),
end=as.integer(fiveUTRs$end),width=as.integer(fiveUTRs$width),
strand=as.character(fiveUTRs$strand),type="five_prime_UTR",
ID =NA,biotype=NA,
gene_id =NA,Parent=as.character(fiveUTRs$group_name),
transcript_id=as.character(fiveUTRs$group_name))
index <- match(fiveUTRs$Parent,rnas$transcript_id)
#which(is.na(index))
fiveUTRs$gene_id <- rnas$Parent[index]
fiveUTRs$biotype <- rnas$biotype[index]
#head(fiveUTRs)
#===========================================
#three UTR
threeUTRs <- threeUTRsByTranscript(an.txdb,use.names=TRUE)
threeUTRs <- as.data.frame(threeUTRs)
threeUTRs <- data.frame(seqnames=as.character(threeUTRs$seqnames) ,start=as.integer(threeUTRs$start),
end=as.integer(threeUTRs$end),width=as.integer(threeUTRs$width),
strand=as.character(threeUTRs$strand),type="three_prime_UTR",
ID =NA,biotype=NA,
gene_id =NA,Parent=as.character(threeUTRs$group_name),
transcript_id=as.character(threeUTRs$group_name))
index <- match(threeUTRs$Parent,rnas$transcript_id)
#which(is.na(index))
threeUTRs$gene_id <- rnas$Parent[index]
threeUTRs$biotype <- rnas$biotype[index]
# Assemble the three return frames:
#   anno.frame -- everything, with type as an ordered factor;
#   anno.need  -- only the sub-gene features used downstream;
#   anno.rna   -- transcript rows only.
anno.frame <- rbind(genes,rnas,exons,cdss,introns,fiveUTRs,threeUTRs)
anno.frame$type <- factor(anno.frame$type,levels=c("gene","RNA","five_prime_UTR","exon","CDS","intron",
"three_prime_UTR"))
#anno.frame <- anno.frame[order(anno.frame$transcript_id,anno.frame$gene_id,
#                               anno.frame$start,anno.frame$strand,anno.frame$type),]
anno.need <- rbind(exons,cdss,introns,fiveUTRs,threeUTRs)
anno.rna <- rnas
return(list(anno.need=anno.need, anno.rna=anno.rna, anno.frame=anno.frame))
}
#' Parse gff3/gtf genome annotation
#'
#' parseGff parses a genome annotation file of gff3/gff/gtf format.
#'
#' Due to the complex GFF3/GFF/GTF structure of different genome annotation files from different species,
#' this function may not be always applicable for any given file. You may need to check manually.
#' @usage parseGff(aGFF)
#' @param aGFF .gff3/.gff/.gtf file name
#' @return a parsed genome annotation object, which is a list of three elements (anno.rna, anno.need, anno.frame) and can be used for annotatePAC().
#' @examples
#' ## parse from a gff file, and save as .rda for further use.
#' gff=parseGff(aGFF='Bamboo.Hic.gff')
#' @name parseGff
#' @seealso [parseTxdb()] to parse a Txdb object.
#' @family genome annotation functions
#' @export
parseGff <- function(aGFF) {
  if (!is.character(aGFF)) stop("aGFF not a character string!")
  # BUGFIX: plain .gff files were rejected here (pattern was '\\.gff3|\\.gtf')
  # although the branch below and the error message clearly support them.
  if (!grepl('\\.gff3|\\.gff|\\.gtf', tolower(aGFF))) stop('aGFF not .gff3/.gff/.gtf!')
  if (grepl('\\.gff3|\\.gff', tolower(aGFF))) {
    #------------------------------------------------------
    # GFF3/GFF branch
    #------------------------------------------------------
    gff.path = aGFF
    anno <- import.gff3(gff.path)   # rtracklayer
    anno.frame <- as.data.frame(anno, stringsAsFactors = FALSE)
    anno.frame$seqnames <- as.character(anno.frame$seqnames)
    anno.frame$strand <- as.character(anno.frame$strand)
    anno.frame$type <- as.character(anno.frame$type)
    # Strip Ensembl-style "<prefix>:" from IDs, e.g. "transcript:AT1G01010.1".
    anno.frame$Parent <- sub(pattern = "\\S+\\:", replacement = "", anno.frame$Parent)
    anno.frame$ID <- sub(pattern = "\\S+\\:", replacement = "", anno.frame$ID)
    # Drop chromosome description rows.
    if (length(which(anno.frame$type == "chromosome"))) {
      anno.frame <- anno.frame[-which(anno.frame$type == "chromosome"), ]
    }
    # Rename "transcript" to "RNA" so it matches the "RNA$" pattern below.
    anno.frame$type[which(anno.frame$type == "transcript")] <- "RNA"
    rna.id <- grep("RNA$", anno.frame$type, ignore.case = FALSE)
    anno.rna <- anno.frame[rna.id, ]
  } else if (grepl('\\.gtf', tolower(aGFF))) {
    #------------------------------------------------------
    # GTF branch
    #------------------------------------------------------
    gtf.path = aGFF
    anno <- import(gtf.path, format = "gtf")
    anno.frame <- as.data.frame(anno, stringsAsFactors = FALSE)
    anno.frame$seqnames <- as.character(anno.frame$seqnames)
    anno.frame$strand <- as.character(anno.frame$strand)
    anno.frame$type <- as.character(anno.frame$type)
    # GTF rows carry transcript_id attributes; use them as Parent.
    anno.frame$Parent <- as.character(anno.frame$transcript_id)
    anno.frame$type[which(anno.frame$type == "transcript")] <- "RNA"
    # (Simplified: after the renaming above no "transcript" rows can remain,
    # so the former grep("transcript", ...) if/else was dead code -- both
    # branches performed the same grep below.)
    rna.id <- grep("RNA$", anno.frame$type, ignore.case = FALSE)
    if (length(rna.id) == 0) {
      # No transcript rows at all: synthesize them from the exon rows.
      anno.frame <- add_rna(anno.frame)
      rna.id <- grep("RNA$", anno.frame$type, ignore.case = FALSE)
    }
    if (length(which(anno.frame$type == "gene")) == 0) {
      # No gene rows either: clone RNA rows as gene rows with empty Parent.
      anno.gene <- anno.frame[rna.id, ]
      anno.gene$type <- "gene"
      anno.gene$Parent <- ""
      anno.frame <- rbind(anno.frame, anno.gene)
    }
    anno.frame$ID <- anno.frame$Parent
    if (length(which(anno.frame$type == "chromosome"))) {
      anno.frame <- anno.frame[-which(anno.frame$type == "chromosome"), ]
      # BUGFIX: recompute the RNA row positions -- deleting rows above
      # invalidates indices computed before the deletion.
      rna.id <- grep("RNA$", anno.frame$type, ignore.case = FALSE)
    }
    anno.rna <- anno.frame[rna.id, ]
  } #~gtf
  # If the annotation is incomplete and lacks a transcript_id column, fall back to ID.
  if (!length(which(colnames(anno.rna) == "transcript_id"))) {
    anno.rna$transcript_id <- anno.rna$ID
  }
  # Transcript names must be consistent with the Parent of exon/cds/utr rows, e.g.
  # ID=transcript:AT1G01010.1;Parent=gene:AT1G01010;...  vs  Parent=transcript:AT1G01010.1
  if (length(setdiff(anno.rna$transcript_id, anno.frame$Parent))) {
    stop("Not consistent between transcript id in rna and exon/cds/utr")
  }
  # Derive UTR rows from CDS/exon when the file does not provide them.
  if (is.na(match("three_prime_UTR", unique(anno.frame$type)))) {
    if (is.na(match("CDS", unique(anno.frame$type)))) {
      warning("This annotation without CDS, we can't identify UTR region")
    } else {
      print("Extracting UTR region")
      anno.frame <- add_utr(anno.frame)
    }
  }
  #=========================================================================
  # anno.need stores cds/exon/5utr/3utr (and, below, inferred intron) rows.
  anno.need <- anno.frame[which(anno.frame$Parent %in% anno.rna$transcript_id), ]
  need.rna.id <- grep("RNA$", anno.need$type, ignore.case = FALSE)
  if (length(need.rna.id)) {
    anno.need <- anno.need[-need.rna.id, ]
  }
  index <- match(anno.need$Parent, anno.rna$transcript_id)
  if (length(which(is.na(index)))) {
    stop("error can't find exon/cds/5utr/3utr 's parent")
  }
  anno.need$gene_id <- anno.rna$Parent[index]
  if (is.na(match("biotype", colnames(anno.rna)))) {
    anno.rna$biotype <- NA
  }
  anno.need$biotype <- anno.rna$biotype[index]
  #====================================================================
  # Intron inference: introns are the gaps between consecutive exons of the
  # same transcript. The sorted exon table is split into odd rows (.1), even
  # rows (.2) and odd-rows-from-3 (.3); pairing .1/.2 and .2/.3 covers every
  # consecutive exon pair.
  # NOTE(review): seq(3, n, 2) errors when the table has fewer than 3 exon
  # rows -- confirm whether such tiny annotations need to be supported.
  exon.id <- grep("exon", anno.need$type, ignore.case = FALSE)
  ann.exon <- anno.need[exon.id, ]
  if (length(which(is.na(ann.exon$Parent)))) {
    print("exist some exon can't find parent id ")
  }
  ann.exon <- ann.exon[order(ann.exon$Parent, ann.exon$start, ann.exon$strand), ]
  ann.exon.1 <- ann.exon[seq(1, nrow(ann.exon), 2), ]
  ann.exon.2 <- ann.exon[seq(2, nrow(ann.exon), 2), ]
  ann.exon.3 <- ann.exon[seq(3, nrow(ann.exon), 2), ]
  keep.num1 <- min(nrow(ann.exon.1), nrow(ann.exon.2))
  ann.exon.k1 <- ann.exon.1[1:keep.num1, ]
  ann.exon.k2 <- ann.exon.2[1:keep.num1, ]
  # Only pairs belonging to the same transcript yield an intron.
  index <- which(ann.exon.k1$Parent == ann.exon.k2$Parent)
  if (!identical(ann.exon.k1$Parent[index], ann.exon.k2$Parent[index])) {
    stop("something error with extart intron region")
  }
  ann.intron1 <- ann.exon.k1[index, ]
  ann.intron1$type <- "intron"
  ann.intron1$start <- ann.exon.k1$end[index] + 1
  ann.intron1$end <- ann.exon.k2$start[index] - 1
  keep.num2 <- min(nrow(ann.exon.2), nrow(ann.exon.3))
  ann.exon.kk2 <- ann.exon.2[1:keep.num2, ]
  ann.exon.k3 <- ann.exon.3[1:keep.num2, ]
  index <- which(ann.exon.kk2$Parent == ann.exon.k3$Parent)
  if (!identical(ann.exon.kk2$Parent[index], ann.exon.k3$Parent[index])) {
    stop("something error with extart intron region")
  }
  ann.intron2 <- ann.exon.kk2[index, ]
  ann.intron2$type <- "intron"
  ann.intron2$start <- ann.exon.kk2$end[index] + 1
  ann.intron2$end <- ann.exon.k3$start[index] - 1
  ann.intron <- rbind(ann.intron1, ann.intron2)
  ann.intron <- ann.intron[order(ann.intron$Parent, ann.intron$start, ann.intron$strand), ]
  anno.need <- rbind(anno.need, ann.intron)
  # Safety net: anno.need must not contain RNA rows.
  rna.error <- grep("RNA$", anno.need$type, ignore.case = FALSE)
  if (length(rna.error)) {
    anno.need <- anno.need[-rna.error, ]
  }
  return(list(anno.need=anno.need, anno.rna=anno.rna, anno.frame=anno.frame))
}
#=========================================================
#------------------------------------------------------
# function: add_utr()
# Infer 5'/3' UTR rows from the gap between each transcript's span and its
# first/last CDS, clipped to the transcript's exons. Used by parseGff() when
# the annotation provides CDS but no UTR rows.
# (Note: "frist" in the variable names below is a historical typo for "first".)
#--------------------------------------------------------
#======================================================
add_utr <- function(anno.frame=NULL){
anno.cds <- anno.frame[which(anno.frame$type=="CDS"),]
anno.exon <- anno.frame[which(anno.frame$type=="exon"),]
rna.id <- grep("RNA$",anno.frame$type,ignore.case = FALSE)
anno.rna <- anno.frame[rna.id,]
# Fall back to ID when the annotation lacks a transcript_id column.
if(!length(which(colnames(anno.rna) == "transcript_id"))){
anno.rna$transcript_id<-anno.rna$ID
}
# First (leftmost) and last (rightmost) CDS per transcript, obtained by
# sorting ascending/descending and keeping the first occurrence.
anno.cds.frist <- anno.cds[order(anno.cds$Parent,anno.cds$start,anno.cds$strand,decreasing = FALSE),]
anno.cds.last <- anno.cds[order(anno.cds$Parent,anno.cds$start,anno.cds$strand,decreasing = TRUE),]
anno.cds.frist <- anno.cds.frist[!duplicated(anno.cds.frist$Parent),]
anno.cds.last <- anno.cds.last[!duplicated(anno.cds.last$Parent),]
index.frist <-match(anno.cds.frist$Parent,anno.rna$transcript_id)
index.last <-match(anno.cds.last$Parent,anno.rna$transcript_id)
if(length(which(is.na(c(index.frist,index.last))))){
stop("Can't find cds parent based on input annotation file ")
}
# Candidate UTR on the left: from transcript start to just before first CDS.
anno.cds.frist$utr.start <- anno.rna$start[index.frist]
anno.cds.frist$utr.end <- anno.cds.frist$start -1
anno.cds.frist <- anno.cds.frist[which( (anno.cds.frist$utr.end- anno.cds.frist$utr.start) >=0),]
# Candidate UTR on the right: from just after last CDS to transcript end.
anno.cds.last$utr.start <- anno.cds.last$end +1
anno.cds.last$utr.end <- anno.rna$end[index.last]
anno.cds.last <- anno.cds.last[which((anno.cds.last$utr.end- anno.cds.last$utr.start) >=0),]
# Build GRanges keyed by transcript (Parent used as seqnames) so candidate
# UTRs can be intersected with that transcript's exons only.
# NOTE(review): requires GenomicRanges/IRanges to be attached by the caller.
gr.first <- GRanges(seqnames =as.character(anno.cds.frist$Parent) ,
ranges =IRanges(start=as.integer(anno.cds.frist$utr.start) ,
end=as.integer(anno.cds.frist$utr.end)),
strand =as.character(anno.cds.frist$strand))
gr.last <- GRanges(seqnames =as.character(anno.cds.last$Parent) ,
ranges =IRanges(start=as.integer(anno.cds.last$utr.start) ,
end=as.integer(anno.cds.last$utr.end)),
strand =as.character(anno.cds.last$strand))
gr.exon <- GRanges(seqnames =as.character(anno.exon$Parent) ,
ranges =IRanges(start=as.integer(anno.exon$start) ,
end=as.integer(anno.exon$end)),
strand =as.character(anno.exon$strand))
ov.first <- findOverlaps(gr.first,gr.exon)
ov.last <- findOverlaps(gr.last,gr.exon)
ov.first <- as.data.frame(ov.first)
ov.last <- as.data.frame(ov.last)
colnames(ov.first)<-c("cdsID","exonID")
colnames(ov.last) <- c("cdsID","exonID")
# Clip each left candidate to the overlapping exon: exon start .. min(candidate end, exon end).
ov.first$utr.start <- as.integer(anno.cds.frist$utr.start[ov.first$cdsID])
ov.first$utr.end <- as.integer(anno.cds.frist$utr.end[ov.first$cdsID])
ov.first$exon.start <- as.integer(anno.exon$start[ov.first$exonID])
ov.first$exon.end <- as.integer(anno.exon$end[ov.first$exonID])
ov.first$utr.start.r <- ov.first$exon.start
ov.first$utr.end.r <- apply(ov.first[,c("utr.end","exon.end")],1,min)
five.utr <- anno.exon[ov.first$exonID,]
five.utr$start <- ov.first$utr.start.r
five.utr$end <- ov.first$utr.end.r
# Left of the CDS is 5'UTR on "+" strand but 3'UTR on "-" strand.
if(nrow(five.utr)){
five.utr$type <- "five_prime_UTR"
five.utr$type[which(five.utr$strand=="-")] <- "three_prime_UTR"
}
# Clip each right candidate symmetrically: max(candidate start, exon start) .. exon end.
ov.last$utr.start <- as.integer(anno.cds.last$utr.start[ov.last$cdsID])
ov.last$utr.end <- as.integer(anno.cds.last$utr.end[ov.last$cdsID])
ov.last$exon.start <- as.integer(anno.exon$start[ov.last$exonID])
ov.last$exon.end <- as.integer(anno.exon$end[ov.last$exonID])
ov.last$utr.start.r <- apply(ov.last[,c("utr.start","exon.start")],1,max)
ov.last$utr.end.r <- ov.last$exon.end
three.utr <- anno.exon[ov.last$exonID,]
three.utr$start <- ov.last$utr.start.r
three.utr$end <- ov.last$utr.end.r
# Right of the CDS is 3'UTR on "+" strand but 5'UTR on "-" strand.
if(nrow(three.utr)){
three.utr$type <- "three_prime_UTR"
three.utr$type[which(three.utr$strand=="-")] <- "five_prime_UTR"
}
utr <- rbind(three.utr,five.utr)
utr <- utr[order(utr$Parent,utr$type,utr$start),]
utr$width <- as.integer(utr$end-utr$start+1)
#-------------------------------------
# Validation scaffolding kept for reference: compares inferred UTRs against
# UTRs already present in an annotation.
# really.utr <- anno.frame[which(anno.frame$type %in% c("three_prime_UTR","five_prime_UTR")),]
# really.utr <- really.utr[order(really.utr$Parent,really.utr$type,really.utr$start),]
# length(unique(really.utr$Parent))
# length(unique(utr$Parent))
# identical(utr$start,really.utr$start)
# identical(utr$end,really.utr$end)
# identical(utr$strand,really.utr$strand)
# write.table(really.utr,file="really_utr.txt",col.names = TRUE,row.names = FALSE,sep="\t",
#             quote=FALSE)
# write.table(utr,file="build_utr.txt",col.names = TRUE,row.names = FALSE,sep="\t",
#             quote=FALSE)
anno.frame <-rbind(anno.frame,utr)
return(anno.frame)
}
#=========================================================
#------------------------------------------------------
# function: add_rna()
# Synthesize transcript ("mRNA") rows from exon rows for annotations
# (typically GTF) that do not provide them. Each transcript's span is the
# union of its exons: minimum exon start to maximum exon end.
# Assumes exons of a transcript do not overlap, so the exon with the largest
# start also carries the largest end.
#--------------------------------------------------------
#======================================================
add_rna <- function(anno.frame=NULL){
  anno.exon <- anno.frame[which(anno.frame$type=="exon"),]
  # Ascending sort: the first row kept per transcript carries the minimum start.
  anno.exon.order <- anno.exon[order(anno.exon$gene_id,anno.exon$transcript_id,
                                     anno.exon$strand,anno.exon$start,decreasing = FALSE),]
  # Reversed order: the first row kept per transcript carries the maximum start/end.
  anno.exon.rev <- anno.exon.order[nrow(anno.exon.order):1,]
  anno.exon.order.unique <- anno.exon.order[!duplicated(anno.exon.order$transcript_id),]
  anno.exon.rev.order <- anno.exon.rev[!duplicated(anno.exon.rev$transcript_id),]
  anno.rna <- anno.exon.order.unique
  index <- match(anno.rna$transcript_id,anno.exon.rev.order$transcript_id)
  anno.rna$end <- anno.exon.rev.order$end[index]
  # BUGFIX: keep width consistent with the new start/end span; previously the
  # synthesized rows retained the width of the first exon only.
  anno.rna$width <- as.integer(anno.rna$end - anno.rna$start + 1)
  anno.rna$Parent <- anno.rna$gene_id
  anno.rna$type <- "mRNA"
  anno.frame <-rbind(anno.frame,anno.rna)
  return(anno.frame)
}
| /R/R_funclib_GFF.r | no_license | BMILAB/movAPA | R | false | false | 22,585 | r | # 2019-10-29 funclibs for parsing genome annotations
#' Parse genome annotation
#'
#' parseGenomeAnnotation parses different types of genome annotations.
#'
#' Due to the complex GFF3/GTF/TxDB structure of different genome annotation files from different species,
#' this function may not be always applicable for any given file. You may need to check manually.
#' @usage parseGenomeAnnotation(anAnno)
#' @param anAnno can be a list of anno.rna/anno.need/anno.frame, or a .rda/.rdata/.gff3/.gff/.gtf file name, or a TxDb object.
#' @return a parsed genome annotation object, which is a list of three elements (anno.rna, anno.need, anno.frame) and can be used for annotatePAC().
#' @examples
#' ## Way1: Based on an annotation file in gff3 format, You can dowonload annotation from Ensemble Plants
#' #Prepare the annotation
#' #wget -c ftp://ftp.ensemblgenomes.org/pub/plants/release-44/gff3/arabidopsis_thaliana/Arabidopsis_thaliana.TAIR10.44.gff3.gz
#' #gunzip Arabidopsis_thaliana.TAIR10.44.gff3.gz
#' gff.path <- "/path/Arabidopsis_thaliana.TAIR10.44.gff3"
#' anno <- parseGenomeAnnotation(anAnno=gff.path)
#'
#' ##way2: load from a .rda file (already processed file)
#' anno <- parseGenomeAnnotation("anno.rda")
#'
#' ##Way3: Based on a TxDb object generated from BioMart.
#' # Parse Arabidopsis Txdb
#' library(TxDb.Athaliana.BioMart.plantsmart28)
#' anno <- parseGenomeAnnotation(TxDb.Athaliana.BioMart.plantsmart28)
#' # Parse mm10 Txdb
#' BiocManager::install("TxDb.Mmusculus.UCSC.mm10.ensGene")
#' library(TxDb.Mmusculus.UCSC.mm10.ensGene)
#' anno <- parseGenomeAnnotation(TxDb.Mmusculus.UCSC.mm10.ensGene)
#' @name parseGenomeAnnotation
#' @seealso [annotatePAC()] to annotate a PACdataset.
#' @family genome annotation functions
#' @export
parseGenomeAnnotation <- function(anAnno) {
  # inherits() instead of class(x)=='...': class() may return a vector, which
  # errors inside if() since R 4.2.
  if (inherits(anAnno, 'list')) {
    # An already-parsed annotation is passed through unchanged.
    if (sum(names(anAnno) %in% c('anno.rna', 'anno.need', 'anno.frame')) != 3)
      stop("anAnno is a list, but no anno.rna/anno.need/anno.frame!")
    return(anAnno)
  }
  if (is.character(anAnno)) {
    if (grepl('\\.rda|\\.rdata', tolower(anAnno))) {
      if (!file.exists(anAnno)) {
        stop("anAnno is .rda/.rdata but file not exists!")
      }
      # Load into a private environment and return the first object that looks
      # like a parsed annotation (a list with all three expected elements).
      a = new.env()
      load(anAnno, envir = a)
      for (v in ls(a)) {
        if (inherits(get(v, envir = a), 'list')) {
          if (!(AinB(c('anno.rna', 'anno.need', 'anno.frame'), names(get(v, envir = a))))) next
        } else {
          next
        }
        return(get(v, envir = a))
      }
      stop('No list(anno.rna, anno.need, anno.frame) in .rda file anAnno')
    } else if (grepl('\\.gff3|\\.gff|\\.gtf', tolower(anAnno))) {
      rt = parseGff(anAnno)
    } else {
      # BUGFIX: previously fell through with 'rt' undefined, producing the
      # cryptic "object 'rt' not found" error.
      stop("anAnno is a character string but not a .rda/.rdata/.gff3/.gff/.gtf file name!")
    }
    invisible(gc())
    return(rt)
  }  #~chr
  if (inherits(anAnno, 'TxDb')) {
    rt = parseTxdb(anAnno)
    invisible(gc())
    return(rt)
  }
  stop("anAnno must be a list, a file name, or a TxDb object!")
}
#' Parse TxDb genome annotation
#'
#' parseTxdb parses a genome annotation object of class TxDb into plain data frames.
#'
#' @usage parseTxdb(an.txdb)
#' @param an.txdb a TxDb object
#' @return a parsed genome annotation object, which is a list of three elements (anno.rna, anno.need, anno.frame) and can be used for annotatePAC().
#' @examples
#' library(TxDb.Athaliana.BioMart.plantsmart28)
#' txdbAnno <- parseTxdb(an.txdb=TxDb.Athaliana.BioMart.plantsmart28)
#' @name parseTxdb
#' @seealso [parseGff()] to parse a Gff file.
#' @family Genome annotation functions
#' @export
parseTxdb <- function (an.txdb) {
if(class(an.txdb)!='TxDb') stop("an.txdb not of class TxDb!")
# Each feature type is normalized into a common 11-column frame:
# seqnames/start/end/width/strand/type/ID/biotype/gene_id/Parent/transcript_id.
genes <- genes(an.txdb,columns=c("tx_type","gene_id"))
genes <- as.data.frame(genes)
genes <- data.frame(seqnames=as.character(genes$seqnames) ,start=as.integer(genes$start),
end=as.integer(genes$end),width=as.integer(genes$width),
strand=as.character(genes$strand),type="gene",
ID =as.character(genes$gene_id),biotype=as.character(genes$tx_type),
gene_id =as.character(genes$gene_id),Parent=NA,transcript_id=NA)
#setdiff(colnames(genes),colnames(tari))
# Transcripts become type "RNA"; Parent is the gene id.
rnas <- transcripts(an.txdb,columns=c("tx_name","tx_type","gene_id"))
rnas<- as.data.frame(rnas)
#test <- strsplit(as.character(rnas$gene_id) ,"\\s+")
#temp3 <- paste("",lapply(test,"[[",1),sep="");
#head(temp3)
rnas <- data.frame(seqnames=as.character(rnas$seqnames) ,start=as.integer(rnas$start),
end=as.integer(rnas$end),width=as.integer(rnas$width),
strand=as.character(rnas$strand),type="RNA",
ID =as.character(rnas$tx_name),biotype=as.character(rnas$tx_type),
gene_id =as.character(rnas$gene_id),Parent=as.character(rnas$gene_id),
transcript_id=as.character(rnas$tx_name))
# exons <- exons(an.txdb,columns=c("exon_name","tx_name","tx_type","gene_id"))
# exons <- as.data.frame(exons)
# head(exons)
# Exons grouped by transcript; group_name is the transcript name.
exons <- exonsBy(an.txdb,by=c("tx"),use.names=TRUE)
exons <- as.data.frame(exons)
exons <- data.frame(seqnames=as.character(exons$seqnames) ,start=as.integer(exons$start),
end=as.integer(exons$end),width=as.integer(exons$width),
strand=as.character(exons$strand),type="exon",
ID =as.character(exons$exon_name),biotype=NA,
gene_id =NA,Parent=as.character(exons$group_name),
transcript_id=as.character(exons$group_name))
# gene_id/biotype are filled from the parent transcript row.
index <- match(exons$Parent,rnas$transcript_id)
#which(is.na(index))
exons$gene_id <- rnas$Parent[index]
exons$biotype <- rnas$biotype[index]
#==================================
#CDS
cdss <- cdsBy(an.txdb,by=c("tx"),use.names=TRUE)
cdss <- as.data.frame(cdss)
cdss <- data.frame(seqnames=as.character(cdss$seqnames) ,start=as.integer(cdss$start),
end=as.integer(cdss$end),width=as.integer(cdss$width),
strand=as.character(cdss$strand),type="CDS",
ID =as.character(cdss$cds_name),biotype=NA,
gene_id =NA,Parent=as.character(cdss$group_name),
transcript_id=as.character(cdss$group_name))
index <- match(cdss$Parent,rnas$transcript_id)
#which(is.na(index))
cdss$gene_id <- rnas$Parent[index]
cdss$biotype <- rnas$biotype[index]
#head(cdss)
#cdss <- cds(an.txdb,columns=c("cds_name","tx_name","tx_type","gene_id"))
#==================================
#introns
introns <- intronsByTranscript(an.txdb,use.names=TRUE)
introns <- as.data.frame(introns)
introns <- data.frame(seqnames=as.character(introns$seqnames) ,start=as.integer(introns$start),
end=as.integer(introns$end),width=as.integer(introns$width),
strand=as.character(introns$strand),type="intron",
ID =NA,biotype=NA,
gene_id =NA,Parent=as.character(introns$group_name),
transcript_id=as.character(introns$group_name))
index <- match(introns$Parent,rnas$transcript_id)
#which(is.na(index))
introns$gene_id <- rnas$Parent[index]
introns$biotype <- rnas$biotype[index]
#head(introns)
#===================================================
#five UTR
fiveUTRs <- fiveUTRsByTranscript(an.txdb,use.names=TRUE)
fiveUTRs <- as.data.frame(fiveUTRs)
fiveUTRs <- data.frame(seqnames=as.character(fiveUTRs$seqnames) ,start=as.integer(fiveUTRs$start),
end=as.integer(fiveUTRs$end),width=as.integer(fiveUTRs$width),
strand=as.character(fiveUTRs$strand),type="five_prime_UTR",
ID =NA,biotype=NA,
gene_id =NA,Parent=as.character(fiveUTRs$group_name),
transcript_id=as.character(fiveUTRs$group_name))
index <- match(fiveUTRs$Parent,rnas$transcript_id)
#which(is.na(index))
fiveUTRs$gene_id <- rnas$Parent[index]
fiveUTRs$biotype <- rnas$biotype[index]
#head(fiveUTRs)
#===========================================
#three UTR
threeUTRs <- threeUTRsByTranscript(an.txdb,use.names=TRUE)
threeUTRs <- as.data.frame(threeUTRs)
threeUTRs <- data.frame(seqnames=as.character(threeUTRs$seqnames) ,start=as.integer(threeUTRs$start),
end=as.integer(threeUTRs$end),width=as.integer(threeUTRs$width),
strand=as.character(threeUTRs$strand),type="three_prime_UTR",
ID =NA,biotype=NA,
gene_id =NA,Parent=as.character(threeUTRs$group_name),
transcript_id=as.character(threeUTRs$group_name))
index <- match(threeUTRs$Parent,rnas$transcript_id)
#which(is.na(index))
threeUTRs$gene_id <- rnas$Parent[index]
threeUTRs$biotype <- rnas$biotype[index]
# anno.frame = all rows; anno.need = sub-gene features only; anno.rna = transcripts.
anno.frame <- rbind(genes,rnas,exons,cdss,introns,fiveUTRs,threeUTRs)
anno.frame$type <- factor(anno.frame$type,levels=c("gene","RNA","five_prime_UTR","exon","CDS","intron",
"three_prime_UTR"))
#anno.frame <- anno.frame[order(anno.frame$transcript_id,anno.frame$gene_id,
#                               anno.frame$start,anno.frame$strand,anno.frame$type),]
anno.need <- rbind(exons,cdss,introns,fiveUTRs,threeUTRs)
anno.rna <- rnas
return(list(anno.need=anno.need, anno.rna=anno.rna, anno.frame=anno.frame))
}
#' Parse gff3/gtf genome annotation
#'
#' parseGff parses genome annotation file of gff3/gtf format
#'
#' Due to the complex GFF3/GFF/GTF structure of different genome annotation files from different species,
#' this function may not always be applicable to a given file. You may need to check the result manually.
#' @usage parseGff(aGFF)
#' @param aGFF .gff3/.gff/.gtf file name
#' @return a parsed genome annotation object, which is a list of three elements (anno.rna, anno.need, anno.frame) and can be used for annotatePAC().
#' @examples
#' ## parse from a gff file, and save as .rda for further use.
#' gff=parseGff(aGFF='Bamboo.Hic.gff')
#' @name parseGff
#' @seealso [parseTxdb()] to parse a Txdb object.
#' @family genome annotation functions
#' @export
parseGff <- function(aGFF) {
  if (!is.character(aGFF)) stop("aGFF not a character string!")
  # BUG FIX: the original pattern '\\.gff3|\\.gtf' rejected plain .gff files,
  # although the branch below (and the error message) supports them.
  if (!grepl('\\.gff3|\\.gff|\\.gtf', tolower(aGFF))) stop('aGFF not .gff3/.gff/.gtf!')
  if (grepl('\\.gff3|\\.gff', tolower(aGFF))) {
    #------------------------------------------------------
    # Loading annotation (gff3 format)
    #-------------------------------------------------------
    gff.path <- aGFF
    anno <- import.gff3(gff.path)
    anno.frame <- as.data.frame(anno, stringsAsFactors = FALSE)
    anno.frame$seqnames <- as.character(anno.frame$seqnames)
    anno.frame$strand <- as.character(anno.frame$strand)
    anno.frame$type <- as.character(anno.frame$type)
    # Strip the "<type>:" prefix, e.g. "transcript:AT1G01010.1" -> "AT1G01010.1"
    anno.frame$Parent <- sub(pattern = "\\S+\\:", replacement = "", anno.frame$Parent)
    anno.frame$ID <- sub(pattern = "\\S+\\:", replacement = "", anno.frame$ID)
    # Drop chromosome rows; they are not gene-model features
    if (length(which(anno.frame$type == "chromosome"))) {
      anno.frame <- anno.frame[-which(anno.frame$type == "chromosome"), ]
    }
    # Rename "transcript" features to "RNA"
    anno.frame$type[which(anno.frame$type == "transcript")] <- "RNA"
    # RNA rows: mRNA/tRNA/... plus the renamed transcripts
    rna.id <- grep("RNA$", anno.frame$type, ignore.case = FALSE)
    anno.rna <- anno.frame[rna.id, ]
  } else if (grepl('\\.gtf', tolower(aGFF))) {
    gtf.path <- aGFF
    anno <- import(gtf.path, format = "gtf")
    anno.frame <- as.data.frame(anno, stringsAsFactors = FALSE)
    anno.frame$seqnames <- as.character(anno.frame$seqnames)
    anno.frame$strand <- as.character(anno.frame$strand)
    anno.frame$type <- as.character(anno.frame$type)
    anno.frame$Parent <- as.character(anno.frame$transcript_id)
    anno.frame$type[which(anno.frame$type == "transcript")] <- "RNA"
    # Collapsed a dead if/else here: the original re-tested for "transcript"
    # rows after they had already been renamed above, and both branches
    # computed the same rna.id.
    rna.id <- grep("RNA$", anno.frame$type, ignore.case = FALSE)
    if (length(rna.id) == 0) {
      # No RNA rows at all: rebuild one per transcript from its exon rows
      anno.frame <- add_rna(anno.frame)
      rna.id <- grep("RNA$", anno.frame$type, ignore.case = FALSE)
    }
    if (length(which(anno.frame$type == "gene")) == 0) {
      # No gene rows: duplicate the RNA rows as top-level gene rows
      anno.gene <- anno.frame[rna.id, ]
      anno.gene$type <- "gene"
      anno.gene$Parent <- ""
      anno.frame <- rbind(anno.frame, anno.gene)
    }
    anno.frame$ID <- anno.frame$Parent
    if (length(which(anno.frame$type == "chromosome"))) {
      anno.frame <- anno.frame[-which(anno.frame$type == "chromosome"), ]
    }
    anno.rna <- anno.frame[rna.id, ]
  } #~gtf
  # If the annotation is incomplete and lacks transcript_id, fall back to ID
  if (!length(which(colnames(anno.rna) == "transcript_id"))) {
    anno.rna$transcript_id <- anno.rna$ID
  }
  # Confirm that transcript names are consistent with the exon/cds/utr parents
  if (length(setdiff(anno.rna$transcript_id, anno.frame$Parent))) {
    stop("Not consistent between transcript id in rna and exon/cds/utr")
  }
  # Derive UTR rows from the CDS/exon coordinates when they are absent
  if (is.na(match("three_prime_UTR", unique(anno.frame$type)))) {
    if (is.na(match("CDS", unique(anno.frame$type)))) {
      warning("This annotation without CDS, we can't identify UTR region")
    } else {
      print("Extracting UTR region")
      anno.frame <- add_utr(anno.frame)
    }
  }
  #=========================================================================
  # anno.need stores the cds/exon/5utr/3utr rows
  anno.need <- anno.frame[which(anno.frame$Parent %in% anno.rna$transcript_id), ]
  need.rna.id <- grep("RNA$", anno.need$type, ignore.case = FALSE)
  if (length(need.rna.id)) {
    anno.need <- anno.need[-need.rna.id, ]
  }
  index <- match(anno.need$Parent, anno.rna$transcript_id)
  if (length(which(is.na(index)))) {
    stop("error can't find exon/cds/5utr/3utr 's parent")
  }
  anno.need$gene_id <- anno.rna$Parent[index]
  if (is.na(match("biotype", colnames(anno.rna)))) {
    anno.rna$biotype <- NA
  }
  anno.need$biotype <- anno.rna$biotype[index]
  #====================================================================
  # ann.intron stores the intron rows: the gaps between consecutive exons of
  # the same transcript.  Exons are sorted per transcript and then paired
  # (1st-2nd, 2nd-3rd, ...) through odd/even interleaving.
  exon.id <- grep("exon", anno.need$type, ignore.case = FALSE)
  ann.exon <- anno.need[exon.id, ]
  if (length(which(is.na(ann.exon$Parent)))) {
    print("exist some exon can't find parent id ")
  }
  ann.exon <- ann.exon[order(ann.exon$Parent, ann.exon$start, ann.exon$strand), ]
  ann.exon.1 <- ann.exon[seq(1, nrow(ann.exon), 2), ]
  ann.exon.2 <- ann.exon[seq(2, nrow(ann.exon), 2), ]
  ann.exon.3 <- ann.exon[seq(3, nrow(ann.exon), 2), ]
  # Introns between an odd-position exon and the following even-position exon
  keep.num1 <- min(nrow(ann.exon.1), nrow(ann.exon.2))
  ann.exon.k1 <- ann.exon.1[1:keep.num1, ]
  ann.exon.k2 <- ann.exon.2[1:keep.num1, ]
  index <- which(ann.exon.k1$Parent == ann.exon.k2$Parent)
  if (!identical(ann.exon.k1$Parent[index], ann.exon.k2$Parent[index])) {
    stop("something went wrong when extracting intron regions")  # was "extart"
  }
  ann.intron1 <- ann.exon.k1[index, ]
  ann.intron1$type <- "intron"
  ann.intron1$start <- ann.exon.k1$end[index] + 1
  ann.intron1$end <- ann.exon.k2$start[index] - 1
  # Introns between an even-position exon and the following odd-position exon
  keep.num2 <- min(nrow(ann.exon.2), nrow(ann.exon.3))
  ann.exon.kk2 <- ann.exon.2[1:keep.num2, ]
  ann.exon.k3 <- ann.exon.3[1:keep.num2, ]
  index <- which(ann.exon.kk2$Parent == ann.exon.k3$Parent)
  if (!identical(ann.exon.kk2$Parent[index], ann.exon.k3$Parent[index])) {
    stop("something went wrong when extracting intron regions")  # was "extart"
  }
  ann.intron2 <- ann.exon.kk2[index, ]
  ann.intron2$type <- "intron"
  ann.intron2$start <- ann.exon.kk2$end[index] + 1
  ann.intron2$end <- ann.exon.k3$start[index] - 1
  ann.intron <- rbind(ann.intron1, ann.intron2)
  ann.intron <- ann.intron[order(ann.intron$Parent, ann.intron$start, ann.intron$strand), ]
  anno.need <- rbind(anno.need, ann.intron)
  # Drop any RNA rows that slipped into anno.need
  rna.error <- grep("RNA$", anno.need$type, ignore.case = FALSE)
  if (length(rna.error)) {
    anno.need <- anno.need[-rna.error, ]
  }
  return(list(anno.need = anno.need, anno.rna = anno.rna, anno.frame = anno.frame))
}
#=========================================================
#------------------------------------------------------
#function:add_utr()
#Adding 3UTR and 5UTR region
#--------------------------------------------------------
#======================================================
#--------------------------------------------------------------------------
# add_utr(): derive five_prime_UTR / three_prime_UTR rows from the CDS and
# exon coordinates when the annotation lacks explicit UTR features.
# Candidate UTRs are the regions between the transcript boundary and the
# first/last CDS; they are then clipped to the exons they overlap.
# Note: "frist" (sic) in the variable names below means "first".
#--------------------------------------------------------------------------
add_utr <- function(anno.frame=NULL){
# Split the annotation into CDS, exon and RNA rows
anno.cds <- anno.frame[which(anno.frame$type=="CDS"),]
anno.exon <- anno.frame[which(anno.frame$type=="exon"),]
rna.id <- grep("RNA$",anno.frame$type,ignore.case = FALSE)
anno.rna <- anno.frame[rna.id,]
# Fall back to ID when the annotation has no transcript_id column
if(!length(which(colnames(anno.rna) == "transcript_id"))){
anno.rna$transcript_id<-anno.rna$ID
}
# Keep the first (leftmost) and last (rightmost) CDS of every transcript
anno.cds.frist <- anno.cds[order(anno.cds$Parent,anno.cds$start,anno.cds$strand,decreasing = FALSE),]
anno.cds.last <- anno.cds[order(anno.cds$Parent,anno.cds$start,anno.cds$strand,decreasing = TRUE),]
anno.cds.frist <- anno.cds.frist[!duplicated(anno.cds.frist$Parent),]
anno.cds.last <- anno.cds.last[!duplicated(anno.cds.last$Parent),]
index.frist <-match(anno.cds.frist$Parent,anno.rna$transcript_id)
index.last <-match(anno.cds.last$Parent,anno.rna$transcript_id)
if(length(which(is.na(c(index.frist,index.last))))){
stop("Can't find cds parent based on input annotation file ")
}
# Candidate leading UTR: transcript start .. first CDS start - 1
anno.cds.frist$utr.start <- anno.rna$start[index.frist]
anno.cds.frist$utr.end <- anno.cds.frist$start -1
anno.cds.frist <- anno.cds.frist[which( (anno.cds.frist$utr.end- anno.cds.frist$utr.start) >=0),]
# Candidate trailing UTR: last CDS end + 1 .. transcript end
anno.cds.last$utr.start <- anno.cds.last$end +1
anno.cds.last$utr.end <- anno.rna$end[index.last]
anno.cds.last <- anno.cds.last[which((anno.cds.last$utr.end- anno.cds.last$utr.start) >=0),]
# Build GRanges keyed by the transcript id (used as "seqnames"), so overlaps
# are only found between a candidate UTR and exons of the same transcript
gr.first <- GRanges(seqnames =as.character(anno.cds.frist$Parent) ,
ranges =IRanges(start=as.integer(anno.cds.frist$utr.start) ,
end=as.integer(anno.cds.frist$utr.end)),
strand =as.character(anno.cds.frist$strand))
gr.last <- GRanges(seqnames =as.character(anno.cds.last$Parent) ,
ranges =IRanges(start=as.integer(anno.cds.last$utr.start) ,
end=as.integer(anno.cds.last$utr.end)),
strand =as.character(anno.cds.last$strand))
gr.exon <- GRanges(seqnames =as.character(anno.exon$Parent) ,
ranges =IRanges(start=as.integer(anno.exon$start) ,
end=as.integer(anno.exon$end)),
strand =as.character(anno.exon$strand))
ov.first <- findOverlaps(gr.first,gr.exon)
ov.last <- findOverlaps(gr.last,gr.exon)
ov.first <- as.data.frame(ov.first)
ov.last <- as.data.frame(ov.last)
colnames(ov.first)<-c("cdsID","exonID")
colnames(ov.last) <- c("cdsID","exonID")
# Clip each leading UTR candidate to the overlapping exon:
# start at the exon start, end at min(candidate end, exon end)
ov.first$utr.start <- as.integer(anno.cds.frist$utr.start[ov.first$cdsID])
ov.first$utr.end <- as.integer(anno.cds.frist$utr.end[ov.first$cdsID])
ov.first$exon.start <- as.integer(anno.exon$start[ov.first$exonID])
ov.first$exon.end <- as.integer(anno.exon$end[ov.first$exonID])
ov.first$utr.start.r <- ov.first$exon.start
ov.first$utr.end.r <- apply(ov.first[,c("utr.end","exon.end")],1,min)
five.utr <- anno.exon[ov.first$exonID,]
five.utr$start <- ov.first$utr.start.r
five.utr$end <- ov.first$utr.end.r
if(nrow(five.utr)){
# The leading region is the 5' UTR on "+" transcripts, the 3' UTR on "-"
five.utr$type <- "five_prime_UTR"
five.utr$type[which(five.utr$strand=="-")] <- "three_prime_UTR"
}
# Clip each trailing UTR candidate to the overlapping exon:
# start at max(candidate start, exon start), end at the exon end
ov.last$utr.start <- as.integer(anno.cds.last$utr.start[ov.last$cdsID])
ov.last$utr.end <- as.integer(anno.cds.last$utr.end[ov.last$cdsID])
ov.last$exon.start <- as.integer(anno.exon$start[ov.last$exonID])
ov.last$exon.end <- as.integer(anno.exon$end[ov.last$exonID])
ov.last$utr.start.r <- apply(ov.last[,c("utr.start","exon.start")],1,max)
ov.last$utr.end.r <- ov.last$exon.end
three.utr <- anno.exon[ov.last$exonID,]
three.utr$start <- ov.last$utr.start.r
three.utr$end <- ov.last$utr.end.r
if(nrow(three.utr)){
# The trailing region is the 3' UTR on "+" transcripts, the 5' UTR on "-"
three.utr$type <- "three_prime_UTR"
three.utr$type[which(three.utr$strand=="-")] <- "five_prime_UTR"
}
utr <- rbind(three.utr,five.utr)
utr <- utr[order(utr$Parent,utr$type,utr$start),]
utr$width <- as.integer(utr$end-utr$start+1)
#-------------------------------------
#check result
# really.utr <- anno.frame[which(anno.frame$type %in% c("three_prime_UTR","five_prime_UTR")),]
# really.utr <- really.utr[order(really.utr$Parent,really.utr$type,really.utr$start),]
# length(unique(really.utr$Parent))
# length(unique(utr$Parent))
# identical(utr$start,really.utr$start)
# identical(utr$end,really.utr$end)
# identical(utr$strand,really.utr$strand)
# write.table(really.utr,file="really_utr.txt",col.names = TRUE,row.names = FALSE,sep="\t",
# quote=FALSE)
# write.table(utr,file="build_utr.txt",col.names = TRUE,row.names = FALSE,sep="\t",
# quote=FALSE)
# Append the derived UTR rows to the input annotation and return it
anno.frame <-rbind(anno.frame,utr)
return(anno.frame)
}
#=========================================================
#------------------------------------------------------
#function:add_rna()
#Adding RNA region
#--------------------------------------------------------
#======================================================
#--------------------------------------------------------------------------
# add_rna(): build one "mRNA" row per transcript from that transcript's exon
# rows (span = leftmost exon start .. rightmost exon end) and append the new
# rows to `anno.frame`.
#--------------------------------------------------------------------------
add_rna <- function(anno.frame=NULL){
  exon.rows <- anno.frame[which(anno.frame$type == "exon"), ]
  # Sort exons by gene, transcript, strand and ascending start position
  exon.asc <- exon.rows[order(exon.rows$gene_id, exon.rows$transcript_id,
                              exon.rows$strand, exon.rows$start,
                              decreasing = FALSE), ]
  exon.desc <- exon.asc[nrow(exon.asc):1, ]
  # First occurrence per transcript in ascending order = leftmost exon;
  # first occurrence in descending order = rightmost exon
  first.exon <- exon.asc[!duplicated(exon.asc$transcript_id), ]
  last.exon <- exon.desc[!duplicated(exon.desc$transcript_id), ]
  rna.rows <- first.exon
  # The transcript span ends where its rightmost exon ends
  rna.rows$end <- last.exon$end[match(rna.rows$transcript_id,
                                      last.exon$transcript_id)]
  rna.rows$Parent <- rna.rows$gene_id
  rna.rows$type <- "mRNA"
  rbind(anno.frame, rna.rows)
}
|
# Rejection sampling: draw N(0, 1) variates using a Laplace(0, 1) proposal.
# Panels: proposal vs target density, the density ratio, the scaled envelope,
# and a histogram of the accepted draws.
library(rmutil)
x <- seq(-2, 20, 0.01)
par(mfrow = c(2, 2))
lap.dens <- dlaplace(x, 0, 1)
norm.dens <- dnorm(x, 0, 1)
plot(x, lap.dens, type = "l")
lines(x, norm.dens, col = "red")
plot(x, norm.dens / lap.dens, type = "l")
# Envelope constant: the largest target/proposal ratio on the grid
M <- max(norm.dens / lap.dens)
envelope <- M * dlaplace(x, 0, 1)
plot(x, envelope, type = "l")
lines(x, norm.dens, col = "red")
n <- 1000000
ll <- rlaplace(n, 0, 1)
u <- runif(n)
# Vectorised accept/reject: keep proposals above -2 whose scaled envelope
# draw falls under the target density (same rule as the original loop,
# applied to the same ll/u draws, so the accepted values are identical).
accept <- ll > -2 & u * M * dlaplace(ll, 0, 1) <= dnorm(ll, 0, 1)
counter <- sum(accept)
g <- rep(0, n)
g[seq_len(counter)] <- ll[accept]
hist(g[1:counter], breaks = 100)
| /Random Variable generation R/2,2.R | no_license | sharpblade95/University-Projects | R | false | false | 489 | r |
# Rejection sampling demo: generate N(0, 1) draws from a Laplace(0, 1)
# proposal.  rmutil provides dlaplace()/rlaplace().
library(rmutil)
x <- seq(-2,20,0.01)
par(mfrow=c(2,2))
# Proposal (Laplace) and target (normal) densities on the grid
xc=dlaplace(x,0,1)
xn=dnorm(x,0,1)
plot(x,xc,type="l")
lines(x,xn,col="red")
# Ratio target/proposal; its maximum over the grid is the envelope constant M
plot(x,xn/xc,type="l")
M=max(xn/xc)
# Scaled proposal density: dominates the target on the grid
nxl=M*dlaplace(x,0,1)
plot(x,nxl,type="l")
lines(x,xn,col="red")
n=1000000
# Proposal draws and uniforms for the accept/reject step
ll=rlaplace(n,0,1)
u=runif(n)
g=rep(0,n)
counter=0
# Accept ll[i] when u[i] * M * g(ll[i]) <= f(ll[i]); draws below -2 are
# discarded, matching the grid used to compute M above
for ( i in 1:n)
{
if(ll[i]>-2){
if(u[i]*M*dlaplace(ll[i],0,1)<=dnorm(ll[i],0,1))
{
counter=counter+1
g[counter]=ll[i]
} }
}
# Histogram of the accepted draws (should look standard normal)
hist(g[1:counter],breaks= 100)
|
#############################################################################################################
### Calculate the percentage of area under drought conditions within any polygon (county, watershed, ..) ###
### provided in the shapfile format using U.S. Drought Monitor Weekly Data. ###
### SNAPP working group Ecological Drought - https://www.nceas.ucsb.edu/projects/12703 ###
### ###
### Created on: Feb 3, 2016 ###
### Authors: Gabriel Antunes Daldegan (gdaldegan@nceas.ucsb.edu), Ian McCullough (immccull@gmail.com) ###
### Julien Brun (brun@nceas.ucsb.edu) ###
### Contact: scicomp@nceas.ucsb.edu ###
#############################################################################################################
### Load necessary R packages ####
library(rgeos) # Display of maps
library(raster) # GIS operations
library(dplyr) # table manipulations
# Multiprocessing
library(doParallel)
library(foreach)
# Access the weekly drought shapefile download script (located in your working directory)
source('drought_monitoring_download_unzip_plot.R')
#### CONSTANTS ####
## Multiprocessing cores
# best to leave empty arguments; by default, the number of cores used for parallel
# execution is 1/2 the number of detected cores (if number is unspecified)
registerDoParallel()
## Set working directory
main_path <- "/Users/brun/GitHub/gitSNAPP/ecological-drought"
setwd(main_path)
## Input files
# Path to the admin shapefile used to extract percent area under various drought classes
# NOTE(review): the next assignment is immediately overwritten by the
# hard-coded path below; keep whichever applies to your machine.
admin_path <- main_path
admin_path <- "/Users/brun/Data/Tiger"
# Full path and filename
# NOTE(review): `extract_shpname` is not defined in this script; presumably it
# comes from the sourced drought_monitoring_download_unzip_plot.R -- verify.
admin_shp <- file.path(admin_path,extract_shpname)
# Output directory
output_directory <- file.path(main_path,'output')
## Projection system used for the intersect, here NAD 1983 Albers Equal Area
NAD83_PROJ <- "+proj=aea +lat_1=20 +lat_2=60 +lat_0=40 +lon_0=-96 +x_0=0 +y_0=0 +datum=NAD83 +units=m +no_defs"
## Unique identifier of the polygons of interest (here US States)
ugeoid <- "GEOID"
## Years to download
YEAR_START <- 2016 # earliest available year = 2000
YEAR_END <- 2016 # if the current year is selected, all available data to date are downloaded
## Processing options
# If you want to download the file, set it to TRUE
download_status <- TRUE
# If you want to overwrite the file when unzipping, set it to TRUE
overwrite_status <- TRUE
# If you want to plot the shapefile, set it to TRUE
plotting_status <- FALSE
#### FUNCTIONS ####
#' Read a shapefile and reproject it to the provided coordinate system
#'
#' @param shapefile_folder path to the shapefile to read
#' @param proj4_string proj4 string describing the target coordinate system
#' @return the reprojected Spatial* object
#' @examples
#'
reproject_shapefile_dir <- function(shapefile_folder, proj4_string) {
  shp <- raster::shapefile(shapefile_folder)
  # BUG FIX: use the caller-supplied projection; the original ignored
  # `proj4_string` and always reprojected to the global NAD83_PROJ.
  # (All callers in this script pass NAD83_PROJ, so behaviour is unchanged.)
  shp83 <- spTransform(shp, CRS(proj4_string))
  return(shp83)
}
# Function to calculate percent drought area within specified administrative boundaries
#' drought_area
#'
#' @param admin_shp A spatial dataframe
#' @param drought_direc A character
#' @return A dataframe containing the yearly time-series
#' @examples
#'
drought_area <- function(admin_shp, drought_direc) {
## DEFINITION of ARGUMENTS
#admin_shp = single shapefile of administrative boundaries (e.g., US states, counties)
#drought_direc = directory containing time-series of drought area shapefiles
# NOTE(review): `admin_shp` is never used inside this function; the code reads
# the global `admin_shp_prj` (and `ugeoid`) built in the MAIN section instead
# -- confirm before reusing this function outside this script.
# List the shapefiles for a specific year
drought_list <- list.files(drought_direc, pattern='.shp$')
## Create the output dataframe to store the drought area (pct) time-series
# Drought categories, following the U.S. Drought Monitor classification scheme (http://droughtmonitor.unl.edu/AboutUs/ClassificationScheme.aspx)
# Coding used: 0 = D0; 1 = D1; 2 = D2; 3 = D3; 4 = D4 and 10 = No drought
DroughtClass = c(0:4,10)
# All admin units
geoids <- unique(admin_shp_prj@data[,ugeoid])
# Combination of all the options => fix the problem of missing info when there is no drought in certain areas
drought_ts <- expand.grid(GEOID=geoids,DM=DroughtClass) #expand.grid creates data frame from all combinations of factors
drought_ts <- left_join(drought_ts,admin_shp_prj@data, by=c(ugeoid))
# One column of percentages per weekly shapefile, combined with cbind; the
# last expression of the foreach body (drought_week) is the per-week result
# for (shp in drought_list[1:length(drought_list)]) {
drought_year <- foreach(shp=drought_list[1:length(drought_list)],.combine='cbind',.inorder = TRUE) %dopar% {
## READ AND REPROJECT THE WEEKLY DROUGHT SHAPEFILES (from the containing directory)
shape_weekly_drought_NAlbers <- reproject_shapefile_dir(file.path(drought_direc,shp),NAD83_PROJ)
## Intersect shapefiles (admin shapefile, drought shapefile)
inter.drought <- raster::intersect(admin_shp_prj,shape_weekly_drought_NAlbers)
## Compute Area
# Calculate areas from intersected polygons, then append as attribute
inter.drought@data$Area_km2 <- gArea(inter.drought, byid = TRUE) / 1e6 #1e6 to convert sq m to sq km
## Compute the total drought area by admin units and drought level
# (note: this local data frame shadows the function name drought_area)
drought_area <- inter.drought@data %>%
group_by(GEOID,DM) %>%
summarise(DroughtArea_km2=sum(Area_km2))
# Add the Drought Area
drought_week <- left_join(drought_ts,drought_area, by=c(ugeoid, 'DM'))
# Set the drought category with no area to 0
drought_week[(drought_week$DM<10)&(is.na(drought_week$DroughtArea_km2)),"DroughtArea_km2"] <- 0
# Compute the No Drought area per admin unit
no_drought_area <- drought_week %>%
group_by(GEOID) %>%
summarise(No_DroughtArea_km2 = (mean(AreaUnit_km2) - sum(DroughtArea_km2, na.rm=T)))
#join the no drought area
drought_week <- left_join(drought_week,no_drought_area,by=c(ugeoid))
## Assign the No drought value and compute the percentage area
drought_week <- mutate(drought_week, DroughtArea_p = ifelse(is.na(DroughtArea_km2),
round(100*No_DroughtArea_km2/AreaUnit_km2),
round(100*DroughtArea_km2/AreaUnit_km2))) %>%
# select(-DroughtArea_km2,-No_DroughtArea_km2)
select(DroughtArea_p)
# Rename the column with the filename containing the date
names(drought_week)[names(drought_week)=="DroughtArea_p"] <- substr(shp,1,(nchar(shp)-4))
drought_week
}
return(cbind(drought_ts,drought_year))
}
#### MAIN ####
### DOWNLOAD THE FILES ####
if (download_status | overwrite_status) {
# Loop through the year of interest
for (year in YEAR_START:YEAR_END){
## Getting all the shapefiles for a year into a list of
myshapefile_list <- yearlyimport(year,main_path,download_status,plotting_status)
## Plotting all the shapefiles
if (plotting_status) {
yearlyplots(myshapefile_list)
}
}
}
print("All the files have been downloaded and unzipped")
### COMPUTE THE DROUGHT LEVELS RELATIVE AREA TIME-SERIES####
## Load and Reproject the shapefile used to extract the drought information
admin_shp_prj <- reproject_shapefile_dir(admin_shp, NAD83_PROJ)
## Calculate area for the admin shapefiles in km2
admin_shp_prj@data$AreaUnit_km2 <- gArea(admin_shp_prj, byid = TRUE)/1e6
## Create the output directory
dir.create(output_directory, showWarnings = FALSE)
## Compute the percentage area under drought conditions
for (y in YEAR_START:YEAR_END) {
# Directory containing the drought shapefiles for a particular year
year_path <- file.path(main_path, y, 'SHP')
# Compute the percentage area for the different drought classes
# NOTE(review): `admin_shp` passed here is a file path; drought_area()
# ignores the argument and uses the global `admin_shp_prj` created above.
yearly_drought = drought_area(admin_shp = admin_shp, drought_direc = year_path)
# Write the output file
filename <- paste0(output_directory,'/USAdrought', y, '.csv')
write.csv(yearly_drought, file=filename,row.names =FALSE)
}
print("Drought levels relative area have been computed for all years")
| /drought-monitoring-time-series/intersect_shapefiles.R | permissive | alessiobocco/ecological-drought | R | false | false | 8,147 | r | #############################################################################################################
### Calculate the percentage of area under drought conditions within any polygon (county, watershed, ..) ###
### provided in the shapfile format using U.S. Drought Monitor Weekly Data. ###
### SNAPP working group Ecological Drought - https://www.nceas.ucsb.edu/projects/12703 ###
### ###
### Created on: Feb 3, 2016 ###
### Authors: Gabriel Antunes Daldegan (gdaldegan@nceas.ucsb.edu), Ian McCullough (immccull@gmail.com) ###
### Julien Brun (brun@nceas.ucsb.edu) ###
### Contact: scicomp@nceas.ucsb.edu ###
#############################################################################################################
### Load necessary R packages ####
library(rgeos) # Display of maps
library(raster) # GIS operations
library(dplyr) # table manipulations
# Multiprocessing
library(doParallel)
library(foreach)
# Access the weekly drought shapefile download script (located in your working directory)
source('drought_monitoring_download_unzip_plot.R')
#### CONSTANTS ####
## Multiprocessing cores
# best to leave empty arguments; by default, the number of cores used for parallel
# execution is 1/2 the number of detected cores (if number is unspecified)
registerDoParallel()
## Set working directory
main_path <- "/Users/brun/GitHub/gitSNAPP/ecological-drought"
setwd(main_path)
## Input files
# Path to the admin shapefile used to extract percent area under various drought classes
# NOTE(review): the next assignment is immediately overwritten by the
# hard-coded path below; keep whichever applies to your machine.
admin_path <- main_path
admin_path <- "/Users/brun/Data/Tiger"
# Full path and filename
# NOTE(review): `extract_shpname` is not defined in this script; presumably it
# comes from the sourced drought_monitoring_download_unzip_plot.R -- verify.
admin_shp <- file.path(admin_path,extract_shpname)
# Output directory
output_directory <- file.path(main_path,'output')
## Projection system used for the intersect, here NAD 1983 Albers Equal Area
NAD83_PROJ <- "+proj=aea +lat_1=20 +lat_2=60 +lat_0=40 +lon_0=-96 +x_0=0 +y_0=0 +datum=NAD83 +units=m +no_defs"
## Unique identifier of the polygons of interest (here US States)
ugeoid <- "GEOID"
## Years to download
YEAR_START <- 2016 # earliest available year = 2000
YEAR_END <- 2016 # if the current year is selected, all available data to date are downloaded
## Processing options
# If you want to download the file, set it to TRUE
download_status <- TRUE
# If you want to overwrite the file when unzipping, set it to TRUE
overwrite_status <- TRUE
# If you want to plot the shapefile, set it to TRUE
plotting_status <- FALSE
#### FUNCTIONS ####
#' Read a shapefile and reproject it to the provided coordinate system
#'
#' @param shapefile_folder path to the shapefile to read
#' @param proj4_string proj4 string describing the target coordinate system
#' @return the reprojected Spatial* object
#' @examples
#'
reproject_shapefile_dir <- function(shapefile_folder, proj4_string) {
  shp <- raster::shapefile(shapefile_folder)
  # BUG FIX: use the caller-supplied projection; the original ignored
  # `proj4_string` and always reprojected to the global NAD83_PROJ.
  # (All callers in this script pass NAD83_PROJ, so behaviour is unchanged.)
  shp83 <- spTransform(shp, CRS(proj4_string))
  return(shp83)
}
# Function to calculate percent drought area within specified administrative boundaries
#' drought_area
#'
#' @param admin_shp A spatial dataframe
#' @param drought_direc A character
#' @return A dataframe containing the yearly time-series
#' @examples
#'
drought_area <- function(admin_shp, drought_direc) {
## DEFINITION of ARGUMENTS
#admin_shp = single shapefile of administrative boundaries (e.g., US states, counties)
#drought_direc = directory containing time-series of drought area shapefiles
# NOTE(review): `admin_shp` is never used inside this function; the code reads
# the global `admin_shp_prj` (and `ugeoid`) built in the MAIN section instead
# -- confirm before reusing this function outside this script.
# List the shapefiles for a specific year
drought_list <- list.files(drought_direc, pattern='.shp$')
## Create the output dataframe to store the drought area (pct) time-series
# Drought categories, following the U.S. Drought Monitor classification scheme (http://droughtmonitor.unl.edu/AboutUs/ClassificationScheme.aspx)
# Coding used: 0 = D0; 1 = D1; 2 = D2; 3 = D3; 4 = D4 and 10 = No drought
DroughtClass = c(0:4,10)
# All admin units
geoids <- unique(admin_shp_prj@data[,ugeoid])
# Combination of all the options => fix the problem of missing info when there is no drought in certain areas
drought_ts <- expand.grid(GEOID=geoids,DM=DroughtClass) #expand.grid creates data frame from all combinations of factors
drought_ts <- left_join(drought_ts,admin_shp_prj@data, by=c(ugeoid))
# One column of percentages per weekly shapefile, combined with cbind; the
# last expression of the foreach body (drought_week) is the per-week result
# for (shp in drought_list[1:length(drought_list)]) {
drought_year <- foreach(shp=drought_list[1:length(drought_list)],.combine='cbind',.inorder = TRUE) %dopar% {
## READ AND REPROJECT THE WEEKLY DROUGHT SHAPEFILES (from the containing directory)
shape_weekly_drought_NAlbers <- reproject_shapefile_dir(file.path(drought_direc,shp),NAD83_PROJ)
## Intersect shapefiles (admin shapefile, drought shapefile)
inter.drought <- raster::intersect(admin_shp_prj,shape_weekly_drought_NAlbers)
## Compute Area
# Calculate areas from intersected polygons, then append as attribute
inter.drought@data$Area_km2 <- gArea(inter.drought, byid = TRUE) / 1e6 #1e6 to convert sq m to sq km
## Compute the total drought area by admin units and drought level
# (note: this local data frame shadows the function name drought_area)
drought_area <- inter.drought@data %>%
group_by(GEOID,DM) %>%
summarise(DroughtArea_km2=sum(Area_km2))
# Add the Drought Area
drought_week <- left_join(drought_ts,drought_area, by=c(ugeoid, 'DM'))
# Set the drought category with no area to 0
drought_week[(drought_week$DM<10)&(is.na(drought_week$DroughtArea_km2)),"DroughtArea_km2"] <- 0
# Compute the No Drought area per admin unit
no_drought_area <- drought_week %>%
group_by(GEOID) %>%
summarise(No_DroughtArea_km2 = (mean(AreaUnit_km2) - sum(DroughtArea_km2, na.rm=T)))
#join the no drought area
drought_week <- left_join(drought_week,no_drought_area,by=c(ugeoid))
## Assign the No drought value and compute the percentage area
drought_week <- mutate(drought_week, DroughtArea_p = ifelse(is.na(DroughtArea_km2),
round(100*No_DroughtArea_km2/AreaUnit_km2),
round(100*DroughtArea_km2/AreaUnit_km2))) %>%
# select(-DroughtArea_km2,-No_DroughtArea_km2)
select(DroughtArea_p)
# Rename the column with the filename containing the date
names(drought_week)[names(drought_week)=="DroughtArea_p"] <- substr(shp,1,(nchar(shp)-4))
drought_week
}
return(cbind(drought_ts,drought_year))
}
#### MAIN ####
### DOWNLOAD THE FILES ####
if (download_status | overwrite_status) {
# Loop through the year of interest
for (year in YEAR_START:YEAR_END){
## Getting all the shapefiles for a year into a list of
myshapefile_list <- yearlyimport(year,main_path,download_status,plotting_status)
## Plotting all the shapefiles
if (plotting_status) {
yearlyplots(myshapefile_list)
}
}
}
print("All the files have been downloaded and unzipped")
### COMPUTE THE DROUGHT LEVELS RELATIVE AREA TIME-SERIES####
## Load and Reproject the shapefile used to extract the drought information
admin_shp_prj <- reproject_shapefile_dir(admin_shp, NAD83_PROJ)
## Calculate area for the admin shapefiles in km2
admin_shp_prj@data$AreaUnit_km2 <- gArea(admin_shp_prj, byid = TRUE)/1e6
## Create the output directory
dir.create(output_directory, showWarnings = FALSE)
## Compute the percentage area under drought conditions
for (y in YEAR_START:YEAR_END) {
# Directory containing the drought shapefiles for a particular year
year_path <- file.path(main_path, y, 'SHP')
# Compute the percentage area for the different drought classes
# NOTE(review): `admin_shp` passed here is a file path; drought_area()
# ignores the argument and uses the global `admin_shp_prj` created above.
yearly_drought = drought_area(admin_shp = admin_shp, drought_direc = year_path)
# Write the output file
filename <- paste0(output_directory,'/USAdrought', y, '.csv')
write.csv(yearly_drought, file=filename,row.names =FALSE)
}
print("Drought levels relative area have been computed for all years")
|
##
## Model selection using orthogonal data augmentation following Ghosh and Clyde: "Rao-blackwellization for Bayesian Variable Selection and Model Averaging in a Linear and Binary Regression: A Novel Data Augmentation Approach
##
rm(list = ls())  # NOTE(review): clears the whole workspace when sourced
set.seed(101)    # fixed seed so the simulation is reproducible
##
## libraries and subroutines
##
source('~/1dSpatialSim/functions/rMVN.R')
## simulate the data
source('~/1dSpatialSim/functions/make.spatial.field.R')
## load the ODA mcmc code
source('~/1dSpatialSim/modelAveraging/mcmc.pcaModelAveraging.spatial.R')
## code for plotting the output
source('~/1dSpatialSim/plots/make.output.plot.ci.R')
library(statmod)
library(mvtnorm)
##
## simulate the data
##
m <- 1000 # number of spatial locations
locs <- seq(0, 1, , m) # spatial coordinate
X <- cbind(rep(1, m), locs)  # design matrix: intercept + coordinate
reps <- 20 # number of spatial fields
beta <- c(0, 2) # beta
s2.s <- 1    # spatial covariance parameter, passed with phi to make.spatial.field
phi <- 0.25  # spatial covariance parameter (presumably the range) -- see make.spatial.field.R
s2.e <- 0.1  # error parameter passed to make.spatial.field
samp.size <- 5:40  # range of sample sizes passed to make.spatial.field
# Centre each column of X and scale it by its population (1/n) standard
# deviation.  Returns the transformed matrix together with a p x 2 matrix
# holding the centre (col 1) and scale (col 2) of each column, so the same
# transformation can be applied to new data.
scale.predictor <- function(X){
  n.obs <- dim(X)[1]
  n.var <- dim(X)[2]
  shift.scale <- matrix(nrow = n.var, ncol = 2)
  X.std <- X
  for (j in seq_len(n.var)) {
    col.centre <- mean(X[, j])
    # sqrt((n - 1) / n) converts the sample sd to the population sd
    col.spread <- sqrt((n.obs - 1) / n.obs) * sd(X[, j])
    shift.scale[j, ] <- c(col.centre, col.spread)
    X.std[, j] <- (X[, j] - col.centre) / col.spread
  }
  list(X = X.std, scale = shift.scale)
}
field <- make.spatial.field(reps, X, beta, locs, c(s2.s, phi), method = 'exponential', s2.e, samp.size)
D <- as.matrix(dist(locs))
layout(matrix(1:2, ncol = 2))
plot.Y.field(field$Y.list[1:(reps / 2)], field$H.list[1:(reps / 2)], locs)
plot.Z.field(field$Z.list[(reps / 2 + 1):reps], locs, main = 'Full Data')
Y.list <- field$Y.list[1:(reps / 2)]
H.list <- field$H.list[1:(reps / 2)]
Z.list.hist <- field$Z.list[1:(reps / 2)]
Z.list.pca <- field$Z.list[(reps / 2 + 1):reps]
X <- matrix(unlist(Z.list.pca), ncol = reps / 2, byrow = FALSE)
X.new <- matrix(unlist(Z.list.hist), ncol = reps / 2, byrow = FALSE)
scaled <- scale.predictor(X)
X.o <- scaled$X ## no intercept
# X.o <- cbind(rep(1, m), scaled$X)
# X.o <- cbind(1:m, scaled$X)
# X.o <- cbind(rep(1, m), (1:m - mean(1:m)) / (sqrt(m / (m - 1)) * sd(1:m)), scaled$X)
p <- dim(X.o)[2]
matplot(X, type = 'l')
matplot(X.o, type = 'l')
# X.pred <- X.new
# for(i in 1:(reps / 2)){
# X.pred[, i] <- (X.new[, i] - scaled$scale[i, 1]) / scaled$scale[i, 2]
# }
# D <- diag(rep(max(eigen(t(X.o) %*% X.o)$values), dim(X.o)[2])) + 0.0001
# X.a <- chol(D - t(X.o) %*% X.o)
# X.c <- rbind(X.o, X.a)
# t(X.c) %*% X.c
##
## Initialize priors and tuning paramteters
##
alpha <- 2
pi.prior <- rep( 1 / 2, p)
epsilon = 0.001
n.mcmc <- 5000 #50000
# lambda <- c(0, rep(1, p))
lambda <- rep(1, p)
n.burn <- n.mcmc / 5
alpha.eta <- 1
beta.eta <- 1
phi.lower <- 0.01
phi.upper <- 100
# params <- list('vector')
params <- list(n.mcmc = n.mcmc, alpha = alpha, pi.prior = pi.prior, lambda = lambda, alpha.eta = alpha.eta, beta.eta = beta.eta, phi.lower = phi.lower, phi.upper = phi.upper, D = D)
sigma.tune <- 1
phi.tune <- 1
sigma.eta.tune <- 50
gamma.tune <- 0.025
tune <- list(phi.tune = phi.tune, sigma.eta.tune = sigma.eta.tune, gamma.tune = gamma.tune)
# tune <- list(sigma.tune = sigma.tune, phi.tune = phi.tune, sigma.eta.tune = sigma.eta.tune)
##
## fit mcmc using ODA model
##
# X.pca <- prcomp(X.new, center = TRUE, scale. = TRUE, retx = TRUE)$x
#
# pca.scale <- scale.predictor(X.pca)
# X.pca.scale <- pca.scale$X
# matplot(X.pca.scale, type = 'l')
out <- mcmc.pcaMA(Y.list = Y.list, X.o = X.o, H.list = H.list, params = params, tune = tune)
## Rao-blackwell estimates
#
# beta.fit <- matrix(nrow = p, ncol = reps / 2)
# for(i in 1:(reps / 2)){
# for(j in 1:p){
# # beta.fit[j, i] <- apply(
# beta.fit[j, i] <- mean(out$rho.save[j, i, ] * out$delta.save[i] / (out$delta.save[i] + lambda[j]) * out$beta.save[j, i, ])
# #, 1, mean)
# }
# }
#
# X.pca <- prcomp(X.o)$x
# Y.pred <- matrix(nrow = m, ncol = reps / 2)
# for(i in 1:(reps / 2)){
# Y.pred[, i] <- X.pca %*% beta.fit[, i]
# # Y.pred[, i] <- X.pca.scale %*% beta.fit[, i]
# }
#
# matplot(Y.pred, type = 'l')
# matplot((Y.pred - X.new)^2, type = 'l')
# ## mean square prediction error
# MSPE.RB <- mean((Y.pred - X.new)^2)
# MSPE.RB
# # log.score <- mean(out$log.score.save[(n.burn + 1):n.mcmc])
#
## Posterior mean prediction for each held-out field (average over MCMC draws)
out.Y.pred <- matrix(nrow = m, ncol = (reps / 2))
for(i in 1:(reps / 2)){
out.Y.pred[, i] <- apply(out$Y.pred[, i, ], 1, mean)
}
## Predicted fields and pointwise squared errors against the held-out data X.new
matplot(out.Y.pred, type = 'l')
matplot((out.Y.pred - X.new)^2, type = 'l')
## Mean squared prediction error over all locations and held-out fields
MSPE <- mean((out.Y.pred - X.new)^2)
MSPE
## MCMC diagnostics: acceptance rates and trace plots
out$gamma.accept
matplot(out$sigma.squared.save, type = 'l')
matplot(out$sigma.squared.eta.save, type = 'l', main = round(out$eta.accept, digits = 4))
matplot(out$phi.save, type = 'l', main = round(out$phi.accept, digits = 4))
| /modelAveraging/pcaModelAveragingSpatial.R | no_license | jtipton25/1dSpatialSim | R | false | false | 4,614 | r | ##
## Model selection using orthogonal data augmentation following Ghosh and Clyde: "Rao-blackwellization for Bayesian Variable Selection and Model Averaging in a Linear and Binary Regression: A Novel Data Augmentation Approach
##
## NOTE(review): rm(list = ls()) wipes the user's workspace when this script is
## sourced -- generally discouraged; confirm it is intentional.
rm(list = ls())
set.seed(101)
##
## libraries and subroutines
##
source('~/1dSpatialSim/functions/rMVN.R')
## simulate the data
source('~/1dSpatialSim/functions/make.spatial.field.R')
## load the ODA mcmc code
source('~/1dSpatialSim/modelAveraging/mcmc.pcaModelAveraging.spatial.R')
## code for plotting the output
source('~/1dSpatialSim/plots/make.output.plot.ci.R')
library(statmod)
library(mvtnorm)
##
## simulate the data
##
m <- 1000 # number of spatial locations
locs <- seq(0, 1, , m) # spatial coordinate
## Design matrix: intercept plus the spatial coordinate
X <- cbind(rep(1, m), locs)
reps <- 20 # number of spatial fields
beta <- c(0, 2) # beta
s2.s <- 1 # presumably the spatial (partial sill) variance -- confirm in make.spatial.field()
phi <- 0.25 # range parameter of the exponential covariance used below
s2.e <- 0.1 # presumably the nugget / measurement-error variance -- confirm
samp.size <- 5:40 # sample sizes passed to make.spatial.field() -- confirm semantics there
## Center and scale each column of a predictor matrix.
##
## Each column is centered by its mean and divided by its POPULATION standard
## deviation: sqrt((n - 1) / n) * sd(x) rescales R's sample sd to the
## population sd, so each standardized column has mean 0 and t(x) %*% x == n.
##
## Args:
##   X: numeric matrix (n observations x p predictors).
##
## Returns:
##   A list with components:
##     X:     the column-standardized matrix (same dimensions as X).
##     scale: a p x 2 matrix; column 1 = column means, column 2 = population
##            standard deviations (so the transform can be applied to new data).
scale.predictor <- function(X) {
  n <- nrow(X)
  ## Column means and population standard deviations, computed once
  ## (the original recomputed them inside a 1:p loop, which also breaks
  ## when p == 0; the vectorized form handles that edge case cleanly).
  ctr <- unname(colMeans(X))
  scl <- unname(apply(X, 2, sd)) * sqrt((n - 1) / n)
  ## Standardize all columns in one pass: subtract means, then divide by sds
  X.std <- sweep(sweep(X, 2, ctr, "-"), 2, scl, "/")
  list(X = X.std, scale = matrix(c(ctr, scl), ncol = 2))
}
## Simulate `reps` spatial fields with an exponential covariance
field <- make.spatial.field(reps, X, beta, locs, c(s2.s, phi), method = 'exponential', s2.e, samp.size)
## Pairwise distance matrix among locations (passed to the sampler via params$D)
D <- as.matrix(dist(locs))
layout(matrix(1:2, ncol = 2))
plot.Y.field(field$Y.list[1:(reps / 2)], field$H.list[1:(reps / 2)], locs)
plot.Z.field(field$Z.list[(reps / 2 + 1):reps], locs, main = 'Full Data')
## First half of the fields: observations (Y) and sampling matrices (H);
## these are the data the model is fit to
Y.list <- field$Y.list[1:(reps / 2)]
H.list <- field$H.list[1:(reps / 2)]
## Split the full fields: first half held out ("hist"), second half used as predictors
Z.list.hist <- field$Z.list[1:(reps / 2)]
Z.list.pca <- field$Z.list[(reps / 2 + 1):reps]
## NOTE(review): this overwrites the design matrix X created above -- confirm
## X is no longer needed in its original form past this point.
X <- matrix(unlist(Z.list.pca), ncol = reps / 2, byrow = FALSE)
X.new <- matrix(unlist(Z.list.hist), ncol = reps / 2, byrow = FALSE)
## Standardize predictor fields column-wise (see scale.predictor above)
scaled <- scale.predictor(X)
X.o <- scaled$X ## no intercept
# X.o <- cbind(rep(1, m), scaled$X)
# X.o <- cbind(1:m, scaled$X)
# X.o <- cbind(rep(1, m), (1:m - mean(1:m)) / (sqrt(m / (m - 1)) * sd(1:m)), scaled$X)
## Number of candidate predictors = columns of the (scaled) design matrix X.o
p <- dim(X.o)[2]
## Visual check: raw vs. scaled predictor fields plotted as lines
matplot(X, type = 'l')
matplot(X.o, type = 'l')
# X.pred <- X.new
# for(i in 1:(reps / 2)){
# X.pred[, i] <- (X.new[, i] - scaled$scale[i, 1]) / scaled$scale[i, 2]
# }
# D <- diag(rep(max(eigen(t(X.o) %*% X.o)$values), dim(X.o)[2])) + 0.0001
# X.a <- chol(D - t(X.o) %*% X.o)
# X.c <- rbind(X.o, X.a)
# t(X.c) %*% X.c
##
## Initialize priors and tuning parameters
##
alpha <- 2
## Prior inclusion probability of 1/2 for each of the p predictors
pi.prior <- rep( 1 / 2, p)
## NOTE(review): `epsilon` uses `=` rather than `<-` and is not referenced in
## the visible code -- confirm whether it is consumed inside mcmc.pcaMA().
epsilon = 0.001
n.mcmc <- 5000 #50000
# lambda <- c(0, rep(1, p))
lambda <- rep(1, p)
## Discard the first 20% of MCMC iterations as burn-in
n.burn <- n.mcmc / 5
alpha.eta <- 1
beta.eta <- 1
## Bounds for the spatial range parameter phi
phi.lower <- 0.01
phi.upper <- 100
# params <- list('vector')
## Bundle prior/run settings for the sampler; D is the distance matrix built earlier
params <- list(n.mcmc = n.mcmc, alpha = alpha, pi.prior = pi.prior, lambda = lambda, alpha.eta = alpha.eta, beta.eta = beta.eta, phi.lower = phi.lower, phi.upper = phi.upper, D = D)
## Metropolis-Hastings proposal (tuning) values
sigma.tune <- 1
phi.tune <- 1
sigma.eta.tune <- 50
gamma.tune <- 0.025
## NOTE(review): sigma.tune is defined but not placed in `tune` -- confirm it
## is intentionally unused (see the commented alternative below).
tune <- list(phi.tune = phi.tune, sigma.eta.tune = sigma.eta.tune, gamma.tune = gamma.tune)
# tune <- list(sigma.tune = sigma.tune, phi.tune = phi.tune, sigma.eta.tune = sigma.eta.tune)
##
## fit mcmc using ODA model
##
# X.pca <- prcomp(X.new, center = TRUE, scale. = TRUE, retx = TRUE)$x
#
# pca.scale <- scale.predictor(X.pca)
# X.pca.scale <- pca.scale$X
# matplot(X.pca.scale, type = 'l')
## Run the ODA model-averaging MCMC (mcmc.pcaMA is loaded via source() earlier in the script)
out <- mcmc.pcaMA(Y.list = Y.list, X.o = X.o, H.list = H.list, params = params, tune = tune)
## Rao-blackwell estimates
#
# beta.fit <- matrix(nrow = p, ncol = reps / 2)
# for(i in 1:(reps / 2)){
# for(j in 1:p){
# # beta.fit[j, i] <- apply(
# beta.fit[j, i] <- mean(out$rho.save[j, i, ] * out$delta.save[i] / (out$delta.save[i] + lambda[j]) * out$beta.save[j, i, ])
# #, 1, mean)
# }
# }
#
# X.pca <- prcomp(X.o)$x
# Y.pred <- matrix(nrow = m, ncol = reps / 2)
# for(i in 1:(reps / 2)){
# Y.pred[, i] <- X.pca %*% beta.fit[, i]
# # Y.pred[, i] <- X.pca.scale %*% beta.fit[, i]
# }
#
# matplot(Y.pred, type = 'l')
# matplot((Y.pred - X.new)^2, type = 'l')
# ## mean square prediction error
# MSPE.RB <- mean((Y.pred - X.new)^2)
# MSPE.RB
# # log.score <- mean(out$log.score.save[(n.burn + 1):n.mcmc])
#
## Posterior mean prediction for each held-out field (average over MCMC draws)
out.Y.pred <- matrix(nrow = m, ncol = (reps / 2))
for(i in 1:(reps / 2)){
out.Y.pred[, i] <- apply(out$Y.pred[, i, ], 1, mean)
}
## Predicted fields and pointwise squared errors against the held-out data X.new
matplot(out.Y.pred, type = 'l')
matplot((out.Y.pred - X.new)^2, type = 'l')
## Mean squared prediction error over all locations and held-out fields
MSPE <- mean((out.Y.pred - X.new)^2)
MSPE
## MCMC diagnostics: acceptance rates and trace plots
out$gamma.accept
matplot(out$sigma.squared.save, type = 'l')
matplot(out$sigma.squared.eta.save, type = 'l', main = round(out$eta.accept, digits = 4))
matplot(out$phi.save, type = 'l', main = round(out$phi.accept, digits = 4))
|
## Return the name of the hospital in `state` with the `num`-th best (lowest)
## 30-day mortality rate for `outcome`.
##
## state:   two-letter state abbreviation (e.g. "TX")
## outcome: "heart attack", "heart failure", or "pneumonia"
## num:     "best", "worst", or a numeric rank (1 = lowest mortality)
## Reads "outcome-of-care-measures.csv" from the current working directory.
## Returns the string "NA" (not the NA value) when a numeric rank exceeds
## the number of rows for the state.
rankhospital <- function(state, outcome, num) {
## Read everything as character; mortality rates are converted to numeric later
ocm <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
ocmForState <- subset(ocm, ocm$State == state)
if (nrow(ocmForState) == 0) {
stop("invalid state")
}
## NOTE(review): the stop() below is unreachable -- return() exits first.
if (num != "best" && num != "worst" && num > nrow(ocmForState)) {
return("NA")
stop()
}
if (outcome != "heart attack" && outcome != "heart failure" && outcome != "pneumonia") {
stop("invalid outcome")
}
HospitalsInState <- ocmForState$Hospital.Name
## Hard-coded column positions of the 30-day mortality rates in this CSV:
## 11 = heart attack, 17 = heart failure, 23 = pneumonia
if (outcome == "heart attack") {
MortalityRate <- ocmForState[,11]
} else if (outcome == "heart failure") {
MortalityRate <- ocmForState[,17]
} else if (outcome == "pneumonia") {
MortalityRate <- ocmForState[,23]
}
## Character matrix: column 1 = hospital name, column 2 = rate as text
df <- cbind(HospitalsInState, MortalityRate)
## Drop hospitals with no reported rate
dfwona <- subset(df, df[,2] != "Not Available")
# order the data frame alphabetically by hospital names
## (the alphabetical pre-sort is presumably meant to break rate ties by
## name -- this relies on order() being stable; confirm.)
d <- dfwona[order(dfwona[,1]),]
# again order the data frame based on mortality rate
df <- d[order(as.numeric(d[,2])),]
if (num == "best") {
return(df[[1,1]])
} else if (num == "worst") {
return(df[[nrow(df),1]])
} else {
return(df[[num,1]])
}
} | /rankhospital.R | no_license | neeraj-k/Rprogramming_Assignment3 | R | false | false | 1,135 | r | 
## Rank hospitals within a state by 30-day mortality rate for a given outcome.
##
## Args:
##   state:   two-letter state abbreviation (e.g. "TX").
##   outcome: one of "heart attack", "heart failure", "pneumonia".
##   num:     "best", "worst", or a numeric rank (1 = lowest mortality).
##
## Returns:
##   The hospital name at the requested rank; ties in mortality are broken
##   alphabetically by hospital name.  Returns the string "NA" (kept as a
##   string for backward compatibility) when a numeric `num` exceeds the
##   number of rows for the state.
##
## Raises:
##   An error ("invalid state" / "invalid outcome") on bad inputs.
##   Reads "outcome-of-care-measures.csv" from the working directory.
rankhospital <- function(state, outcome, num) {
  ## Hard-coded column positions of the 30-day mortality rates in this CSV
  outcome.col <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
  ## Read everything as character; rates are converted to numeric below
  ocm <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  ocmForState <- subset(ocm, ocm$State == state)
  if (nrow(ocmForState) == 0) {
    stop("invalid state")
  }
  ## A numeric rank beyond the number of rows cannot be satisfied.
  ## (Check order matches the original: this precedes outcome validation,
  ## and the unreachable stop() that followed return() has been removed.)
  if (num != "best" && num != "worst" && num > nrow(ocmForState)) {
    return("NA")
  }
  if (!(outcome %in% names(outcome.col))) {
    stop("invalid outcome")
  }
  hospital <- ocmForState$Hospital.Name
  rate <- ocmForState[, outcome.col[[outcome]]]
  ## Drop hospitals with no reported rate, then rank by rate with an explicit
  ## alphabetical tie-break (replaces the original's two-pass stable sort)
  keep <- rate != "Not Available"
  hospital <- hospital[keep]
  rate <- as.numeric(rate[keep])
  ranked <- hospital[order(rate, hospital)]
  if (num == "best") {
    ranked[[1]]
  } else if (num == "worst") {
    ranked[[length(ranked)]]
  } else {
    ranked[[num]]
  }
}
###################################################################################
# ##
# ZEN 2014 Global eelgrass ecosystem structure: Data assembly ##
# RAW data are current as of 2017-04-24 ##
# Emmett Duffy (duffye@si.edu) ##
# updated 2022-06-28 by Matt Whalen ##
# ##
###################################################################################
###################################################################################
# TABLE OF CONTENTS #
# #
# METADATA #
# LOAD PACKAGES #
# READ IN AND PREPARE DATA #
# CREATE DERIVED VARIABLES #
# EXPLORE DISTRIBUTIONS OF VARIABLES (PLOT LEVEL) #
# LOG TRANSFORMS #
# OBTAIN SITE MEANS #
# PCA - ENVIRONMENTAL VARIABLES (GLOBAL) #
# PCA - ENVIRONMENTAL VARIABLES (ATLANTIC) #
# PCA - ENVIRONMENTAL VARIABLES (PACIFIC) #
# EXPLORE DATA COMPLETENESS #
# PCA - EELGRASS VARIABLES (GLOBAL) #
# CREATE SCALED VARIABLES #
# SUBSET DATA SETS BY GEOGRAPHY #
# OUTPUT CURATED DATA SETS #
# #
###################################################################################
###################################################################################
# METADATA #
###################################################################################
# This script assembles raw data from the ZEN 2014 global eelgrass ecosystem sampling
# project, and outputs data files for use in modeling and other applications. See also:
# ZEN_2014_model_comparison.R: for data exploration and model building
# ZEN_2014_figures.R series: for building figures for the MS
# Source data: For most of the history of this script I was using
# ZEN_2014_Site&PlotData_2016_05_17_Released.xlsx.
###################################################################################
# LOAD PACKAGES #
###################################################################################
# Load packages:
library(tidyverse) # for reformatting epibiota data
library(randomForest) # needed for data imputation
library(car) # needed or vif analysis
library(psych) # to visualize relationshiops in pairs panels
library(plyr) # to use ddply below in fixing richness values
###################################################################################
# READ AND PREPARE DATA #
###################################################################################
# MAIN ZEN 2014 DATA SET
# Read in summary data set for ZEN 2014:
## Plot-level main ZEN 2014 data set (one row per plot)
d <- read.csv("data/input/Duffy_et_al_2022_main_data.csv", header = TRUE)
# General site data
sites <- read.csv("data/input/Duffy_et_al_2022_site_metadata.csv", header = TRUE)
# BIO-ORACLE CLIMATE AND ENVIRONMENTAL DATA
# Read in Bio-ORACLE and WorldClim environmental data for ZEN sites from Matt Whalen's script:
env <- read.csv("data/output/Duffy_et_al_2022_environmental.csv", header = TRUE)
# add in situ data
## A lowercase `site` copy of Site is added so the join key matches `env`;
## left_join() with no `by` joins on ALL shared column names -- confirm keys.
env.insitu <- read.csv("data/input/Duffy_et_al_2022_environmental_in_situ.csv") %>%
mutate(site=Site)
env <- left_join(env, env.insitu)
# EELGRASS GENETICS
d.gen_fca <- read.csv("data/input/Duffy_et_al_2022_FCA_scores.csv", header = TRUE)
# d.gen_fca_atlantic <- read.csv("data/input/ZEN_2014_fca_scores_atlantic_20210125_copy.csv", header = TRUE)
# d.gen_fca_pacific <- read.csv("data/input/ZEN_2014_fca_scores_pacific_20210125_copy.csv", header = TRUE)
#### CLEAN UP AND CONSOLIDATE
# Convert categorical variables to factors
d$Site.Code <- as.factor(d$Site.Code)
d$Ocean <- as.factor(d$Ocean)
# Rename Long Island sites (LI.1/LI.2 -> LI.A/LI.B for naming consistency)
d$Site <- as.factor(d$Site)
levels(d$Site)[levels(d$Site)=="LI.1"] <- "LI.A"
levels(d$Site)[levels(d$Site)=="LI.2"] <- "LI.B"
# Rename misspelled or confusing variables.
# Raw column names (as released) mapped to the tidy names used throughout
# this script.  A column absent from d is simply left untouched, matching
# the behavior of the original one-line-per-rename version.
zen.renames <- c(
  "Mean.Sheath.Width.cm."          = "Zostera.sheath.width",
  "Mean.Shealth.Length.cm."        = "Zostera.sheath.length",
  "Mean.Longest.Leaft.Length.cm."  = "Zostera.longest.leaf.length",
  "Mean.Above.Zmarina.g"           = "Zostera.aboveground.mean.mass",
  "Mean.Below.Zmarina.g"           = "Zostera.belowground.mean.mass",
  "Shoots.Zmarina.per.m2"          = "Zostera.shoots.per.m2.core",
  "Mean.Fetch"                     = "mean.fetch",
  "PopDens2"                       = "pop.density.2015",
  "mesograzer.total.site.richness" = "grazer.richness.site"
)
for (old.name in names(zen.renames)) {
  names(d)[names(d) == old.name] <- zen.renames[[old.name]]
}
# MESOGRAZER SITE RICHNESS: FIX MISSING VALUES
# Create vector of plots with missing values to see what is missing:
## NOTE(review): hard-coded column positions are fragile -- confirm 3 and 7
## are still Site and Unique.ID in the released CSV.
missing.richness <- d[is.na(d$grazer.richness.site), c(3,7)] # columns 3 and 7 are Site, Unique.ID
# replace all site richness values with "mean" for that site. First, create vector of means:
## (site richness is constant within a site, so the mean just recovers that
## constant while skipping NA plots; `T` should ideally be spelled TRUE)
temp <- d %>%
group_by( Site) %>%
summarize( grazer.richness.site = mean(grazer.richness.site, na.rm = T))
# But CR.A has NO mesograzers at all so returns NaN. Assume species pool is same as for CR.B (S = 3) and replace:
# temp$grazer.richness.site[is.na(temp$grazer.richness.site)] <- 3 # CR.A grazer richness now = 3
temp$grazer.richness.site[temp$Site == "CR.A" ] <- 3 # CR.A grazer richness now = 3
## Broadcast the per-site value back to every plot row
d$grazer.richness.site <- temp$grazer.richness.site[match(d$Site, temp$Site)]
# Add BioOracle environmental data to main ZEN dataframe:
# Named map: new column in d = source column in env.
env.vars <- c(
  sst.min       = "sstmin",
  sst.mean      = "sstmean",
  sst.max       = "sstmax",
  sst.range     = "sstrange",
  chlomean      = "chlomean",
  nitrate       = "nitrate",
  parmean       = "parmean",
  cloudmean     = "cloudmean",
  day.length    = "Day.length.hours",
  ph            = "ph",
  phosphate     = "phosphate",
  salinity      = "salinity",
  precipitation = "precip"
)
## Site-level lookup: each plot row in d gets its site's value from env.
env.row <- match(d$Site, env$Site)  # computed once instead of once per variable
for (v in names(env.vars)) {
  d[[v]] <- env[[env.vars[[v]]]][env.row]
}
# Reorder variables 'Coast': WP to EA (west-to-east factor-level ordering for plots)
d$Coast <- as.factor(d$Coast)
d$Coast <- factor(d$Coast, levels = c("West Pacific", "East Pacific", "West Atlantic", "East Atlantic"))
###################################################################################
#                       CREATE DERIVED VARIABLES                                  #
###################################################################################
# Percentage of crustaceans and gastropods among the mesograzers
# (biomass fractions of total mesograzer biomass, 0-1, despite the "pct" names)
d$crust.pct.mass <- d$Malacostraca.mesograzer.plot.biomass.std.mg.g / d$mesograzer.total.plot.biomass.std.mg.g
d$gast.pct.mass <- d$Gastropoda.mesograzer.plot.biomass.std.mg.g / d$mesograzer.total.plot.biomass.std.mg.g
# grazer and periphyton numbers per unit bottom area (i.e., core)
# Per-gram-of-Zostera quantities times aboveground plant mass give per-area values
d$mesograzer.abund.per.area <- d$mesograzer.total.plot.abund.std.g * d$Zostera.aboveground.mean.mass
d$crustacean.mass.per.area <- d$Malacostraca.mesograzer.plot.biomass.std.mg.g * d$Zostera.aboveground.mean.mass
d$gastropod.mass.per.area <- d$Gastropoda.mesograzer.plot.biomass.std.mg.g * d$Zostera.aboveground.mean.mass
d$mesograzer.mass.per.area <- d$mesograzer.total.plot.biomass.std.mg.g * d$Zostera.aboveground.mean.mass
d$periphyton.mass.per.area <- d$periphyton.mass.per.g.zostera * d$Zostera.aboveground.mean.mass
# Leaf C:N ratio
d$leaf.CN.ratio <- d$Leaf.PercC / d$Leaf.PercN
###################################################################################
# EXPLORE DISTRIBUTIONS OF VARIABLES (PLOT LEVEL) #
###################################################################################
# Examine frequency distribution of sites by environmental factor
# par(mfrow = c(1,1))
# par(mfrow = c(2,4))
# hist(d$Latitude, col = "cyan", main = "Surveys by latitude")
# hist(d$Longitude, col = "cyan", main = "Surveys by longitude")
# hist(d$Temperature.C, col = "cyan", main = "Surveys by temperature")
# hist(d$Salinity.ppt, col = "cyan", main = "Surveys by salinity")
# hist(d$pop.density.2015, col = "cyan", main = "Surveys by population density")
# hist(d$day.length, col = "cyan", main = "Surveys by day length")
# hist(d$mean.fetch, col = "cyan", main = "Surveys by mean fetch")
#
# hist(d$Zostera.aboveground.mean.mass, col = "cyan", main = "Surveys by Zostera AG biomass")
# hist(d$periphyton.mass.per.g.zostera, col = "cyan", main = "Surveys by periphyton biomass")
# hist(d$Malacostraca.mesograzer.plot.abund.std.g, col = "cyan", main = "Surveys by crustacean biomass")
# hist(d$Gastropoda.mesograzer.plot.biomass.std.mg.g, col = "cyan", main = "Surveys by gastropod biomass")
# hist(d$grazer.richness.site, col = "cyan", main = "Surveys by mesograzer richness")
#
# hist(d$mesograzer.total.plot.biomass.std.mg.g, col = "cyan", main = "Surveys by mesograzer biomass")
# hist(d$epifauna.total.plot.biomass.std.mg.g, col = "cyan", main = "Surveys by mobile epifauna biomass")
#
###################################################################################
# LOG TRANSFORMS #
###################################################################################
# NOTE: For many variables I add a constant roughly equal to the smallest value recorded
# (the added constant keeps zeros finite on the log10 scale)
d$log10.Zostera.AG.mass <- log10(d$Zostera.aboveground.mean.mass + 1)
d$log10.Zostera.BG.mass <- log10(d$Zostera.belowground.mean.mass + 1)
d$log10.Zostera.shoots.core <- log10(d$Zostera.shoots.per.m2.core)
d$log10.Zostera.sheath.width <- log10(d$Zostera.sheath.width)
d$log10.Zostera.sheath.length <- log10(d$Zostera.sheath.length)
d$log10.Zostera.longest.leaf.length <- log10(d$Zostera.longest.leaf.length)
d$log10.epibiota.filter <- log10(d$epibiota.filter)
d$log10.epibiota.zostera.marina <- log10(d$epibiota.zostera.marina)
d$log10.periphyton.mass.per.g.zostera <- log10(d$periphyton.mass.per.g.zostera + 0.001)
d$log10.periphyton.mass.per.area <- log10(d$periphyton.mass.per.area + 0.1)
d$log10.mesograzer.abund.per.g.plant <- log10(d$mesograzer.total.plot.abund.std.g + 0.01)
d$log10.crustacean.abund.per.g.plant <- log10(d$Malacostraca.mesograzer.plot.abund.std.g + 0.01)
d$log10.gastropod.abund.per.g.plant <- log10(d$Gastropoda.mesograzer.plot.abund.std.g + 0.01)
d$log10.mesograzer.mass.per.g.plant <- log10(d$mesograzer.total.plot.biomass.std.mg.g + 0.01)
d$log10.crustacean.mass.per.g.plant <- log10(d$Malacostraca.mesograzer.plot.biomass.std.mg.g + 0.01)
d$log10.gastropod.mass.per.g.plant <- log10(d$Gastropoda.mesograzer.plot.biomass.std.mg.g + 0.01)
d$log10.mesograzer.abund.per.area <- log10(d$mesograzer.abund.per.area + 1)
d$log10.crustacean.mass.per.area <- log10(d$crustacean.mass.per.area + 1)
d$log10.gastropod.mass.per.area <- log10(d$gastropod.mass.per.area + 1)
d$log10.mesograzer.mass.per.area <- log10(d$mesograzer.mass.per.area + 1)
d$log10.grazer.richness.site <- log10(d$grazer.richness.site + 1)
d$log10.day.length <- log10(d$day.length)
d$log10.Leaf.PercN <- log10(d$Leaf.PercN)
d$sqrt.nitrate <- sqrt(d$nitrate)
d$log10.phosphate <- log10(d$phosphate)
d$log10.chlomean <- log10(d$chlomean)
d$log10.mean.fetch <- log10(d$mean.fetch)
# hist(d$nitrate)
# hist(d$sqrt.nitrate)
#
# hist(d$log10.Zostera.AG.mass)
#
# Change values of NaN to NA:
## NOTE(review): comparing against the string "NaN" relies on implicit
## coercion of numeric NaN to "NaN" (and uses `=` assignment); it would also
## match character columns containing the literal text "NaN" -- confirm intent.
d[d == "NaN"] = NA
###################################################################################
# OBTAIN SITE MEANS #
###################################################################################
# CAN THIS GO AFTER IMPUTATION SECTION? SHOULD IT?
# Obtain mean values per site
## One row per site; every response is a plot-level mean with NAs dropped.
## Note: means of log10-transformed variables are computed (mean of logs),
## not logs of the site means.
site_means <- d %>%
group_by(Site) %>%
dplyr::summarize( Zostera.AG.mass.site = mean(Zostera.aboveground.mean.mass, na.rm = T),
Zostera.BG.mass.site = mean(Zostera.belowground.mean.mass, na.rm = T),
Zostera.shoots.core.site = mean(Zostera.shoots.per.m2.core, na.rm = T),
Zostera.sheath.width.site = mean(Zostera.sheath.width, na.rm = T),
Zostera.sheath.length.site = mean(Zostera.sheath.length, na.rm = T),
Zostera.longest.leaf.length.site = mean(Zostera.longest.leaf.length, na.rm = T),
epibiota.filter.site = mean(epibiota.filter, na.rm = T),
epibiota.zostera.marina.site = mean(epibiota.zostera.marina, na.rm = T),
periphyton.mass.per.g.zostera.site = mean(periphyton.mass.per.g.zostera, na.rm = T),
mesograzer.abund.per.g.plant.site = mean(mesograzer.total.plot.abund.std.g, na.rm = T),
crustacean.abund.per.g.plant.site = mean(Malacostraca.mesograzer.plot.abund.std.g, na.rm = T),
gastropod.abund.per.g.plant.site = mean(Gastropoda.mesograzer.plot.abund.std.g, na.rm = T),
mesograzer.mass.per.g.plant.site = mean(mesograzer.total.plot.biomass.std.mg.g, na.rm = T),
crustacean.mass.per.g.plant.site = mean(Malacostraca.mesograzer.plot.biomass.std.mg.g, na.rm = T),
gastropod.mass.per.g.plant.site = mean(Gastropoda.mesograzer.plot.biomass.std.mg.g, na.rm = T),
mesograzer.mass.per.area.site = mean(mesograzer.mass.per.area, na.rm = T),
crustacean.mass.per.area.site = mean(crustacean.mass.per.area, na.rm = T),
gastropod.mass.per.area.site = mean(gastropod.mass.per.area, na.rm = T),
periphyton.mass.per.area.site = mean(periphyton.mass.per.area, na.rm = T),
log10.grazer.richness.site = mean(log10.grazer.richness.site, na.rm = T),
crust.pct.mass.site = mean(crust.pct.mass, na.rm = T),
gast.pct.mass.site = mean(gast.pct.mass, na.rm = T),
Leaf.PercN.site = mean(Leaf.PercN, na.rm = T),
leaf.CN.ratio.site = mean(leaf.CN.ratio, na.rm = T),
log10.Zostera.AG.mass.site = mean(log10.Zostera.AG.mass, na.rm = T),
log10.Zostera.BG.mass.site = mean(log10.Zostera.BG.mass, na.rm = T),
log10.Zostera.shoots.core.site = mean(log10.Zostera.shoots.core, na.rm = T),
log10.Zostera.sheath.width.site = mean(log10.Zostera.sheath.width, na.rm = T),
log10.Zostera.sheath.length.site = mean(log10.Zostera.sheath.length, na.rm = T),
log10.Zostera.longest.leaf.length.cm.site = mean(log10.Zostera.longest.leaf.length, na.rm = T),
log10.periphyton.mass.per.g.zostera.site = mean(log10.periphyton.mass.per.g.zostera, na.rm = T),
log10.mesograzer.abund.per.g.plant.site = mean(log10.mesograzer.abund.per.g.plant, na.rm = T),
log10.crustacean.abund.per.g.plant.site = mean(log10.crustacean.abund.per.g.plant, na.rm = T),
log10.gastropod.abund.per.g.plant.site = mean(log10.gastropod.abund.per.g.plant, na.rm = T),
log10.mesograzer.mass.per.g.plant.site = mean(log10.mesograzer.mass.per.g.plant, na.rm = T),
log10.crustacean.mass.per.g.plant.site = mean(log10.crustacean.mass.per.g.plant, na.rm = T),
log10.gastropod.mass.per.g.plant.site = mean(log10.gastropod.mass.per.g.plant, na.rm = T),
log10.mesograzer.abund.per.area.site = mean(log10.mesograzer.abund.per.area, na.rm = T),
log10.mesograzer.mass.per.area.site = mean(log10.mesograzer.mass.per.area, na.rm = T),
log10.crustacean.mass.per.area.site = mean(log10.crustacean.mass.per.area, na.rm = T),
log10.gastropod.mass.per.area.site = mean(log10.gastropod.mass.per.area, na.rm = T),
log10.periphyton.mass.per.area.site = mean(log10.periphyton.mass.per.area, na.rm = T),
log10.Leaf.PercN.site = mean(log10.Leaf.PercN, na.rm = T) )
## Site richness is constant within a site, so the first matching plot row is used
site_means$grazer.richness.site <- d$grazer.richness.site[match(site_means$Site, d$Site)]
# Change values of NaN to NA:
## (same string-coercion trick as for d above -- see the review note there)
site_means[site_means == "NaN"] = NA
# Add site-level environmental (and other) variables back in.
# All of these are constant within a site, so the first matching plot row in d
# carries the site's value.
site.level.vars <- c(
  "Ocean", "Coast", "Latitude", "Longitude", "Temperature.C", "Salinity.ppt",
  "log10.mean.fetch", "day.length", "log10.day.length", "sst.min", "sst.mean",
  "sst.max", "sst.range", "salinity", "parmean", "cloudmean", "precipitation",
  "nitrate", "sqrt.nitrate", "ph", "phosphate", "log10.phosphate", "NP.ratio",
  "chlomean", "log10.chlomean", "pop.density.2015"
)
site.row <- match(site_means$Site, d$Site)  # computed once instead of once per variable
for (v in site.level.vars) {
  site_means[[v]] <- d[[v]][site.row]
}
# Add genetic data to site means data frame (FCA axis scores, one value per site)
site_means$FC1 <- d.gen_fca$FC1[match(site_means$Site, d.gen_fca$Site)]
site_means$FC2 <- d.gen_fca$FC2[match(site_means$Site, d.gen_fca$Site)]
# For boxplots, reorder variable 'Coast': WP to EA
site_means$Coast <- factor(site_means$Coast, levels = c("West Pacific", "East Pacific", "West Atlantic", "East Atlantic"))
# Create separate data sets by Ocean - SITE level
## droplevels() discards factor levels that no longer occur in each subset
site_means_Atlantic <- droplevels(subset(site_means, Ocean == "Atlantic"))
site_means_Pacific <- droplevels(subset(site_means, Ocean == "Pacific"))
## Atlantic subset excluding site SW.A (presumably the 49-site Atlantic set -- confirm)
site_means_49_Atlantic <- droplevels(subset(site_means_Atlantic, Site != "SW.A"))
###################################################################################
# PCA - ENVIRONMENTAL VARIABLES (GLOBAL) #
###################################################################################
# # Explore correlations among environmental drivers
# pairs.panels(site_means[,c("Latitude", "sst.mean", "sst.range", "sst.min", "sst.max", "Salinity.ppt",
# "parmean", "log10.day.length", "cloudmean", "precipitation", "sqrt.nitrate", "log10.phosphate", "log10.chlomean",
# "Leaf.PercN.site", "log10.mean.fetch")],
# smooth=T,density=F,ellipses=F,lm=F,digits=2,scale=F, cex.cor = 8)
# Create data frame containing the ZEN 2014 environmental variables for PCA
# Note: Some exploration shows that nitrate is closely correlated with several other
# variables, and taking it out results in first 3 PC axes explaining ~75% of variation. This
# is parsimonious and simplifies the analysis.
## Environmental variables entering the global PCA (nitrate excluded -- see note above)
ZEN.env <- site_means[c("sst.mean", "sst.range", "Salinity.ppt", "parmean",
"cloudmean", "log10.phosphate", "log10.chlomean", "Leaf.PercN.site"
# , "precipitation", "log10.day.length",
)]
## One-column data frame of site IDs, kept to label the PCA scores later
ZEN.sites <- site_means[c("Site")]
# Compute PCAs (centered and scaled to unit variance)
ZEN.env.pca <- prcomp(ZEN.env, center = TRUE, scale. = TRUE)
# print(ZEN.env.pca)
# PC1 PC2 PC3 PC4 PC5 PC6 PC7 PC8
# sst.mean 0.5344090 -0.04221968 0.12650153 -0.2221002 0.11595693 -0.56707288 0.49861640 0.25230424
# sst.range -0.1607624 -0.40262794 0.45918615 -0.4862507 0.41315371 0.25966358 -0.15719348 0.31925476
# Salinity.ppt 0.3702257 0.16135868 -0.48106388 -0.4651378 0.05646463 -0.08442206 -0.61172656 0.06779392
# parmean 0.4076216 0.22572201 0.39507514 0.3928616 -0.25219684 0.21903419 -0.29892746 0.52108800
# cloudmean -0.4937825 -0.21507910 -0.27382435 0.1300389 -0.18748290 -0.44075941 -0.12798127 0.61010822
# log10.phosphate -0.2101797 0.54450089 -0.13760560 -0.4243534 -0.22277173 0.36170941 0.41340358 0.33010411
# log10.chlomean -0.2566312 0.34762747 0.53996106 -0.2846051 -0.31346195 -0.45082306 -0.26740350 -0.26025590
# Leaf.PercN.site -0.1774368 0.54363232 0.01286878 0.2560322 0.75235033 -0.16600039 -0.06571552 0.09672818
# Interpretation:
# PCe1: latitude/climate: high = warmer, brighter, less cloudy (lower latitude)
# PCe2: nutrient status: high = high PO4, leaf N
# PCe3: estuarine: low salinity, variable temp, high chl
# # plot cumulative proportion of variance explained by PC axes
# plot(ZEN.env.pca, type = "l")
# # Calculate proportion of variance explained by each PC
# summary(ZEN.env.pca)
# PC1 PC2 PC3 PC4 PC5 PC6 PC7 PC8
# Standard deviation 1.6849 1.4240 1.1552 0.9516 0.65646 0.48125 0.36494 0.3124
# Proportion of Variance 0.3549 0.2535 0.1668 0.1132 0.05387 0.02895 0.01665 0.0122
# Cumulative Proportion 0.3549 0.6083 0.7751 0.8883 0.94220 0.97115 0.98780 1.0000
# Combine PCA scores with SITE-level data frame
site.env.pca.scores <- ZEN.env.pca$x
site.env.pca.scores <- cbind(ZEN.sites, site.env.pca.scores)
## NOTE(review): this cbind adds a second `Site` column to site_means
## (ZEN.sites carries one) -- confirm the duplicate name is harmless downstream.
site_means <- cbind(site_means, site.env.pca.scores)
# Rename PCA variables 1-3 (the retained axes) and cull PC4-PC8
names(site_means)[names(site_means)=="PC1"] <- "PC1.env.global"
names(site_means)[names(site_means)=="PC2"] <- "PC2.env.global"
names(site_means)[names(site_means)=="PC3"] <- "PC3.env.global"
site_means <- subset(site_means, select = -c(PC4,PC5,PC6, PC7, PC8))
###################################################################################
# PCA - ENVIRONMENTAL VARIABLES (ATLANTIC) #
###################################################################################
# # Explore correlations among environmental drivers
# pairs.panels(site_means_Atlantic[,c("sst.mean", "sst.range", "Salinity.ppt", "parmean",
# "cloudmean", "log10.phosphate", "log10.chlomean", "Leaf.PercN.site"
# # , "precipitation", "log10.day.length"
# )],
# smooth=T,density=F,ellipses=F,lm=F,digits=2,scale=F, cex.cor = 8)
# Create data frame containing the ZEN 2014 environmental variables for PCA
# Note: Some exploration shows that nitrate is closely corrtelated with several other
# variables, and taking it out results in first 3 PC axes explaining ~75% of variation. This
# is parsimonious and simplifies the analysis.
## Environmental variables entering the Atlantic-only PCA (same set as the global PCA)
ZEN.env.atl <- site_means_Atlantic[c("sst.mean", "sst.range", "Salinity.ppt", "parmean",
"cloudmean", "log10.phosphate", "log10.chlomean", "Leaf.PercN.site"
# , "precipitation", "log10.day.length"
)]
## Site IDs for labeling the Atlantic PCA scores
ZEN.sites.atl <- site_means_Atlantic[c("Site")]
# Compute PCAs (centered and scaled to unit variance)
ZEN.env.pca.atl <- prcomp(ZEN.env.atl, center = TRUE, scale. = TRUE)
# print(ZEN.env.pca.atl)
# PC1 PC2 PC3 PC4 PC5 PC6 PC7 PC8
# sst.mean -0.550319750 0.07256028 -0.14266055 0.01964309 -0.26247919 0.50693440 0.34783455 0.47358063
# sst.range 0.008028728 0.53243059 0.13905815 0.56739502 -0.43108238 -0.27143385 -0.27518892 0.19985403
# Salinity.ppt -0.312338254 -0.33887929 -0.52503373 0.18367192 -0.01847826 0.09370635 -0.67915383 -0.08853019
# parmean -0.307553079 0.44084782 0.04824442 -0.51027750 0.40436656 -0.23475705 -0.34111047 0.33671071
# cloudmean 0.486920633 -0.28474069 0.27891671 0.01450237 0.05975327 0.32618638 -0.33098360 0.61992583
# log10.phosphate 0.294237976 0.02478199 -0.66842063 0.18661880 0.25670169 -0.33170669 0.31530953 0.39478092
# log10.chlomean 0.265024764 0.54345377 -0.20872625 0.13627880 0.32327853 0.62268122 -0.08243877 -0.27063675
# Leaf.PercN.site 0.333217372 0.15821912 -0.33789872 -0.57441251 -0.63831315 0.03592546 -0.09954655 -0.03411444
# Interpretation:
# PCe1: latitude/climate: high = cooler, cloudier
# PCe2: estuarine/eutrophic: high = high phytoplankton, variable temperature, bright, lowish salinity
# PCe3: arid watershed? oligotrophic Baltic?: high = low salinity, low PO4
# # plot cumulative proportion of variance explained by PC axes
# plot(ZEN.env.pca.atl, type = "l")
# # Calculate proportion of variance explained by each PC
# summary(ZEN.env.pca.atl)
# PC1 PC2 PC3 PC4 PC5 PC6 PC7 PC8
# Standard deviation 1.6778 1.4182 1.2097 0.9687 0.62444 0.46015 0.35168 0.21673
# Proportion of Variance 0.3519 0.2514 0.1829 0.1173 0.04874 0.02647 0.01546 0.00587
# Cumulative Proportion 0.3519 0.6033 0.7862 0.9035 0.95220 0.97867 0.99413 1.00000
# Output PCA scores for each site and combine with site means data frame
site.env.pca.scores.atl <- ZEN.env.pca.atl$x
site.env.pca.scores.atl <- cbind(ZEN.sites.atl, site.env.pca.scores.atl)
## NOTE(review): as with the global PCA, this introduces a duplicate `Site`
## column -- confirm it is harmless downstream.
site_means_Atlantic <- cbind(site_means_Atlantic, site.env.pca.scores.atl)
# Rename PCA variables 1-3 (the retained axes) and cull PC4-PC8
names(site_means_Atlantic)[names(site_means_Atlantic)=="PC1"] <- "PC1.env.atl"
names(site_means_Atlantic)[names(site_means_Atlantic)=="PC2"] <- "PC2.env.atl"
names(site_means_Atlantic)[names(site_means_Atlantic)=="PC3"] <- "PC3.env.atl"
site_means_Atlantic <- subset(site_means_Atlantic, select = -c(PC4,PC5,PC6, PC7, PC8))
###################################################################################
# PCA - ENVIRONMENTAL VARIABLES (PACIFIC) #
###################################################################################
# # Explore correlations among environmental drivers
# pairs.panels(site_means_Pacific[,c("Latitude", "sst.mean", "sst.range", "sst.min", "sst.max", "Salinity.ppt",
# "parmean", "log10.day.length", "cloudmean", "precipitation", "sqrt.nitrate", "log10.phosphate", "log10.chlomean",
# "Leaf.PercN.site", "log10.mean.fetch")],
# smooth=T,density=F,ellipses=F,lm=F,digits=2,scale=F, cex.cor = 8)
# Create data frame containing the ZEN 2014 environmental variables for PCA
# Note: Some exploration shows that nitrate is closely correlated with several other
# variables, and taking it out results in first 3 PC axes explaining ~75% of variation. This
# is parsimonious and simplifies the analysis.
# Data frame of ZEN 2014 environmental variables used in the Pacific-only PCA
# (precipitation and log10.day.length were considered but are left commented out)
ZEN.env.pac <- site_means_Pacific[c("sst.mean", "sst.range", "Salinity.ppt", "parmean",
                                    "cloudmean", "log10.phosphate", "log10.chlomean", "Leaf.PercN.site"
                                    # , "precipitation", "log10.day.length"
)]
# Site labels, kept separately so PCA scores can be re-attached to sites below
ZEN.sites.pac <- site_means_Pacific[c("Site")]
# Compute PCA on centered, unit-variance (scale. = TRUE) variables
ZEN.env.pca.pac <- prcomp(ZEN.env.pac, center = TRUE, scale. = TRUE)
# print(ZEN.env.pca.pac)
# PC1 PC2 PC3 PC4 PC5 PC6 PC7 PC8
# sst.mean 0.4416493 -0.14998580 0.38471592 -0.09795308 0.11434105 -0.46072831 0.20973408 0.59625174
# sst.range -0.1192591 -0.58280840 0.13287760 -0.61360069 -0.05905439 0.33920457 0.35256555 -0.09539264
# Salinity.ppt 0.4002213 0.04551641 -0.50374668 0.10047825 0.56645013 0.33137386 0.37126335 0.07337350
# parmean 0.4058142 0.32386570 0.34788599 0.04747739 -0.21892434 -0.03638956 0.46415902 -0.58519351
# cloudmean -0.3739858 -0.36483629 -0.19281131 0.31505264 0.17158648 -0.57553674 0.40395943 -0.25831575
# log10.phosphate -0.4215990 0.32143191 0.04272324 0.20357878 -0.29958265 0.25871215 0.55282804 0.46191518
# log10.chlomean -0.3422080 0.18681817 0.58063017 0.01610370 0.69496221 0.13777490 -0.03697576 -0.08532484
# Leaf.PercN.site -0.1764750 0.50946333 -0.28882340 -0.67866604 0.11171810 -0.37997422 0.09088341 -0.01326676
# Interpretation:
# PCe1: latitude/climate: high = warmer, brighter, higher salinity, lower PO4
# PCe2: nutrient status: high = high nutrients (especially leaf N), more stable temperature
# PCe3: estuarine/eutrophic: high = low salinity, high chl
# # plot cumulative proportion of variance explained by PC axes
# plot(ZEN.env.pca.pac, type = "l")
#
# # Calculate proportion of variance explained by each PC
# summary(ZEN.env.pca.pac)
# Standard deviation 1.9641 1.4390 0.9141 0.71060 0.62592 0.49046 0.24605 0.19570
# Proportion of Variance 0.4822 0.2588 0.1045 0.06312 0.04897 0.03007 0.00757 0.00479
# Cumulative Proportion 0.4822 0.7410 0.8455 0.90860 0.95758 0.98765 0.99521 1.00000
# Output PCA scores for each site and combine with the Pacific site means data frame.
# cbind() joins by row position — ZEN.sites.pac was extracted from
# site_means_Pacific above, so row order matches.
site.env.pca.scores.pac <- ZEN.env.pca.pac$x
site.env.pca.scores.pac <- cbind(ZEN.sites.pac, site.env.pca.scores.pac)
site_means_Pacific <- cbind(site_means_Pacific, site.env.pca.scores.pac)
# Rename PCA variables 1-3 (suffix ".env.pac") and cull PC4-PC8
names(site_means_Pacific)[names(site_means_Pacific)=="PC1"] <- "PC1.env.pac"
names(site_means_Pacific)[names(site_means_Pacific)=="PC2"] <- "PC2.env.pac"
names(site_means_Pacific)[names(site_means_Pacific)=="PC3"] <- "PC3.env.pac"
site_means_Pacific <- subset(site_means_Pacific, select = -c(PC4,PC5,PC6, PC7, PC8))
###################################################################################
# EXPLORE DATA COMPLETENESS #
###################################################################################
# NOTE: AIC comparisons among models are invalid unless exactly the same number of plots
# are used in each comparison, because the DF influences calculation of the AIC score.
# This means that we need data on all plots and need to impute missing data for
# valid AIC model comparisons.
# # How many observations are missing for each variable?
# sum(is.na(d$log10.Zostera.AG.mass)) # 24
# sum(is.na(d$log10.Zostera.shoots.core)) # 15
# sum(is.na(d$Zostera.longest.leaf.length)) # 0
# sum(is.na(d$Leaf.PercN)) # 14
# sum(is.na(d$Temperature.C)) # 0
# sum(is.na(d$Salinity.ppt)) # 0
# sum(is.na(d$pop.density.2015)) # 20 huh?
# sum(is.na(d$GenotypicRichness)) # 0
# sum(is.na(d$AllelicRichness)) # 0
# sum(is.na(d$grazer.richness.site)) # 0
# sum(is.na(d$log10.periphyton.mass.per.g.zostera)) # 4
# sum(is.na(d$log10.mesograzer.abund.per.g.plant)) # 9
# sum(is.na(d$log10.crustacean.abund.per.g.plant)) # 9
# sum(is.na(d$log10.gastropod.abund.per.g.plant)) # 9
# Look at percentage of values missing for each variable
# First create a function to calculate the % of missing values in each variable of a data frame:
pMiss <- function(x){sum(is.na(x))/length(x)*100}
# # Now apply it to the data frame:
# apply(d,2,pMiss)
#
###################################################################################
# PCA - EELGRASS VARIABLES (GLOBAL) #
###################################################################################
# NOTE: The PCA for eelgrass morphology uses imputed data (see the imputation
# script, impute_missing.R)
d.imputed <- read.csv( "data/output/Duffy_et_al_2022_imputed.csv" )
# NOTE: This includes all available ZEN eelgrass morphological variables. We use the
# first two axes, which together explain 83% of the variation in input variables, under
# the (arbitrary) criterion of using those PC axes necessary to capture 75% of the variation.
## PCA - EELGRASS VARIABLES (PLOT LEVEL)
# Create data frame containing the ZEN 2014 eelgrass morphological variables
zos.morph.plot.2 <- d.imputed[c("log10.Zostera.AG.mass.imputed", "log10.Zostera.BG.mass.imputed",
                                "log10.Zostera.shoots.core.imputed", "log10.Zostera.sheath.length", "log10.Zostera.sheath.width", "log10.Zostera.longest.leaf.length")]
# Compute plot-level PCA on centered, unit-variance variables
zos.morph.plot.2.pca <- prcomp(zos.morph.plot.2, center = TRUE, scale. = TRUE)
# Print the loadings (rotation matrix); reference output is reproduced in the
# comments below
print(zos.morph.plot.2.pca)
# PC1 PC2 PC3 PC4 PC5 PC6
# log10.Zostera.AG.mass.imputed -0.29772190 -0.58976969 0.16131419 -0.7076165 0.12385514 -0.14645813
# log10.Zostera.BG.mass.imputed 0.08114321 -0.67078182 -0.63774621 0.3664483 -0.03986877 0.02955342
# log10.Zostera.shoots.core.imputed 0.34930322 -0.42578505 0.70199747 0.3770211 0.20963800 0.13341998
# log10.Zostera.sheath.length -0.51441226 -0.05711932 0.21262143 0.4040899 -0.27044926 -0.67117666
# log10.Zostera.sheath.width -0.50068037 0.09723378 -0.08264182 0.2209389 0.81254579 0.15488847
# log10.Zostera.longest.leaf.length -0.51716912 -0.09062856 0.14973149 0.1036680 -0.45359545 0.69671169
# Interpretation:
# PCz1: growth form: high = short canopy, denser shoots
# PCz2: biomass: high values = low AG and especially BG biomass
# Plot (scree-style) variance explained by each PC axis
plot(zos.morph.plot.2.pca, type = "l")
# Calculate proportion of variance explained by each PC.
# NOTE(review): a bare summary() call only prints when run interactively or
# via source(..., echo = TRUE); assign or wrap in print() if output is needed.
summary(zos.morph.plot.2.pca)
# PC1 PC2 PC3 PC4 PC5 PC6
# Standard deviation 1.8230 1.2796 0.71769 0.48452 0.45114 0.29318
# Proportion of Variance 0.5539 0.2729 0.08585 0.03913 0.03392 0.01433
# Cumulative Proportion 0.5539 0.8268 0.91263 0.95175 0.98567 1.00000
# RESULT: First two PC axes explain 83% of variation in eelgrass morphology with ALL input variables.
# Output PCA scores and combine with plot data frame.
# cbind() joins by row position; the scores come from a PCA on columns of
# d.imputed itself, so row order matches.
zos.morph.plot.2.pca.scores <- zos.morph.plot.2.pca$x
d.imputed <- cbind(d.imputed, zos.morph.plot.2.pca.scores)
# Rename PCA variables 1-2 (suffix ".zos") and cull PC3-PC6
names(d.imputed)[names(d.imputed)=="PC1"] <- "PC1.zos"
names(d.imputed)[names(d.imputed)=="PC2"] <- "PC2.zos"
d.imputed <- subset(d.imputed, select = -c(PC3,PC4,PC5,PC6))
# NOTE: IS THIS WHERE THIS SHOULD BE? (original author's placement question)
# Obtain mean values per site: Eelgrass growth form PCz1 and PCz2.
# Uses plyr::ddply with summarize; plyr is attached after the tidyverse in this
# script's header, so summarize here resolves to plyr's version as intended.
add_means <- ddply(d.imputed, c("Site"), summarize,
                   PC1.zos.site = mean(PC1.zos, na.rm = T),
                   PC2.zos.site = mean(PC2.zos, na.rm = T)
                   )
# Add to site means data frame (merge on the shared Site column)
site_means <- merge(site_means, add_means)
# Copy the site-level PCz means into each ocean-specific data frame,
# matching rows by Site
site_means_Atlantic$PC1.zos.site <- site_means$PC1.zos.site[match(site_means_Atlantic$Site, site_means$Site)]
site_means_Atlantic$PC2.zos.site <- site_means$PC2.zos.site[match(site_means_Atlantic$Site, site_means$Site)]
site_means_Pacific$PC1.zos.site <- site_means$PC1.zos.site[match(site_means_Pacific$Site, site_means$Site)]
site_means_Pacific$PC2.zos.site <- site_means$PC2.zos.site[match(site_means_Pacific$Site, site_means$Site)]
site_means_49_Atlantic$PC1.zos.site <- site_means$PC1.zos.site[match(site_means_49_Atlantic$Site, site_means$Site)]
site_means_49_Atlantic$PC2.zos.site <- site_means$PC2.zos.site[match(site_means_49_Atlantic$Site, site_means$Site)]
###################################################################################
# CREATE SCALED VARIABLES #
###################################################################################
# Standardize a variable to [0, 1] by its observed range (min -> 0, max -> 1).
# NAs are always ignored when computing the range (na.rm = TRUE is hard-coded);
# '...' forwards extra arguments (e.g. finite = TRUE) to range().
# NOTE(review): returns NaN/Inf when all non-NA values are equal (zero range).
range01 <- function(x, ...) {
  rng <- range(x, na.rm = TRUE, ...)  # compute min and max in one pass
  (x - rng[1]) / (rng[2] - rng[1])
}
# Copy the global environmental PCA axes (PC1-PC3.env.global) and the genetic
# FCA axes (FC1, FC2) from the global site means into the ocean-level SITE
# data frames, matching rows by Site.
# (The original comment said "PLOT-level", but these are SITE-level frames.)
idx.atl49 <- match(site_means_49_Atlantic$Site, site_means$Site)
idx.pac <- match(site_means_Pacific$Site, site_means$Site)
for (v in c("PC1.env.global", "PC2.env.global", "PC3.env.global", "FC1", "FC2")) {
  site_means_49_Atlantic[[v]] <- site_means[[v]][idx.atl49]
  site_means_Pacific[[v]] <- site_means[[v]][idx.pac]
}
# Create z-scaled (scale) and RANGE-scaled (range01) versions of the SITE-level
# (GLOBAL) predictors in site_means. Output columns are named "z"/"r" + suffix;
# the mapping below gives the source column for each suffix.
site.scaled.src <- c(
  Latitude              = "Latitude",
  PC1.zos.site          = "PC1.zos.site",
  PC2.zos.site          = "PC2.zos.site",
  PC1.env.global        = "PC1.env.global",
  PC2.env.global        = "PC2.env.global",
  PC3.env.global        = "PC3.env.global",
  FC1                   = "FC1",
  FC2                   = "FC2",
  canopy                = "log10.Zostera.longest.leaf.length.cm.site",
  shoots                = "log10.Zostera.shoots.core.site",
  agbiomass             = "log10.Zostera.AG.mass.site",
  bgbiomass             = "log10.Zostera.BG.mass.site",
  periphyton            = "log10.periphyton.mass.per.area.site",
  periphyton.perg       = "log10.periphyton.mass.per.g.zostera.site",
  mesograzer.mass       = "log10.mesograzer.mass.per.area.site",
  mesograzer.mass.perg  = "log10.mesograzer.mass.per.g.plant.site",
  mesograzer.abund      = "log10.mesograzer.abund.per.area.site",
  mesograzer.abund.perg = "log10.mesograzer.abund.per.g.plant.site"
)
# All z-scores first, then all range scores, preserving the original column order
for (sfx in names(site.scaled.src)) {
  site_means[[paste0("z", sfx)]] <- scale(site_means[[site.scaled.src[[sfx]]]])
}
for (sfx in names(site.scaled.src)) {
  site_means[[paste0("r", sfx)]] <- range01(site_means[[site.scaled.src[[sfx]]]])
}
# Create z-scaled and RANGE-scaled variables: SITE level (ATLANTIC 49).
# These scale using only Atlantic values. SW.A is omitted from
# site_means_49_Atlantic, matching the plot-level data set.
# Output columns are named "z"/"r" + suffix; sources are columns of
# site_means_49_Atlantic:
atl49.scaled.src <- c(
  Latitude.atl             = "Latitude",
  PC1.zos.atl              = "PC1.zos.site",
  PC2.zos.atl              = "PC2.zos.site",
  PC1.env.global.atl       = "PC1.env.global",
  PC2.env.global.atl       = "PC2.env.global",
  PC3.env.global.atl       = "PC3.env.global",
  FC1.global.atl           = "FC1",
  FC2.global.atl           = "FC2",
  periphyton.area.atl      = "log10.periphyton.mass.per.area.site",
  periphyton.perg.atl      = "log10.periphyton.mass.per.g.zostera.site",
  mesograzer.mass.area.atl = "log10.mesograzer.mass.per.area.site",
  mesograzer.mass.perg.atl = "log10.mesograzer.mass.per.g.plant.site"
)
# The Atlantic-only environmental PCA axes live in site_means_Atlantic
atl.env.axes <- c("PC1.env.atl", "PC2.env.atl", "PC3.env.atl")
# z-scores first, then range scores, preserving the original column order
for (sfx in names(atl49.scaled.src)) {
  site_means_49_Atlantic[[paste0("z", sfx)]] <-
    scale(site_means_49_Atlantic[[atl49.scaled.src[[sfx]]]])
}
for (v in atl.env.axes) {
  site_means_Atlantic[[paste0("z", v)]] <- scale(site_means_Atlantic[[v]])
}
for (sfx in names(atl49.scaled.src)) {
  site_means_49_Atlantic[[paste0("r", sfx)]] <-
    range01(site_means_49_Atlantic[[atl49.scaled.src[[sfx]]]])
}
for (v in atl.env.axes) {
  site_means_Atlantic[[paste0("r", v)]] <- range01(site_means_Atlantic[[v]])
}
# Create z-scaled and RANGE-scaled variables: SITE level (PACIFIC).
# These scale using only Pacific values. Output columns are named
# "z"/"r" + suffix; sources are columns of site_means_Pacific:
pac.scaled.src <- c(
  Latitude.pac             = "Latitude",
  PC1.zos.pac              = "PC1.zos.site",
  PC2.zos.pac              = "PC2.zos.site",
  PC1.env.global.pac       = "PC1.env.global",
  PC2.env.global.pac       = "PC2.env.global",
  PC3.env.global.pac       = "PC3.env.global",
  FC1.global.pac           = "FC1",
  FC2.global.pac           = "FC2",
  PC1.env.pac              = "PC1.env.pac",
  PC2.env.pac              = "PC2.env.pac",
  PC3.env.pac              = "PC3.env.pac",
  periphyton.area.pac      = "log10.periphyton.mass.per.area.site",
  periphyton.perg.pac      = "log10.periphyton.mass.per.g.zostera.site",
  mesograzer.mass.area.pac = "log10.mesograzer.mass.per.area.site",
  mesograzer.mass.perg.pac = "log10.mesograzer.mass.per.g.plant.site"
)
# z-scores first, then range scores, preserving the original column order
for (sfx in names(pac.scaled.src)) {
  site_means_Pacific[[paste0("z", sfx)]] <-
    scale(site_means_Pacific[[pac.scaled.src[[sfx]]]])
}
for (sfx in names(pac.scaled.src)) {
  site_means_Pacific[[paste0("r", sfx)]] <-
    range01(site_means_Pacific[[pac.scaled.src[[sfx]]]])
}
###################################################################################
#                         SUBSET DATA SETS BY GEOGRAPHY                           #
###################################################################################
# Create reduced data sets
# Separate SITE-level data set excluding SW.A (no periphyton data at that site);
# droplevels() also removes SW.A from the Site factor levels.
site_means_49 <- droplevels(subset(site_means, Site != "SW.A"))
###################################################################################
#                          OUTPUT CURATED DATA SETS                               #
###################################################################################
# Export the curated SITE-level data sets for downstream modeling/figure scripts.
# Use FALSE rather than F (T/F are ordinary, reassignable variables in R).
write.csv(site_means, "data/output/Duffy_et_al_2022_site_means.csv", row.names = FALSE)
write.csv(site_means_Atlantic, "data/output/Duffy_et_al_2022_site_means_Atlantic.csv", row.names = FALSE)
write.csv(site_means_49_Atlantic, "data/output/Duffy_et_al_2022_site_means_49_Atlantic.csv", row.names = FALSE)
write.csv(site_means_Pacific, "data/output/Duffy_et_al_2022_site_means_Pacific.csv", row.names = FALSE)
| /code/data_assembly.R | no_license | mawhal/ZEN_geography | R | false | false | 48,061 | r | ###################################################################################
# ##
# ZEN 2014 Global eelgrass ecosystem structure: Data assembly ##
# RAW data are current as of 2017-04-24 ##
# Emmett Duffy (duffye@si.edu) ##
# updated 2022-06-28 by Matt Whalen ##
# ##
###################################################################################
###################################################################################
# TABLE OF CONTENTS #
# #
# METADATA #
# LOAD PACKAGES #
# READ IN AND PREPARE DATA #
# CREATE DERIVED VARIABLES #
# EXPLORE DISTRIBUTIONS OF VARIABLES (PLOT LEVEL) #
# LOG TRANSFORMS #
# OBTAIN SITE MEANS #
# PCA - ENVIRONMENTAL VARIABLES (GLOBAL) #
# PCA - ENVIRONMENTAL VARIABLES (ATLANTIC) #
# PCA - ENVIRONMENTAL VARIABLES (PACIFIC) #
# EXPLORE DATA COMPLETENESS #
# PCA - EELGRASS VARIABLES (GLOBAL) #
# CREATE SCALED VARIABLES #
# SUBSET DATA SETS BY GEOGRAPHY #
# OUTPUT CURATED DATA SETS #
# #
###################################################################################
###################################################################################
# METADATA #
###################################################################################
# This script assembles raw data from the ZEN 2014 global eelgrass ecosystem sampling
# project, and outputs data files for use in modeling and other applications. See also:
# ZEN_2014_model_comparison.R: for data exploration and model building
# ZEN_2014_figures.R series: for building figures for the MS
# Source data: For most of the history of this script I was using
# ZEN_2014_Site&PlotData_2016_05_17_Released.xlsx.
###################################################################################
# LOAD PACKAGES #
###################################################################################
# Load packages:
library(tidyverse) # for reformatting epibiota data
library(randomForest) # needed for data imputation
library(car) # needed or vif analysis
library(psych) # to visualize relationshiops in pairs panels
library(plyr) # to use ddply below in fixing richness values
###################################################################################
# READ AND PREPARE DATA #
###################################################################################
# MAIN ZEN 2014 DATA SET
# Read in summary data set for ZEN 2014:
d <- read.csv("data/input/Duffy_et_al_2022_main_data.csv", header = TRUE)
# General site data
sites <- read.csv("data/input/Duffy_et_al_2022_site_metadata.csv", header = TRUE)
# BIO-ORACLE CLIMATE AND ENVIRONMENTAL DATA
# Read in Bio-ORACLE and WorldClim environmental data for ZEN sites from Matt Whalen's script:
env <- read.csv("data/output/Duffy_et_al_2022_environmental.csv", header = TRUE)
# add in situ data
env.insitu <- read.csv("data/input/Duffy_et_al_2022_environmental_in_situ.csv") %>%
mutate(site=Site)
env <- left_join(env, env.insitu)
# EELGRASS GENETICS
d.gen_fca <- read.csv("data/input/Duffy_et_al_2022_FCA_scores.csv", header = TRUE)
# d.gen_fca_atlantic <- read.csv("data/input/ZEN_2014_fca_scores_atlantic_20210125_copy.csv", header = TRUE)
# d.gen_fca_pacific <- read.csv("data/input/ZEN_2014_fca_scores_pacific_20210125_copy.csv", header = TRUE)
#### CLEAN UP AND CONSOLIDATE
# Convert categorical variables to factors
d$Site.Code <- as.factor(d$Site.Code)
d$Ocean <- as.factor(d$Ocean)
# Rename Long Island sites
d$Site <- as.factor(d$Site)
levels(d$Site)[levels(d$Site)=="LI.1"] <- "LI.A"
levels(d$Site)[levels(d$Site)=="LI.2"] <- "LI.B"
# Rename misspelled or confusing variables
names(d)[names(d)=="Mean.Sheath.Width.cm."] <- "Zostera.sheath.width"
names(d)[names(d)=="Mean.Shealth.Length.cm."] <- "Zostera.sheath.length"
names(d)[names(d)=="Mean.Longest.Leaft.Length.cm."] <- "Zostera.longest.leaf.length"
names(d)[names(d)=="Mean.Above.Zmarina.g"] <- "Zostera.aboveground.mean.mass"
names(d)[names(d)=="Mean.Below.Zmarina.g"] <- "Zostera.belowground.mean.mass"
names(d)[names(d)=="Shoots.Zmarina.per.m2"] <- "Zostera.shoots.per.m2.core"
names(d)[names(d)=="Mean.Fetch"] <- "mean.fetch"
names(d)[names(d)=="PopDens2"] <- "pop.density.2015"
names(d)[names(d)=="mesograzer.total.site.richness"] <- "grazer.richness.site"
# MESOGRAZER SITE RICHNESS: FIX MISSING VALUES
# Create vector of plots with missing values to see what is missing:
missing.richness <- d[is.na(d$grazer.richness.site), c(3,7)] # columns 3 and 7 are Site, Unique.ID
# replace all site richness values with "mean" for that site. First, create vector of means:
temp <- d %>%
group_by( Site) %>%
summarize( grazer.richness.site = mean(grazer.richness.site, na.rm = T))
# But CR.A has NO mesograzers at all so returns NaN. Assume species pool is same as for CR.B (S = 3) and replace:
# temp$grazer.richness.site[is.na(temp$grazer.richness.site)] <- 3 # CR.A grazer richness now = 3
temp$grazer.richness.site[temp$Site == "CR.A" ] <- 3 # CR.A grazer richness now = 3
d$grazer.richness.site <- temp$grazer.richness.site[match(d$Site, temp$Site)]
# Add BioOracle environmental data to main ZEN dataframe:
d$sst.min <- env$sstmin[match(d$Site, env$Site)]
d$sst.mean <- env$sstmean[match(d$Site, env$Site)]
d$sst.max <- env$sstmax[match(d$Site, env$Site)]
d$sst.range <- env$sstrange[match(d$Site, env$Site)]
d$chlomean <- env$chlomean[match(d$Site, env$Site)]
d$nitrate <- env$nitrate[match(d$Site, env$Site)]
d$parmean <- env$parmean[match(d$Site, env$Site)]
d$cloudmean <- env$cloudmean[match(d$Site, env$Site)]
d$day.length <- env$Day.length.hours[match(d$Site, env$Site)]
d$ph <- env$ph[match(d$Site, env$Site)]
d$phosphate <- env$phosphate[match(d$Site, env$Site)]
d$salinity <- env$salinity[match(d$Site, env$Site)]
d$precipitation <- env$precip[match(d$Site, env$Site)]
# Reorder variables 'Coast': WP to EA
d$Coast <- as.factor(d$Coast)
d$Coast <- factor(d$Coast, levels = c("West Pacific", "East Pacific", "West Atlantic", "East Atlantic"))
###################################################################################
# CREATE DERIVED VARIABLES #
###################################################################################
# Percentage of crustaceans and gastropods among the mesograzers
d$crust.pct.mass <- d$Malacostraca.mesograzer.plot.biomass.std.mg.g / d$mesograzer.total.plot.biomass.std.mg.g
d$gast.pct.mass <- d$Gastropoda.mesograzer.plot.biomass.std.mg.g / d$mesograzer.total.plot.biomass.std.mg.g
# grazer and periphyton nunmbers per unit bottom area (i.e., core)
d$mesograzer.abund.per.area <- d$mesograzer.total.plot.abund.std.g * d$Zostera.aboveground.mean.mass
d$crustacean.mass.per.area <- d$Malacostraca.mesograzer.plot.biomass.std.mg.g * d$Zostera.aboveground.mean.mass
d$gastropod.mass.per.area <- d$Gastropoda.mesograzer.plot.biomass.std.mg.g * d$Zostera.aboveground.mean.mass
d$mesograzer.mass.per.area <- d$mesograzer.total.plot.biomass.std.mg.g * d$Zostera.aboveground.mean.mass
d$periphyton.mass.per.area <- d$periphyton.mass.per.g.zostera * d$Zostera.aboveground.mean.mass
# Leaf C:N ratio
d$leaf.CN.ratio <- d$Leaf.PercC / d$Leaf.PercN
###################################################################################
# EXPLORE DISTRIBUTIONS OF VARIABLES (PLOT LEVEL) #
###################################################################################
# Examine frequency distribution of sites by environmental factor
# par(mfrow = c(1,1))
# par(mfrow = c(2,4))
# hist(d$Latitude, col = "cyan", main = "Surveys by latitude")
# hist(d$Longitude, col = "cyan", main = "Surveys by longitude")
# hist(d$Temperature.C, col = "cyan", main = "Surveys by temperature")
# hist(d$Salinity.ppt, col = "cyan", main = "Surveys by salinity")
# hist(d$pop.density.2015, col = "cyan", main = "Surveys by population density")
# hist(d$day.length, col = "cyan", main = "Surveys by day length")
# hist(d$mean.fetch, col = "cyan", main = "Surveys by mean fetch")
#
# hist(d$Zostera.aboveground.mean.mass, col = "cyan", main = "Surveys by Zostera AG biomass")
# hist(d$periphyton.mass.per.g.zostera, col = "cyan", main = "Surveys by periphyton biomass")
# hist(d$Malacostraca.mesograzer.plot.abund.std.g, col = "cyan", main = "Surveys by crustacean biomass")
# hist(d$Gastropoda.mesograzer.plot.biomass.std.mg.g, col = "cyan", main = "Surveys by gastropod biomass")
# hist(d$grazer.richness.site, col = "cyan", main = "Surveys by mesograzer richness")
#
# hist(d$mesograzer.total.plot.biomass.std.mg.g, col = "cyan", main = "Surveys by mesograzer biomass")
# hist(d$epifauna.total.plot.biomass.std.mg.g, col = "cyan", main = "Surveys by mobile epifauna biomass")
#
###################################################################################
# LOG TRANSFORMS #
###################################################################################
# NOTE: For many variables I add a constant roughly equal to the smallest value recorded
d$log10.Zostera.AG.mass <- log10(d$Zostera.aboveground.mean.mass + 1)
d$log10.Zostera.BG.mass <- log10(d$Zostera.belowground.mean.mass + 1)
d$log10.Zostera.shoots.core <- log10(d$Zostera.shoots.per.m2.core)
d$log10.Zostera.sheath.width <- log10(d$Zostera.sheath.width)
d$log10.Zostera.sheath.length <- log10(d$Zostera.sheath.length)
d$log10.Zostera.longest.leaf.length <- log10(d$Zostera.longest.leaf.length)
d$log10.epibiota.filter <- log10(d$epibiota.filter)
d$log10.epibiota.zostera.marina <- log10(d$epibiota.zostera.marina)
d$log10.periphyton.mass.per.g.zostera <- log10(d$periphyton.mass.per.g.zostera + 0.001)
d$log10.periphyton.mass.per.area <- log10(d$periphyton.mass.per.area + 0.1)
d$log10.mesograzer.abund.per.g.plant <- log10(d$mesograzer.total.plot.abund.std.g + 0.01)
d$log10.crustacean.abund.per.g.plant <- log10(d$Malacostraca.mesograzer.plot.abund.std.g + 0.01)
d$log10.gastropod.abund.per.g.plant <- log10(d$Gastropoda.mesograzer.plot.abund.std.g + 0.01)
d$log10.mesograzer.mass.per.g.plant <- log10(d$mesograzer.total.plot.biomass.std.mg.g + 0.01)
d$log10.crustacean.mass.per.g.plant <- log10(d$Malacostraca.mesograzer.plot.biomass.std.mg.g + 0.01)
d$log10.gastropod.mass.per.g.plant <- log10(d$Gastropoda.mesograzer.plot.biomass.std.mg.g + 0.01)
d$log10.mesograzer.abund.per.area <- log10(d$mesograzer.abund.per.area + 1)
d$log10.crustacean.mass.per.area <- log10(d$crustacean.mass.per.area + 1)
d$log10.gastropod.mass.per.area <- log10(d$gastropod.mass.per.area + 1)
d$log10.mesograzer.mass.per.area <- log10(d$mesograzer.mass.per.area + 1)
d$log10.grazer.richness.site <- log10(d$grazer.richness.site + 1)
d$log10.day.length <- log10(d$day.length)
d$log10.Leaf.PercN <- log10(d$Leaf.PercN)
d$sqrt.nitrate <- sqrt(d$nitrate)
d$log10.phosphate <- log10(d$phosphate)
d$log10.chlomean <- log10(d$chlomean)
d$log10.mean.fetch <- log10(d$mean.fetch)
# hist(d$nitrate)
# hist(d$sqrt.nitrate)
#
# hist(d$log10.Zostera.AG.mass)
#
# Change values of NaN to NA:
d[d == "NaN"] = NA
###################################################################################
#                                OBTAIN SITE MEANS                                #
###################################################################################
# CAN THIS GO AFTER IMPUTATION SECTION? SHOULD IT?
# Obtain mean values per site:
# collapse the plot-level data to one row per Site, averaging each response
# variable within site. Groups that are all-NA yield NaN from mean(); NaN is
# recoded to NA immediately after this block.
site_means <- d %>%
  group_by(Site) %>%
  dplyr::summarize(
    Zostera.AG.mass.site = mean(Zostera.aboveground.mean.mass, na.rm = TRUE),
    Zostera.BG.mass.site = mean(Zostera.belowground.mean.mass, na.rm = TRUE),
    Zostera.shoots.core.site = mean(Zostera.shoots.per.m2.core, na.rm = TRUE),
    Zostera.sheath.width.site = mean(Zostera.sheath.width, na.rm = TRUE),
    Zostera.sheath.length.site = mean(Zostera.sheath.length, na.rm = TRUE),
    Zostera.longest.leaf.length.site = mean(Zostera.longest.leaf.length, na.rm = TRUE),
    epibiota.filter.site = mean(epibiota.filter, na.rm = TRUE),
    epibiota.zostera.marina.site = mean(epibiota.zostera.marina, na.rm = TRUE),
    periphyton.mass.per.g.zostera.site = mean(periphyton.mass.per.g.zostera, na.rm = TRUE),
    mesograzer.abund.per.g.plant.site = mean(mesograzer.total.plot.abund.std.g, na.rm = TRUE),
    crustacean.abund.per.g.plant.site = mean(Malacostraca.mesograzer.plot.abund.std.g, na.rm = TRUE),
    gastropod.abund.per.g.plant.site = mean(Gastropoda.mesograzer.plot.abund.std.g, na.rm = TRUE),
    mesograzer.mass.per.g.plant.site = mean(mesograzer.total.plot.biomass.std.mg.g, na.rm = TRUE),
    crustacean.mass.per.g.plant.site = mean(Malacostraca.mesograzer.plot.biomass.std.mg.g, na.rm = TRUE),
    gastropod.mass.per.g.plant.site = mean(Gastropoda.mesograzer.plot.biomass.std.mg.g, na.rm = TRUE),
    mesograzer.mass.per.area.site = mean(mesograzer.mass.per.area, na.rm = TRUE),
    crustacean.mass.per.area.site = mean(crustacean.mass.per.area, na.rm = TRUE),
    gastropod.mass.per.area.site = mean(gastropod.mass.per.area, na.rm = TRUE),
    periphyton.mass.per.area.site = mean(periphyton.mass.per.area, na.rm = TRUE),
    # NOTE(review): the input column here already carries the '.site' suffix,
    # so this averages an (apparently) site-constant variable -- confirm it was
    # not meant to read a plot-level 'log10.grazer.richness' column instead.
    log10.grazer.richness.site = mean(log10.grazer.richness.site, na.rm = TRUE),
    crust.pct.mass.site = mean(crust.pct.mass, na.rm = TRUE),
    gast.pct.mass.site = mean(gast.pct.mass, na.rm = TRUE),
    Leaf.PercN.site = mean(Leaf.PercN, na.rm = TRUE),
    leaf.CN.ratio.site = mean(leaf.CN.ratio, na.rm = TRUE),
    log10.Zostera.AG.mass.site = mean(log10.Zostera.AG.mass, na.rm = TRUE),
    log10.Zostera.BG.mass.site = mean(log10.Zostera.BG.mass, na.rm = TRUE),
    log10.Zostera.shoots.core.site = mean(log10.Zostera.shoots.core, na.rm = TRUE),
    log10.Zostera.sheath.width.site = mean(log10.Zostera.sheath.width, na.rm = TRUE),
    log10.Zostera.sheath.length.site = mean(log10.Zostera.sheath.length, na.rm = TRUE),
    log10.Zostera.longest.leaf.length.cm.site = mean(log10.Zostera.longest.leaf.length, na.rm = TRUE),
    log10.periphyton.mass.per.g.zostera.site = mean(log10.periphyton.mass.per.g.zostera, na.rm = TRUE),
    log10.mesograzer.abund.per.g.plant.site = mean(log10.mesograzer.abund.per.g.plant, na.rm = TRUE),
    log10.crustacean.abund.per.g.plant.site = mean(log10.crustacean.abund.per.g.plant, na.rm = TRUE),
    log10.gastropod.abund.per.g.plant.site = mean(log10.gastropod.abund.per.g.plant, na.rm = TRUE),
    log10.mesograzer.mass.per.g.plant.site = mean(log10.mesograzer.mass.per.g.plant, na.rm = TRUE),
    log10.crustacean.mass.per.g.plant.site = mean(log10.crustacean.mass.per.g.plant, na.rm = TRUE),
    log10.gastropod.mass.per.g.plant.site = mean(log10.gastropod.mass.per.g.plant, na.rm = TRUE),
    log10.mesograzer.abund.per.area.site = mean(log10.mesograzer.abund.per.area, na.rm = TRUE),
    log10.mesograzer.mass.per.area.site = mean(log10.mesograzer.mass.per.area, na.rm = TRUE),
    log10.crustacean.mass.per.area.site = mean(log10.crustacean.mass.per.area, na.rm = TRUE),
    log10.gastropod.mass.per.area.site = mean(log10.gastropod.mass.per.area, na.rm = TRUE),
    log10.periphyton.mass.per.area.site = mean(log10.periphyton.mass.per.area, na.rm = TRUE),
    log10.Leaf.PercN.site = mean(log10.Leaf.PercN, na.rm = TRUE)
  )
# Grazer richness was computed at the site level in 'd', so take the
# (site-constant) value from the first matching plot row rather than averaging.
site_means$grazer.richness.site <- d$grazer.richness.site[match(site_means$Site, d$Site)]
# Change values of NaN to NA:
# (all-NA groups in the summarize above produce NaN from mean())
site_means[site_means == "NaN"] = NA
# Add site-level environmental (and other) variables back in.
# Each is constant within a site, so match() pulls the value from the first
# plot row of that site. NOTE(review): '$' on data frames does partial name
# matching -- verify every column name below exists exactly as written in 'd'
# (e.g. both 'Salinity.ppt' and a separate 'salinity' column).
site_means$Ocean <- d$Ocean[match(site_means$Site, d$Site)]
site_means$Coast <- d$Coast[match(site_means$Site, d$Site)]
site_means$Latitude <- d$Latitude[match(site_means$Site, d$Site)]
site_means$Longitude <- d$Longitude[match(site_means$Site, d$Site)]
site_means$Temperature.C <- d$Temperature.C[match(site_means$Site, d$Site)]
site_means$Salinity.ppt <- d$Salinity.ppt[match(site_means$Site, d$Site)]
site_means$log10.mean.fetch <- d$log10.mean.fetch[match(site_means$Site, d$Site)]
site_means$day.length <- d$day.length[match(site_means$Site, d$Site)]
site_means$log10.day.length <- d$log10.day.length[match(site_means$Site, d$Site)]
site_means$sst.min <- d$sst.min[match(site_means$Site, d$Site)]
site_means$sst.mean <- d$sst.mean[match(site_means$Site, d$Site)]
site_means$sst.max <- d$sst.max[match(site_means$Site, d$Site)]
site_means$sst.range <- d$sst.range[match(site_means$Site, d$Site)]
site_means$salinity <- d$salinity[match(site_means$Site, d$Site)]
site_means$parmean <- d$parmean[match(site_means$Site, d$Site)]
site_means$cloudmean <- d$cloudmean[match(site_means$Site, d$Site)]
site_means$precipitation <- d$precipitation[match(site_means$Site, d$Site)]
site_means$nitrate <- d$nitrate[match(site_means$Site, d$Site)]
site_means$sqrt.nitrate <- d$sqrt.nitrate[match(site_means$Site, d$Site)]
site_means$ph <- d$ph[match(site_means$Site, d$Site)]
site_means$phosphate <- d$phosphate[match(site_means$Site, d$Site)]
site_means$log10.phosphate <- d$log10.phosphate[match(site_means$Site, d$Site)]
site_means$NP.ratio <- d$NP.ratio[match(site_means$Site, d$Site)]
site_means$chlomean <- d$chlomean[match(site_means$Site, d$Site)]
site_means$pop.density.2015 <- d$pop.density.2015[match(site_means$Site, d$Site)]
# Add genetic data to site means data frame
# (genetic FCA axes live in a separate table 'd.gen_fca' keyed by Site;
# sites absent from that table get NA here).
site_means$FC1 <- d.gen_fca$FC1[match(site_means$Site, d.gen_fca$Site)]
site_means$FC2 <- d.gen_fca$FC2[match(site_means$Site, d.gen_fca$Site)]
# For boxplots, reorder variable 'Coast': WP to EA
site_means$Coast <- factor(site_means$Coast, levels = c("West Pacific", "East Pacific", "West Atlantic", "East Atlantic"))
# Create separate data sets by Ocean - SITE level
site_means_Atlantic <- droplevels(subset(site_means, Ocean == "Atlantic"))
site_means_Pacific <- droplevels(subset(site_means, Ocean == "Pacific"))
# Atlantic subset without site SW.A (excluded elsewhere for missing periphyton data)
site_means_49_Atlantic <- droplevels(subset(site_means_Atlantic, Site != "SW.A"))
###################################################################################
#                      PCA - ENVIRONMENTAL VARIABLES (GLOBAL)                     #
###################################################################################
# # Explore correlations among environmental drivers
# pairs.panels(site_means[,c("Latitude", "sst.mean", "sst.range", "sst.min", "sst.max", "Salinity.ppt",
#                            "parmean", "log10.day.length", "cloudmean", "precipitation", "sqrt.nitrate", "log10.phosphate", "log10.chlomean",
#                            "Leaf.PercN.site", "log10.mean.fetch")],
#              smooth=T,density=F,ellipses=F,lm=F,digits=2,scale=F, cex.cor = 8)
# Create data frame containing the ZEN 2014 environmental variables for PCA
# Note: Some exploration shows that nitrate is closely correlated with several other
# variables, and taking it out results in first 3 PC axes explaining ~75% of variation. This
# is parsimonious and simplifies the analysis.
ZEN.env <- site_means[c("sst.mean", "sst.range", "Salinity.ppt", "parmean",
                        "cloudmean", "log10.phosphate", "log10.chlomean", "Leaf.PercN.site"
                        # , "precipitation", "log10.day.length",
)]
# Keep Site IDs alongside the (unlabeled) PCA score matrix for re-joining.
ZEN.sites <- site_means[c("Site")]
# Compute PCAs (centered and scaled, so all variables contribute equally)
ZEN.env.pca <- prcomp(ZEN.env, center = TRUE, scale. = TRUE)
# print(ZEN.env.pca)
#                       PC1         PC2         PC3        PC4         PC5         PC6         PC7        PC8
# sst.mean         0.5344090 -0.04221968  0.12650153 -0.2221002  0.11595693 -0.56707288  0.49861640 0.25230424
# sst.range       -0.1607624 -0.40262794  0.45918615 -0.4862507  0.41315371  0.25966358 -0.15719348 0.31925476
# Salinity.ppt     0.3702257  0.16135868 -0.48106388 -0.4651378  0.05646463 -0.08442206 -0.61172656 0.06779392
# parmean          0.4076216  0.22572201  0.39507514  0.3928616 -0.25219684  0.21903419 -0.29892746 0.52108800
# cloudmean       -0.4937825 -0.21507910 -0.27382435  0.1300389 -0.18748290 -0.44075941 -0.12798127 0.61010822
# log10.phosphate -0.2101797  0.54450089 -0.13760560 -0.4243534 -0.22277173  0.36170941  0.41340358 0.33010411
# log10.chlomean  -0.2566312  0.34762747  0.53996106 -0.2846051 -0.31346195 -0.45082306 -0.26740350 -0.26025590
# Leaf.PercN.site -0.1774368  0.54363232  0.01286878  0.2560322  0.75235033 -0.16600039 -0.06571552 0.09672818
# Interpretation:
# PCe1: latitude/climate: high = warmer, brighter, less cloudy (lower latitude)
# PCe2: nutrient status: high = high PO4, leaf N
# PCe3: estuarine: low salinity, variable temp, high chl
# # plot cumulative proportion of variance explained by PC axes
# plot(ZEN.env.pca, type = "l")
# # Calculate proportion of variance explained by each PC
# summary(ZEN.env.pca)
#                           PC1    PC2    PC3    PC4     PC5     PC6     PC7    PC8
# Standard deviation     1.6849 1.4240 1.1552 0.9516 0.65646 0.48125 0.36494 0.3124
# Proportion of Variance 0.3549 0.2535 0.1668 0.1132 0.05387 0.02895 0.01665 0.0122
# Cumulative Proportion  0.3549 0.6083 0.7751 0.8883 0.94220 0.97115 0.98780 1.0000
# Combine PCA scores with SITE-level data frame
site.env.pca.scores <- ZEN.env.pca$x
site.env.pca.scores <- cbind(ZEN.sites, site.env.pca.scores)
# NOTE(review): this cbind() appends a second 'Site' column (from ZEN.sites)
# to site_means; later match() calls use the first one, but consider dropping
# the duplicate before export.
site_means <- cbind(site_means, site.env.pca.scores)
# Rename PCA variables 1-3 (retained per the ~75%-variance criterion) and cull PC4-8
names(site_means)[names(site_means)=="PC1"] <- "PC1.env.global"
names(site_means)[names(site_means)=="PC2"] <- "PC2.env.global"
names(site_means)[names(site_means)=="PC3"] <- "PC3.env.global"
site_means <- subset(site_means, select = -c(PC4,PC5,PC6, PC7, PC8))
###################################################################################
#                     PCA - ENVIRONMENTAL VARIABLES (ATLANTIC)                    #
###################################################################################
# Same variable set and procedure as the global PCA above, restricted to
# Atlantic sites so the axes reflect Atlantic-only environmental gradients.
# # Explore correlations among environmental drivers
# pairs.panels(site_means_Atlantic[,c("sst.mean", "sst.range", "Salinity.ppt", "parmean",
#                                     "cloudmean", "log10.phosphate", "log10.chlomean", "Leaf.PercN.site"
#                                     # , "precipitation", "log10.day.length"
# )],
#              smooth=T,density=F,ellipses=F,lm=F,digits=2,scale=F, cex.cor = 8)
# Create data frame containing the ZEN 2014 environmental variables for PCA
# Note: Some exploration shows that nitrate is closely correlated with several other
# variables, and taking it out results in first 3 PC axes explaining ~75% of variation. This
# is parsimonious and simplifies the analysis.
ZEN.env.atl <- site_means_Atlantic[c("sst.mean", "sst.range", "Salinity.ppt", "parmean",
                                     "cloudmean", "log10.phosphate", "log10.chlomean", "Leaf.PercN.site"
                                     # , "precipitation", "log10.day.length"
)]
ZEN.sites.atl <- site_means_Atlantic[c("Site")]
# Compute PCAs
ZEN.env.pca.atl <- prcomp(ZEN.env.atl, center = TRUE, scale. = TRUE)
# print(ZEN.env.pca.atl)
#                          PC1         PC2         PC3        PC4         PC5         PC6         PC7         PC8
# sst.mean        -0.550319750  0.07256028 -0.14266055 0.01964309 -0.26247919  0.50693440  0.34783455  0.47358063
# sst.range        0.008028728  0.53243059  0.13905815 0.56739502 -0.43108238 -0.27143385 -0.27518892  0.19985403
# Salinity.ppt    -0.312338254 -0.33887929 -0.52503373 0.18367192 -0.01847826  0.09370635 -0.67915383 -0.08853019
# parmean         -0.307553079  0.44084782  0.04824442 -0.51027750 0.40436656 -0.23475705 -0.34111047  0.33671071
# cloudmean        0.486920633 -0.28474069  0.27891671 0.01450237  0.05975327  0.32618638 -0.33098360  0.61992583
# log10.phosphate  0.294237976  0.02478199 -0.66842063 0.18661880  0.25670169 -0.33170669  0.31530953  0.39478092
# log10.chlomean   0.265024764  0.54345377 -0.20872625 0.13627880  0.32327853  0.62268122 -0.08243877 -0.27063675
# Leaf.PercN.site  0.333217372  0.15821912 -0.33789872 -0.57441251 -0.63831315 0.03592546 -0.09954655 -0.03411444
# Interpretation:
# PCe1: latitude/climate: high = cooler, cloudier
# PCe2: estuarine/eutrophic: high = high phytoplankton, variable temperature, bright, lowish salinity
# PCe3: arid watershed? oligotrophic Baltic?: high = low salinity, low PO4
# # plot cumulative proportion of variance explained by PC axes
# plot(ZEN.env.pca.atl, type = "l")
# # Calculate proportion of variance explained by each PC
# summary(ZEN.env.pca.atl)
#                           PC1    PC2    PC3    PC4     PC5     PC6     PC7     PC8
# Standard deviation     1.6778 1.4182 1.2097 0.9687 0.62444 0.46015 0.35168 0.21673
# Proportion of Variance 0.3519 0.2514 0.1829 0.1173 0.04874 0.02647 0.01546 0.00587
# Cumulative Proportion  0.3519 0.6033 0.7862 0.9035 0.95220 0.97867 0.99413 1.00000
# Output PCA scores for each site and combine with site means data frame
# (as in the global section, this appends a duplicate 'Site' column)
site.env.pca.scores.atl <- ZEN.env.pca.atl$x
site.env.pca.scores.atl <- cbind(ZEN.sites.atl, site.env.pca.scores.atl)
site_means_Atlantic <- cbind(site_means_Atlantic, site.env.pca.scores.atl)
# Rename PCA variables 1-3 and cull PC4-8
names(site_means_Atlantic)[names(site_means_Atlantic)=="PC1"] <- "PC1.env.atl"
names(site_means_Atlantic)[names(site_means_Atlantic)=="PC2"] <- "PC2.env.atl"
names(site_means_Atlantic)[names(site_means_Atlantic)=="PC3"] <- "PC3.env.atl"
site_means_Atlantic <- subset(site_means_Atlantic, select = -c(PC4,PC5,PC6, PC7, PC8))
###################################################################################
#                     PCA - ENVIRONMENTAL VARIABLES (PACIFIC)                     #
###################################################################################
# Same variable set and procedure as the global PCA above, restricted to
# Pacific sites so the axes reflect Pacific-only environmental gradients.
# # Explore correlations among environmental drivers
# pairs.panels(site_means_Pacific[,c("Latitude", "sst.mean", "sst.range", "sst.min", "sst.max", "Salinity.ppt",
#                                    "parmean", "log10.day.length", "cloudmean", "precipitation", "sqrt.nitrate", "log10.phosphate", "log10.chlomean",
#                                    "Leaf.PercN.site", "log10.mean.fetch")],
#              smooth=T,density=F,ellipses=F,lm=F,digits=2,scale=F, cex.cor = 8)
# Create data frame containing the ZEN 2014 environmental variables for PCA
# Note: Some exploration shows that nitrate is closely correlated with several other
# variables, and taking it out results in first 3 PC axes explaining ~75% of variation. This
# is parsimonious and simplifies the analysis.
ZEN.env.pac <- site_means_Pacific[c("sst.mean", "sst.range", "Salinity.ppt", "parmean",
                                    "cloudmean", "log10.phosphate", "log10.chlomean", "Leaf.PercN.site"
                                    # , "precipitation", "log10.day.length"
)]
ZEN.sites.pac <- site_means_Pacific[c("Site")]
# Compute PCAs
ZEN.env.pca.pac <- prcomp(ZEN.env.pac, center = TRUE, scale. = TRUE)
# print(ZEN.env.pca.pac)
#                        PC1         PC2         PC3         PC4         PC5         PC6         PC7         PC8
# sst.mean         0.4416493 -0.14998580  0.38471592 -0.09795308  0.11434105 -0.46072831  0.20973408  0.59625174
# sst.range       -0.1192591 -0.58280840  0.13287760 -0.61360069 -0.05905439  0.33920457  0.35256555 -0.09539264
# Salinity.ppt     0.4002213  0.04551641 -0.50374668  0.10047825  0.56645013  0.33137386  0.37126335  0.07337350
# parmean          0.4058142  0.32386570  0.34788599  0.04747739 -0.21892434 -0.03638956  0.46415902 -0.58519351
# cloudmean       -0.3739858 -0.36483629 -0.19281131  0.31505264  0.17158648 -0.57553674  0.40395943 -0.25831575
# log10.phosphate -0.4215990  0.32143191  0.04272324  0.20357878 -0.29958265  0.25871215  0.55282804  0.46191518
# log10.chlomean  -0.3422080  0.18681817  0.58063017  0.01610370  0.69496221  0.13777490 -0.03697576 -0.08532484
# Leaf.PercN.site -0.1764750  0.50946333 -0.28882340 -0.67866604  0.11171810 -0.37997422  0.09088341 -0.01326676
# Interpretation:
# PCe1: latitude/climate: high = warmer, brighter, higher salinity, lower PO4
# PCe2: nutrient status: high = high nutrients (especially leaf N), more stable temperature
# PCe3: estuarine/eutrophic: high = low salinity, high chl
# # plot cumulative proportion of variance explained by PC axes
# plot(ZEN.env.pca.pac, type = "l")
#
# # Calculate proportion of variance explained by each PC
# summary(ZEN.env.pca.pac)
# Standard deviation     1.9641 1.4390 0.9141 0.71060 0.62592 0.49046 0.24605 0.19570
# Proportion of Variance 0.4822 0.2588 0.1045 0.06312 0.04897 0.03007 0.00757 0.00479
# Cumulative Proportion  0.4822 0.7410 0.8455 0.90860 0.95758 0.98765 0.99521 1.00000
# Output PCA scores for each site and combine with site means data frame
# (as in the global section, this appends a duplicate 'Site' column)
site.env.pca.scores.pac <- ZEN.env.pca.pac$x
site.env.pca.scores.pac <- cbind(ZEN.sites.pac, site.env.pca.scores.pac)
site_means_Pacific <- cbind(site_means_Pacific, site.env.pca.scores.pac)
# Rename PCA variables 1-3 and cull PC4-8
names(site_means_Pacific)[names(site_means_Pacific)=="PC1"] <- "PC1.env.pac"
names(site_means_Pacific)[names(site_means_Pacific)=="PC2"] <- "PC2.env.pac"
names(site_means_Pacific)[names(site_means_Pacific)=="PC3"] <- "PC3.env.pac"
site_means_Pacific <- subset(site_means_Pacific, select = -c(PC4,PC5,PC6, PC7, PC8))
###################################################################################
# EXPLORE DATA COMPLETENESS #
###################################################################################
# NOTE: AIC comparisons among models are invalid unless exactly the same number of plots
# are used in each comparison, because the DF influences calculation of the AIC score.
# This means that we need data on all plots and need to impute missing data for
# valid AIC model comparisons.
# # How many observations are missing for each variable?
# sum(is.na(d$log10.Zostera.AG.mass)) # 24
# sum(is.na(d$log10.Zostera.shoots.core)) # 15
# sum(is.na(d$Zostera.longest.leaf.length)) # 0
# sum(is.na(d$Leaf.PercN)) # 14
# sum(is.na(d$Temperature.C)) # 0
# sum(is.na(d$Salinity.ppt)) # 0
# sum(is.na(d$pop.density.2015)) # 20 huh?
# sum(is.na(d$GenotypicRichness)) # 0
# sum(is.na(d$AllelicRichness)) # 0
# sum(is.na(d$grazer.richness.site)) # 0
# sum(is.na(d$log10.periphyton.mass.per.g.zostera)) # 4
# sum(is.na(d$log10.mesograzer.abund.per.g.plant)) # 9
# sum(is.na(d$log10.crustacean.abund.per.g.plant)) # 9
# sum(is.na(d$log10.gastropod.abund.per.g.plant)) # 9
# Look at percentage of values missing for each variable
# First create function to calculate % of missing values in each variable in a data frame…
pMiss <- function(x){sum(is.na(x))/length(x)*100}
# # Now apply it to the data frame:
# apply(d,2,pMiss)
#
###################################################################################
#                        PCA - EELGRASS VARIABLES (GLOBAL)                        #
###################################################################################
# NOTE: The PCA for eelgrass morphology uses imputed data (see impute_missing/R)
d.imputed <- read.csv( "data/output/Duffy_et_al_2022_imputed.csv" )
# NOTE: This includes all available ZEN eelgrass morphological variables. We use the
# first two axes, which together explain 83% of the variation in input variables, under
# the (arbitrary) criterion of using those PC axes necessary to capture 75% of the variation.
## PCA - EELGRASS VARIABLES (PLOT LEVEL)
# Create data frame containing the ZEN 2014 eelgrass morphological variables
# (imputed biomass/shoot columns plus the fully observed morphology columns)
zos.morph.plot.2 <- d.imputed[c("log10.Zostera.AG.mass.imputed", "log10.Zostera.BG.mass.imputed",
                                "log10.Zostera.shoots.core.imputed", "log10.Zostera.sheath.length", "log10.Zostera.sheath.width", "log10.Zostera.longest.leaf.length")]
# Compute PCAs
zos.morph.plot.2.pca <- prcomp(zos.morph.plot.2, center = TRUE, scale. = TRUE)
# Unlike the sections above, these diagnostics are live (printed/plotted on
# every run of the script), not commented out.
print(zos.morph.plot.2.pca)
#                                           PC1         PC2         PC3        PC4         PC5         PC6
# log10.Zostera.AG.mass.imputed     -0.29772190 -0.58976969  0.16131419 -0.7076165  0.12385514 -0.14645813
# log10.Zostera.BG.mass.imputed      0.08114321 -0.67078182 -0.63774621  0.3664483 -0.03986877  0.02955342
# log10.Zostera.shoots.core.imputed  0.34930322 -0.42578505  0.70199747  0.3770211  0.20963800  0.13341998
# log10.Zostera.sheath.length       -0.51441226 -0.05711932  0.21262143  0.4040899 -0.27044926 -0.67117666
# log10.Zostera.sheath.width        -0.50068037  0.09723378 -0.08264182  0.2209389  0.81254579  0.15488847
# log10.Zostera.longest.leaf.length -0.51716912 -0.09062856  0.14973149  0.1036680 -0.45359545  0.69671169
# Interpretation:
# PCz1: growth form: high = short canopy, denser shoots
# PCz2: biomass: high values = low AG and especially BG biomass
# plot cumulative proportion of variance explained by PC axes
plot(zos.morph.plot.2.pca, type = "l")
# Calculate proportion of variance explained by each PC
summary(zos.morph.plot.2.pca)
#                           PC1    PC2     PC3     PC4     PC5     PC6
# Standard deviation     1.8230 1.2796 0.71769 0.48452 0.45114 0.29318
# Proportion of Variance 0.5539 0.2729 0.08585 0.03913 0.03392 0.01433
# Cumulative Proportion  0.5539 0.8268 0.91263 0.95175 0.98567 1.00000
# RESULT: First two PC axes explain 83% of variation in eelgrass morphology with ALL input variables.
# Output PCA scores and combine with plot data frame
zos.morph.plot.2.pca.scores <- zos.morph.plot.2.pca$x
d.imputed <- cbind(d.imputed, zos.morph.plot.2.pca.scores)
# Rename PCA variables 1-2 and cull PC3-6
names(d.imputed)[names(d.imputed)=="PC1"] <- "PC1.zos"
names(d.imputed)[names(d.imputed)=="PC2"] <- "PC2.zos"
d.imputed <- subset(d.imputed, select = -c(PC3,PC4,PC5,PC6))
# NOTE: IS THIS WHERE THIS SHOULD BE?
# Obtain mean values per site: Eelgrass growth form PCz1 and PCz2
# (plyr::ddply rather than dplyr, matching the package the original used here)
add_means <- ddply(d.imputed, c("Site"), summarize,
                   PC1.zos.site = mean(PC1.zos, na.rm = T),
                   PC2.zos.site = mean(PC2.zos, na.rm = T)
)
# Add to site means data frame (merge joins on the shared 'Site' column)
site_means <- merge(site_means, add_means)
# Add to ocean data frames
site_means_Atlantic$PC1.zos.site <- site_means$PC1.zos.site[match(site_means_Atlantic$Site, site_means$Site)]
site_means_Atlantic$PC2.zos.site <- site_means$PC2.zos.site[match(site_means_Atlantic$Site, site_means$Site)]
site_means_Pacific$PC1.zos.site <- site_means$PC1.zos.site[match(site_means_Pacific$Site, site_means$Site)]
site_means_Pacific$PC2.zos.site <- site_means$PC2.zos.site[match(site_means_Pacific$Site, site_means$Site)]
site_means_49_Atlantic$PC1.zos.site <- site_means$PC1.zos.site[match(site_means_49_Atlantic$Site, site_means$Site)]
site_means_49_Atlantic$PC2.zos.site <- site_means$PC2.zos.site[match(site_means_49_Atlantic$Site, site_means$Site)]
###################################################################################
# CREATE SCALED VARIABLES #
###################################################################################
# Create function to standardize and center a variable by its range of observed values.
# The '...' allows it to work with NAs.
range01 <- function(x, ...){(x - min(x, na.rm = T, ...)) / (max(x, na.rm = T, ...) - min(x, na.rm = T, ...))}
# Copy global PCA scores and genetic FCA axes into the reduced SITE-level
# data frames (these were computed on site_means after the subsets were made).
site_means_49_Atlantic$PC1.env.global <- site_means$PC1.env.global[match(site_means_49_Atlantic$Site, site_means$Site)]
site_means_49_Atlantic$PC2.env.global <- site_means$PC2.env.global[match(site_means_49_Atlantic$Site, site_means$Site)]
site_means_49_Atlantic$PC3.env.global <- site_means$PC3.env.global[match(site_means_49_Atlantic$Site, site_means$Site)]
site_means_49_Atlantic$FC1 <- site_means$FC1[match(site_means_49_Atlantic$Site, site_means$Site)]
site_means_49_Atlantic$FC2 <- site_means$FC2[match(site_means_49_Atlantic$Site, site_means$Site)]
site_means_Pacific$PC1.env.global <- site_means$PC1.env.global[match(site_means_Pacific$Site, site_means$Site)]
site_means_Pacific$PC2.env.global <- site_means$PC2.env.global[match(site_means_Pacific$Site, site_means$Site)]
site_means_Pacific$PC3.env.global <- site_means$PC3.env.global[match(site_means_Pacific$Site, site_means$Site)]
site_means_Pacific$FC1 <- site_means$FC1[match(site_means_Pacific$Site, site_means$Site)]
site_means_Pacific$FC2 <- site_means$FC2[match(site_means_Pacific$Site, site_means$Site)]
# Create z-scaled variables: SITE level (GLOBAL)
# NOTE(review): scale() returns a one-column matrix (with center/scale
# attributes), so each of these columns is a matrix inside the data frame;
# write.csv handles that, but some downstream functions may not.
site_means$zLatitude <- scale(site_means$Latitude)
site_means$zPC1.zos.site <- scale(site_means$PC1.zos.site)
site_means$zPC2.zos.site <- scale(site_means$PC2.zos.site)
site_means$zPC1.env.global <- scale(site_means$PC1.env.global)
site_means$zPC2.env.global <- scale(site_means$PC2.env.global)
site_means$zPC3.env.global <- scale(site_means$PC3.env.global)
site_means$zFC1 <- scale(site_means$FC1)
site_means$zFC2 <- scale(site_means$FC2)
site_means$zcanopy <- scale(site_means$log10.Zostera.longest.leaf.length.cm.site)
site_means$zshoots <- scale(site_means$log10.Zostera.shoots.core.site)
site_means$zagbiomass <- scale(site_means$log10.Zostera.AG.mass.site)
site_means$zbgbiomass <- scale(site_means$log10.Zostera.BG.mass.site)
site_means$zperiphyton <- scale(site_means$log10.periphyton.mass.per.area.site)
site_means$zperiphyton.perg <- scale(site_means$log10.periphyton.mass.per.g.zostera.site)
site_means$zmesograzer.mass <- scale(site_means$log10.mesograzer.mass.per.area.site)
site_means$zmesograzer.mass.perg <- scale(site_means$log10.mesograzer.mass.per.g.plant.site)
site_means$zmesograzer.abund <- scale(site_means$log10.mesograzer.abund.per.area.site)
site_means$zmesograzer.abund.perg <- scale(site_means$log10.mesograzer.abund.per.g.plant.site)
# Create RANGE-scaled variables: SITE level (GLOBAL)
# (same source columns as the z-scores above, rescaled to [0, 1] instead)
site_means$rLatitude <- range01(site_means$Latitude)
site_means$rPC1.zos.site <- range01(site_means$PC1.zos.site)
site_means$rPC2.zos.site <- range01(site_means$PC2.zos.site)
site_means$rPC1.env.global <- range01(site_means$PC1.env.global)
site_means$rPC2.env.global <- range01(site_means$PC2.env.global)
site_means$rPC3.env.global <- range01(site_means$PC3.env.global)
site_means$rFC1 <- range01(site_means$FC1)
site_means$rFC2 <- range01(site_means$FC2)
site_means$rcanopy <- range01(site_means$log10.Zostera.longest.leaf.length.cm.site)
site_means$rshoots <- range01(site_means$log10.Zostera.shoots.core.site)
site_means$ragbiomass <- range01(site_means$log10.Zostera.AG.mass.site)
site_means$rbgbiomass <- range01(site_means$log10.Zostera.BG.mass.site)
site_means$rperiphyton <- range01(site_means$log10.periphyton.mass.per.area.site)
site_means$rperiphyton.perg <- range01(site_means$log10.periphyton.mass.per.g.zostera.site)
site_means$rmesograzer.mass <- range01(site_means$log10.mesograzer.mass.per.area.site)
site_means$rmesograzer.mass.perg <- range01(site_means$log10.mesograzer.mass.per.g.plant.site)
site_means$rmesograzer.abund <- range01(site_means$log10.mesograzer.abund.per.area.site)
site_means$rmesograzer.abund.perg <- range01(site_means$log10.mesograzer.abund.per.g.plant.site)
# Create z-scaled variables: SITE level (ATLANTIC 49)
# This data set scales the variables using only Atlantic values. Omit SW.A as the plot-level data set does.
site_means_49_Atlantic$zLatitude.atl <- scale(site_means_49_Atlantic$Latitude, scale = TRUE, center = TRUE)
site_means_49_Atlantic$zPC1.zos.atl <- scale(site_means_49_Atlantic$PC1.zos.site)
site_means_49_Atlantic$zPC2.zos.atl <- scale(site_means_49_Atlantic$PC2.zos.site)
site_means_49_Atlantic$zPC1.env.global.atl <- scale(site_means_49_Atlantic$PC1.env.global)
site_means_49_Atlantic$zPC2.env.global.atl <- scale(site_means_49_Atlantic$PC2.env.global)
site_means_49_Atlantic$zPC3.env.global.atl <- scale(site_means_49_Atlantic$PC3.env.global)
site_means_49_Atlantic$zFC1.global.atl <- scale(site_means_49_Atlantic$FC1)
site_means_49_Atlantic$zFC2.global.atl <- scale(site_means_49_Atlantic$FC2)
# Atlantic-only PCA axes are scaled on the full Atlantic frame (includes SW.A)
site_means_Atlantic$zPC1.env.atl <- scale(site_means_Atlantic$PC1.env.atl)
site_means_Atlantic$zPC2.env.atl <- scale(site_means_Atlantic$PC2.env.atl)
site_means_Atlantic$zPC3.env.atl <- scale(site_means_Atlantic$PC3.env.atl)
site_means_49_Atlantic$zperiphyton.area.atl <- scale(site_means_49_Atlantic$log10.periphyton.mass.per.area.site)
site_means_49_Atlantic$zperiphyton.perg.atl <- scale(site_means_49_Atlantic$log10.periphyton.mass.per.g.zostera.site)
site_means_49_Atlantic$zmesograzer.mass.area.atl <- scale(site_means_49_Atlantic$log10.mesograzer.mass.per.area.site)
site_means_49_Atlantic$zmesograzer.mass.perg.atl <- scale(site_means_49_Atlantic$log10.mesograzer.mass.per.g.plant.site)
################################################################################
# Create RANGE-scaled variables: SITE level (ATLANTIC 49)
# This data set scales the variables using only Atlantic values. Omit SW.A as the plot-level data set does.
# (parallel to the z-scaled Atlantic block above, but using range01)
site_means_49_Atlantic$rLatitude.atl <- range01(site_means_49_Atlantic$Latitude)
site_means_49_Atlantic$rPC1.zos.atl <- range01(site_means_49_Atlantic$PC1.zos.site)
site_means_49_Atlantic$rPC2.zos.atl <- range01(site_means_49_Atlantic$PC2.zos.site)
site_means_49_Atlantic$rPC1.env.global.atl <- range01(site_means_49_Atlantic$PC1.env.global)
site_means_49_Atlantic$rPC2.env.global.atl <- range01(site_means_49_Atlantic$PC2.env.global)
site_means_49_Atlantic$rPC3.env.global.atl <- range01(site_means_49_Atlantic$PC3.env.global)
site_means_49_Atlantic$rFC1.global.atl <- range01(site_means_49_Atlantic$FC1)
site_means_49_Atlantic$rFC2.global.atl <- range01(site_means_49_Atlantic$FC2)
# Atlantic-only PCA axes are scaled on the full Atlantic frame (includes SW.A)
site_means_Atlantic$rPC1.env.atl <- range01(site_means_Atlantic$PC1.env.atl)
site_means_Atlantic$rPC2.env.atl <- range01(site_means_Atlantic$PC2.env.atl)
site_means_Atlantic$rPC3.env.atl <- range01(site_means_Atlantic$PC3.env.atl)
site_means_49_Atlantic$rperiphyton.area.atl <- range01(site_means_49_Atlantic$log10.periphyton.mass.per.area.site)
site_means_49_Atlantic$rperiphyton.perg.atl <- range01(site_means_49_Atlantic$log10.periphyton.mass.per.g.zostera.site)
site_means_49_Atlantic$rmesograzer.mass.area.atl <- range01(site_means_49_Atlantic$log10.mesograzer.mass.per.area.site)
site_means_49_Atlantic$rmesograzer.mass.perg.atl <- range01(site_means_49_Atlantic$log10.mesograzer.mass.per.g.plant.site)
# Create z-scaled variables: SITE level (PACIFIC)
# This data set scales the variables using only Pacific values.
site_means_Pacific$zLatitude.pac <- scale(site_means_Pacific$Latitude, scale = TRUE, center = TRUE)
site_means_Pacific$zPC1.zos.pac <- scale(site_means_Pacific$PC1.zos.site)
site_means_Pacific$zPC2.zos.pac <- scale(site_means_Pacific$PC2.zos.site)
site_means_Pacific$zPC1.env.global.pac <- scale(site_means_Pacific$PC1.env.global)
site_means_Pacific$zPC2.env.global.pac <- scale(site_means_Pacific$PC2.env.global)
site_means_Pacific$zPC3.env.global.pac <- scale(site_means_Pacific$PC3.env.global)
site_means_Pacific$zFC1.global.pac <- scale(site_means_Pacific$FC1)
site_means_Pacific$zFC2.global.pac <- scale(site_means_Pacific$FC2)
site_means_Pacific$zPC1.env.pac <- scale(site_means_Pacific$PC1.env.pac)
site_means_Pacific$zPC2.env.pac <- scale(site_means_Pacific$PC2.env.pac)
site_means_Pacific$zPC3.env.pac <- scale(site_means_Pacific$PC3.env.pac)
site_means_Pacific$zperiphyton.area.pac <- scale(site_means_Pacific$log10.periphyton.mass.per.area.site)
site_means_Pacific$zperiphyton.perg.pac <- scale(site_means_Pacific$log10.periphyton.mass.per.g.zostera.site)
site_means_Pacific$zmesograzer.mass.area.pac <- scale(site_means_Pacific$log10.mesograzer.mass.per.area.site)
site_means_Pacific$zmesograzer.mass.perg.pac <- scale(site_means_Pacific$log10.mesograzer.mass.per.g.plant.site)
# Create RANGE-scaled variables: SITE level (PACIFIC)
# This data set scales the variables using only Pacific values.
site_means_Pacific$rLatitude.pac <- range01(site_means_Pacific$Latitude)
site_means_Pacific$rPC1.zos.pac <- range01(site_means_Pacific$PC1.zos.site)
site_means_Pacific$rPC2.zos.pac <- range01(site_means_Pacific$PC2.zos.site)
site_means_Pacific$rPC1.env.global.pac <- range01(site_means_Pacific$PC1.env.global)
site_means_Pacific$rPC2.env.global.pac <- range01(site_means_Pacific$PC2.env.global)
site_means_Pacific$rPC3.env.global.pac <- range01(site_means_Pacific$PC3.env.global)
site_means_Pacific$rFC1.global.pac <- range01(site_means_Pacific$FC1)
site_means_Pacific$rFC2.global.pac <- range01(site_means_Pacific$FC2)
site_means_Pacific$rPC1.env.pac <- range01(site_means_Pacific$PC1.env.pac)
site_means_Pacific$rPC2.env.pac <- range01(site_means_Pacific$PC2.env.pac)
site_means_Pacific$rPC3.env.pac <- range01(site_means_Pacific$PC3.env.pac)
site_means_Pacific$rperiphyton.area.pac <- range01(site_means_Pacific$log10.periphyton.mass.per.area.site)
site_means_Pacific$rperiphyton.perg.pac <- range01(site_means_Pacific$log10.periphyton.mass.per.g.zostera.site)
site_means_Pacific$rmesograzer.mass.area.pac <- range01(site_means_Pacific$log10.mesograzer.mass.per.area.site)
site_means_Pacific$rmesograzer.mass.perg.pac <- range01(site_means_Pacific$log10.mesograzer.mass.per.g.plant.site)
###################################################################################
#                          SUBSET DATA SETS BY GEOGRAPHY                          #
###################################################################################
# Reduced global data set excluding site SW.A (no periphyton data).
site_means_49 <- droplevels(subset(site_means, Site != "SW.A"))
###################################################################################
#                            OUTPUT CURATED DATA SETS                             #
###################################################################################
# Write each curated SITE-level data set to CSV (no row names).
site_exports <- list(
  "data/output/Duffy_et_al_2022_site_means.csv"             = site_means,
  "data/output/Duffy_et_al_2022_site_means_Atlantic.csv"    = site_means_Atlantic,
  "data/output/Duffy_et_al_2022_site_means_49_Atlantic.csv" = site_means_49_Atlantic,
  "data/output/Duffy_et_al_2022_site_means_Pacific.csv"     = site_means_Pacific
)
for (out_path in names(site_exports)) {
  write.csv(site_exports[[out_path]], out_path, row.names = FALSE)
}
|
globals <- new.env()
#' Browser base class
#'
#' Base class for browsers like Chrome, Chromium, etc. It defines the common
#' interface shared by concrete browser implementations, which may wrap either
#' a locally launched browser process or a connection to a remote one.
#'
#' An implementation's \code{initialize()} method must set \code{private$host}
#' and \code{private$port}; when the browser runs as a local process it must
#' also set \code{private$process}.
#'
#' @export
Browser <- R6Class("Browser",
  public = list(
    # TRUE when this object owns a local browser process, FALSE when remote.
    is_local = function() {
      !is.null(private$process)
    },
    # Accessor for the local process handle (NULL for remote browsers).
    get_process = function() {
      private$process
    },
    # Accessors for the host/port of the browser's debugging endpoint.
    get_host = function() {
      private$host
    },
    get_port = function() {
      private$port
    },
    # Terminate a locally owned, still-running browser process with SIGTERM;
    # a no-op for remote browsers or processes that have already exited.
    close = function() {
      if (!self$is_local()) {
        return(invisible(NULL))
      }
      if (private$process$is_alive()) {
        private$process$signal(tools::SIGTERM)
      }
    }
  ),
  private = list(
    # Local browser process handle (assumed to provide is_alive()/signal(),
    # e.g. a processx handle -- confirm); NULL when the browser is remote.
    process = NULL,
    host = NULL,
    port = NULL,
    # Finalizer run at garbage collection: shut down a local browser process.
    finalize = function(e) {
      if (self$is_local()) {
        self$close()
      }
    }
  )
)
| /R/browser.R | no_license | Hong-Sung-Hyun/chromote | R | false | false | 1,073 | r | globals <- new.env()
#' Browser base class
#'
#' Base class for browsers like Chrome, Chromium, etc. Defines the interface
#' used by various browser implementations. It can represent a local browser
#' process or one running remotely.
#'
#' The \code{initialize()} method of an implementation should set private$host
#' and private$port. If the process is local, the \code{initialize()} method
#' should also set private$process.
#'
#' @export
Browser <- R6Class("Browser",
public = list(
# Returns TRUE if the browser is running locally, FALSE if it's remote.
is_local = function() !is.null(private$process),
get_process = function() private$process,
get_host = function() private$host,
get_port = function() private$port,
close = function() {
if (self$is_local() && private$process$is_alive()) {
private$process$signal(tools::SIGTERM)
}
}
),
private = list(
process = NULL,
host = NULL,
port = NULL,
finalize = function(e) {
if (self$is_local()) {
self$close()
}
}
)
)
|
## ----results='hide'-----------------------------------------------------------
set.seed(42)
library("Matrix")
library("lme4")
library("ggplot2")
library("eyetrackingR")
data("word_recognition")
data <- make_eyetrackingr_data(word_recognition,
participant_column = "ParticipantName",
trial_column = "Trial",
time_column = "TimeFromTrialOnset",
trackloss_column = "TrackLoss",
aoi_columns = c('Animate','Inanimate'),
treat_non_aoi_looks_as_missing = TRUE
)
# subset to response window post word-onset
response_window <- subset_by_window(data,
window_start_time = 15500,
window_end_time = 21000,
rezero = FALSE)
# analyze amount of trackloss by subjects and trials
(trackloss <- trackloss_analysis(data = response_window))
# remove trials with > 25% of trackloss
response_window_clean <- clean_by_trackloss(data = response_window, trial_prop_thresh = .25)
# create Target condition column
response_window_clean$Target <- as.factor( ifelse(test = grepl('(Spoon|Bottle)', response_window_clean$Trial),
yes = 'Inanimate',
no = 'Animate') )
## ---- warning=FALSE-----------------------------------------------------------
# recode AOIs to target & distractor
response_window_clean$TrialTarget <- ifelse(test = response_window_clean$Target == 'Animate',
yes = response_window_clean$Animate,
no = response_window_clean$Inanimate)
response_window_clean$TrialDistractor <- ifelse(test = response_window_clean$Target == 'Animate',
yes = response_window_clean$Inanimate,
no = response_window_clean$Animate)
## ---- warning=FALSE-----------------------------------------------------------
onsets <- make_onset_data(response_window_clean, onset_time = 15500, target_aoi='TrialTarget')
# participants' ability to orient to the trial target overall:
plot(onsets) + theme(legend.text=element_text(size=5))
## ---- warning=FALSE-----------------------------------------------------------
# participants' ability to orient to the trial target, split by which target:
plot(onsets, predictor_columns = "Target") + theme(legend.text=element_text(size=6))
## ---- warning=FALSE-----------------------------------------------------------
# we can also visualize numeric predictors:
plot(onsets, predictor_columns = "MCDI_Total") + theme(legend.text=element_text(size=6))
## ---- warning= FALSE----------------------------------------------------------
onset_switches <- make_switch_data(onsets, predictor_columns = "Target")
# visualize subject's switch times
plot(onset_switches, predictor_columns = c("Target"))
# center predictor:
onset_switches$FirstAOIC <- ifelse(onset_switches$FirstAOI == 'TrialTarget', .5, -.5)
onset_switches$FirstAOIC <- scale(onset_switches$FirstAOIC, center=TRUE, scale=FALSE)
onset_switches$TargetC <- ifelse(onset_switches$Target == 'Animate', .5, -.5)
onset_switches$TargetC <- scale(onset_switches$TargetC, center=TRUE, scale=FALSE)
# build model:
model_switches <- lmer(FirstSwitch ~ FirstAOIC*TargetC +
(1 | Trial) + (1 | ParticipantName), data=onset_switches, REML=FALSE)
# cleanly show important parts of model (see `summary()` for more)
broom.mixed::tidy(model_switches, effects="fixed")
drop1(model_switches,~.,test="Chi")
| /inst/doc/onset_contingent_analysis_vignette.R | permissive | cran/eyetrackingR | R | false | false | 3,690 | r | ## ----results='hide'-----------------------------------------------------------
set.seed(42)
library("Matrix")
library("lme4")
library("ggplot2")
library("eyetrackingR")
data("word_recognition")
data <- make_eyetrackingr_data(word_recognition,
participant_column = "ParticipantName",
trial_column = "Trial",
time_column = "TimeFromTrialOnset",
trackloss_column = "TrackLoss",
aoi_columns = c('Animate','Inanimate'),
treat_non_aoi_looks_as_missing = TRUE
)
# subset to response window post word-onset
response_window <- subset_by_window(data,
window_start_time = 15500,
window_end_time = 21000,
rezero = FALSE)
# analyze amount of trackloss by subjects and trials
(trackloss <- trackloss_analysis(data = response_window))
# remove trials with > 25% of trackloss
response_window_clean <- clean_by_trackloss(data = response_window, trial_prop_thresh = .25)
# create Target condition column
response_window_clean$Target <- as.factor( ifelse(test = grepl('(Spoon|Bottle)', response_window_clean$Trial),
yes = 'Inanimate',
no = 'Animate') )
## ---- warning=FALSE-----------------------------------------------------------
# recode AOIs to target & distractor
response_window_clean$TrialTarget <- ifelse(test = response_window_clean$Target == 'Animate',
yes = response_window_clean$Animate,
no = response_window_clean$Inanimate)
response_window_clean$TrialDistractor <- ifelse(test = response_window_clean$Target == 'Animate',
yes = response_window_clean$Inanimate,
no = response_window_clean$Animate)
## ---- warning=FALSE-----------------------------------------------------------
onsets <- make_onset_data(response_window_clean, onset_time = 15500, target_aoi='TrialTarget')
# participants' ability to orient to the trial target overall:
plot(onsets) + theme(legend.text=element_text(size=5))
## ---- warning=FALSE-----------------------------------------------------------
# participants' ability to orient to the trial target, split by which target:
plot(onsets, predictor_columns = "Target") + theme(legend.text=element_text(size=6))
## ---- warning=FALSE-----------------------------------------------------------
# we can also visualize numeric predictors:
plot(onsets, predictor_columns = "MCDI_Total") + theme(legend.text=element_text(size=6))
## ---- warning= FALSE----------------------------------------------------------
onset_switches <- make_switch_data(onsets, predictor_columns = "Target")
# visualize subject's switch times
plot(onset_switches, predictor_columns = c("Target"))
# center predictor:
onset_switches$FirstAOIC <- ifelse(onset_switches$FirstAOI == 'TrialTarget', .5, -.5)
onset_switches$FirstAOIC <- scale(onset_switches$FirstAOIC, center=TRUE, scale=FALSE)
onset_switches$TargetC <- ifelse(onset_switches$Target == 'Animate', .5, -.5)
onset_switches$TargetC <- scale(onset_switches$TargetC, center=TRUE, scale=FALSE)
# build model:
model_switches <- lmer(FirstSwitch ~ FirstAOIC*TargetC +
(1 | Trial) + (1 | ParticipantName), data=onset_switches, REML=FALSE)
# cleanly show important parts of model (see `summary()` for more)
broom.mixed::tidy(model_switches, effects="fixed")
drop1(model_switches,~.,test="Chi")
|
## Jinson's week 3 programming assignment for R Programming module
## of Data Science Specialization
##
## cachematrix contains 2 functions makeCacheMatrix and cacheSolve
## The purpose of these functions is to leverage on different scoping environments
## within R in order to cache time-consuming matrix inversion calculations
##
## Refer to function definition comments below for more details
##
## By: Jinson Xu
## Date: 21st September 2014
##
##
# NOTE(review): `rm(list = ls())` was removed here. Clearing the global
# environment from inside a script is an anti-pattern: it silently wipes the
# caller's workspace whenever this file is source()d. Restart R instead if a
# clean session is needed.
# define functions
# makeCacheMatrix takes in a matrix and populates it into a custom object that holds both the original matrix and its inverse if it has been set.
# makeCacheMatrix wraps a matrix in a closure-based object that can also hold
# a cached copy of its inverse (computed elsewhere, e.g. by cacheSolve).
makeCacheMatrix <- function(x = matrix()) {
    # Cached inverse; NULL means "not computed yet".
    im <- NULL

    # Replace the stored matrix and invalidate the cached inverse, since the
    # old inverse no longer corresponds to the new matrix.
    set <- function(newMatrix = matrix()) {
        x <<- newMatrix
        im <<- NULL
    }

    # Read back the stored matrix.
    get <- function() x

    # Store the externally computed inverse in the enclosing environment.
    setInverse <- function(inverseMatrix) {
        im <<- inverseMatrix
    }

    # Retrieve the cached inverse (NULL if it has not been set).
    getInverse <- function() im

    # Return the accessor functions. The `data` and `inverse` entries snapshot
    # the values at creation time (for illustration only) and do NOT track
    # later set()/setInverse() calls -- use the getters instead.
    list(set = set, get = get, setInverse = setInverse, getInverse = getInverse,
         data = x, inverse = im)
}
## cacheSolve returns a matrix that is the inverse of 'x'
##
## cacheSolve returns the inverse of the matrix wrapped by `x`
## (a makeCacheMatrix-style object), computing and memoizing it on first use.
cacheSolve <- function(x, ...) {
    # Fast path: serve a previously computed inverse from the cache.
    cached <- x$getInverse()
    if (!is.null(cached)) {
        message('getting cached inverse matrix')
        return(cached)
    }
    # Slow path: fetch the matrix, invert it, and memoize the result inside
    # the cache object so subsequent calls hit the fast path.
    dat <- x$get()
    message('calculating inverse matrix...')
    result <- solve(dat, ...)
    x$setInverse(result)
    result
}
# Demo: build a large random square matrix and show that the second
# cacheSolve() call is served from the cache (near-zero elapsed time).
set.seed(42)  # make the demo reproducible
testMatrix <- matrix(sample(1:4000000, 4000000, replace = TRUE), 2000)
dataObject <- makeCacheMatrix(testMatrix)
# First call: the inverse is actually computed (slow).
system.time({
    cacheSolve(dataObject)
})
# Second call: the cached inverse is returned immediately (fast).
system.time({
    cacheSolve(dataObject)
})
}) | /cachematrix.R | no_license | jinsonxu/ProgrammingAssignment2 | R | false | false | 3,038 | r | ## Jinson's week 3 programming assignment for R Programming module
## of Data Science Specialization
##
## cachematrix contains 2 functions makeCacheMatrix and cacheSolve
## The purpose of these functions is to leverage on different scoping environments
## within R in order to cache time-consuming matrix inversion calculations
##
## Refer to function definition comments below for more details
##
## By: Jinson Xu
## Date: 21st September 2014
##
##
# clear workspace
rm(list=ls())
# define functions
# makeCacheMatrix takes in a matrix and populates it into a custom object that holds both the original matrix and its inverse if it has been set.
makeCacheMatrix <- function(x = matrix()) {
im <- NULL # initialize inverse matrix property, set to NULL
# Setter function for makeCacheMatrix's matrix property
# useful if we want to change the matrix in the initialized makeCacheMatrix object
set <- function(newMatrix = matrix()) {
# set the x variable in the parent environment of this function to the new matrix property
x <<- newMatrix
# set/reset the inverse matrix property to NULL, cos the matrix property is different now.
im <<- NULL
}
# Getter function for makeCacheMatrix's matrix property
get <- function() return(x)
# Setter function for makeCacheMatrix's inverse matrix property
setInverse <- function(inverseMatrix) im <<- inverseMatrix # set inverseMatrix in im property in parent environment
# Getter function for makecacheMatrix's inverse matrix property
getInverse <- function() return(im)
# define makeCacheMatrix's function name handles.
# I've also set the matrix and inverse matrix property names for illustration purposes,
# note that traditionally we access these data via getters/setters as per best practices in encapsulation
list(set = set, get = get, setInverse = setInverse, getInverse = getInverse,
data = x, inverse = im)
}
## cacheSolve returns a matrix that is the inverse of 'x'
##
cacheSolve <- function(x, ...) {
# populate the local inverseMatrix property via call to makeCacheMatrix object's getInverse function
inverseMatrix <- x$getInverse()
# check if our local inverseMatrix property is NULL.
# if not NULL, break from function by returning it
# else solve inverse of the matrix and set it within the makeCacheMatrix object
if (!is.null(inverseMatrix)) {
message('getting cached inverse matrix')
return(inverseMatrix)
} else {
matrixData <- x$get()
message('calculating inverse matrix...')
inverseMatrix <- solve(matrixData, ...)
x$setInverse(inverseMatrix)
return(inverseMatrix)
}
}
# create a sample square matrix for testing
testMatrix <- matrix(sample(1:4000000, 4000000, replace = T), 2000)
dataObject <- makeCacheMatrix(testMatrix)
# let's now solve the matrix inversion for the first time. Add timing too...
system.time({
cacheSolve(dataObject)
})
# let's try it the 2nd time!
system.time({
cacheSolve(dataObject)
}) |
# Data Structures in R
#control+enter when you are in the line to execute
# Vectors-----
c(2,4,6)
?c
?seq
seq(2,10,.5)
seq(by=.5, from=2,to=3)
rep(1:3,times=4)
rep(1:3,each=4)
rep(c(3,6,7,2),each=4)
rep(c(3,6,7,2), times=4)
?rep
x=1:10 #create seq of nos from 1 to 10
x
x[5]
x[seq(1,10,2)]
(x1 <- 1:20) # brackets - assign & print
(x1=1:30)
(x2=c(1,2,13,4,5))
class(x2)
?mode
(x3=c('a',"ABC"))
class(x3)
(x3=letters[1:10])
class(x3)
LETTERS[1:26]
(x3b = c('a',"Henry",4))#should not combine numeric and character
class(x3b)
(x4=c(T,FALSE,TRUE,T,F)) #logical
class(x4)
class(c(3,5))
(x5a = c(3,5.5))
class(x5a)
as.integer(x5a)
(x5=c(3L,5L, 100L))
class(x5)
x5a = c(3,5)
class(x5a)
(x5b = c(1, 'a',T, 4L))
class(x5b)
#blank variable ?
x=3.5677
trunc(x)
round(x)
floor(x)
ceiling(x)
#access elements
?seq
(x6 = seq(0,100,by=3))
seq(0,100,3)
seq(to=100,from=0,by=3)
seq(1,5,2)
?seq
#[1] 0 2 4 6 8 10
ls() #variables in my environment
x6
length(x6)
x6[1]; x6[21]
x6[1:5]
x6[10:20]
x6[ seq(1,length(x6), 2)]
x6
x6[3] # access 3rd element
#[1] 4
x6[c(2, 4)] # access 2nd and 4th element
x6[-1] # access all but 1st element
x6[-c(1:10, 15:20)]
x6[c(2, -4)] # cannot mix positive and negative integers
#Error in x[c(2, -4)] : only 0's may be mixed with negative subscripts
x6[c(2.4, 3.54)] # real numbers are truncated to integers
x6[c(2,3)]
x6[-c(1,5,20)]
x6
x6[x6 > 30]
x6[x6 > 30 & x6 < 40] # 31-39
x6[x6 != 30]
# elementwise logical operators: OR is |, AND is &, NOT is !
length(x6)
x6
x6[-(length(x6)-1)]
x2
(x7 = c(x6, x2))
#---- Day1------
#------
#modify
x6
set.seed(1234)
(x6 = sample(1:50))
(x6b = sort(sample(1:50)))
sort(x6)
sort(x6[-c(1,2)])
sort(x6, decreasing=T)
x6
rev(x6)
seq(-3, 10, by=.2)
x6[-c(1:12)]
x6
x6[x6> 30 & x6 < 40]
(x = -3:2)
x6
x6[2:10] <- 99; x6 # modify 2nd element
x6[x6 > 30 & x6 < 40] = 999
x6
x6
x7 = x6[1:4]; x7 # truncate x to first 4 elements
1:5
#equal partitions within a range
(x = seq(1,5, length.out = 15))
x
x = NULL
x
#NULL
x[4]
#NULL
?distribution
?rnorm
(x = rnorm(100))
plot(density(x))
abline(v=c(-3,0,3))
mean(x)
(x1 = rnorm(100, mean=50, sd=5))
plot(density(x1))
abline(v=mean(x1),h=0.04)
hist(x1, breaks=7)
hist(x1)
hist(x1, freq=F)
lines(density(x1), col=2)
summary(x1)
quantile(x1)
quantile(x1, seq(0,1,.25))
quantile(x1,c(.1, .5, .8))
quantile(x1,seq(0,1,.01))
stem(x1)
#Matrix-----
100:111
length(100:111)
matrix(1,ncol=3, nrow=4)
(m1 = matrix(100:111, nrow=4))
(m2 = matrix(100:111, ncol=3, byrow=T))
x=101:124
length(x)
matrix(x, ncol=6)
class(m1)
attributes(m1)
dim(m1)
m1
# access elements of matrix
m1[1,]
m1[,1]
m1[,1, drop=F]
m1[,-1] #remove 1st column
m1[1,2:3]
m1[c(1,3),]
m1[,-c(1,3), drop=F]
m1[m1> 105 & m1 < 108]
#names of cols and rows
m1
paste("C","D",sep="-")
paste("C",1:100,sep="-")
paste("C",1:3,sep='')
(colnames(m1) = paste('C',1:3, sep=''))
m1
(rownames(m1) = paste("R",1:4, sep=''))
m1
attributes(m1)
m1[,c('C1','C3')]
m1[,c(1,3)]
#Vector to Matrix
(m3 = 1:24)
m3
dim(m3)= c(6,4)
m3
#access elements
m2
m2[1,] #first row
m2[c(1,3,4),] #1st,3rd,4th row
m2[,1] #first col
m2[,2:3] # 2nd to 3rd coln
m2[c(1,2),c(2,3)]
m2[,]
m2[-2,] # exclude 2nd row
m2
m2[1:5] # matrix is like vector
m2
m2[c(TRUE,F,T,F),c(F, T, T)] #logical indexing
m2[m2 > 5 & m2 < 10]
m1
m1[1:2,1:2]
m1[c('R1','R2'),c('C1','C2')]
m1[1:2,]
m1[c(T,T,F,F),]
m1
#modify Vector
m2
m2[2,2]
m2[2,2] = 10
m2
m2[,2] = 10
m2
m2[m2> 107] = 9999
m2
rbind(m2, c(50,60,70))
rbind(m2,m2)
m2
cbind(m2, c(55,65,75,85))
m2m2= cbind(m2,m2)
m2m2
m2
cbind(m2,m2)
rbind(m2,m2)
#row and col wise summary
m1
colSums(m1)
rowSums(m1)
colMeans(m1)
rowMeans(m1)
t(m1) # transpose
m1
sweep(m1, MARGIN = 1, STATS = c(2,3,4,5), FUN="+" ) #rowise
sweep(m1, MARGIN = 2, STATS = c(2,3,4), FUN="*" ) #colwise
#addmargins
m1
?addmargins
addmargins(m1,margin=1,sum) #colwise function
addmargins(m1,1,sd) #colwise function
addmargins(m1,2,mean) #rowwise function
addmargins(m1,c(1,2),mean) #row & col wise function
?addmargins
(M1sum= addmargins(m1,c(1,2),list(list(mean,sum,max, min), list(var,sd, max, min)))) #row & col wise function
round(M1sum,0)
#Array-----
length(100:123)
4*3*2
#2 coys, 3 products, 4 locations sold qty
(a1 = array(100:123, dim=c(4,3,2)))
(loc = paste('loc', 1:4,sep='-'))
(product = paste('p', 1:3,sep='@'))
(coy = paste('coy', 1:2,sep='%'))
dimnames(a1) = list(loc, product, coy)
a1
apply(a1,1, sum) #locationwise
apply(a1,2, sum) #productwise
apply(a1,c(1,2), sum) #product-location wise
apply(a1,c(2,3), sum) #product-coy wise
apply(a1,c(1,3), sum) #coy-location
apply(a1,3, sum) #coywise
sum(a1) #total
#DataFrame----
#create Vectors to be combined into DF
(rollno = 1:30)
(sname = paste('student',1:30,sep=''))
(gender = sample(c('M','F'), size=30, replace=T, prob=c(.7,.3)))
(marks1 = floor(rnorm(30,mean= 50,sd=10)))
(marks2 = ceiling(rnorm(30,40,5)))
(course = sample(c('BBA','MBA'), size=30, replace=T, prob=c(.5,.5)))
rollno; sname; gender
marks1 ; marks2; course
#create DF
df1= data.frame(rollno, sname, gender, marks1, marks2, course, stringsAsFactors = F)
str(df1) #structure of DF
head(df1) #top 6 rows
head(df1,n=3) #top 3 rows
tail(df1) #last 6 rows
class(df1) # DF
summary(df1) #summary
nrow(df1)
dim(df1)
length(df1)
df1$course
df1$gender = factor(df1$gender)
df1$course = factor(df1$course)
#df1$sname = as.character(df1$sname)
str(df1)
summary(df1)
boxplot(marks1 ~ gender + course, data=df1)
df1 #full data
df1$gender # one column
head(df1[ , c(2,4)]) #multiple columns
df1[1:10 ,] #select rows, all columns
df1[1:5,1:4]
#as per conditionis
df1[ marks1 > 50 & gender=='F', c('rollno', 'sname','gender', 'marks1')]
df1[ marks1 > 50 & gender=='F', c(1,2)]
df1[ marks1 > 50 | gender=='F', ]
names(df1) # names of columns
dim(df1) #Dimensions
aggregate(df1$marks1, by=list(df1$gender), FUN=sum)
aggregate(marks1 ~ gender, data=df1, FUN=max)
aggregate(cbind(marks1, marks2) ~ gender, data=df1, FUN=max)
(df2 = aggregate(cbind(marks1,marks2) ~ gender + course, data=df1, FUN=mean))
df2
df1
#List -----
g ="My First List"
h = c(25, 26,18,39)
j = matrix(1:10,nrow=2)
k = c('one','two','three')
mylist = list(title=g, ages=h, j, h)
mylist
mylist[2]
mylist[[2]]
mylist[['ages']]
mylist$ages
#Factor -----
(grades = sample(c('A','B','C','D'), size=30, replace=T, prob=c(.3,.2,.4,.1)))
summary(grades)
table(grades)
(gradesFactor = factor(grades))
summary(gradesFactor)
(gradesFactorOrdered = factor(grades, ordered=T))
summary(gradesFactorOrdered)
(gradesFactorOrderedLevels = factor(grades, ordered=T, levels=c('D','C','B','A')))
summary(gradesFactorOrderedLevels)
gradesFactor
gradesFactorOrdered
gradesFactorOrderedLevels
pie(c(10,15,17))
pie(summary(gradesFactorOrderedLevels))
barplot(summary(gradesFactorOrderedLevels), col=1:4)
class(grades)
class(gradesFactorOrdered)
class(gradesFactorOrderedLevels)
# Object Properties
#vector
v1= 1:100
class(v1) ; typeof(v1)
v2=letters[1:10]
class(v2) ; typeof(v2)
length(v2)
summary(v1)
#matrix
m1= matrix(1:24,nrow=6)
class(m1)
summary(m1)
dim(m1)
str(m1)
#Array
a1 =array(1:24, dim=c(4,3,2))
class(a1)
str(a1)
dim(a1)
summary(a1)
#DF
#data() #built in datasets
df1= iris
str(df1)
summary(df1)
class(df1); dim(df1)
nrow(df1) ; names(df1) ;NROW(df1)
colnames(df1)
rownames(df1)
#list
list1 = list(v1,m1,a1,df1)
str(list1)
#Statistical Description
library(Hmisc)
describe(df1)
#Next Topics
x= c(123.2234, 33333.544, 43243.8442)
floor(x)
ceiling(x)
trunc(x)
round(x,-2)
round(x, digits = 5)
| /11b2-DS1.R | no_license | shummy-herenz/ranalytics | R | false | false | 7,456 | r | # Data Structures in R
#control+enter when you are in the line to execute
# Vectors-----
c(2,4,6)
?c
?seq
seq(2,10,.5)
seq(by=.5, from=2,to=3)
rep(1:3,times=4)
rep(1:3,each=4)
rep(c(3,6,7,2),each=4)
rep(c(3,6,7,2), times=4)
?rep
x=1:10 #create seq of nos from 1 to 10
x
x[5]
x[seq(1,10,2)]
(x1 <- 1:20) # brackets - assign & print
(x1=1:30)
(x2=c(1,2,13,4,5))
class(x2)
?mode
(x3=c('a',"ABC"))
class(x3)
(x3=letters[1:10])
class(x3)
LETTERS[1:26]
(x3b = c('a',"Henry",4))#should not combine numeric and character
class(x3b)
(x4=c(T,FALSE,TRUE,T,F)) #logical
class(x4)
class(c(3,5))
(x5a = c(3,5.5))
class(x5a)
as.integer(x5a)
(x5=c(3L,5L, 100L))
class(x5)
x5a = c(3,5)
class(x5a)
(x5b = c(1, 'a',T, 4L))
class(x5b)
#blank variable ?
x=3.5677
trunc(x)
round(x)
floor(x)
ceiling(x)
#access elements
?seq
(x6 = seq(0,100,by=3))
seq(0,100,3)
seq(to=100,from=0,by=3)
seq(1,5,2)
?seq
#[1] 0 2 4 6 8 10
ls() #variables in my environment
x6
length(x6)
x6[1]; x6[21]
x6[1:5]
x6[10:20]
x6[ seq(1,length(x6), 2)]
x6
x6[3] # access 3rd element
#[1] 4
x6[c(2, 4)] # access 2nd and 4th element
x6[-1] # access all but 1st element
x6[-c(1:10, 15:20)]
x6[c(2, -4)] # cannot mix positive and negative integers
#Error in x[c(2, -4)] : only 0's may be mixed with negative subscripts
x6[c(2.4, 3.54)] # real numbers are truncated to integers
x6[c(2,3)]
x6[-c(1,5,20)]
x6
x6[x6 > 30]
x6[x6 > 30 & x6 < 40] # 31-39
x6[x6 != 30]
#or | and is & !
length(x6)
x6
x6[-(length(x6)-1)]
x2
(x7 = c(x6, x2))
#---- Day1------
#------
#modify
x6
set.seed(1234)
(x6 = sample(1:50))
(x6b = sort(sample(1:50)))
sort(x6)
sort(x6[-c(1,2)])
sort(x6, decreasing=T)
x6
rev(x6)
seq(-3, 10, by=.2)
x6[-c(1:12)]
x6
x6[x6> 30 & x6 < 40]
(x = -3:2)
x6
x6[2:10] <- 99; x6 # modify 2nd element
x6[x6 > 30 & x6 < 40] = 999
x6
x6
x7 = x6[1:4]; x7 # truncate x to first 4 elements
1:5
#equal partitions within a range
(x = seq(1,5, length.out = 15))
x
x = NULL
x
#NULL
x[4]
#NULL
?distribution
?rnorm
(x = rnorm(100))
plot(density(x))
abline(v=c(-3,0,3))
mean(x)
(x1 = rnorm(100, mean=50, sd=5))
plot(density(x1))
abline(v=mean(x1),h=0.04)
hist(x1, breaks=7)
hist(x1)
hist(x1, freq=F)
lines(density(x1), col=2)
summary(x1)
quantile(x1)
quantile(x1, seq(0,1,.25))
quantile(x1,c(.1, .5, .8))
quantile(x1,seq(0,1,.01))
stem(x1)
#Matrix-----
100:111
length(100:111)
matrix(1,ncol=3, nrow=4)
(m1 = matrix(100:111, nrow=4))
(m2 = matrix(100:111, ncol=3, byrow=T))
x=101:124
length(x)
matrix(x, ncol=6)
class(m1)
attributes(m1)
dim(m1)
m1
# access elements of matrix
m1[1,]
m1[,1]
m1[,1, drop=F]
m1[,-1] #remove 1st column
m1[1,2:3]
m1[c(1,3),]
m1[,-c(1,3), drop=F]
m1[m1> 105 & m1 < 108]
#names of cols and rows
m1
paste("C","D",sep="-")
paste("C",1:100,sep="-")
paste("C",1:3,sep='')
(colnames(m1) = paste('C',1:3, sep=''))
m1
(rownames(m1) = paste("R",1:4, sep=''))
m1
attributes(m1)
m1[,c('C1','C3')]
m1[,c(1,3)]
#Vector to Matrix
(m3 = 1:24)
m3
dim(m3)= c(6,4)
m3
#access elements
m2
m2[1,] #first row
m2[c(1,3,4),] #1st,3rd,4th row
m2[,1] #first col
m2[,2:3] # 2nd to 3rd coln
m2[c(1,2),c(2,3)]
m2[,]
m2[-2,] # exclude 2nd row
m2
m2[1:5] # matrix is like vector
m2
m2[c(TRUE,F,T,F),c(F, T, T)] #logical indexing
m2[m2 > 5 & m2 < 10]
m1
m1[1:2,1:2]
m1[c('R1','R2'),c('C1','C2')]
m1[1:2,]
m1[c(T,T,F,F),]
m1
#modify Vector
m2
m2[2,2]
m2[2,2] = 10
m2
m2[,2] = 10
m2
m2[m2> 107] = 9999
m2
rbind(m2, c(50,60,70))
rbind(m2,m2)
m2
cbind(m2, c(55,65,75,85))
m2m2= cbind(m2,m2)
m2m2
m2
cbind(m2,m2)
rbind(m2,m2)
#row and col wise summary
m1
colSums(m1)
rowSums(m1)
colMeans(m1)
rowMeans(m1)
t(m1) # transpose
m1
sweep(m1, MARGIN = 1, STATS = c(2,3,4,5), FUN="+" ) #rowise
sweep(m1, MARGIN = 2, STATS = c(2,3,4), FUN="*" ) #colwise
#addmargins
m1
?addmargins
addmargins(m1,margin=1,sum) #colwise function
addmargins(m1,1,sd) #colwise function
addmargins(m1,2,mean) #rowwise function
addmargins(m1,c(1,2),mean) #row & col wise function
?addmargins
(M1sum= addmargins(m1,c(1,2),list(list(mean,sum,max, min), list(var,sd, max, min)))) #row & col wise function
round(M1sum,0)
#Array-----
length(100:123)
4*3*2
#2 coys, 3 products, 4 locations sold qty
(a1 = array(100:123, dim=c(4,3,2)))
(loc = paste('loc', 1:4,sep='-'))
(product = paste('p', 1:3,sep='@'))
(coy = paste('coy', 1:2,sep='%'))
dimnames(a1) = list(loc, product, coy)
a1
apply(a1,1, sum) #locationwise
apply(a1,2, sum) #productwise
apply(a1,c(1,2), sum) #product-location wise
apply(a1,c(2,3), sum) #product-coy wise
apply(a1,c(1,3), sum) #coy-location
apply(a1,3, sum) #coywise
sum(a1) #total
#DataFrame----
#create Vectors to be combined into DF
(rollno = 1:30)
(sname = paste('student',1:30,sep=''))
(gender = sample(c('M','F'), size=30, replace=T, prob=c(.7,.3)))
(marks1 = floor(rnorm(30,mean= 50,sd=10)))
(marks2 = ceiling(rnorm(30,40,5)))
(course = sample(c('BBA','MBA'), size=30, replace=T, prob=c(.5,.5)))
rollno; sname; gender
marks1 ; marks2; course
#create DF
df1= data.frame(rollno, sname, gender, marks1, marks2, course, stringsAsFactors = F)
str(df1) #structure of DF
head(df1) #top 6 rows
head(df1,n=3) #top 3 rows
tail(df1) #last 6 rows
class(df1) # DF
summary(df1) #summary
nrow(df1)
dim(df1)
length(df1)
df1$course
df1$gender = factor(df1$gender)
df1$course = factor(df1$course)
#df1$sname = as.character(df1$sname)
str(df1)
summary(df1)
boxplot(marks1 ~ gender + course, data=df1)
df1 #full data
df1$gender # one column
head(df1[ , c(2,4)]) #multiple columns
df1[1:10 ,] #select rows, all columns
df1[1:5,1:4]
#as per conditionis
df1[ marks1 > 50 & gender=='F', c('rollno', 'sname','gender', 'marks1')]
df1[ marks1 > 50 & gender=='F', c(1,2)]
df1[ marks1 > 50 | gender=='F', ]
names(df1) # names of columns
dim(df1) #Dimensions
aggregate(df1$marks1, by=list(df1$gender), FUN=sum)
aggregate(marks1 ~ gender, data=df1, FUN=max)
aggregate(cbind(marks1, marks2) ~ gender, data=df1, FUN=max)
(df2 = aggregate(cbind(marks1,marks2) ~ gender + course, data=df1, FUN=mean))
df2
df1
#List -----
g ="My First List"
h = c(25, 26,18,39)
j = matrix(1:10,nrow=2)
k = c('one','two','three')
mylist = list(title=g, ages=h, j, h)
mylist
mylist[2]
mylist[[2]]
mylist[['ages']]
mylist$ages
#Factor -----
(grades = sample(c('A','B','C','D'), size=30, replace=T, prob=c(.3,.2,.4,.1)))
summary(grades)
table(grades)
(gradesFactor = factor(grades))
summary(gradesFactor)
(gradesFactorOrdered = factor(grades, ordered=T))
summary(gradesFactorOrdered)
(gradesFactorOrderedLevels = factor(grades, ordered=T, levels=c('D','C','B','A')))
summary(gradesFactorOrderedLevels)
gradesFactor
gradesFactorOrdered
gradesFactorOrderedLevels
pie(c(10,15,17))
pie(summary(gradesFactorOrderedLevels))
barplot(summary(gradesFactorOrderedLevels), col=1:4)
class(grades)
class(gradesFactorOrdered)
class(gradesFactorOrderedLevels)
# Object Properties
#vector
v1= 1:100
class(v1) ; typeof(v1)
v2=letters[1:10]
class(v2) ; typeof(v2)
length(v2)
summary(v1)
#matrix
m1= matrix(1:24,nrow=6)
class(m1)
summary(m1)
dim(m1)
str(m1)
#Array
a1 =array(1:24, dim=c(4,3,2))
class(a1)
str(a1)
dim(a1)
summary(a1)
#DF
#data() #built in datasets
df1= iris
str(df1)
summary(df1)
class(df1); dim(df1)
nrow(df1) ; names(df1) ;NROW(df1)
colnames(df1)
rownames(df1)
#list
list1 = list(v1,m1,a1,df1)
str(list1)
#Statistical Description
library(Hmisc)
describe(df1)
#Next Topics
x= c(123.2234, 33333.544, 43243.8442)
floor(x)
ceiling(x)
trunc(x)
round(x,-2)
round(x, digits = 5)
|
library(plyr)
library(dplyr)
library(data.table)
library(Stack)
# Read the held-out test split of listening events (LEs).
test <- read.csv('/Train-Test Splits/Context/test.csv', header = TRUE)
# Order LEs by per-user event count (most active users first).
# BUG FIX: the original called setDT(tes), which errors -- the object is
# named `test`.
setDT(test)[, freq := .N, by = "user_id"]
test <- test[order(freq, decreasing = TRUE), ]
# Unique user ids and their event counts, sorted by activity.
unique_user_id <- with(test, aggregate(freq ~ user_id, FUN = function(x) { unique(x) }))
frequen <- unique_user_id$freq
frequen <- sort(frequen, decreasing = TRUE)
user <- unique(test$user_id)
# Positive LEs get rating 1; drop the helper frequency column.
test$rating <- 1
test$freq <- NULL
# Seed the negative-sample accumulator with one temporary positive LE
# (discarded below); coerce factor columns to character so Stack() can
# combine rows with unseen levels.
temp <- test[1, ]
temp$lang <- as.character(temp$lang)
temp$hashtag <- as.character(temp$hashtag)
temp$tweet_lang <- as.character(temp$tweet_lang)
for (i in seq_along(user))
{
  # All LEs of this user.
  lis <- filter(test, test$user_id == user[i])
  # Create 9 negative samples for each positive sample of this user.
  notlis <- do.call("rbind", replicate(9, lis, simplify = FALSE))
  # Languages the user has not used, recycled to fill the negatives.
  notlang <- setdiff(test$lang, lis$lang)
  notlang <- rep(notlang, length.out = nrow(notlis))
  # Hashtags the user has not used, recycled likewise.
  nothash <- setdiff(test$hashtag, lis$hashtag)
  nothash <- rep(nothash, length.out = nrow(notlis))
  notlis$lang <- notlang
  notlis$hashtag <- nothash
  notlis$tweet_lang <- notlang
  # Negative LEs get rating 0.
  notlis$rating <- 0
  # Stack this user's negatives onto the accumulator.
  temp <- Stack(temp, notlis)
  print(i)
}
# Discard the temporary seed LE used to start the accumulator.
temp <- temp[2:nrow(temp), ]
# Merge positives and negatives into the final test set.
test_all <- Stack(test, temp)
# Write the final test set (input to FM) to file.
write.table(test_all, 'test_final_POP_USER.txt', quote = FALSE, col.names = FALSE, row.names = FALSE, sep = '\t')
write.table(test_all, 'test_final_POP_USER.txt', quote = FALSE, col.names= FALSE, row.names = FALSE, sep = '\t') | /Context_POP_USER/test.r | no_license | asmitapoddar/nowplaying-RS-Music-Reco-FM | R | false | false | 1,953 | r | library(plyr)
library(dplyr)
library(data.table)
library(Stack)
# Read the held-out test split of listening events (LEs).
test <- read.csv('/Train-Test Splits/Context/test.csv', header = TRUE)
# Order LEs by per-user event count (most active users first).
# BUG FIX: the original called setDT(tes), which errors -- the object is
# named `test`.
setDT(test)[, freq := .N, by = "user_id"]
test <- test[order(freq, decreasing = TRUE), ]
# Unique user ids and their event counts, sorted by activity.
unique_user_id <- with(test, aggregate(freq ~ user_id, FUN = function(x) { unique(x) }))
frequen <- unique_user_id$freq
frequen <- sort(frequen, decreasing = TRUE)
user <- unique(test$user_id)
# Positive LEs get rating 1; drop the helper frequency column.
test$rating <- 1
test$freq <- NULL
# Seed the negative-sample accumulator with one temporary positive LE
# (discarded below); coerce factor columns to character so Stack() can
# combine rows with unseen levels.
temp <- test[1, ]
temp$lang <- as.character(temp$lang)
temp$hashtag <- as.character(temp$hashtag)
temp$tweet_lang <- as.character(temp$tweet_lang)
for (i in seq_along(user))
{
  # All LEs of this user.
  lis <- filter(test, test$user_id == user[i])
  # Create 9 negative samples for each positive sample of this user.
  notlis <- do.call("rbind", replicate(9, lis, simplify = FALSE))
  # Languages the user has not used, recycled to fill the negatives.
  notlang <- setdiff(test$lang, lis$lang)
  notlang <- rep(notlang, length.out = nrow(notlis))
  # Hashtags the user has not used, recycled likewise.
  nothash <- setdiff(test$hashtag, lis$hashtag)
  nothash <- rep(nothash, length.out = nrow(notlis))
  notlis$lang <- notlang
  notlis$hashtag <- nothash
  notlis$tweet_lang <- notlang
  # Negative LEs get rating 0.
  notlis$rating <- 0
  # Stack this user's negatives onto the accumulator.
  temp <- Stack(temp, notlis)
  print(i)
}
# Discard the temporary seed LE used to start the accumulator.
temp <- temp[2:nrow(temp), ]
# Merge positives and negatives into the final test set.
test_all <- Stack(test, temp)
# Write the final test set (input to FM) to file.
write.table(test_all, 'test_final_POP_USER.txt', quote = FALSE, col.names = FALSE, row.names = FALSE, sep = '\t')
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prepare.R
\name{print_dat}
\alias{print_dat}
\title{Internal Function: Print a data frame with caption/note}
\usage{
print_dat(x, caption = NULL, note = NULL, digits = 1,
big.mark = ",")
}
\arguments{
\item{x}{data frame: data frame contents to print}
\item{caption}{character: Optional caption to print}
\item{note}{character: Optional note(s) to print. Supply a character vector
for multiple lines of notes}
\item{digits}{number of digits for rounding}
\item{big.mark}{character: separator between 1000s}
}
\description{
Intended for showing tables with titles & notes in logged output in doc/
}
\examples{
x <- data.frame(yr = c(2005, 2006), cust = c(100000, 131000),
sales = c(567891, 673568), churn = c(NA, 25.23), char = c("test", NA))
print_dat(x)
print_dat(x, "Customer Sales by Year")
print_dat(x, "Customer Sales by Year", "A note!")
print_dat(x, "Customer Sales by Year", big.mark = "")
print_dat(x, "Customer Sales by Year", digits = 0)
}
\seealso{
Other internal helper functions: \code{\link{calc_churn}},
\code{\link{format_num}}, \code{\link{pct_round}}
}
\concept{internal helper functions}
\keyword{internal}
| /man/print_dat.Rd | permissive | southwick-associates/salic | R | false | true | 1,191 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prepare.R
\name{print_dat}
\alias{print_dat}
\title{Internal Function: Print a data frame with caption/note}
\usage{
print_dat(x, caption = NULL, note = NULL, digits = 1,
big.mark = ",")
}
\arguments{
\item{x}{data frame: data frame contents to print}
\item{caption}{character: Optional caption to print}
\item{note}{character: Optional note(s) to print. Supply a character vector
for multiple lines of notes}
\item{digits}{number of digits for rounding}
\item{big.mark}{character: separator between 1000s}
}
\description{
Intended for showing tables with titles & notes in logged output in doc/
}
\examples{
x <- data.frame(yr = c(2005, 2006), cust = c(100000, 131000),
sales = c(567891, 673568), churn = c(NA, 25.23), char = c("test", NA))
print_dat(x)
print_dat(x, "Customer Sales by Year")
print_dat(x, "Customer Sales by Year", "A note!")
print_dat(x, "Customer Sales by Year", big.mark = "")
print_dat(x, "Customer Sales by Year", digits = 0)
}
\seealso{
Other internal helper functions: \code{\link{calc_churn}},
\code{\link{format_num}}, \code{\link{pct_round}}
}
\concept{internal helper functions}
\keyword{internal}
|
# plot4: draw the 2x2 panel figure (Global Active Power, Voltage, energy
# sub-metering overlay, Global Reactive Power) for 2007-02-01/02 from the
# UCI household power consumption data and save it as plot4.png.
plot4 <- function() {
  # open the PNG graphics device; everything below draws into plot4.png
  png(file = "plot4.png")
  # Read the raw semicolon-delimited text file, keeping the header and
  # treating "?" as NA.
  rawData <- read.table("household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors=FALSE, na.strings="?")
  # transform the Date column from "d/m/Y" text into Date objects
  powerconsumption <- transform(rawData,Date=as.Date(rawData$Date,format="%d/%m/%Y"))
  # keep only the two days of interest (1-2 Feb 2007)
  powerconsumptionSubset <- powerconsumption[powerconsumption$Date=="2007-2-1" | powerconsumption$Date=="2007-2-2", ]
  # combine Date and Time into a single timestamp column (strptime -> POSIXlt)
  powerconsumptionSubset$DateTime <- strptime(paste(powerconsumptionSubset$Date,powerconsumptionSubset$Time,sep=":"),format="%Y-%m-%d:%H:%M:%S")
  # lay out a 2 x 2 grid of panels, filled row-wise
  par(mfrow = c(2,2))
  # panel 1 (top-left): Global Active Power over time
  with(powerconsumptionSubset, plot(powerconsumptionSubset$DateTime,powerconsumptionSubset$Global_active_power,type="l",xlab="",ylab="Global Active Power"))
  # panel 2 (top-right): Voltage over time
  with(powerconsumptionSubset, plot(powerconsumptionSubset$DateTime,powerconsumptionSubset$Voltage,type="l",xlab="datetime",ylab="Voltage"))
  # panel 3 (bottom-left): the three sub-metering series overlaid.
  # par(new=TRUE) stops the next plot() from clearing the panel; a fixed
  # ylim keeps the overlaid axes aligned.
  with(powerconsumptionSubset, plot(powerconsumptionSubset$DateTime,powerconsumptionSubset$Sub_metering_1,type="l",xlab="",ylab="Energy sub metering", col = "black", ylim = c(0,38)))
  par(new=TRUE)
  with(powerconsumptionSubset, plot(powerconsumptionSubset$DateTime,powerconsumptionSubset$Sub_metering_2,type="l",xlab="",ylab="Energy sub metering", col = "red", ylim = c(0,38)))
  par(new=TRUE)
  with(powerconsumptionSubset, plot(powerconsumptionSubset$DateTime,powerconsumptionSubset$Sub_metering_3,type="l",xlab="",ylab="Energy sub metering", col = "blue", ylim = c(0,38)))
  # legend for the sub-metering panel; bty="n" hides the legend border
  legend("topright",col=c("black","red","blue"),lty=c(1,1,1),lwd=c(1,1,1),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),bty="n")
  # panel 4 (bottom-right): Global Reactive Power over time
  # (the original comment mislabelled this panel as "Voltage")
  with(powerconsumptionSubset, plot(powerconsumptionSubset$DateTime,powerconsumptionSubset$Global_reactive_power,type="l",xlab="datetime",ylab="Global_reactive_power",lwd=0.005))
  # close the device, flushing plot4.png to disk
  dev.off()
} | /plot4.R | no_license | DigitalSocrates/ExData_Plotting1 | R | false | false | 2,884 | r | plot4 <- function() {
# specify output file
png(file = "plot4.png")
#Read raw data from a txt file
# specify: keep header and define delimiter
rawData <- read.table("household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors=FALSE, na.strings="?")
# tranform a column in a raw dataset and assign it
powerconsumption <- transform(rawData,Date=as.Date(rawData$Date,format="%d/%m/%Y"))
# get data only for two days
powerconsumptionSubset <- powerconsumption[powerconsumption$Date=="2007-2-1" | powerconsumption$Date=="2007-2-2", ]
# merge data and time
powerconsumptionSubset$DateTime <- strptime(paste(powerconsumptionSubset$Date,powerconsumptionSubset$Time,sep=":"),format="%Y-%m-%d:%H:%M:%S")
# define the grid for plotting ( 2 rows and 2 columns)
par(mfrow = c(2,2))
# make a plot using time series for Global Active Power
with(powerconsumptionSubset, plot(powerconsumptionSubset$DateTime,powerconsumptionSubset$Global_active_power,type="l",xlab="",ylab="Global Active Power"))
# If set to TRUE, the next high-level plotting command (actually plot.new) should not clean the frame before drawing as if it were on a new device.
# make a plot using time series for Voltage
with(powerconsumptionSubset, plot(powerconsumptionSubset$DateTime,powerconsumptionSubset$Voltage,type="l",xlab="datetime",ylab="Voltage"))
# make a plot using time series for sub_metering 1
with(powerconsumptionSubset, plot(powerconsumptionSubset$DateTime,powerconsumptionSubset$Sub_metering_1,type="l",xlab="",ylab="Energy sub metering", col = "black", ylim = c(0,38)))
# If set to TRUE, the next high-level plotting command (actually plot.new) should not clean the frame before drawing as if it were on a new device.
par(new=TRUE)
# make a plot using time series for sub_metering 2
with(powerconsumptionSubset, plot(powerconsumptionSubset$DateTime,powerconsumptionSubset$Sub_metering_2,type="l",xlab="",ylab="Energy sub metering", col = "red", ylim = c(0,38)))
# If set to TRUE, the next high-level plotting command (actually plot.new) should not clean the frame before drawing as if it were on a new device.
par(new=TRUE)
# make a plot using time series for sub_metering 3
with(powerconsumptionSubset, plot(powerconsumptionSubset$DateTime,powerconsumptionSubset$Sub_metering_3,type="l",xlab="",ylab="Energy sub metering", col = "blue", ylim = c(0,38)))
#annotate with a legend
legend("topright",col=c("black","red","blue"),lty=c(1,1,1),lwd=c(1,1,1),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),bty="n")
# make a plot using time series for Voltage
# note: need to hide border around the legend
with(powerconsumptionSubset, plot(powerconsumptionSubset$DateTime,powerconsumptionSubset$Global_reactive_power,type="l",xlab="datetime",ylab="Global_reactive_power",lwd=0.005))
# write png
dev.off()
} |
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192455e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result) | /myTAI/inst/testfiles/cpp_bootMatrix/AFL_cpp_bootMatrix/cpp_bootMatrix_valgrind_files/1615765549-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 1,803 | r | testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192455e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result) |
# Annuity values: "awal" = annuity-due (payments at the START of each period)
# and "akhir" = ordinary annuity (payments at the END of each period).
#   num : "satu" selects the annuity-due branch, "dua" the ordinary branch
#   k   : payment amount per period
#   i   : effective interest rate per period
#   n   : number of periods
# Each branch prints the present value (a) first, then the accumulated
# value (s), and invisibly returns the accumulated value.
annuitas<- function (num,k,i,n )
  switch (num,
         satu= {
           # v is the one-period discount factor; note d = i*v
           v= 1/(1+i)
           # present value of an annuity-due: a..(n) = (1 - v^n) / d
           anAwal= k*(1-v^n)/(i*v)
           # accumulated value of an annuity-due: s..(n) = ((1+i)^n - 1) / d
           snAwal= k*((1+i)^n-1)/(i*v)
           print(anAwal)
           print(snAwal)
         },
         dua= {
           v= 1/(1+i)
           # present value of an ordinary annuity: a(n) = (1 - v^n) / i
           anAkhir= k*(1-v^n)/i
           # accumulated value of an ordinary annuity: s(n) = ((1+i)^n - 1) / i
           snAkhir= k*((1+i)^n-1)/i
           print(anAkhir)
           print(snAkhir)
         }
) | /annuitass.R | no_license | dindaseftiyani/Pengantar-Statistika-Keuangan | R | false | false | 467 | r | #annuitas awal dan akhir
annuitas<- function (num,k,i,n )
switch (num,
satu= {
v= 1/(1+i)
anAwal= k*(1-v^n)/(i*v)
snAwal= k*((1+i)^n-1)/(i*v)
print(anAwal)
print(snAwal)
},
dua= {
v= 1/(1+i)
anAkhir= k*(1-v^n)/i
snAkhir= k*((1+i)^n-1)/i
print(anAkhir)
print(snAkhir)
}
) |
#http://www.gastonsanchez.com/visually-enforced/how-to/2014/01/15/Center-data-in-R/
set.seed(212)
Data = matrix(rnorm(60), 30, 2)
Data <- cbind(geocoded$lat, geocoded$long)
Data
View(Data)
Data <- as.data.frame(Data)
Data$V1 <- as.numeric(as.character(Data$V1))
Data$V2 <- as.numeric(as.character(Data$V2))
#############################
center_scale <- function(x) {
scale(x, scale = FALSE)
}
# apply it
center_scale(Data)
#############################
center_apply <- function(x) {
apply(x, 2, function(y) y - mean(y))
}
# apply it
center_apply(Data)
############################
# center with 'sweep()'
center_sweep <- function(x, row.w = rep(1, nrow(x))/nrow(x)) {
get_average <- function(v) sum(v * row.w)/sum(row.w)
average <- apply(x, 2, get_average)
sweep(x, 2, average)
}
# apply it
center_sweep(Data)
############################################
# RECOMENDADO
############################################
## center with 'colMeans()'
center_colmeans <- function(x) {
xcenter = colMeans(x)
x - rep(xcenter, rep.int(nrow(x), ncol(x)))
}
# apply it
center_colmeans(Data)
####################################
# center matrix operator
center_operator <- function(x) {
n = nrow(x)
ones = rep(1, n)
H = diag(n) - (1/n) * (ones %*% t(ones))
H %*% x
}
# apply it
center_operator(Data)
# mean subtraction
center_mean <- function(x) {
ones = rep(1, nrow(x))
x_mean = ones %*% t(colMeans(x))
x - x_mean
}
# apply it
center_mean(Data)
| /encontrar_ponto_central_coordeadas.R | permissive | fagnersutel/mapas | R | false | false | 1,564 | r | #http://www.gastonsanchez.com/visually-enforced/how-to/2014/01/15/Center-data-in-R/
set.seed(212)
Data = matrix(rnorm(60), 30, 2)
Data <- cbind(geocoded$lat, geocoded$long)
Data
View(Data)
Data <- as.data.frame(Data)
Data$V1 <- as.numeric(as.character(Data$V1))
Data$V2 <- as.numeric(as.character(Data$V2))
#############################
center_scale <- function(x) {
scale(x, scale = FALSE)
}
# apply it
center_scale(Data)
#############################
center_apply <- function(x) {
apply(x, 2, function(y) y - mean(y))
}
# apply it
center_apply(Data)
############################
# center with 'sweep()'
center_sweep <- function(x, row.w = rep(1, nrow(x))/nrow(x)) {
get_average <- function(v) sum(v * row.w)/sum(row.w)
average <- apply(x, 2, get_average)
sweep(x, 2, average)
}
# apply it
center_sweep(Data)
############################################
# RECOMENDADO
############################################
## center with 'colMeans()'
center_colmeans <- function(x) {
xcenter = colMeans(x)
x - rep(xcenter, rep.int(nrow(x), ncol(x)))
}
# apply it
center_colmeans(Data)
####################################
# center matrix operator
center_operator <- function(x) {
n = nrow(x)
ones = rep(1, n)
H = diag(n) - (1/n) * (ones %*% t(ones))
H %*% x
}
# apply it
center_operator(Data)
# mean subtraction
center_mean <- function(x) {
ones = rep(1, nrow(x))
x_mean = ones %*% t(colMeans(x))
x - x_mean
}
# apply it
center_mean(Data)
|
# plot 2: Global Active Power over time, written to plot2.png with a
# transparent background.
# NOTE(review): assumes 'dataSub' (the two-day subset of the household power
# data, with Date/Time/Global_active_power columns) already exists in the
# session -- confirm against the calling script.
datetime <- strptime(paste(dataSub$Date, dataSub$Time), "%Y-%m-%d %H:%M:%S")
# On unix the png device is opened (via dev.copy) BEFORE plotting, so the
# plot call below draws straight into the file.
if(.Platform$OS.type == 'unix')
{dev.copy(png, file = "plot2.png", bg = 'transparent')}
op <- par(bg = "transparent")
with(dataSub, plot(datetime, Global_active_power, type='l',
     xlab = '', ylab = 'Global Active Power (kilowatts)'))
par(op)
# On windows the on-screen plot is copied to the file AFTER plotting.
if(.Platform$OS.type == 'windows')
{dev.copy(png, file = "plot2.png", bg = 'transparent')}
dev.off() | /plot2.R | no_license | zge/ExData_Plotting1 | R | false | false | 436 | r | # plot 2
datetime <- strptime(paste(dataSub$Date, dataSub$Time), "%Y-%m-%d %H:%M:%S")
if(.Platform$OS.type == 'unix')
{dev.copy(png, file = "plot2.png", bg = 'transparent')}
op <- par(bg = "transparent")
with(dataSub, plot(datetime, Global_active_power, type='l',
xlab = '', ylab = 'Global Active Power (kilowatts)'))
par(op)
if(.Platform$OS.type == 'windows')
{dev.copy(png, file = "plot2.png", bg = 'transparent')}
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rapt_extend.R
\name{G3multi}
\alias{G3multi}
\title{Marked Nearest Neighbour Distance Function}
\usage{
G3multi(
X,
I,
J,
rmax = NULL,
nrval = 128,
disjoint = NULL,
correction = c("rs", "km", "han")
)
}
\arguments{
\item{X}{The observed point pattern, from which an estimate of the multitype
distance distribution function \eqn{G[3IJ](r)} will be computed. It must be
a marked point pattern. See Details.}
\item{I}{Subset of points of \code{X} from which distances are measured.}
\item{J}{Subset of points in \code{X} to which distances are measured.}
\item{rmax}{Optional. Maximum value of argument \emph{r} for which \eqn{G[3IJ](r)}
will be estimated.}
\item{nrval}{Optional. Number of values of \emph{r} for which \eqn{G3IJ(r)} will
be estimated. A large value of \code{nrval} is required to avoid discretisation
effects.}
\item{disjoint}{Optional flag indicating whether the subsets \code{I} and \code{J} are
disjoint. If missing, this value will be computed by inspecting the vectors
\code{I} and \code{J}.}
\item{correction}{Optional. Character string specifying the edge
correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"},
\code{"hanisch"}, and \code{"best"}. Alternatively \code{correction="all"} selects all
options.}
}
\description{
For a marked point pattern, estimate the distribution of the distance from a
typical point in subset \code{I} to the nearest point of subset \code{J}.
}
\details{
The function \code{G3multi} generalises \code{\link[spatstat]{G3est}} (for
unmarked point patterns) and \code{G3dot} (unimplemented) and
\code{\link{G3cross}} (for multitype point patterns) to arbitrary marked
point patterns.
}
\seealso{
\code{\link{G3cross}}, \code{\link[spatstat]{G3est}}
Other spatstat extensions:
\code{\link{G3cross}()},
\code{\link{Tstat.pp3}()},
\code{\link{bdist.points}()},
\code{\link{marktable.pp3}()},
\code{\link{marktable}()},
\code{\link{quadratcount.pp3}()},
\code{\link{quadrats.pp3}()},
\code{\link{rPoissonCluster3}()},
\code{\link{rjitter.pp3}()},
\code{\link{rjitter.ppp}()},
\code{\link{rjitter}()},
\code{\link{rpoint3}()},
\code{\link{sample.pp3}()},
\code{\link{sample.ppp}()},
\code{\link{shift.pp3}()},
\code{\link{studpermu.test.pp3}()},
\code{\link{studpermu.test}()},
\code{\link{superimpose.pp3}()}
}
\concept{spatstat extensions}
| /man/G3multi.Rd | no_license | AKIRA0129/rapt | R | false | true | 2,411 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rapt_extend.R
\name{G3multi}
\alias{G3multi}
\title{Marked Nearest Neighbour Distance Function}
\usage{
G3multi(
X,
I,
J,
rmax = NULL,
nrval = 128,
disjoint = NULL,
correction = c("rs", "km", "han")
)
}
\arguments{
\item{X}{The observed point pattern, from which an estimate of the multitype
distance distribution function \eqn{G[3IJ](r)} will be computed. It must be
a marked point pattern. See Details.}
\item{I}{Subset of points of \code{X} from which distances are measured.}
\item{J}{Subset of points in \code{X} to which distances are measured.}
\item{rmax}{Optional. Maximum value of argument \emph{r} for which \eqn{G[3IJ](r)}
will be estimated.}
\item{nrval}{Optional. Number of values of \emph{r} for which \eqn{G3IJ(r)} will
be estimated. A large value of \code{nrval} is required to avoid discretisation
effects.}
\item{disjoint}{Optional flag indicating whether the subsets \code{I} and \code{J} are
disjoint. If missing, this value will be computed by inspecting the vectors
\code{I} and \code{J}.}
\item{correction}{Optional. Character string specifying the edge
correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"},
\code{"hanisch"}, and \code{"best"}. Alternatively \code{correction="all"} selects all
options.}
}
\description{
For a marked point pattern, estimate the distribution of the distance from a
typical point in subset \code{I} to the nearest point of subset \code{J}.
}
\details{
The function \code{G3multi} generalises \code{\link[spatstat]{G3est}} (for
unmarked point patterns) and \code{G3dot} (unimplemented) and
\code{\link{G3cross}} (for multitype point patterns) to arbitrary marked
point patterns.
}
\seealso{
\code{\link{G3cross}}, \code{\link[spatstat]{G3est}}
Other spatstat extensions:
\code{\link{G3cross}()},
\code{\link{Tstat.pp3}()},
\code{\link{bdist.points}()},
\code{\link{marktable.pp3}()},
\code{\link{marktable}()},
\code{\link{quadratcount.pp3}()},
\code{\link{quadrats.pp3}()},
\code{\link{rPoissonCluster3}()},
\code{\link{rjitter.pp3}()},
\code{\link{rjitter.ppp}()},
\code{\link{rjitter}()},
\code{\link{rpoint3}()},
\code{\link{sample.pp3}()},
\code{\link{sample.ppp}()},
\code{\link{shift.pp3}()},
\code{\link{studpermu.test.pp3}()},
\code{\link{studpermu.test}()},
\code{\link{superimpose.pp3}()}
}
\concept{spatstat extensions}
|
# GitHub Activity API (events, notifications, starring, watching).
# Each function is a one-line wrapper that assembles the REST path segments
# and delegates to the api.get/put/patch/delete.request helpers defined
# elsewhere in the package. 'ctx' is the authenticated API context; extra
# arguments collected with .rest(...) are forwarded as request parameters.

# events
get.public.events <- function(ctx) api.get.request(ctx, c("events"))
get.repository.events <- function(ctx, owner, repo) api.get.request(ctx, c("repos", owner, repo, "events"))
get.repository.issue.events <- function(ctx, owner, repo) api.get.request(ctx, c("repos", owner, repo, "issues", "events"))
# TODO I believe the documentation on http://developer.github.com/v3/activity/events/ is wrong for network.public.events, but in case it isn't...
get.network.public.events <- function(ctx, owner, repo) api.get.request(ctx, c("networks", owner, repo, "events"))
get.organization.public.events <- function(ctx, org) api.get.request(ctx, c("orgs", org, "events"))
get.user.received.events <- function(ctx, user) api.get.request(ctx, c("users", user, "received_events"))
get.user.public.received.events <- function(ctx, user) api.get.request(ctx, c("users", user, "received_events", "public"))
get.user.performed.events <- function(ctx, user) api.get.request(ctx, c("users", user, "events"))
get.user.public.performed.events <- function(ctx, user) api.get.request(ctx, c("users", user, "events", "public"))
# uses the login of the authenticated user stored on the context
get.my.organization.events <- function(ctx, org) api.get.request(ctx, c("users", ctx$user$login, "events", "orgs", org))

# notifications
# The mutating calls pass expect.code=205, i.e. they treat HTTP 205 as success.
get.my.notifications <- function(ctx, ...) api.get.request(ctx, c("notifications"), params=.rest(...))
get.my.repository.notifications <- function(ctx, owner, repo, ...) api.get.request(ctx, c("repos", owner, repo, "notifications"), params=.rest(...))
mark.my.notifications <- function(ctx, ...) api.put.request(ctx, c("notifications"), expect.code=205, params=.rest(...))
mark.my.repository.notifications <- function(ctx, owner, repo, ...) api.put.request(ctx, c("repos", owner, repo, "notifications"), expect.code=205, params=.rest(...))
get.thread.notifications <- function(ctx, id) api.get.request(ctx, c("notifications", "threads", id))
mark.thread.notifications <- function(ctx, id, ...) api.patch.request(ctx, c("notifications", "threads", id), expect.code=205, params=.rest(...))
get.thread.notifications.subscription <- function(ctx, id) api.get.request(ctx, c("notifications", "threads", id, "subscription"))
set.thread.notifications.subscription <- function(ctx, id, ...) api.put.request(ctx, c("notifications", "threads", id, "subscription"), params=.rest(...))
unset.thread.notifications.subscription <- function(ctx, id) api.delete.request(ctx, c("notifications", "threads", id, "subscription"))

# starring
# star/unstar pass expect.code=204, i.e. they treat HTTP 204 as success.
get.stargazers <- function(ctx, owner, repo) api.get.request(ctx, c("repos", owner, repo, "stargazers"))
get.repositories.starred.by.user <- function(ctx, user, ...) api.get.request(ctx, c("users", user, "starred"), params=.rest(...))
get.repositories.starred.by.me <- function(ctx, ...) api.get.request(ctx, c("user", "starred"), params=.rest(...))
is.repository.starred.by.me <- function(ctx, owner, repo) api.test.request(ctx, c("user", "starred", owner, repo))
star.repository <- function(ctx, owner, repo) api.put.request(ctx, c("user", "starred", owner, repo), expect.code=204)
unstar.repository <- function(ctx, owner, repo) api.delete.request(ctx, c("user", "starred", owner, repo), expect.code=204)

# watching
# NB http://developer.github.com/changes/2012-9-5-watcher-api/
get.watchers <- function(ctx, owner, repo) api.get.request(ctx, c("repos", owner, repo, "subscribers"))
get.repositories.watched.by.user <- function(ctx, user) api.get.request(ctx, c("users", user, "subscriptions"))
get.repositories.watched.by.me <- function(ctx) api.get.request(ctx, c("user", "subscriptions"))
get.repository.subscription <- function(ctx, owner, repo) api.get.request(ctx, c("repos", owner, repo, "subscription"))
set.repository.subscription <- function(ctx, owner, repo, ...) api.put.request(ctx, c("repos", owner, repo, "subscription"), params=.rest(...))
unset.repository.subscription <- function(ctx, owner, repo) api.delete.request(ctx, c("repos", owner, repo, "subscription"))
| /R/activity.R | no_license | smschauhan/rgithub | R | false | false | 4,395 | r | # events
get.public.events <- function(ctx) api.get.request(ctx, c("events"))
get.repository.events <- function(ctx, owner, repo) api.get.request(ctx, c("repos", owner, repo, "events"))
get.repository.issue.events <- function(ctx, owner, repo) api.get.request(ctx, c("repos", owner, repo, "issues", "events"))
# TODO I believe the documentation on http://developer.github.com/v3/activity/events/ is wrong for network.public.events, but in case it isn't...
get.network.public.events <- function(ctx, owner, repo) api.get.request(ctx, c("networks", owner, repo, "events"))
get.organization.public.events <- function(ctx, org) api.get.request(ctx, c("orgs", org, "events"))
get.user.received.events <- function(ctx, user) api.get.request(ctx, c("users", user, "received_events"))
get.user.public.received.events <- function(ctx, user) api.get.request(ctx, c("users", user, "received_events", "public"))
get.user.performed.events <- function(ctx, user) api.get.request(ctx, c("users", user, "events"))
get.user.public.performed.events <- function(ctx, user) api.get.request(ctx, c("users", user, "events", "public"))
get.my.organization.events <- function(ctx, org) api.get.request(ctx, c("users", ctx$user$login, "events", "orgs", org))
# notifications
get.my.notifications <- function(ctx, ...) api.get.request(ctx, c("notifications"), params=.rest(...))
get.my.repository.notifications <- function(ctx, owner, repo, ...) api.get.request(ctx, c("repos", owner, repo, "notifications"), params=.rest(...))
mark.my.notifications <- function(ctx, ...) api.put.request(ctx, c("notifications"), expect.code=205, params=.rest(...))
mark.my.repository.notifications <- function(ctx, owner, repo, ...) api.put.request(ctx, c("repos", owner, repo, "notifications"), expect.code=205, params=.rest(...))
get.thread.notifications <- function(ctx, id) api.get.request(ctx, c("notifications", "threads", id))
mark.thread.notifications <- function(ctx, id, ...) api.patch.request(ctx, c("notifications", "threads", id), expect.code=205, params=.rest(...))
get.thread.notifications.subscription <- function(ctx, id) api.get.request(ctx, c("notifications", "threads", id, "subscription"))
set.thread.notifications.subscription <- function(ctx, id, ...) api.put.request(ctx, c("notifications", "threads", id, "subscription"), params=.rest(...))
unset.thread.notifications.subscription <- function(ctx, id) api.delete.request(ctx, c("notifications", "threads", id, "subscription"))
# starring
get.stargazers <- function(ctx, owner, repo) api.get.request(ctx, c("repos", owner, repo, "stargazers"))
get.repositories.starred.by.user <- function(ctx, user, ...) api.get.request(ctx, c("users", user, "starred"), params=.rest(...))
get.repositories.starred.by.me <- function(ctx, ...) api.get.request(ctx, c("user", "starred"), params=.rest(...))
is.repository.starred.by.me <- function(ctx, owner, repo) api.test.request(ctx, c("user", "starred", owner, repo))
star.repository <- function(ctx, owner, repo) api.put.request(ctx, c("user", "starred", owner, repo), expect.code=204)
unstar.repository <- function(ctx, owner, repo) api.delete.request(ctx, c("user", "starred", owner, repo), expect.code=204)
# watching
# NB http://developer.github.com/changes/2012-9-5-watcher-api/
get.watchers <- function(ctx, owner, repo) api.get.request(ctx, c("repos", owner, repo, "subscribers"))
get.repositories.watched.by.user <- function(ctx, user) api.get.request(ctx, c("users", user, "subscriptions"))
get.repositories.watched.by.me <- function(ctx) api.get.request(ctx, c("user", "subscriptions"))
get.repository.subscription <- function(ctx, owner, repo) api.get.request(ctx, c("repos", owner, repo, "subscription"))
set.repository.subscription <- function(ctx, owner, repo, ...) api.put.request(ctx, c("repos", owner, repo, "subscription"), params=.rest(...))
unset.repository.subscription <- function(ctx, owner, repo) api.delete.request(ctx, c("repos", owner, repo, "subscription"))
|
# Command-line interface: Rscript katamari.R <input_folder> <output_file>
args=commandArgs(trailingOnly = TRUE)
# Positional indices into 'args'
FOLDER_ID=1
OUTPUT_FILE_ID =2
# Directory containing the per-sample tab-separated .txt tables to merge
folder_path = args[FOLDER_ID]
# Path of the merged ("katamari") output table
output_file = args[OUTPUT_FILE_ID]
# Join a directory and a file name with a forward slash.
build_path <- function(dir, file) {
  file.path(dir, file)
}
# Read one tab-separated file (with a header row) into a data frame.
# read.delim(path) has the same defaults as read.csv(path, sep = "\t").
open_file <- function(path) {
  read.delim(path)
}
# Full-outer-join the table stored at 'path' onto the accumulator, matching
# on the first column of each table (merge by = 1, all = TRUE keeps rows
# present in either side).
attach_to_katamari <- function(katamari, path) {
  merge(katamari, open_file(path), by = 1, all = TRUE)
}
# Write the merged table as TSV: header row kept, no quoting, no row names.
save_katamari <- function(katamari, path) {
  write.table(katamari, file = path,
              sep = "\t", quote = FALSE,
              row.names = FALSE, col.names = TRUE)
}
# Collect the .txt shards in the input folder.
# BUGFIX: list.files' 'pattern' is a regular expression, not a glob --
# '*.txt' was malformed; '\\.txt$' matches names ending in ".txt".
folder <- list.files(folder_path, pattern = "\\.txt$")
print("Let's start katamari!")
# Seed the accumulator with the first file, then merge in the rest.
katamari <- open_file(build_path(folder_path, folder[1]))
# BUGFIX: the original looped over 2:length(folder), which runs backwards
# as c(2, 1) when only one file exists; seq_along(folder)[-1] is empty then.
for (i in seq_along(folder)[-1]) {
  if (i %% 50 == 0) {
    print(i)  # progress heartbeat every 50 files
  }
  katamari <- attach_to_katamari(katamari, build_path(folder_path, folder[i]))
}
print(paste("Katamari stored at '", output_file, "'", sep = ''))
save_katamari(katamari, output_file)
| /TCGA.analysis/tools/katamari.R | no_license | lpalomerol/Idibell.tools | R | false | false | 919 | r | args=commandArgs(trailingOnly = TRUE)
FOLDER_ID=1
OUTPUT_FILE_ID =2
folder_path = args[FOLDER_ID]
output_file = args[OUTPUT_FILE_ID]
build_path <- function(dir, file){
paste(dir, file, sep='/')
}
open_file <- function(path){
read.csv(path, sep='\t')
}
attach_to_katamari <- function(katamari, path){
file = open_file(path)
return(merge(katamari, file, by=1, all = TRUE))
}
save_katamari <- function(katamari, path){
write.table(katamari, path, sep='\t', quote = FALSE, row.names = FALSE, col.names=TRUE)
}
folder = list.files(folder_path, pattern = '*.txt')
print("Let's start katamari!")
katamari = open_file(build_path(folder_path, folder[1]))
for(i in 2:length(folder)){
if( i %% 50 == 0){
print(i)
}
katamari = attach_to_katamari(katamari, build_path(folder_path, folder[i]))
}
print(paste("Katamari stored at '", output_file, "'", sep=''))
save_katamari(katamari, output_file)
|
# NOTE(review): hard-coded, machine-specific working directory -- this script
# only runs as-is on the original author's machine; every relative path below
# resolves against it.
setwd("/home/chris/Bureau/sb_cofactor_hr/A549")
library(dplyr)
###### NIPBL Regions
nipbl_regions <- c("output/chip-pipeline-GRCh38/peak_call/A549_NIPBL/A549_NIPBL_CTRL_specific.bed",
"output/chip-pipeline-GRCh38/peak_call/A549_NIPBL/A549_NIPBL_common.bed",
"output/chip-pipeline-GRCh38/peak_call/A549_NIPBL/A549_NIPBL_DEX_specific.bed")
regions <- paste(nipbl_regions, collapse = " ")
region_labels <- "NIPBL_CTRL NIPBL_common NIPBL_DEX"
###### Samples etoh/dex
# EP300, JUNB to do
targets <- c("BCL3", "CEBPB", "CTCF", "FOSL2",
"H3K4me1", "H3K4me2", "H3K4me3", "H3K9me3", "H3K27ac",
"HES2", "JUN", "NR3C1", "RAD21", "SMC3")
etoh_rep <- c(3, 2, 3, 2,
3, 3, 3, 3, 3,
2, 3, 3, 3, 3)
dex_rep <- c(3, 3, 3, 3,
3, 3, 3, 3, 3,
3, 2, 3, 3, 3)
replicate_nb <- data.frame(targets, etoh_rep, dex_rep)
###### Command line
# 1. computeMatrix
# 2. plotHeatmap
output_dir <- "output/analyses/heatmap_NIPBL_vs_ENCODE_Reddy"
dir.create(output_dir, recursive=TRUE, showWarnings=FALSE)
bigwig_dir <- "input/ENCODE/A549/GRCh38/chip-seq/bigWig"
# Build the bigWig paths "<bigwig_dir>/<target>_<condition>_rep<i>.bigWig"
# for replicates 1..nb_dex (the parameter name is historical; callers pass
# the replicate count of whichever condition is requested).
# Vectorized; returns character(0) when nb_dex == 0, whereas the original
# 1:nb_dex loop would wrongly emit rep1 and rep0. Reads the global
# `bigwig_dir`.
generate_sample_path <- function(target, condition, nb_dex) {
  filenames <- paste0(target, "_", condition, "_", "rep",
                      seq_len(nb_dex), ".bigWig")
  file.path(bigwig_dir, filenames)
}
# Run deepTools computeMatrix for one ChIP target: score its etoh and dex1h
# replicate bigWigs over the three NIPBL region sets, +/- 1 kb around the
# region centers, with 8 threads.
# NOTE(review): relies on globals -- `regions` (region file list),
# `bigwig_dir` (via generate_sample_path) and `matrix_path` (assigned in
# the main loop before each call). Side effect: shells out via system().
compute_matrix <- function(target, replicate_nb) {
  # Replicate counts for this target.
  nb <- replicate_nb %>% filter(targets == target)
  nb_etoh <- nb$etoh_rep
  nb_dex <- nb$dex_rep
  samples_etoh <- generate_sample_path(target, condition = "etoh", nb_etoh)
  samples_dex <- generate_sample_path(target, condition = "dex1h", nb_dex)
  sample_scaffold <- c(samples_etoh, samples_dex)
  samples <- paste(sample_scaffold, collapse = " ")
  output_path <- matrix_path
  cmd_line_scaffold <- c("computeMatrix reference-point --referencePoint center",
                         "--regionsFileName", regions,
                         "--scoreFileName", samples,
                         "--upstream", "1000", "--downstream", "1000", "-p", "8",
                         "--outFileName", output_path)
  cmd_line <- paste(cmd_line_scaffold, collapse = " ")
  message(cmd_line)  # echo the full command for reproducibility
  system(cmd_line)
}
# Build the heatmap sample labels "<target>_<condition>_rep<i>" for
# replicates 1..nb_dex (parameter name is historical; any condition's count
# may be passed). Vectorized; returns character(0) when nb_dex == 0,
# whereas the original 1:nb_dex loop would wrongly emit rep1 and rep0.
generate_sample_labels <- function(target, condition, nb_dex) {
  paste0(target, "_", condition, "_", "rep", seq_len(nb_dex))
}
# Render a deepTools heatmap for one target from the precomputed matrix.
# Reads the globals `matrix_path`, `region_labels` and `output_dir`; shells
# out to plotHeatmap via system().
plot_heatmap <- function(target, replicate_nb) {
  reps <- replicate_nb %>% filter(targets == target)
  labels_all <- c(generate_sample_labels(target, condition = "etoh", reps$etoh_rep),
                  generate_sample_labels(target, condition = "dex1h", reps$dex_rep))
  sample_labels <- paste(labels_all, collapse = " ")
  heatmap_png <- file.path(output_dir, paste0("nipbl_", target, "_heatmap.png"))
  cmd_line <- paste(c("plotHeatmap", "--matrixFile", matrix_path,
                      "--colorMap", "rainbow",
                      "--regionsLabel", region_labels,
                      "--samplesLabel", sample_labels,
                      "--outFileName", heatmap_png),
                    collapse = " ")
  message(cmd_line)
  system(cmd_line)
}
### Main function
# For each ChIP target: compute the deepTools matrix, then plot it.
# NOTE(review): `matrix_path` is assigned here as a global and read inside
# both compute_matrix() and plot_heatmap().
for (target in targets) {
  message("##########\t", target)
  matrix_path <- file.path(output_dir, paste0("nipbl_", target, "_matrix.gzip"))
  compute_matrix(target, replicate_nb)
  plot_heatmap(target, replicate_nb)
} | /A549/scripts/chris/heatmap_NIPBL_vs_ENCODE_Reddy.R | no_license | ArnaudDroitLab/sb_cofactor | R | false | false | 3,671 | r | setwd("/home/chris/Bureau/sb_cofactor_hr/A549")
library(dplyr)
###### NIPBL Regions
# Three NIPBL peak sets: control-specific, shared, and dex-specific BED files.
nipbl_regions <- c("output/chip-pipeline-GRCh38/peak_call/A549_NIPBL/A549_NIPBL_CTRL_specific.bed",
                   "output/chip-pipeline-GRCh38/peak_call/A549_NIPBL/A549_NIPBL_common.bed",
                   "output/chip-pipeline-GRCh38/peak_call/A549_NIPBL/A549_NIPBL_DEX_specific.bed")
# Space-separated, as expected on the deepTools command line.
regions <- paste(nipbl_regions, collapse = " ")
region_labels <- "NIPBL_CTRL NIPBL_common NIPBL_DEX"
###### Samples etoh/dex
# EP300, JUNB to do
# ChIP targets and their per-condition replicate counts (parallel vectors).
targets <- c("BCL3", "CEBPB", "CTCF", "FOSL2",
             "H3K4me1", "H3K4me2", "H3K4me3", "H3K9me3", "H3K27ac",
             "HES2", "JUN", "NR3C1", "RAD21", "SMC3")
etoh_rep <- c(3, 2, 3, 2,
              3, 3, 3, 3, 3,
              2, 3, 3, 3, 3)
dex_rep <- c(3, 3, 3, 3,
             3, 3, 3, 3, 3,
             3, 2, 3, 3, 3)
replicate_nb <- data.frame(targets, etoh_rep, dex_rep)
###### Command line
# 1. computeMatrix
# 2. plotHeatmap
output_dir <- "output/analyses/heatmap_NIPBL_vs_ENCODE_Reddy"
dir.create(output_dir, recursive=TRUE, showWarnings=FALSE)
bigwig_dir <- "input/ENCODE/A549/GRCh38/chip-seq/bigWig"
# Return the bigWig paths for replicates 1..nb_dex of one target/condition;
# reads the global `bigwig_dir`.
generate_sample_path <- function(target, condition, nb_dex) {
  vapply(1:nb_dex, function(rep_idx) {
    file.path(bigwig_dir,
              paste0(target, "_", condition, "_", "rep", rep_idx, ".bigWig"))
  }, character(1))
}
# Invoke deepTools computeMatrix for one target: etoh then dex1h replicate
# bigWigs, scored over the NIPBL region sets (+/- 1 kb, 8 threads).
# Reads the globals `regions` and `matrix_path`; shells out via system().
compute_matrix <- function(target, replicate_nb) {
  reps <- replicate_nb %>% filter(targets == target)
  score_files <- c(generate_sample_path(target, condition = "etoh", reps$etoh_rep),
                   generate_sample_path(target, condition = "dex1h", reps$dex_rep))
  samples <- paste(score_files, collapse = " ")
  cmd_line <- paste(c("computeMatrix reference-point --referencePoint center",
                      "--regionsFileName", regions,
                      "--scoreFileName", samples,
                      "--upstream", "1000", "--downstream", "1000", "-p", "8",
                      "--outFileName", matrix_path),
                    collapse = " ")
  message(cmd_line)
  system(cmd_line)
}
# Return the heatmap sample labels "<target>_<condition>_rep<i>" for
# replicates 1..nb_dex.
generate_sample_labels <- function(target, condition, nb_dex) {
  vapply(1:nb_dex, function(rep_idx) {
    paste0(target, "_", condition, "_", "rep", rep_idx)
  }, character(1))
}
# Render a deepTools heatmap for one target from the precomputed matrix.
# NOTE(review): reads the globals `matrix_path` (set in the main loop),
# `region_labels` and `output_dir`. Side effect: shells out via system().
plot_heatmap <- function(target, replicate_nb) {
  # Replicate counts for this target.
  nb <- replicate_nb %>% filter(targets == target)
  nb_etoh <- nb$etoh_rep
  nb_dex <- nb$dex_rep
  sample_labels_etoh <- generate_sample_labels(target, condition = "etoh", nb_etoh)
  sample_labels_dex <- generate_sample_labels(target, condition = "dex1h", nb_dex)
  sample_labels_scaffold <- c(sample_labels_etoh, sample_labels_dex)
  sample_labels <- paste(sample_labels_scaffold, collapse = " ")
  output_path <- file.path(output_dir, paste0("nipbl_", target, "_heatmap.png"))
  cmd_line_scaffold <- c("plotHeatmap", "--matrixFile", matrix_path,
                         "--colorMap", "rainbow",
                         "--regionsLabel", region_labels,
                         "--samplesLabel", sample_labels,
                         "--outFileName", output_path)
  cmd_line <- paste(cmd_line_scaffold, collapse = " ")
  message(cmd_line)  # echo the full command for reproducibility
  system(cmd_line)
}
### Main function
# For each ChIP target: compute the deepTools matrix, then plot it.
# NOTE(review): `matrix_path` is assigned here as a global and read inside
# both compute_matrix() and plot_heatmap().
for (target in targets) {
  message("##########\t", target)
  matrix_path <- file.path(output_dir, paste0("nipbl_", target, "_matrix.gzip"))
  compute_matrix(target, replicate_nb)
  plot_heatmap(target, replicate_nb)
}
/Pipeline.R | permissive | torbjornsaterberg/Ecologically-Sustainable-Exploitation | R | false | false | 2,005 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mm_filter_valid_days.R
\name{mm_filter_valid_days}
\alias{mm_filter_valid_days}
\title{Remove entries in data}
\usage{
mm_filter_valid_days(
data,
data_daily = NULL,
day_start = 4,
day_end = 27.99,
day_tests = c("full_day", "even_timesteps", "complete_data", "pos_discharge"),
required_timestep = NA,
timestep_days = TRUE
)
}
\arguments{
\item{data}{data.frame of instantaneous observations, to be filtered to only
those points on days that pass the specified tests in mm_is_valid_day}
\item{data_daily}{data.frame of daily estimates/statistics, to be filtered in
accordance with the filtering of data}
\item{day_start}{start time (inclusive) of a day's data in number of hours
from the midnight that begins the date. For example, day_start=-1.5
indicates that data describing 2006-06-26 begin at 2006-06-25 22:30, or at
the first observation time that occurs after that time if day_start doesn't
fall exactly on an observation time. For metabolism models working with
single days of input data, it is conventional/useful to begin the day the
evening before, e.g., -1.5, and to end just before the next sunrise, e.g.,
30. For multiple consecutive days, it may make the most sense to start just
before sunrise (e.g., 4) and to end 24 hours later. For nighttime
regression, the date assigned to a chunk of data should be the date whose
evening contains the data. The default is therefore 12 to 36 for
metab_night, of which the times of darkness will be used.}
\item{day_end}{end time (exclusive) of a day's data in number of hours from
the midnight that begins the date. For example, day_end=30 indicates that
data describing 2006-06-26 end at the last observation time that occurs
before 2006-06-27 06:00. See day_start for recommended start and end times.}
\item{day_tests}{list of tests to conduct to determine whether each date
worth of data is valid for modeling. The results of these tests will be
combined with the result of the test implied if \code{required_timestep} is
numeric and then will be passed to \code{model_fun} as the
\code{ply_validity} argument to that function.}
\item{required_timestep}{NA or numeric (length 1). If numeric, the timestep
length in days that a date must have to pass the validity check (to within
a tolerance of 0.2\% of the value of \code{required_timestep}). The result
of this test will be combined with the results of the tests listed in
\code{day_tests} and reported to \code{model_fun} as the
\code{ply_validity} argument to that function.}
\item{timestep_days}{TRUE if you would like the mean timestep length to be
calculated for each data ply and passed to \code{model_fun} as the
\code{timestep_days} argument to that function. Alternatively, this may be
numeric as a specifically expected timestep length in days; for example, a
1-hour timestep is 1/24 is 0.0416667.}
}
\value{
list of data and data_daily with same structure as inputs but with
invalid days removed, plus a third data.frame of dates that were removed
}
\description{
Filter out any data rows that don't pass the specified tests for completeness
and regularity
}
\examples{
dat <- data_metab(res='30', num_days='10', flaws='missing middle')
datfilt <- mm_filter_valid_days(dat)
datfilt$removed
c(nrow(dat), nrow(datfilt$data))
}
| /man/mm_filter_valid_days.Rd | permissive | lsdeel/streamMetabolizer | R | false | true | 3,368 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mm_filter_valid_days.R
\name{mm_filter_valid_days}
\alias{mm_filter_valid_days}
\title{Remove entries in data}
\usage{
mm_filter_valid_days(
data,
data_daily = NULL,
day_start = 4,
day_end = 27.99,
day_tests = c("full_day", "even_timesteps", "complete_data", "pos_discharge"),
required_timestep = NA,
timestep_days = TRUE
)
}
\arguments{
\item{data}{data.frame of instantaneous observations, to be filtered to only
those points on days that pass the specified tests in mm_is_valid_day}
\item{data_daily}{data.frame of daily estimates/statistics, to be filtered in
accordance with the filtering of data}
\item{day_start}{start time (inclusive) of a day's data in number of hours
from the midnight that begins the date. For example, day_start=-1.5
indicates that data describing 2006-06-26 begin at 2006-06-25 22:30, or at
the first observation time that occurs after that time if day_start doesn't
fall exactly on an observation time. For metabolism models working with
single days of input data, it is conventional/useful to begin the day the
evening before, e.g., -1.5, and to end just before the next sunrise, e.g.,
30. For multiple consecutive days, it may make the most sense to start just
before sunrise (e.g., 4) and to end 24 hours later. For nighttime
regression, the date assigned to a chunk of data should be the date whose
evening contains the data. The default is therefore 12 to 36 for
metab_night, of which the times of darkness will be used.}
\item{day_end}{end time (exclusive) of a day's data in number of hours from
the midnight that begins the date. For example, day_end=30 indicates that
data describing 2006-06-26 end at the last observation time that occurs
before 2006-06-27 06:00. See day_start for recommended start and end times.}
\item{day_tests}{list of tests to conduct to determine whether each date
worth of data is valid for modeling. The results of these tests will be
combined with the result of the test implied if \code{required_timestep} is
numeric and then will be passed to \code{model_fun} as the
\code{ply_validity} argument to that function.}
\item{required_timestep}{NA or numeric (length 1). If numeric, the timestep
length in days that a date must have to pass the validity check (to within
a tolerance of 0.2\% of the value of \code{required_timestep}). The result
of this test will be combined with the results of the tests listed in
\code{day_tests} and reported to \code{model_fun} as the
\code{ply_validity} argument to that function.}
\item{timestep_days}{TRUE if you would like the mean timestep length to be
calculated for each data ply and passed to \code{model_fun} as the
\code{timestep_days} argument to that function. Alternatively, this may be
numeric as a specifically expected timestep length in days; for example, a
1-hour timestep is 1/24 is 0.0416667.}
}
\value{
list of data and data_daily with same structure as inputs but with
invalid days removed, plus a third data.frame of dates that were removed
}
\description{
Filter out any data rows that don't pass the specified tests for completeness
and regularity
}
\examples{
dat <- data_metab(res='30', num_days='10', flaws='missing middle')
datfilt <- mm_filter_valid_days(dat)
datfilt$removed
c(nrow(dat), nrow(datfilt$data))
}
|
# EM-style imputation of missing data.
#   X      = numeric data matrix containing NA entries.
#   thresh = convergence tolerance on changes in the imputed matrix.
#   maxit  = maximum number of iterations.
# Missing cells are initialised to their column means, then each incomplete
# row is repeatedly re-imputed with the conditional (regression) mean of its
# missing block given its observed block, until successive matrices agree
# to within `thresh` or `maxit` is reached. Returns the imputed matrix.
EM <- function(X,thresh=.0001,maxit=100) {
  n <- nrow(X)
  p <- ncol(X)
  # Indices of rows containing at least one missing value (replaces the
  # original O(n^2) append loop).
  mis.ri <- which(apply(X, 1, function(row) any(is.na(row))))
  # Start from column-mean imputation.
  new.X <- X
  for (j in 1:p) new.X[which(is.na(X[,j])), j] <- mean(X[,j], na.rm = TRUE)
  old.X <- new.X + thresh*2  # guarantees the first convergence test fails
  j <- 1
  while (!(all(abs(new.X - old.X) < thresh)) && (j < maxit)) {
    for (i in mis.ri) {
      mi <- which(is.na(X[i,]))  # missing columns in row i
      # Statistics over the other rows, partitioned into the missing-column
      # block (1) and the observed-column block (2). The unused S11 term of
      # the original has been dropped.
      mu1 <- apply(as.matrix(new.X[-i, mi]), 2, mean)
      mu2 <- apply(as.matrix(new.X[-i, -mi]), 2, mean)
      x2 <- new.X[i, -mi]
      S12 <- var(new.X[-i, mi], new.X[-i, -mi])
      S22 <- var(new.X[-i, -mi])
      B <- S12 %*% solve(S22)        # regression coefficients
      x1 <- mu1 + B %*% (x2 - mu2)   # E[missing | observed]
      # NOTE: old.X is refreshed per row, so convergence is judged on the
      # change from the most recent row update (original behaviour kept).
      old.X <- new.X
      new.X[i, mi] <- as.vector(x1)
    }
    j <- j + 1
  }
  print(paste(ifelse(j < maxit, "Converged.", "Not Converged."), "Number of Iterations:", j)); cat("\n")
  new.X
}
| /R_Functions/EM.R | no_license | luiarthur/byuHW | R | false | false | 1,182 | r | # EM algorithm for missing data.
# X = data matrix with missing entries (NA). thresh = convergence parameter.
# maxit = maximum number of iterations.
# Missing cells are initialised to their column means, then each incomplete
# row is repeatedly re-imputed with the conditional (regression) mean of its
# missing block given its observed block. Returns the imputed matrix.
EM <- function(X,thresh=.0001,maxit=100) {
  n <- nrow(X)
  p <- ncol(X)
  # Collect indices of rows containing at least one NA.
  mis.ri <- NULL
  k <- 1
  for (i in 1:n) {
    if (any(is.na(X[i,]))) {
      mis.ri[k] <- i
      k <- k + 1
    }
  }
  # Start from column-mean imputation.
  new.X <- X
  for (j in 1:p) new.X[which(is.na(X[,j])),j] <- mean(X[,j],na.rm=T)
  old.X <- new.X + thresh*2
  j <- 1
  while (!(all(abs(new.X-old.X)<thresh)) & (j<maxit)) {
    for (i in mis.ri) {
      # Always TRUE for rows in mis.ri; kept as a guard.
      if (any(is.na(X[i,]))) {
        mi <- which(is.na(X[i,]))
        # Partition statistics over the other rows: block 1 = missing
        # columns, block 2 = observed columns.
        mu1 <- apply(as.matrix(new.X[-i, mi]),2,mean)
        mu2 <- apply(as.matrix(new.X[-i,-mi]),2,mean)
        x2 <- new.X[i,-mi]
        # NOTE(review): S11 is computed but never used.
        S11 <- var(new.X[-i,mi])
        S12 <- var(new.X[-i,mi],new.X[-i,-mi])
        S22 <- var(new.X[-i,-mi])
        B <- S12 %*% solve(S22)
        # Conditional mean E[missing | observed].
        x1 <- mu1 + B %*% (x2-mu2)
        # NOTE(review): old.X is refreshed per row, so the convergence test
        # only sees the change from the most recent row update.
        old.X <- new.X
        new.X[i,mi] <- as.vector(x1)
      }
      #print(Sys.time())
      #print(new.X)
      #Sys.sleep(1)
    }
    j <- j+1
  }
  print(paste(ifelse(j<maxit,"Converged.","Not Converged."),"Number of Iterations:", j)); cat("\n")
  new.X
}
|
#Version Control | /Text.R | no_license | tiffanguyen/New-Project | R | false | false | 16 | r | #Version Control |
# Clear the workspace so stale objects cannot leak into the models.
rm(list=ls())
# required R packages
library(ada)
library(pROC)
# Modelling data, shuffled row-wise.
inputFile = 'MHS_CHF.csv'
df <- read.csv(inputFile)
df = df[sample(1:nrow(df)),]
# Predictor column names, taken from column `x` of the CSV.
predsFile = 'readmit_cols.csv'
preds <- read.csv(predsFile)
preds = as.character(preds$x)
# Outcomes to model; one boosted model is trained and saved per entry.
responseVararray = c('thirtyday','sixtyday','ninetyday','two_LOS','four_LOS','six_LOS','three_mortality','six_mortality','nine_mortality','twelve_mortality')
# First five (shuffled) rows reused as a smoke-test prediction set.
testdata = df[1:5,]
#for(i in 2:length(responseVararray))
for(i in 1:length(responseVararray))
{
responseVar = responseVararray[i]
# Formula: current outcome against every other retained column.
formula <- as.formula(paste(responseVar,'~.',sep=''))
cat(paste0(i,".Training model for ",responseVar,"\n"))
# Restrict the frame to the outcome plus the predictor columns.
model = ada(formula, data = df[,which(names(df) %in% c(responseVar, preds))])
saveRDS(model,paste0(responseVar,"ada.RDS"))
cat(paste0(i,".Doing a test prediction for ",responseVar,"\n"))
# Column 2 of the probability matrix = second class probability
# (presumably the positive class -- confirm against ada's level order).
pred = predict(model, newdata=testdata[,which(names(testdata) %in% c(responseVar,preds))], type='prob')[, 2]
print(pred)
cat(paste("\n"))
}
| /10AdaModels/generateReadmitModels.R | no_license | aftab-hassan/AzureMLDeploymentScripts | R | false | false | 1,041 | r | #clear
rm(list=ls())
#required R packages
library(ada)
library(pROC)
#input file
inputFile = 'MHS_CHF.csv'
df <- read.csv(inputFile)
df = df[sample(1:nrow(df)),]
#preds file
predsFile = 'readmit_cols.csv'
preds <- read.csv(predsFile)
preds = as.character(preds$x)
#response
responseVararray = c('thirtyday','sixtyday','ninetyday','two_LOS','four_LOS','six_LOS','three_mortality','six_mortality','nine_mortality','twelve_mortality')
#test data
testdata = df[1:5,]
#for(i in 2:length(responseVararray))
for(i in 1:length(responseVararray))
{
responseVar = responseVararray[i]
formula <- as.formula(paste(responseVar,'~.',sep=''))
cat(paste0(i,".Training model for ",responseVar,"\n"))
model = ada(formula, data = df[,which(names(df) %in% c(responseVar, preds))])
saveRDS(model,paste0(responseVar,"ada.RDS"))
cat(paste0(i,".Doing a test prediction for ",responseVar,"\n"))
pred = predict(model, newdata=testdata[,which(names(testdata) %in% c(responseVar,preds))], type='prob')[, 2]
print(pred)
cat(paste("\n"))
}
|
#' Box-Cox Transformation for Non-Negative Data
#'
#' \code{step_BoxCox} creates a \emph{specification} of a recipe step that will
#' transform data using a simple Box-Cox transformation.
#'
#' @inheritParams step_center
#' @param role Not used by this step since no new variables are created.
#' @param lambdas A numeric vector of transformation values. This is
#' \code{NULL} until computed by \code{\link{prepare.recipe}}.
#' @param limits A length 2 numeric vector defining the range to compute the
#' transformation parameter lambda.
#' @param nunique An integer; variables with fewer than this many unique
#'   values will not be evaluated for a transformation
#' @return \code{step_BoxCox} returns an object of class \code{step_BoxCox}.
#' @keywords datagen
#' @concept preprocessing transformation_methods
#' @export
#' @details The Box-Cox transformation, which requires a strictly positive
#' variable, can be used to rescale a variable to be more similar to a
#' normal distribution. In this package, the partial log-likelihood function
#' is directly optimized within a reasonable set of transformation values
#' (which can be changed by the user).
#'
#' This transformation is typically done on the outcome variable using the
#' residuals for a statistical model (such as ordinary least squares).
#' Here, a simple null model (intercept only) is used to apply the
#' transformation to the \emph{predictor} variables individually. This can
#' have the effect of making the variable distributions more symmetric.
#'
#' If the transformation parameters are estimated to be very close to the
#' bounds, or if the optimization fails, a value of \code{NA} is used and
#' no transformation is applied.
#'
#' @references Sakia, R. M. (1992). The Box-Cox transformation technique:
#' A review. \emph{The Statistician}, 169-178..
#' @examples
#'
#' rec <- recipe(~ ., data = as.data.frame(state.x77))
#'
#' bc_trans <- step_BoxCox(rec, all_numeric())
#'
#' bc_estimates <- prepare(bc_trans, training = as.data.frame(state.x77))
#'
#' bc_data <- bake(bc_estimates, as.data.frame(state.x77))
#'
#' plot(density(state.x77[, "Illiteracy"]), main = "before")
#' plot(density(bc_data$Illiteracy), main = "after")
#' @seealso \code{\link{step_YeoJohnson}} \code{\link{recipe}}
#' \code{\link{prepare.recipe}} \code{\link{bake.recipe}}
step_BoxCox <-
  function(recipe,
           ...,
           role = NA,
           trained = FALSE,
           lambdas = NULL,
           limits = c(-5, 5),
           nunique = 5) {
    ## Capture the variable selectors; at least one is required.
    terms <- quos(...)
    if (is_empty(terms)) {
      stop("Please supply at least one variable specification. ",
           "See ?selections.",
           call. = FALSE)
    }
    ## Normalise `limits` to a sorted length-2 range before storing it.
    lambda_range <- sort(limits)[1:2]
    add_step(
      recipe,
      step_BoxCox_new(
        terms = terms,
        role = role,
        trained = trained,
        lambdas = lambdas,
        limits = lambda_range,
        nunique = nunique
      )
    )
  }
## Internal constructor for the step object (no validation, just assembly).
step_BoxCox_new <-
  function(terms = NULL, role = NA, trained = FALSE,
           lambdas = NULL, limits = NULL, nunique = NULL) {
    step(subclass = "BoxCox",
         terms = terms,
         role = role,
         trained = trained,
         lambdas = lambdas,
         limits = limits,
         nunique = nunique)
  }
#' @export
prepare.step_BoxCox <- function(x, training, info = NULL, ...) {
  # Resolve the user's selectors to concrete column names.
  col_names <- select_terms(x$terms, info = info)
  # Estimate one lambda per selected column; estimate_bc returns NA when a
  # column is unsuitable (too few unique values, non-positive data, or an
  # optimum at the search bounds).
  values <- vapply(
    training[, col_names],
    estimate_bc,
    c(lambda = 0),
    limits = x$limits,
    nunique = x$nunique
  )
  # Drop NA lambdas so bake() leaves those columns untouched.
  values <- values[!is.na(values)]
  step_BoxCox_new(
    terms = x$terms,
    role = x$role,
    trained = TRUE,
    lambdas = values,
    limits = x$limits,
    nunique = x$nunique
  )
}
#' @export
#' @export
bake.step_BoxCox <- function(object, newdata, ...) {
  ## Nothing to transform when no usable lambda survived prepare().
  if (length(object$lambdas) == 0)
    return(as_tibble(newdata))
  ## Transform each trained column in place with its stored lambda.
  for (column in names(object$lambdas)) {
    newdata[, column] <- bc_trans(getElement(newdata, column),
                                  lambda = object$lambdas[column])
  }
  as_tibble(newdata)
}
## Console display for the step: selectors before training, the names of
## the estimated lambdas (plus a "[trained]" tag) afterwards.
print.step_BoxCox <-
  function(x, width = max(20, options()$width - 35), ...) {
    cat("Box-Cox transformation on ", sep = "")
    if (x$trained) {
      cat(format_ch_vec(names(x$lambdas), width = width))
      cat(" [trained]\n")
    } else {
      cat(format_selectors(x$terms, wdth = width))
      cat("\n")
    }
    invisible(x)
  }
## Apply the Box-Cox transform for a single lambda.
## lambda = NA means "no transformation"; |lambda| < eps is treated as the
## log transform (the lambda -> 0 limit of (x^lambda - 1) / lambda).
bc_trans <- function(x, lambda, eps = .001) {
  if (is.na(lambda)) {
    return(x)
  }
  if (abs(lambda) >= eps) {
    (x ^ lambda - 1) / lambda
  } else {
    log(x)
  }
}
## Helper for the log-likelihood calculation: partial log-likelihood of the
## Box-Cox parameter `lambda` for data `y`, scaled by the geometric mean `gm`.
#' @importFrom stats var
ll_bc <- function(lambda, y, gm, eps = .001) {
  n <- length(y)
  scale_fac <- gm ^ (lambda - 1)
  if (abs(lambda) <= eps) {
    z <- log(y) / scale_fac
  } else {
    z <- (y ^ lambda - 1) / (lambda * scale_fac)
  }
  var_z <- var(z) * (n - 1) / n  # MLE (biased) variance
  -.5 * n * log(var_z)
}
#' @importFrom stats complete.cases
## Objective for optimize(): log-likelihood of lambda computed on the
## complete (non-missing) observations only.
bc_obj <- function(lam, dat) {
  observed <- dat[complete.cases(dat)]
  geo_mean <- exp(mean(log(observed)))
  ll_bc(lambda = lam, y = observed, gm = geo_mean)
}
#' @importFrom stats optimize
## Estimate the Box-Cox lambda for one variable, or NA when unsuitable.
estimate_bc <- function(dat,
                        limits = c(-5, 5),
                        nunique = 5) {
  eps <- .001
  # Refuse near-constant variables and any with non-positive values
  # (the Box-Cox transform requires strictly positive data).
  if (length(unique(dat)) < nunique |
      any(dat[complete.cases(dat)] <= 0))
    return(NA)
  # Maximise the partial log-likelihood over the allowed lambda range.
  res <- optimize(
    bc_obj,
    interval = limits,
    maximum = TRUE,
    dat = dat,
    tol = .0001
  )
  lam <- res$maximum
  # A maximum at (or within eps of) the search bounds is treated as a
  # failed estimate.
  if (abs(limits[1] - lam) <= eps | abs(limits[2] - lam) <= eps)
    lam <- NA
  lam
}
| /R/BoxCox.R | no_license | lionel-/recipes | R | false | false | 5,641 | r | #' Box-Cox Transformation for Non-Negative Data
#'
#' \code{step_BoxCox} creates a \emph{specification} of a recipe step that will
#' transform data using a simple Box-Cox transformation.
#'
#' @inheritParams step_center
#' @param role Not used by this step since no new variables are created.
#' @param lambdas A numeric vector of transformation values. This is
#' \code{NULL} until computed by \code{\link{prepare.recipe}}.
#' @param limits A length 2 numeric vector defining the range to compute the
#' transformation parameter lambda.
#' @param nunique An integer; variables with fewer than this many unique
#'   values will not be evaluated for a transformation
#' @return \code{step_BoxCox} returns an object of class \code{step_BoxCox}.
#' @keywords datagen
#' @concept preprocessing transformation_methods
#' @export
#' @details The Box-Cox transformation, which requires a strictly positive
#' variable, can be used to rescale a variable to be more similar to a
#' normal distribution. In this package, the partial log-likelihood function
#' is directly optimized within a reasonable set of transformation values
#' (which can be changed by the user).
#'
#' This transformation is typically done on the outcome variable using the
#' residuals for a statistical model (such as ordinary least squares).
#' Here, a simple null model (intercept only) is used to apply the
#' transformation to the \emph{predictor} variables individually. This can
#' have the effect of making the variable distributions more symmetric.
#'
#' If the transformation parameters are estimated to be very close to the
#' bounds, or if the optimization fails, a value of \code{NA} is used and
#' no transformation is applied.
#'
#' @references Sakia, R. M. (1992). The Box-Cox transformation technique:
#' A review. \emph{The Statistician}, 169-178..
#' @examples
#'
#' rec <- recipe(~ ., data = as.data.frame(state.x77))
#'
#' bc_trans <- step_BoxCox(rec, all_numeric())
#'
#' bc_estimates <- prepare(bc_trans, training = as.data.frame(state.x77))
#'
#' bc_data <- bake(bc_estimates, as.data.frame(state.x77))
#'
#' plot(density(state.x77[, "Illiteracy"]), main = "before")
#' plot(density(bc_data$Illiteracy), main = "after")
#' @seealso \code{\link{step_YeoJohnson}} \code{\link{recipe}}
#' \code{\link{prepare.recipe}} \code{\link{bake.recipe}}
step_BoxCox <-
  function(recipe,
           ...,
           role = NA,
           trained = FALSE,
           lambdas = NULL,
           limits = c(-5, 5),
           nunique = 5) {
    ## Capture the variable selectors; at least one is required.
    terms <- quos(...)
    if (is_empty(terms)) {
      stop("Please supply at least one variable specification. ",
           "See ?selections.",
           call. = FALSE)
    }
    ## Normalise `limits` to a sorted length-2 range before storing it.
    lambda_range <- sort(limits)[1:2]
    add_step(
      recipe,
      step_BoxCox_new(
        terms = terms,
        role = role,
        trained = trained,
        lambdas = lambdas,
        limits = lambda_range,
        nunique = nunique
      )
    )
  }
## Internal constructor for the step object (no validation, just assembly).
step_BoxCox_new <-
  function(terms = NULL, role = NA, trained = FALSE,
           lambdas = NULL, limits = NULL, nunique = NULL) {
    step(subclass = "BoxCox",
         terms = terms,
         role = role,
         trained = trained,
         lambdas = lambdas,
         limits = limits,
         nunique = nunique)
  }
#' @export
prepare.step_BoxCox <- function(x, training, info = NULL, ...) {
  # Resolve the user's selectors to concrete column names.
  col_names <- select_terms(x$terms, info = info)
  # Estimate one lambda per selected column; estimate_bc returns NA when a
  # column is unsuitable (too few unique values, non-positive data, or an
  # optimum at the search bounds).
  values <- vapply(
    training[, col_names],
    estimate_bc,
    c(lambda = 0),
    limits = x$limits,
    nunique = x$nunique
  )
  # Drop NA lambdas so bake() leaves those columns untouched.
  values <- values[!is.na(values)]
  step_BoxCox_new(
    terms = x$terms,
    role = x$role,
    trained = TRUE,
    lambdas = values,
    limits = x$limits,
    nunique = x$nunique
  )
}
#' @export
#' @export
bake.step_BoxCox <- function(object, newdata, ...) {
  ## Nothing to transform when no usable lambda survived prepare().
  if (length(object$lambdas) == 0)
    return(as_tibble(newdata))
  ## Transform each trained column in place with its stored lambda.
  for (column in names(object$lambdas)) {
    newdata[, column] <- bc_trans(getElement(newdata, column),
                                  lambda = object$lambdas[column])
  }
  as_tibble(newdata)
}
## Console display for the step: selectors before training, the names of
## the estimated lambdas (plus a "[trained]" tag) afterwards.
print.step_BoxCox <-
  function(x, width = max(20, options()$width - 35), ...) {
    cat("Box-Cox transformation on ", sep = "")
    if (x$trained) {
      cat(format_ch_vec(names(x$lambdas), width = width))
      cat(" [trained]\n")
    } else {
      cat(format_selectors(x$terms, wdth = width))
      cat("\n")
    }
    invisible(x)
  }
## Apply the Box-Cox transform for a single lambda.
## lambda = NA means "no transformation"; |lambda| < eps is treated as the
## log transform (the lambda -> 0 limit of (x^lambda - 1) / lambda).
bc_trans <- function(x, lambda, eps = .001) {
  if (is.na(lambda)) {
    return(x)
  }
  if (abs(lambda) >= eps) {
    (x ^ lambda - 1) / lambda
  } else {
    log(x)
  }
}
## Helper for the log-likelihood calculation: partial log-likelihood of the
## Box-Cox parameter `lambda` for data `y`, scaled by the geometric mean `gm`.
#' @importFrom stats var
ll_bc <- function(lambda, y, gm, eps = .001) {
  n <- length(y)
  scale_fac <- gm ^ (lambda - 1)
  if (abs(lambda) <= eps) {
    z <- log(y) / scale_fac
  } else {
    z <- (y ^ lambda - 1) / (lambda * scale_fac)
  }
  var_z <- var(z) * (n - 1) / n  # MLE (biased) variance
  -.5 * n * log(var_z)
}
#' @importFrom stats complete.cases
## Objective for optimize(): log-likelihood of lambda computed on the
## complete (non-missing) observations only.
bc_obj <- function(lam, dat) {
  observed <- dat[complete.cases(dat)]
  geo_mean <- exp(mean(log(observed)))
  ll_bc(lambda = lam, y = observed, gm = geo_mean)
}
#' @importFrom stats optimize
## Estimate the Box-Cox lambda for one variable, or NA when unsuitable.
estimate_bc <- function(dat,
                        limits = c(-5, 5),
                        nunique = 5) {
  eps <- .001
  # Refuse near-constant variables and any with non-positive values
  # (the Box-Cox transform requires strictly positive data).
  if (length(unique(dat)) < nunique |
      any(dat[complete.cases(dat)] <= 0))
    return(NA)
  # Maximise the partial log-likelihood over the allowed lambda range.
  res <- optimize(
    bc_obj,
    interval = limits,
    maximum = TRUE,
    dat = dat,
    tol = .0001
  )
  lam <- res$maximum
  # A maximum at (or within eps of) the search bounds is treated as a
  # failed estimate.
  if (abs(limits[1] - lam) <= eps | abs(limits[2] - lam) <= eps)
    lam <- NA
  lam
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/absoluteTest.R
\name{absoluteTest}
\alias{absoluteTest}
\title{absolute test homogeneity score}
\usage{
absoluteTest(eset, QSarray, p.adjust.method = "fdr", silent = F)
}
\arguments{
\item{eset}{expression set matrix}
\item{QSarray}{qusage object}
\item{p.adjust.method}{method used to correct p-values for multiple testing (e.g. "fdr")}
\item{silent}{verbose}
}
\value{
homogeneity score
}
\description{
absolute test homogeneity score
}
| /man/absoluteTest.Rd | no_license | arcolombo/junk | R | false | true | 484 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/absoluteTest.R
\name{absoluteTest}
\alias{absoluteTest}
\title{absolute test homogeneity score}
\usage{
absoluteTest(eset, QSarray, p.adjust.method = "fdr", silent = F)
}
\arguments{
\item{eset}{expression set matrix}
\item{QSarray}{qusage object}
\item{p.adjust.method}{method used to correct p-values for multiple testing (e.g. "fdr")}
\item{silent}{verbose}
}
\value{
homogeneity score
}
\description{
absolute test homogeneity score
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EPFR.r
\name{sql.1mAllocD}
\alias{sql.1mAllocD}
\title{sql.1mAllocD}
\usage{
sql.1mAllocD(x, y, n, w, h)
}
\arguments{
\item{x}{= the YYYYMM for which you want data (known 26 days later)}
\item{y}{= a string vector of factors to be computed, the last element of which is the type of fund used.}
\item{n}{= any of StockFlows/China/Japan/CSI300/Energy}
\item{w}{= T/F depending on whether you are checking ftp}
\item{h}{= T/F depending on whether latest prices are being used}
}
\description{
Generates the SQL query to get the data for 1mAllocMo
}
\seealso{
Other sql: \code{\link{sql.1dActWtTrend.Alloc}},
\code{\link{sql.1dActWtTrend.Final}},
\code{\link{sql.1dActWtTrend.Flow}},
\code{\link{sql.1dActWtTrend.select}},
\code{\link{sql.1dActWtTrend.topline.from}},
\code{\link{sql.1dActWtTrend.topline}},
\code{\link{sql.1dActWtTrend.underlying.basic}},
\code{\link{sql.1dActWtTrend.underlying}},
\code{\link{sql.1dActWtTrend}},
\code{\link{sql.1dFloMo.CountryId.List}},
\code{\link{sql.1dFloMo.FI}},
\code{\link{sql.1dFloMo.Rgn}},
\code{\link{sql.1dFloMo.Sec.topline}},
\code{\link{sql.1dFloMo.filter}},
\code{\link{sql.1dFloMo.grp}},
\code{\link{sql.1dFloMo.select.wrapper}},
\code{\link{sql.1dFloMo.select}},
\code{\link{sql.1dFloMo.underlying}},
\code{\link{sql.1dFloMoAggr}}, \code{\link{sql.1dFloMo}},
\code{\link{sql.1dFloTrend.Alloc.data}},
\code{\link{sql.1dFloTrend.Alloc.fetch}},
\code{\link{sql.1dFloTrend.Alloc.final}},
\code{\link{sql.1dFloTrend.Alloc.from}},
\code{\link{sql.1dFloTrend.Alloc.purge}},
\code{\link{sql.1dFloTrend.Alloc}},
\code{\link{sql.1dFloTrend.select}},
\code{\link{sql.1dFloTrend.underlying}},
\code{\link{sql.1dFloTrend}}, \code{\link{sql.1dFundCt}},
\code{\link{sql.1dFundRet}}, \code{\link{sql.1dION}},
\code{\link{sql.1mActWt.underlying}},
\code{\link{sql.1mActWtIncrPct}},
\code{\link{sql.1mActWtTrend.underlying}},
\code{\link{sql.1mActWtTrend}},
\code{\link{sql.1mActWt}},
\code{\link{sql.1mAllocD.from}},
\code{\link{sql.1mAllocD.select}},
\code{\link{sql.1mAllocD.topline.from}},
\code{\link{sql.1mAllocMo.select}},
\code{\link{sql.1mAllocMo.underlying.from}},
\code{\link{sql.1mAllocMo.underlying.pre}},
\code{\link{sql.1mAllocMo}},
\code{\link{sql.1mAllocSkew.topline.from}},
\code{\link{sql.1mAllocSkew}},
\code{\link{sql.1mBullish.Alloc}},
\code{\link{sql.1mBullish.Final}},
\code{\link{sql.1mChActWt}}, \code{\link{sql.1mFloMo}},
\code{\link{sql.1mFloTrend.underlying}},
\code{\link{sql.1mFloTrend}}, \code{\link{sql.1mFundCt}},
\code{\link{sql.1mHoldAum}},
\code{\link{sql.1mSRIAdvisorPct}},
\code{\link{sql.1wFlow.Corp}},
\code{\link{sql.ActWtDiff2}},
\code{\link{sql.Allocation.Sec.FinsExREst}},
\code{\link{sql.Allocation.Sec}},
\code{\link{sql.Allocations.bulk.EqWtAvg}},
\code{\link{sql.Allocations.bulk.Single}},
\code{\link{sql.Allocation}},
\code{\link{sql.BenchIndex.duplication}},
\code{\link{sql.Bullish}}, \code{\link{sql.DailyFlo}},
\code{\link{sql.Diff}}, \code{\link{sql.Dispersion}},
\code{\link{sql.FloMo.Funds}}, \code{\link{sql.Flow}},
\code{\link{sql.Foreign}},
\code{\link{sql.FundHistory.macro}},
\code{\link{sql.FundHistory.sf}},
\code{\link{sql.FundHistory}}, \code{\link{sql.HSIdmap}},
\code{\link{sql.HerdingLSV}},
\code{\link{sql.Holdings.bulk.wrapper}},
\code{\link{sql.Holdings.bulk}},
\code{\link{sql.Holdings}}, \code{\link{sql.ION}},
\code{\link{sql.MonthlyAlloc}},
\code{\link{sql.MonthlyAssetsEnd}}, \code{\link{sql.Mo}},
\code{\link{sql.Overweight}}, \code{\link{sql.RDSuniv}},
\code{\link{sql.ReportDate}}, \code{\link{sql.SRI}},
\code{\link{sql.ShareClass}},
\code{\link{sql.TopDownAllocs.items}},
\code{\link{sql.TopDownAllocs.underlying}},
\code{\link{sql.TopDownAllocs}}, \code{\link{sql.Trend}},
\code{\link{sql.and}}, \code{\link{sql.arguments}},
\code{\link{sql.bcp}}, \code{\link{sql.breakdown}},
\code{\link{sql.case}}, \code{\link{sql.close}},
\code{\link{sql.connect.wrapper}},
\code{\link{sql.connect}},
\code{\link{sql.cross.border}},
\code{\link{sql.datediff}}, \code{\link{sql.declare}},
\code{\link{sql.delete}}, \code{\link{sql.drop}},
\code{\link{sql.exists}},
\code{\link{sql.extra.domicile}},
\code{\link{sql.index}}, \code{\link{sql.into}},
\code{\link{sql.in}}, \code{\link{sql.isin.old.to.new}},
\code{\link{sql.label}}, \code{\link{sql.map.classif}},
\code{\link{sql.mat.cofactor}},
\code{\link{sql.mat.crossprod.vector}},
\code{\link{sql.mat.crossprod}},
\code{\link{sql.mat.determinant}},
\code{\link{sql.mat.flip}},
\code{\link{sql.mat.multiply}}, \code{\link{sql.median}},
\code{\link{sql.nonneg}},
\code{\link{sql.query.underlying}},
\code{\link{sql.query}}, \code{\link{sql.regr}},
\code{\link{sql.tbl}}, \code{\link{sql.ui}},
\code{\link{sql.unbracket}}, \code{\link{sql.update}},
\code{\link{sql.yield.curve.1dFloMo}},
\code{\link{sql.yield.curve}},
\code{\link{sql.yyyymmdd}}, \code{\link{sql.yyyymm}}
}
\keyword{sql.1mAllocD}
| /man/sql.1mAllocD.Rd | no_license | vsrimurthy/EPFR | R | false | true | 5,135 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EPFR.r
\name{sql.1mAllocD}
\alias{sql.1mAllocD}
\title{sql.1mAllocD}
\usage{
sql.1mAllocD(x, y, n, w, h)
}
\arguments{
\item{x}{= the YYYYMM for which you want data (known 26 days later)}
\item{y}{= a string vector of factors to be computed, the last element of which is the type of fund used.}
\item{n}{= any of StockFlows/China/Japan/CSI300/Energy}
\item{w}{= T/F depending on whether you are checking ftp}
\item{h}{= T/F depending on whether latest prices are being used}
}
\description{
Generates the SQL query to get the data for 1mAllocD
}
\seealso{
Other sql: \code{\link{sql.1dActWtTrend.Alloc}},
\code{\link{sql.1dActWtTrend.Final}},
\code{\link{sql.1dActWtTrend.Flow}},
\code{\link{sql.1dActWtTrend.select}},
\code{\link{sql.1dActWtTrend.topline.from}},
\code{\link{sql.1dActWtTrend.topline}},
\code{\link{sql.1dActWtTrend.underlying.basic}},
\code{\link{sql.1dActWtTrend.underlying}},
\code{\link{sql.1dActWtTrend}},
\code{\link{sql.1dFloMo.CountryId.List}},
\code{\link{sql.1dFloMo.FI}},
\code{\link{sql.1dFloMo.Rgn}},
\code{\link{sql.1dFloMo.Sec.topline}},
\code{\link{sql.1dFloMo.filter}},
\code{\link{sql.1dFloMo.grp}},
\code{\link{sql.1dFloMo.select.wrapper}},
\code{\link{sql.1dFloMo.select}},
\code{\link{sql.1dFloMo.underlying}},
\code{\link{sql.1dFloMoAggr}}, \code{\link{sql.1dFloMo}},
\code{\link{sql.1dFloTrend.Alloc.data}},
\code{\link{sql.1dFloTrend.Alloc.fetch}},
\code{\link{sql.1dFloTrend.Alloc.final}},
\code{\link{sql.1dFloTrend.Alloc.from}},
\code{\link{sql.1dFloTrend.Alloc.purge}},
\code{\link{sql.1dFloTrend.Alloc}},
\code{\link{sql.1dFloTrend.select}},
\code{\link{sql.1dFloTrend.underlying}},
\code{\link{sql.1dFloTrend}}, \code{\link{sql.1dFundCt}},
\code{\link{sql.1dFundRet}}, \code{\link{sql.1dION}},
\code{\link{sql.1mActWt.underlying}},
\code{\link{sql.1mActWtIncrPct}},
\code{\link{sql.1mActWtTrend.underlying}},
\code{\link{sql.1mActWtTrend}},
\code{\link{sql.1mActWt}},
\code{\link{sql.1mAllocD.from}},
\code{\link{sql.1mAllocD.select}},
\code{\link{sql.1mAllocD.topline.from}},
\code{\link{sql.1mAllocMo.select}},
\code{\link{sql.1mAllocMo.underlying.from}},
\code{\link{sql.1mAllocMo.underlying.pre}},
\code{\link{sql.1mAllocMo}},
\code{\link{sql.1mAllocSkew.topline.from}},
\code{\link{sql.1mAllocSkew}},
\code{\link{sql.1mBullish.Alloc}},
\code{\link{sql.1mBullish.Final}},
\code{\link{sql.1mChActWt}}, \code{\link{sql.1mFloMo}},
\code{\link{sql.1mFloTrend.underlying}},
\code{\link{sql.1mFloTrend}}, \code{\link{sql.1mFundCt}},
\code{\link{sql.1mHoldAum}},
\code{\link{sql.1mSRIAdvisorPct}},
\code{\link{sql.1wFlow.Corp}},
\code{\link{sql.ActWtDiff2}},
\code{\link{sql.Allocation.Sec.FinsExREst}},
\code{\link{sql.Allocation.Sec}},
\code{\link{sql.Allocations.bulk.EqWtAvg}},
\code{\link{sql.Allocations.bulk.Single}},
\code{\link{sql.Allocation}},
\code{\link{sql.BenchIndex.duplication}},
\code{\link{sql.Bullish}}, \code{\link{sql.DailyFlo}},
\code{\link{sql.Diff}}, \code{\link{sql.Dispersion}},
\code{\link{sql.FloMo.Funds}}, \code{\link{sql.Flow}},
\code{\link{sql.Foreign}},
\code{\link{sql.FundHistory.macro}},
\code{\link{sql.FundHistory.sf}},
\code{\link{sql.FundHistory}}, \code{\link{sql.HSIdmap}},
\code{\link{sql.HerdingLSV}},
\code{\link{sql.Holdings.bulk.wrapper}},
\code{\link{sql.Holdings.bulk}},
\code{\link{sql.Holdings}}, \code{\link{sql.ION}},
\code{\link{sql.MonthlyAlloc}},
\code{\link{sql.MonthlyAssetsEnd}}, \code{\link{sql.Mo}},
\code{\link{sql.Overweight}}, \code{\link{sql.RDSuniv}},
\code{\link{sql.ReportDate}}, \code{\link{sql.SRI}},
\code{\link{sql.ShareClass}},
\code{\link{sql.TopDownAllocs.items}},
\code{\link{sql.TopDownAllocs.underlying}},
\code{\link{sql.TopDownAllocs}}, \code{\link{sql.Trend}},
\code{\link{sql.and}}, \code{\link{sql.arguments}},
\code{\link{sql.bcp}}, \code{\link{sql.breakdown}},
\code{\link{sql.case}}, \code{\link{sql.close}},
\code{\link{sql.connect.wrapper}},
\code{\link{sql.connect}},
\code{\link{sql.cross.border}},
\code{\link{sql.datediff}}, \code{\link{sql.declare}},
\code{\link{sql.delete}}, \code{\link{sql.drop}},
\code{\link{sql.exists}},
\code{\link{sql.extra.domicile}},
\code{\link{sql.index}}, \code{\link{sql.into}},
\code{\link{sql.in}}, \code{\link{sql.isin.old.to.new}},
\code{\link{sql.label}}, \code{\link{sql.map.classif}},
\code{\link{sql.mat.cofactor}},
\code{\link{sql.mat.crossprod.vector}},
\code{\link{sql.mat.crossprod}},
\code{\link{sql.mat.determinant}},
\code{\link{sql.mat.flip}},
\code{\link{sql.mat.multiply}}, \code{\link{sql.median}},
\code{\link{sql.nonneg}},
\code{\link{sql.query.underlying}},
\code{\link{sql.query}}, \code{\link{sql.regr}},
\code{\link{sql.tbl}}, \code{\link{sql.ui}},
\code{\link{sql.unbracket}}, \code{\link{sql.update}},
\code{\link{sql.yield.curve.1dFloMo}},
\code{\link{sql.yield.curve}},
\code{\link{sql.yyyymmdd}}, \code{\link{sql.yyyymm}}
}
\keyword{sql.1mAllocD}
|
library(lubridate)

# Clean the Walmart/Kaggle weather data: recode trace amounts, flag missing
# departures, impute numeric columns, and derive calendar/daylight features.
# The script mutates a single global data frame `weather` read from disk.
weather <- read.csv("weather.csv")

# Trace precipitation/snowfall is coded "T"; recode it as a small positive
# amount so the columns can be treated as numeric downstream. `%in%` is used
# instead of `==` so NA entries cannot poison the subscripted assignment.
weather$snowfall <- as.character(weather$snowfall)
weather$snowfall[weather$snowfall %in% "T"] <- "0.01"
weather$preciptotal <- as.character(weather$preciptotal)
weather$preciptotal[weather$preciptotal %in% "T"] <- "0.004"

# Indicator for departure-from-normal temperature being missing ("M").
# as.integer(%in%) is NA-safe and yields 0/1 directly.
weather$depart_missing <- as.integer(weather$depart %in% "M")

# Crude mean imputation: columns read as factors (because of "M"/"T" codes)
# are coerced character -> numeric; values that fail coercion become NA and
# are replaced by the column mean. NOTE(review): the column positions below
# assume the weather.csv layout -- confirm if the file schema changes.
impute_cols <- c(3:12, 14:18, 20)
for (i in impute_cols) {
  weather[, i] <- as.numeric(as.character(weather[, i]))
  weather[is.na(weather[, i]), i] <- mean(weather[, i], na.rm = TRUE)
}

# Calendar features; weekday captures weekend shopping effects.
weather$month <- factor(month(weather$date))
weather$wday <- factor(wday(weather$date))

# Minutes-since-midnight for sunrise/sunset, from their "HHMM" encoding.
weather$sunriseMins <- as.numeric(substr(weather$sunrise, 1, 2)) * 60 +
  as.numeric(substr(weather$sunrise, 3, 4))
weather$sunsetMins <- as.numeric(substr(weather$sunset, 1, 2)) * 60 +
  as.numeric(substr(weather$sunset, 3, 4))
weather$daylightMins <- weather$sunsetMins - weather$sunriseMins | /weather_cleaning.R | no_license | Shweidman/Kaggle | R | false | false | 1,151 | r | library(lubridate)
weather <- read.csv("weather.csv")
# Recode trace amounts ("T") of snowfall/precipitation as a small positive
# number so the columns survive numeric coercion below.
weather$snowfall <- as.character(weather$snowfall)
weather$snowfall[weather$snowfall=="T"] <- .01
weather$preciptotal <- as.character(weather$preciptotal)
weather$preciptotal[weather$preciptotal=="T"] <- .004
# Flag rows where departure-from-normal temperature is missing ("M").
weather$depart_missing <- 0
weather$depart_missing[weather$depart=="M"] <- 1
# Crude mean imputation: factor columns are coerced character -> numeric;
# entries that cannot be coerced (e.g. "M") become NA and are replaced by
# the column mean. NOTE(review): column positions assume the weather.csv
# layout -- confirm before reuse.
for(i in c(3:12,14:18,20)){
weather[,i] <- as.numeric(as.character(weather[,i]))
weather[is.na(weather[,i]),i] <- mean(weather[,i],na.rm=TRUE)
}
# Calendar features; weekday captures weekend shopping effects.
weather$month <- factor(month(weather$date))
weather$wday <- factor(wday(weather$date)) # weekend shopping effect
## Create "daylightMins" variable from "HHMM"-encoded sunrise/sunset times
weather$sunriseMins <- as.numeric(substr(weather$sunrise,1,2))*60 +
as.numeric(substr(weather$sunrise,3,4))
weather$sunsetMins <- as.numeric(substr(weather$sunset,1,2))*60 +
as.numeric(substr(weather$sunset,3,4))
weather$daylightMins <- weather$sunsetMins - weather$sunriseMins |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.