content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{list.takeWhile}
\alias{list.takeWhile}
\title{Take out members until the given condition is broken}
\usage{
list.takeWhile(.data, cond)
}
\arguments{
\item{.data}{\code{list}}
\item{cond}{A logical lambda expression}
}
\description{
Take out members until the given condition is broken
}
\examples{
\dontrun{
x <- list(p1 = list(type="A",score=list(c1=10,c2=8)),
p2 = list(type="B",score=list(c1=9,c2=9)),
p3 = list(type="B",score=list(c1=9,c2=7)))
list.takeWhile(x,type=="B")
list.takeWhile(x,min(score$c1,score$c2) >= 8)
}
}
| /man/list.takeWhile.Rd | permissive | kismsu/rlist | R | false | false | 602 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{list.takeWhile}
\alias{list.takeWhile}
\title{Take out members until the given condition is broken}
\usage{
list.takeWhile(.data, cond)
}
\arguments{
\item{.data}{\code{list}}
\item{cond}{A logical lambda expression}
}
\description{
Take out members until the given condition is broken
}
\examples{
\dontrun{
x <- list(p1 = list(type="A",score=list(c1=10,c2=8)),
p2 = list(type="B",score=list(c1=9,c2=9)),
p3 = list(type="B",score=list(c1=9,c2=7)))
list.takeWhile(x,type=="B")
list.takeWhile(x,min(score$c1,score$c2) >= 8)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RCy3-deprecated.R
\name{getNodeAttributeNames}
\alias{getNodeAttributeNames}
\alias{getNodeAttributeNames_deprecated}
\title{DEPRECATED: getNodeAttributeNames}
\usage{
getNodeAttributeNames_deprecated
}
\value{
None
}
\description{
This function is only provided for compatibility with older
versions of RCy3 and will be defunct and removed in the next releases.
Use the replacement function instead:
\link{getTableColumnNames}
}
| /man/getNodeAttributeNames-deprecated.Rd | permissive | olbeimarton/RCy3 | R | false | true | 509 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RCy3-deprecated.R
\name{getNodeAttributeNames}
\alias{getNodeAttributeNames}
\alias{getNodeAttributeNames_deprecated}
\title{DEPRECATED: getNodeAttributeNames}
\usage{
getNodeAttributeNames_deprecated
}
\value{
None
}
\description{
This function is only provided for compatibility with older
versions of RCy3 and will be defunct and removed in the next releases.
Use the replacement function instead:
\link{getTableColumnNames}
}
|
context("Test caching")
# Copy named objects from the caller's environment into the global environment.
# Test helpers here define functions (e.g. calcCacheExample) locally inside
# test_that() and need them visible globally so madrat can find them.
#
# @param ... character names of objects existing in the caller's frame.
globalassign <- function(...) {
  for (x in c(...)) {
    # get() on the name replaces eval.parent(parse(text = x)) -- same result
    # for plain object names, without the eval/parse anti-pattern.
    assign(x, get(x, envir = parent.frame()), envir = .GlobalEnv)
  }
}
# Full round trip of madrat's cache:
# - the first calcOutput() call must write a cache file,
# - cacheGet() must return the cached object,
# - ignorecache must disable cache reads,
# - a redefined calc function changes the fingerprint (cache miss),
# - forcecache accepts an outdated cache but reports the mismatch.
test_that("Caching works", {
calcCacheExample <- function() return(list(x = as.magpie(1), description = "-", unit = "-"))
globalassign("calcCacheExample")
# isolate the cache in a temp folder so test runs do not interfere
setConfig(globalenv = TRUE, ignorecache = FALSE, .verbose = FALSE,
          cachefolder = paste0(tempdir(), "/test_caching_works"))
# nothing cached yet
expect_null(madrat:::cacheGet("calc","CacheExample"))
expect_message(calcOutput("CacheExample", aggregate = FALSE), "writing cache")
expect_identical(madrat:::cacheGet("calc","CacheExample")$x, as.magpie(1))
# ignorecache must hide the existing cache file
setConfig(ignorecache = TRUE, .verbose = FALSE)
expect_null(madrat:::cacheGet("calc","CacheExample"))
setConfig(ignorecache = FALSE, .verbose = FALSE)
# the cache file name carries the function fingerprint ("-F...")
expect_identical(basename(madrat:::cacheName("calc","CacheExample")), "calcCacheExample-F43888ba0.rds")
# redefining the function changes the fingerprint -> miss in "get" mode
calcCacheExample <- function() return(list(x = as.magpie(2), description = "-", unit = "-"))
globalassign("calcCacheExample")
expect_null(madrat:::cacheName("calc","CacheExample", mode = "get"))
# forcecache falls back to the outdated cache with a fingerprint warning
setConfig(forcecache = TRUE, .verbose = FALSE)
expect_identical(basename(madrat:::cacheName("calc","CacheExample")), "calcCacheExample.rds")
expect_message(cf <- madrat:::cacheName("calc","CacheExample", mode = "get"), "does not match fingerprint")
expect_identical(basename(cf), "calcCacheExample-F43888ba0.rds")
setConfig(forcecache = FALSE, .verbose = FALSE)
# a new run writes a cache under the new fingerprint
expect_message(a <- calcOutput("CacheExample", aggregate=FALSE), "writing cache")
expect_identical(basename(madrat:::cacheName("calc","CacheExample", mode = "get")), "calcCacheExample-F4ece4fe6.rds")
# third redefinition: forcecache again picks up the newest existing cache
calcCacheExample <- function() return(list(x = as.magpie(3), description = "-", unit = "-"))
globalassign("calcCacheExample")
setConfig(forcecache = TRUE, .verbose = FALSE)
expect_message(cf <- madrat:::cacheName("calc","CacheExample", mode = "get"), "does not match fingerprint")
expect_identical(basename(cf), "calcCacheExample-F4ece4fe6.rds")
})
# cacheArgumentsHash must return NULL when the call only uses default
# argument values, otherwise a stable hash suffix for the cache file name.
test_that("Argument hashing works", {
# no arguments / defaults only -> no suffix
expect_null(madrat:::cacheArgumentsHash(madrat:::readTau))
expect_null(madrat:::cacheArgumentsHash(madrat:::readTau, list(subtype="paper")))
# non-default value -> stable hash; identical for a vector of functions
expect_identical(madrat:::cacheArgumentsHash(madrat:::readTau, args=list(subtype="historical")), "-50d72f51")
expect_identical(madrat:::cacheArgumentsHash(c(madrat:::readTau, madrat:::convertTau), args=list(subtype="historical")), "-50d72f51")
# nonexisting arguments will be ignored if ... is missing
expect_identical(madrat:::cacheArgumentsHash(madrat:::readTau, args=list(subtype="historical", notthere = 42)), "-50d72f51")
# if ... exists all arguments will get considered
expect_null(madrat:::cacheArgumentsHash(calcOutput, args=list(try=FALSE)))
expect_identical(madrat:::cacheArgumentsHash(calcOutput, args=list(try=TRUE)), "-01df3eb2")
expect_identical(madrat:::cacheArgumentsHash(calcOutput, args=list(try=TRUE, notthere = 42)), "-ae021eac")
# an explicit NULL equal to the default must not produce a suffix
calcArgs <- function(a = NULL) return(1)
expect_null(madrat:::cacheArgumentsHash(calcArgs))
expect_null(madrat:::cacheArgumentsHash(calcArgs, args=list(a = NULL)))
expect_identical(madrat:::cacheArgumentsHash(calcArgs, args=list(a=12)), "-8bb64daf")
# hashing without a function must fail
expect_error(madrat:::cacheArgumentsHash(NULL,args=list(no="call")), "No call")
})
# Cache file names must embed the function fingerprint ("-F...") and append
# an arguments hash only for a non-default subtype -- no matter whether the
# subtype argument sits on the read or on the correct function.
test_that("Cache naming and identification works correctly", {
setConfig(forcecache = FALSE, .verbose = FALSE)
# minimal download/read/correct function set for a fake source
downloadCacheExample <- function() return(list(url = 1, author = 1, title = 1, license = 1,
                                               description = 1, unit = 1))
readCacheExample <- function() return(as.magpie(1))
# here the subtype argument belongs to the correct function
correctCacheExample <- function(x, subtype = "blub") {
  if (subtype == "blub") return(as.magpie(1))
  else if (subtype == "bla") return(as.magpie(2))
}
globalassign("downloadCacheExample", "readCacheExample", "correctCacheExample")
# default subtype -> fingerprint only; non-default -> extra argument hash
expect_message(readSource("CacheExample", convert = "onlycorrect"), "correctCacheExample-F[^-]*.rds")
expect_message(readSource("CacheExample", convert = "onlycorrect", subtype = "bla"), "correctCacheExample-F[^-]*-d0d19d80.rds")
expect_message(readSource("CacheExample", convert = "onlycorrect", subtype = "blub"), "correctCacheExample-F[^-]*.rds")
# same expectations when subtype sits on the read function instead
readCacheExample <- function(subtype = "blub") {
  if (subtype == "blub") return(as.magpie(1))
  else if (subtype == "bla") return(as.magpie(2))
}
correctCacheExample <- function(x) return(x)
globalassign("downloadCacheExample", "readCacheExample", "correctCacheExample")
expect_message(readSource("CacheExample", convert = "onlycorrect"), "correctCacheExample-F[^-]*.rds")
expect_message(readSource("CacheExample", convert = "onlycorrect", subtype = "bla"), "correctCacheExample-F[^-]*-d0d19d80.rds")
expect_message(readSource("CacheExample", convert = "onlycorrect", subtype = "blub"), "correctCacheExample-F[^-]*.rds")
})
| /tests/testthat/test-caching.R | no_license | johanneskoch94/madrat | R | false | false | 4,936 | r | context("Test caching")
# Copy named objects from the caller's environment into the global environment.
# Test helpers here define functions (e.g. calcCacheExample) locally inside
# test_that() and need them visible globally so madrat can find them.
#
# @param ... character names of objects existing in the caller's frame.
globalassign <- function(...) {
  for (x in c(...)) {
    # get() on the name replaces eval.parent(parse(text = x)) -- same result
    # for plain object names, without the eval/parse anti-pattern.
    assign(x, get(x, envir = parent.frame()), envir = .GlobalEnv)
  }
}
# Full round trip of madrat's cache:
# - the first calcOutput() call must write a cache file,
# - cacheGet() must return the cached object,
# - ignorecache must disable cache reads,
# - a redefined calc function changes the fingerprint (cache miss),
# - forcecache accepts an outdated cache but reports the mismatch.
test_that("Caching works", {
calcCacheExample <- function() return(list(x = as.magpie(1), description = "-", unit = "-"))
globalassign("calcCacheExample")
# isolate the cache in a temp folder so test runs do not interfere
setConfig(globalenv = TRUE, ignorecache = FALSE, .verbose = FALSE,
          cachefolder = paste0(tempdir(), "/test_caching_works"))
# nothing cached yet
expect_null(madrat:::cacheGet("calc","CacheExample"))
expect_message(calcOutput("CacheExample", aggregate = FALSE), "writing cache")
expect_identical(madrat:::cacheGet("calc","CacheExample")$x, as.magpie(1))
# ignorecache must hide the existing cache file
setConfig(ignorecache = TRUE, .verbose = FALSE)
expect_null(madrat:::cacheGet("calc","CacheExample"))
setConfig(ignorecache = FALSE, .verbose = FALSE)
# the cache file name carries the function fingerprint ("-F...")
expect_identical(basename(madrat:::cacheName("calc","CacheExample")), "calcCacheExample-F43888ba0.rds")
# redefining the function changes the fingerprint -> miss in "get" mode
calcCacheExample <- function() return(list(x = as.magpie(2), description = "-", unit = "-"))
globalassign("calcCacheExample")
expect_null(madrat:::cacheName("calc","CacheExample", mode = "get"))
# forcecache falls back to the outdated cache with a fingerprint warning
setConfig(forcecache = TRUE, .verbose = FALSE)
expect_identical(basename(madrat:::cacheName("calc","CacheExample")), "calcCacheExample.rds")
expect_message(cf <- madrat:::cacheName("calc","CacheExample", mode = "get"), "does not match fingerprint")
expect_identical(basename(cf), "calcCacheExample-F43888ba0.rds")
setConfig(forcecache = FALSE, .verbose = FALSE)
# a new run writes a cache under the new fingerprint
expect_message(a <- calcOutput("CacheExample", aggregate=FALSE), "writing cache")
expect_identical(basename(madrat:::cacheName("calc","CacheExample", mode = "get")), "calcCacheExample-F4ece4fe6.rds")
# third redefinition: forcecache again picks up the newest existing cache
calcCacheExample <- function() return(list(x = as.magpie(3), description = "-", unit = "-"))
globalassign("calcCacheExample")
setConfig(forcecache = TRUE, .verbose = FALSE)
expect_message(cf <- madrat:::cacheName("calc","CacheExample", mode = "get"), "does not match fingerprint")
expect_identical(basename(cf), "calcCacheExample-F4ece4fe6.rds")
})
# cacheArgumentsHash must return NULL when the call only uses default
# argument values, otherwise a stable hash suffix for the cache file name.
test_that("Argument hashing works", {
# no arguments / defaults only -> no suffix
expect_null(madrat:::cacheArgumentsHash(madrat:::readTau))
expect_null(madrat:::cacheArgumentsHash(madrat:::readTau, list(subtype="paper")))
# non-default value -> stable hash; identical for a vector of functions
expect_identical(madrat:::cacheArgumentsHash(madrat:::readTau, args=list(subtype="historical")), "-50d72f51")
expect_identical(madrat:::cacheArgumentsHash(c(madrat:::readTau, madrat:::convertTau), args=list(subtype="historical")), "-50d72f51")
# nonexisting arguments will be ignored if ... is missing
expect_identical(madrat:::cacheArgumentsHash(madrat:::readTau, args=list(subtype="historical", notthere = 42)), "-50d72f51")
# if ... exists all arguments will get considered
expect_null(madrat:::cacheArgumentsHash(calcOutput, args=list(try=FALSE)))
expect_identical(madrat:::cacheArgumentsHash(calcOutput, args=list(try=TRUE)), "-01df3eb2")
expect_identical(madrat:::cacheArgumentsHash(calcOutput, args=list(try=TRUE, notthere = 42)), "-ae021eac")
# an explicit NULL equal to the default must not produce a suffix
calcArgs <- function(a = NULL) return(1)
expect_null(madrat:::cacheArgumentsHash(calcArgs))
expect_null(madrat:::cacheArgumentsHash(calcArgs, args=list(a = NULL)))
expect_identical(madrat:::cacheArgumentsHash(calcArgs, args=list(a=12)), "-8bb64daf")
# hashing without a function must fail
expect_error(madrat:::cacheArgumentsHash(NULL,args=list(no="call")), "No call")
})
# Cache file names must embed the function fingerprint ("-F...") and append
# an arguments hash only for a non-default subtype -- no matter whether the
# subtype argument sits on the read or on the correct function.
test_that("Cache naming and identification works correctly", {
setConfig(forcecache = FALSE, .verbose = FALSE)
# minimal download/read/correct function set for a fake source
downloadCacheExample <- function() return(list(url = 1, author = 1, title = 1, license = 1,
                                               description = 1, unit = 1))
readCacheExample <- function() return(as.magpie(1))
# here the subtype argument belongs to the correct function
correctCacheExample <- function(x, subtype = "blub") {
  if (subtype == "blub") return(as.magpie(1))
  else if (subtype == "bla") return(as.magpie(2))
}
globalassign("downloadCacheExample", "readCacheExample", "correctCacheExample")
# default subtype -> fingerprint only; non-default -> extra argument hash
expect_message(readSource("CacheExample", convert = "onlycorrect"), "correctCacheExample-F[^-]*.rds")
expect_message(readSource("CacheExample", convert = "onlycorrect", subtype = "bla"), "correctCacheExample-F[^-]*-d0d19d80.rds")
expect_message(readSource("CacheExample", convert = "onlycorrect", subtype = "blub"), "correctCacheExample-F[^-]*.rds")
# same expectations when subtype sits on the read function instead
readCacheExample <- function(subtype = "blub") {
  if (subtype == "blub") return(as.magpie(1))
  else if (subtype == "bla") return(as.magpie(2))
}
correctCacheExample <- function(x) return(x)
globalassign("downloadCacheExample", "readCacheExample", "correctCacheExample")
expect_message(readSource("CacheExample", convert = "onlycorrect"), "correctCacheExample-F[^-]*.rds")
expect_message(readSource("CacheExample", convert = "onlycorrect", subtype = "bla"), "correctCacheExample-F[^-]*-d0d19d80.rds")
expect_message(readSource("CacheExample", convert = "onlycorrect", subtype = "blub"), "correctCacheExample-F[^-]*.rds")
})
|
# Plot 90-day deceased-donor kidney transplant survival rates by diagnosis.
# FIX: the plotting functions used below (ggplot(), theme_bw(), geom_line(),
# geom_point()) come from the ggplot2 package; "ggplot" is a different,
# long-defunct package and loading it does not provide these functions.
library(ggplot2)
library(dplyr)
survival.data <- read.csv("SurvivalRates_DeceasedDonor.csv")
survival.data <- as.data.frame(survival.data)
# keep only the Diagnosis category at the 90-day time point
survival.diag90d <- filter(survival.data,
  survival.data$Category == "Diagnosis" & survival.data$TimePeriod == "90 days")
# white background, no horizontal/minor grid lines
theme_clear <- theme_bw() + theme(plot.background=element_rect(fill="white",colour=NA)) +
  theme(panel.grid.major.y=element_blank(),panel.grid.minor=element_blank())
# time series of survival rate per diagnosis value
diagnosisplot.ts <- ggplot(data = survival.diag90d,
  aes(x=Year,y=SurvivalRate, colour=Value))+geom_line()+geom_point(size=4) + theme_clear
diagnosisplot.ts | /Kidney-Transplant/survivalratecharts.R | no_license | jarrardenator/shinyexamples | R | false | false | 634 | r | library(ggplot)
# NOTE(review): this script calls ggplot()/theme_bw()/geom_line(), which come
# from the ggplot2 package; make sure ggplot2 (not the defunct "ggplot"
# package loaded on the preceding line) is attached.
library(dplyr)
survival.data <- read.csv("SurvivalRates_DeceasedDonor.csv")
survival.data <- as.data.frame(survival.data)
# keep only the Diagnosis category at the 90-day time point
survival.diag90d <- filter(survival.data,
  survival.data$Category == "Diagnosis" & survival.data$TimePeriod == "90 days")
# white background, no horizontal/minor grid lines
theme_clear <- theme_bw() + theme(plot.background=element_rect(fill="white",colour=NA)) +
  theme(panel.grid.major.y=element_blank(),panel.grid.minor=element_blank())
# time series of survival rate per diagnosis value
diagnosisplot.ts <- ggplot(data = survival.diag90d,
  aes(x=Year,y=SurvivalRate, colour=Value))+geom_line()+geom_point(size=4) + theme_clear
diagnosisplot.ts |
###############
# LIBS
###############
# One library() per line so a missing package is easy to spot.
library(lme4)        # lmer() mixed models (stage 1)
library(nlme)        # lme()/lmeControl() used in stage 3; previously only
                     # attached implicitly through mgcv's Depends -- explicit now
library(reshape)
library(foreign)
library(ggplot2)
library(plyr)
library(data.table)
library(reshape2)
library(Hmisc)       # describe()
library(mgcv)        # gam()/predict.gam()
library(gdata)       # keep()
library(car)
library(dplyr)       # load after plyr so dplyr verbs win the masking
library(ggmap)
library(broom)
library(splines)
# helper functions: random train/test split (splitdf) and RMSPE (rmse)
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
#if needed load res table
#res<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
#-------------------->> RES TABLE
# results table: one row per year (2003-2012), one column per performance
# statistic of the three modelling stages; the trailing "XX" columns are
# unused padding to reach ncol = 45
res <- matrix(nrow=10, ncol=45)
res <- data.frame(res)
colnames(res) <- c(
"year"
,"m1.R2","m1.PE","m1.R2.s","m1.R2.t","m1.PE.s" #full model
,"m1cv.R2","m1cv.I","m1cv.I.se","m1cv.S","m1cv.S.se","m1cv.PE","m1cv.R2.s","m1cv.R2.t","m1cv.PE.s" #mod1 CV
,"m1cv.loc.R2","m1cv.loc.I","m1cv.loc.I.se","m1cv.loc.S","m1cv.loc.S.se","m1cv.loc.PE","m1cv.loc.PE.s","m1cv.loc.R2.s","m1cv.loc.R2.t"#loc m1
,"m2.R2" #mod2
,"m3.t31","m3.t33" #mod3 tests
,"m3.R2","m3.PE","m3.R2.s","m3.R2.t","m3.PE.s"#mod3
,"XX","XX","XX","XX","XX","XX","XX","XX","XX","XX","XX","XX","XX" )
res$year <- c(2003:2012)
### import data
# station-day dataset with AOD + covariates for stage 1
m1.2003 <-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2003.rds")
# NOTE(review): the closing "})" below is orphaned -- there is no matching
# opening call above it in this file; it will abort sourcing with a syntax
# error. Likely a leftover from an edit; confirm against version control.
})
# NOTE(review): raWDaf (apparently a station-level diagnostics table with an
# R2 column) is used here but never created in this script -- it must already
# exist in the workspace from an earlier step; confirm where it is produced.
raWDaf
raWDaf<-as.data.table(raWDaf)
# stations whose station-level R2 is essentially zero are treated as faulty
bad<- raWDaf[R2< 0.01]
bad[,badid := paste(stn,sep="-")]
#################BAD STN
m1.2003[,badid := paste(stn,sep="-")]
####Take out bad stations
m1.2003 <- m1.2003[!(m1.2003$badid %in% bad$badid), ]
#scale vars: z-scores (".s" suffix); scale() returns a 1-column matrix with
#center/scale attributes, stored as-is in the data.table
m1.2003[,elev.s:= scale(elev)]
m1.2003[,tden.s:= scale(tden)]
m1.2003[,pden.s:= scale(pden)]
m1.2003[,dist2A1.s:= scale(dist2A1)]
m1.2003[,dist2water.s:= scale(dist2water)]
m1.2003[,dist2rail.s:= scale(dist2rail)]
m1.2003[,Dist2road.s:= scale(Dist2road)]
m1.2003[,ndvi.s:= scale(ndvi)]
m1.2003[,MeanPbl.s:= scale(MeanPbl)]
m1.2003[,p_ind.s:= scale(p_ind)]
m1.2003[,p_for.s:= scale(p_for)]
m1.2003[,p_farm.s:= scale(p_farm)]
m1.2003[,p_dos.s:= scale(p_dos)]
m1.2003[,p_dev.s:= scale(p_dev)]
m1.2003[,p_os.s:= scale(p_os)]
m1.2003[,tempa.s:= scale(tempa)]
m1.2003[,WDa.s:= scale(WDa)]
m1.2003[,WSa.s:= scale(WSa)]
m1.2003[,RHa.s:= scale(RHa)]
m1.2003[,Raina.s:= scale(Raina)]
m1.2003[,NO2a.s:= scale(NO2a)]
# stage-1 mixed model: PM10 ~ AOD with temporal and spatial covariates and
# day-specific AOD slopes nested within region
m1.formula <- as.formula(PM10~ aod
+tempa.s+WDa.s+WSa.s+Dust+MeanPbl.s #temporal
+elev.s+tden.s+pden.s+Dist2road.s+ndvi.s #spatial
+p_os.s #+p_dev.s+p_dos.s+p_farm.s+p_for.s+p_ind.s #land use
#+aod*Dust #interactions
+(1+aod|day/reg_num)) #+(1|stn) !!! stn screws up mod3
#full fit on all station-day rows, weighted by normalized weights
m1.fit.2003 <- lmer(m1.formula,data=m1.2003,weights=normwt)
m1.2003$pred.m1 <- predict(m1.fit.2003)
# overall R2 of prediction vs observation
res[res$year=="2003", 'm1.R2'] <- print(summary(lm(PM10~pred.m1,data=m1.2003))$r.squared)
#RMSPE
res[res$year=="2003", 'm1.PE'] <- print(rmse(residuals(m1.fit.2003)))
#spatial R2/RMSPE: station means of observed vs predicted
###to check
spatial2003<-m1.2003 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2003.spat<- lm(barpm ~ barpred, data=spatial2003)
res[res$year=="2003", 'm1.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2003))$r.squared)
res[res$year=="2003", 'm1.PE.s'] <- print(rmse(residuals(m1.fit.2003.spat)))
#temporal R2: deviations from the station means
tempo2003<-left_join(m1.2003,spatial2003)
tempo2003$delpm <-tempo2003$PM10-tempo2003$barpm
tempo2003$delpred <-tempo2003$pred.m1-tempo2003$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2003)
res[res$year=="2003", 'm1.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2003))$r.squared)
saveRDS(m1.2003,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2003.pred.rds")
#---------------->>>> CV
# Cross-validation of the stage-1 model: 10 independent random train/test
# splits (splitdf() from CV_splits.r). For each split the mixed model is
# refit on the training set and held-out rows get out-of-sample predictions.
# The former ten copy-pasted s1..s10 blocks are collapsed into one helper;
# the RNG is consumed in the same order (one splitdf() call per iteration),
# so results are unchanged for a given seed.
cv_split_run <- function(iter_label) {
  splits <- splitdf(m1.2003)
  # refit on the training part only
  fit <- lmer(m1.formula, data = splits$trainset, weights = normwt)
  test <- splits$testset
  # predict held-out rows, allowing random-effect levels unseen in training
  test$pred.m1.cv <- predict(object = fit, newdata = test,
                             allow.new.levels = TRUE, re.form = NULL)
  test$iter <- iter_label
  test
}
#BIND 1 dataset (same result as rbind(test_s1, ..., test_s10))
m1.2003.cv <- data.table(do.call(rbind, lapply(paste0("s", 1:10), cv_split_run)))
#table updates: overall CV regression (observed ~ CV prediction)
m1.fit.2003.cv <- lm(PM10~pred.m1.cv,data=m1.2003.cv)
cv.sum <- summary(m1.fit.2003.cv)   # fit once and reuse (was refit 5 times)
res[res$year=="2003", 'm1cv.R2'] <- print(cv.sum$r.squared)
res[res$year=="2003", 'm1cv.I'] <-print(cv.sum$coef[1,1])
res[res$year=="2003", 'm1cv.I.se'] <-print(cv.sum$coef[1,2])
res[res$year=="2003", 'm1cv.S'] <-print(cv.sum$coef[2,1])
res[res$year=="2003", 'm1cv.S.se'] <-print(cv.sum$coef[2,2])
#RMSPE
res[res$year=="2003", 'm1cv.PE'] <- print(rmse(residuals(m1.fit.2003.cv)))
#spatial: station-level means of observed vs predicted.
# FIX: average the cross-validated prediction pred.m1.cv here. The original
# averaged pred.m1 (the full-model prediction), which leaks the full fit into
# the "CV" spatial R2; the temporal part below already uses pred.m1.cv.
spatial2003.cv<-m1.2003.cv %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.cv, na.rm=TRUE))
m1.fit.2003.cv.s <- lm(barpm ~ barpred, data=spatial2003.cv)
res[res$year=="2003", 'm1cv.R2.s'] <- print(summary(m1.fit.2003.cv.s)$r.squared)
res[res$year=="2003", 'm1cv.PE.s'] <- print(rmse(residuals(m1.fit.2003.cv.s)))
#temporal: deviations from station means, predicted vs observed
tempo2003.cv<-left_join(m1.2003.cv,spatial2003.cv)
tempo2003.cv$delpm <-tempo2003.cv$PM10-tempo2003.cv$barpm
tempo2003.cv$delpred <-tempo2003.cv$pred.m1.cv-tempo2003.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempo2003.cv)
res[res$year=="2003", 'm1cv.R2.t'] <- print(summary(mod_temporal.cv)$r.squared)
#-------->>> loc stage
# "local" stage: add fine-scale (50 m) land-use predictors and model the
# residual of the CV prediction with a GAM; the combined prediction is
# CV prediction + local residual correction.
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
setnames(luf,"tden","loc.tden")
setnames(luf,"elev50","loc.elev")
#add 50m LU to CV data
setkey(m1.2003.cv,stn)
setkey(luf,stn)
m1.2003.cv.loc <- merge(m1.2003.cv, luf, all.x = T)
#m1.2003.cv.loc<-na.omit(m1.2003.cv.loc)
#residual of the stage-1 CV prediction
m1.2003.cv.loc$res.m1<-m1.2003.cv.loc$PM10-m1.2003.cv.loc$pred.m1.cv
#The GAM model: smooths of local land use / topography on the CV residual
gam.out<-gam(res.m1~s(loc.tden)+s(tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m1.2003.cv.loc)
#plot(bp.model.ps)
#summary(bp.model.ps)
## combined prediction = CV prediction + local residual correction
m1.2003.cv.loc$pred.m1.loc <-predict(gam.out)
m1.2003.cv.loc$pred.m1.both <- m1.2003.cv.loc$pred.m1.cv + m1.2003.cv.loc$pred.m1.loc
m1.fit.2003.cv.loc <- lm(PM10~pred.m1.both,data=m1.2003.cv.loc)
loc.sum <- summary(m1.fit.2003.cv.loc)   # fit once and reuse (was refit 5 times)
res[res$year=="2003", 'm1cv.loc.R2'] <- print(loc.sum$r.squared)
res[res$year=="2003", 'm1cv.loc.I'] <-print(loc.sum$coef[1,1])
res[res$year=="2003", 'm1cv.loc.I.se'] <-print(loc.sum$coef[1,2])
res[res$year=="2003", 'm1cv.loc.S'] <-print(loc.sum$coef[2,1])
res[res$year=="2003", 'm1cv.loc.S.se'] <-print(loc.sum$coef[2,2])
#RMSPE
# FIX: use the residuals of the loc-stage model itself; the original reused
# residuals(m1.fit.2003.cv) (the plain CV fit) -- a copy-paste slip.
res[res$year=="2003", 'm1cv.loc.PE'] <- print(rmse(residuals(m1.fit.2003.cv.loc)))
#spatial
# FIX: average the combined prediction pred.m1.both (the original averaged
# pred.m1, the full-model stage-1 prediction); the temporal part below
# already uses pred.m1.both.
spatial2003.cv.loc<-m1.2003.cv.loc %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.both, na.rm=TRUE))
m1.fit.2003.cv.loc.s <- lm(barpm ~ barpred, data=spatial2003.cv.loc)
res[res$year=="2003", 'm1cv.loc.R2.s'] <- print(summary(m1.fit.2003.cv.loc.s)$r.squared)
res[res$year=="2003", 'm1cv.loc.PE.s'] <- print(rmse(residuals(m1.fit.2003.cv.loc.s)))
#temporal: deviations from station means
tempo2003.loc.cv<-left_join(m1.2003.cv.loc,spatial2003.cv.loc)
tempo2003.loc.cv$delpm <-tempo2003.loc.cv$PM10-tempo2003.loc.cv$barpm
tempo2003.loc.cv$delpred <-tempo2003.loc.cv$pred.m1.both-tempo2003.loc.cv$barpred
mod_temporal.loc.cv <- lm(delpm ~ delpred, data=tempo2003.loc.cv)
res[res$year=="2003", 'm1cv.loc.R2.t'] <- print(summary(mod_temporal.loc.cv)$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2003.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
saveRDS(m1.2003.cv.loc,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2003.predCV.rds")
###############
#MOD2
###############
# stage 2: predict PM10 on all grid-day rows where AOD exists but no monitor,
# using the stage-1 mixed-model fit (m1.fit.2003)
m2.2003<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2003.rds")
# z-score the covariates under the same ".s" names used in the stage-1
# formula (NOTE(review): scaled with stage-2 means/SDs, not the stage-1
# ones -- confirm this is intended)
m2.2003[,elev.s:= scale(elev)]
m2.2003[,tden.s:= scale(tden)]
m2.2003[,pden.s:= scale(pden)]
m2.2003[,dist2A1.s:= scale(dist2A1)]
m2.2003[,dist2water.s:= scale(dist2water)]
m2.2003[,dist2rail.s:= scale(dist2rail)]
m2.2003[,Dist2road.s:= scale(Dist2road)]
m2.2003[,ndvi.s:= scale(ndvi)]
m2.2003[,MeanPbl.s:= scale(MeanPbl)]
m2.2003[,p_ind.s:= scale(p_ind)]
m2.2003[,p_for.s:= scale(p_for)]
m2.2003[,p_farm.s:= scale(p_farm)]
m2.2003[,p_dos.s:= scale(p_dos)]
m2.2003[,p_dev.s:= scale(p_dev)]
m2.2003[,p_os.s:= scale(p_os)]
m2.2003[,tempa.s:= scale(tempa)]
m2.2003[,WDa.s:= scale(WDa)]
m2.2003[,WSa.s:= scale(WSa)]
m2.2003[,RHa.s:= scale(RHa)]
m2.2003[,Raina.s:= scale(Raina)]
m2.2003[,NO2a.s:= scale(NO2a)]
#generate predictions from the stage-1 mixed model
m2.2003[, pred.m2 := predict(object=m1.fit.2003,newdata=m2.2003,allow.new.levels=TRUE,re.form=NULL)]
describe(m2.2003$pred.m2)
# drop impossible predicted values (near-zero/negative or extreme)
m2.2003 <- m2.2003[pred.m2 > 0.00000000000001 , ]
m2.2003 <- m2.2003[pred.m2 < 1500 , ]
saveRDS(m2.2003,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2003.pred2.rds")
#-------------->prepare for mod3
# bimon = two-month period index (1..6), used for period-specific smooths
m2.2003[, bimon := (m + 1) %/% 2]
setkey(m2.2003,day, aodid)
m2.2003<-m2.2003[!is.na(meanPM10)]
#run the lmer part regressing stage 2 pred Vs mean pm
#in israel check per month, also check 30km band and other methods for meanpm
m2.smooth = lme(pred.m2 ~ meanPM10,random = list(aodid= ~1 + meanPM10),control=lmeControl(opt = "optim"), data= m2.2003 )
#xm2.smooth = lmer(pred.m2 ~ meanPM10+(1+ meanPM10|aodid), data= m2.2003 )
#correlate to see everything from mod2 and the mpm works
m2.2003[, pred.t31 := predict(m2.smooth)]
m2.2003[, resid := residuals(m2.smooth)]
res[res$year=="2003", 'm3.t31'] <- print(summary(lm(pred.m2~pred.t31,data=m2.2003))$r.squared)
#split the files to the separate bi monthly datsets
T2003_bimon1 <- subset(m2.2003 ,m2.2003$bimon == "1")
T2003_bimon2 <- subset(m2.2003 ,m2.2003$bimon == "2")
T2003_bimon3 <- subset(m2.2003 ,m2.2003$bimon == "3")
T2003_bimon4 <- subset(m2.2003 ,m2.2003$bimon == "4")
T2003_bimon5 <- subset(m2.2003 ,m2.2003$bimon == "5")
T2003_bimon6 <- subset(m2.2003 ,m2.2003$bimon == "6")
#run the separate splines (smooth) for x and y for each bimon
#whats the default band (distance) that the spline goes out and uses
# fit2_1..fit2_6 are reused later to predict gpred on the full grid
fit2_1 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2003_bimon1 )
fit2_2 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2003_bimon2 )
fit2_3 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2003_bimon3 )
fit2_4 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2003_bimon4 )
fit2_5 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2003_bimon5 )
fit2_6 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2003_bimon6 )
#get the predicted-fitted: remove the spatial residual smooth from pred.t31
Xpred_1 <- (T2003_bimon1$pred.t31 - fit2_1$fitted)
Xpred_2 <- (T2003_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (T2003_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (T2003_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (T2003_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (T2003_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file
# NOTE(review): this positional assignment assumes the table is still ordered
# by (day, aodid), which also groups bimon because day increases with month --
# verify the alignment if the key or bimon definition changes.
m2.2003$pred.t32 <- c( Xpred_1,Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.2003,day, aodid)
#rerun the lme on the predictions including the spatial spline (smooth)
Final_pred_2003 <- lme(pred.t32 ~ meanPM10 ,random = list(aodid= ~1 + meanPM10 ),control=lmeControl(opt = "optim"),data= m2.2003 )
m2.2003[, pred.t33 := predict(Final_pred_2003)]
#check correlations
res[res$year=="2003", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.2003))$r.squared)
#------------------------>>>
#import mod3: full MAIAC grid (all grid-days, no PM observations required)
data.m3 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2003.rds")
#for PM10: keep id/coordinate/date columns plus meanPM10 (positional indices
#match the stored object's column order)
data.m3 <- data.m3[,c(1,2,5,29:32,52,53),with=FALSE]
data.m3[, bimon := (m + 1) %/% 2]
setkey(data.m3,day, aodid)
data.m3<-data.m3[!is.na(meanPM10)]
#stage-3 mixed-model prediction from the calibration fit
data.m3$pred.m3.mix <- predict(Final_pred_2003,data.m3)
#unique grid (one row per MAIAC cell) used to evaluate the spatial smooths
ugrid <-data.m3 %>%
  group_by(aodid) %>%
  summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE))
#### PREDICT Gam part
# For every bi-month b: evaluate the spatial residual smooth fit2_<b> on the
# unique grid and merge gpred onto that bi-month's rows. This replaces the
# former six copy-pasted split/predict/merge blocks with identical results.
mod3 <- do.call(rbind, lapply(1:6, function(b) {
  part <- data.m3[bimon == b, ]
  grid.b <- ugrid
  grid.b$gpred <- predict.gam(get(paste0("fit2_", b)), grid.b)
  setkey(grid.b, aodid)
  setkey(part, aodid)
  merge(part, grid.b[, list(aodid, gpred)], all.x = T)
}))
# final stage-3 prediction = mixed-model part + spatial smooth part
mod3$pred.m3 <-mod3$pred.m3.mix+mod3$gpred
#hist(mod3$pred.m3)
#describe(mod3$pred.m3)
# drop negative predictions (note: rows are removed, not recoded to zero)
mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2003.pred.rds")
#clean: keep only what the following sections need
keep(mod3,res,rmse, sure=TRUE)
gc()
#########################
#prepare for m3.R2
#########################
#load mod1: station-day observations with the stage-1 prediction
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2003.pred.rds")
mod1[,aodid:= paste(mod1$long_aod,mod1$lat_aod,sep="-")]
mod1<-mod1[,c("aodid","day","PM10","pred.m1","stn"),with=FALSE]
#R2.m3: join the stage-3 prediction onto the observations
setkey(mod3,day,aodid)
setkey(mod1,day,aodid)
m1.2003 <- merge(mod1,mod3[, list(day,aodid,pred.m3)], all.x = T)
# fit the validation regression once and reuse it (the original refit the
# same lm for each statistic and stored a summary.lm, taking residuals from
# the summary object)
m3.fit.2003 <- lm(PM10~pred.m3,data=m1.2003)
res[res$year=="2003", 'm3.R2'] <- print(summary(m3.fit.2003)$r.squared)
#RMSPE
res[res$year=="2003", 'm3.PE'] <- print(rmse(residuals(m3.fit.2003)))
#spatial: station means of observed vs predicted
spatial2003<-m1.2003 %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m3, na.rm=TRUE))
m1.fit.2003.spat<- lm(barpm ~ barpred, data=spatial2003)
res[res$year=="2003", 'm3.R2.s'] <- print(summary(m1.fit.2003.spat)$r.squared)
res[res$year=="2003", 'm3.PE.s'] <- print(rmse(residuals(m1.fit.2003.spat)))
#temporal: deviations from station means
tempo2003<-left_join(m1.2003,spatial2003)
tempo2003$delpm <-tempo2003$PM10-tempo2003$barpm
tempo2003$delpred <-tempo2003$pred.m3-tempo2003$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2003)
res[res$year=="2003", 'm3.R2.t'] <- print(summary(mod_temporal)$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2003.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
#########################
#import mod2 (full-grid stage-2 predictions)
mod2<- readRDS( "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2003.pred2.rds")
mod2<-mod2[,c("aodid","day","pred.m2"),with=FALSE]
#----------------> store the best available prediction per grid-day
mod3best <- mod3[, list(aodid, x_aod_ITM, y_aod_ITM, day, pred.m3)]
setkey(mod3best, day, aodid)
setkey(mod2, day, aodid)
mod3best <- merge(mod3best, mod2[,list(aodid, day, pred.m2)], all.x = T)
#reload mod1 (station-day observations + stage-1 predictions)
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2003.pred.rds")
mod1$aodid<-paste(mod1$long_aod,mod1$lat_aod,sep="-")
mod1<-mod1[,c("aodid","day","PM10","pred.m1"),with=FALSE]
setkey(mod1,day,aodid)
mod3best <- merge(mod3best, mod1, all.x = T)
#bestpred priority: m1 (station-based) > m2 (AOD-day) > m3 (gap-filled)
mod3best[,bestpred := pred.m3]
mod3best[!is.na(pred.m2),bestpred := pred.m2]
mod3best[!is.na(pred.m1),bestpred := pred.m1]
#save
saveRDS(mod3best,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2003.FINAL.rds")
#save per-grid-cell long-term summary for GIS
write.csv(mod3best[, list(LTPM = mean(bestpred, na.rm = T),
                         predvariance = var(bestpred, na.rm = T),
                         predmin = min(bestpred, na.rm = T),
                         predmax = max(bestpred, na.rm = T),
                         npred = sum(!is.na(bestpred)),
                         npred.m1 = sum(!is.na(pred.m1)),
                         npred.m2 = sum(!is.na(pred.m2)),
                         npred.m3 = sum(!is.na(pred.m3)),
                         x_aod_ITM = x_aod_ITM[1], y_aod_ITM = y_aod_ITM[1]),by=aodid], "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/LTPM3.AQ.2003.csv", row.names = F)
#wrap up 2003: keep only the results table in the workspace, then free memory
keep(res, sure=TRUE)
#BUGFIX: was `c()` (a no-op typo); the parallel 2004 section (see the later
#`keep(res, sure=TRUE); gc()` pair) shows garbage collection was intended
gc()
###############
#LIBS
###############
#BUGFIX: the bare symbol `LIBS` on its own line raised "object 'LIBS' not found"
#and aborted the script; it was meant to be the section-header comment above
library(lme4);library(reshape);library(foreign) ;library(ggplot2);library(plyr);library(data.table);library(reshape2);library(Hmisc);library(mgcv);library(gdata);library(car);library(dplyr);library(ggmap);library(broom);library(splines)
#sourcing helper snippets: splitdf() (CV splits) and rmse()
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
#if needed load res table
res<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
### import data: station-day dataset for the 2004 stage-1 model
m1.2004 <-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2004.rds")
################# clean BAD STN PM10 and check if improved model?
#per-station PM10~aod regression; stations with near-zero R2 are flagged as bad
raWDaf <- ddply(m1.2004, c( "stn"),
                function(x) {
                  mod1 <- lm(PM10 ~ aod, data=x)
                  data.frame(R2 = round(summary(mod1)$r.squared, 5),
                             nsamps = length(summary(mod1)$resid))
                })
raWDaf
raWDaf<-as.data.table(raWDaf)
bad<- raWDaf[R2< 0.01]
bad[,badid := paste(stn,sep="-")]
#################BAD STN id on the main table (same construction as in `bad`)
m1.2004[,badid := paste(stn,sep="-")]
####Take out bad stations
m1.2004 <- m1.2004[!(m1.2004$badid %in% bad$badid), ]
#scale vars: z-score every continuous predictor in place; each new column
#<var>.s holds scale(<var>) (centered, divided by its SD), added by reference
vars.to.scale <- c("elev", "tden", "pden", "dist2A1", "dist2water",
                   "dist2rail", "Dist2road", "ndvi", "MeanPbl",
                   "p_ind", "p_for", "p_farm", "p_dos", "p_dev", "p_os",
                   "tempa", "WDa", "WSa", "RHa", "Raina", "NO2a")
m1.2004[, (paste0(vars.to.scale, ".s")) := lapply(.SD, scale),
        .SDcols = vars.to.scale]
#stage-1 mixed model: PM10 on AOD with temporal/spatial/land-use covariates and
#a random AOD slope+intercept nested as day/region
m1.formula <- as.formula(PM10~ aod
+tempa.s+WDa.s+WSa.s+Dust+MeanPbl.s #temporal
+elev.s+tden.s+pden.s+Dist2road.s+ndvi.s #spatial
+p_os.s #+p_dev.s+p_dos.s+p_farm.s+p_for.s+p_ind.s #land use
#+aod*Dust #interactions
+(1+aod|day/reg_num)) #+(1|stn) !!! stn screws up mod3
#full fit (normwt = observation weights column)
m1.fit.2004 <- lmer(m1.formula,data=m1.2004,weights=normwt)
m1.2004$pred.m1 <- predict(m1.fit.2004)
res[res$year=="2004", 'm1.R2'] <- print(summary(lm(PM10~pred.m1,data=m1.2004))$r.squared)
#RMSPE
res[res$year=="2004", 'm1.PE'] <- print(rmse(residuals(m1.fit.2004)))
#spatial R2: station means of observed vs predicted
###to check
spatial2004<-m1.2004 %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2004.spat<- lm(barpm ~ barpred, data=spatial2004)
res[res$year=="2004", 'm1.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2004))$r.squared)
res[res$year=="2004", 'm1.PE.s'] <- print(rmse(residuals(m1.fit.2004.spat)))
#temporal R2: deviations from station means
tempo2004<-left_join(m1.2004,spatial2004)
tempo2004$delpm <-tempo2004$PM10-tempo2004$barpm
tempo2004$delpred <-tempo2004$pred.m1-tempo2004$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2004)
res[res$year=="2004", 'm1.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2004))$r.squared)
saveRDS(m1.2004,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2004.pred.rds")
#---------------->>>> CV: 10 random train/test splits (splitdf from CV_splits.r);
#each iteration refits the stage-1 lmer on the train set and predicts the held-out
#test set (re.form=NULL keeps random effects; allow.new.levels handles unseen days)
#s1
splits_s1 <- splitdf(m1.2004)
test_s1 <- splits_s1$testset
train_s1 <- splits_s1$trainset
out_train_s1 <- lmer(m1.formula,data = train_s1,weights=normwt)
test_s1$pred.m1.cv <- predict(object=out_train_s1 ,newdata=test_s1,allow.new.levels=TRUE,re.form=NULL )
test_s1$iter<-"s1"
#s2
splits_s2 <- splitdf(m1.2004)
test_s2 <- splits_s2$testset
train_s2 <- splits_s2$trainset
out_train_s2 <- lmer(m1.formula,data = train_s2,weights=normwt)
test_s2$pred.m1.cv <- predict(object=out_train_s2 ,newdata=test_s2,allow.new.levels=TRUE,re.form=NULL )
test_s2$iter<-"s2"
#s3
splits_s3 <- splitdf(m1.2004)
test_s3 <- splits_s3$testset
train_s3 <- splits_s3$trainset
out_train_s3 <- lmer(m1.formula,data = train_s3,weights=normwt)
test_s3$pred.m1.cv <- predict(object=out_train_s3 ,newdata=test_s3,allow.new.levels=TRUE,re.form=NULL )
test_s3$iter<-"s3"
#s4
splits_s4 <- splitdf(m1.2004)
test_s4 <- splits_s4$testset
train_s4 <- splits_s4$trainset
out_train_s4 <- lmer(m1.formula,data = train_s4,weights=normwt)
test_s4$pred.m1.cv <- predict(object=out_train_s4 ,newdata=test_s4,allow.new.levels=TRUE,re.form=NULL )
test_s4$iter<-"s4"
#s5
splits_s5 <- splitdf(m1.2004)
test_s5 <- splits_s5$testset
train_s5 <- splits_s5$trainset
out_train_s5 <- lmer(m1.formula,data = train_s5,weights=normwt)
test_s5$pred.m1.cv <- predict(object=out_train_s5 ,newdata=test_s5,allow.new.levels=TRUE,re.form=NULL )
test_s5$iter<-"s5"
#s6
splits_s6 <- splitdf(m1.2004)
test_s6 <- splits_s6$testset
train_s6 <- splits_s6$trainset
out_train_s6 <- lmer(m1.formula,data = train_s6,weights=normwt)
test_s6$pred.m1.cv <- predict(object=out_train_s6 ,newdata=test_s6,allow.new.levels=TRUE,re.form=NULL )
test_s6$iter<-"s6"
#s7
splits_s7 <- splitdf(m1.2004)
test_s7 <- splits_s7$testset
train_s7 <- splits_s7$trainset
out_train_s7 <- lmer(m1.formula,data = train_s7,weights=normwt)
test_s7$pred.m1.cv <- predict(object=out_train_s7 ,newdata=test_s7,allow.new.levels=TRUE,re.form=NULL )
test_s7$iter<-"s7"
#s8
splits_s8 <- splitdf(m1.2004)
test_s8 <- splits_s8$testset
train_s8 <- splits_s8$trainset
out_train_s8 <- lmer(m1.formula,data = train_s8,weights=normwt)
test_s8$pred.m1.cv <- predict(object=out_train_s8 ,newdata=test_s8,allow.new.levels=TRUE,re.form=NULL )
test_s8$iter<-"s8"
#s9
splits_s9 <- splitdf(m1.2004)
test_s9 <- splits_s9$testset
train_s9 <- splits_s9$trainset
out_train_s9 <- lmer(m1.formula,data = train_s9,weights=normwt)
test_s9$pred.m1.cv <- predict(object=out_train_s9 ,newdata=test_s9,allow.new.levels=TRUE,re.form=NULL )
test_s9$iter<-"s9"
#s10
splits_s10 <- splitdf(m1.2004)
test_s10 <- splits_s10$testset
train_s10 <- splits_s10$trainset
out_train_s10 <- lmer(m1.formula,data = train_s10,weights=normwt)
test_s10$pred.m1.cv <- predict(object=out_train_s10 ,newdata=test_s10,allow.new.levels=TRUE,re.form=NULL )
test_s10$iter<-"s10"
#BIND the 10 held-out test sets into 1 dataset of out-of-sample predictions
m1.2004.cv<- data.table(rbind(test_s1,test_s2,test_s3,test_s4,test_s5,test_s6,test_s7,test_s8,test_s9, test_s10))
# cleanup (remove from WS) objects from CV
rm(list = ls(pattern = "train_|test_"))
#table updates: CV R2, intercept/slope (+ SEs) of observed-vs-CV-predicted regression
m1.fit.2004.cv<-lm(PM10~pred.m1.cv,data=m1.2004.cv)
res[res$year=="2004", 'm1cv.R2'] <- print(summary(lm(PM10~pred.m1.cv,data=m1.2004.cv))$r.squared)
res[res$year=="2004", 'm1cv.I'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2004.cv))$coef[1,1])
res[res$year=="2004", 'm1cv.I.se'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2004.cv))$coef[1,2])
res[res$year=="2004", 'm1cv.S'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2004.cv))$coef[2,1])
res[res$year=="2004", 'm1cv.S.se'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2004.cv))$coef[2,2])
#RMSPE
res[res$year=="2004", 'm1cv.PE'] <- print(rmse(residuals(m1.fit.2004.cv)))
#spatial CV R2
#NOTE(review): barpred averages pred.m1 (full-fit prediction), not pred.m1.cv --
#likely a copy-paste slip that inflates the spatial CV R2; confirm intent
spatial2004.cv<-m1.2004.cv %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2004.cv.s <- lm(barpm ~ barpred, data=spatial2004.cv)
res[res$year=="2004", 'm1cv.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2004.cv))$r.squared)
res[res$year=="2004", 'm1cv.PE.s'] <- print(rmse(residuals(m1.fit.2004.cv.s)))
#temporal CV R2
tempo2004.cv<-left_join(m1.2004.cv,spatial2004.cv)
tempo2004.cv$delpm <-tempo2004.cv$PM10-tempo2004.cv$barpm
tempo2004.cv$delpred <-tempo2004.cv$pred.m1.cv-tempo2004.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempo2004.cv)
res[res$year=="2004", 'm1cv.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2004.cv))$r.squared)
#-------->>> loc stage: add 50m local land-use data and model the CV residual
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
setnames(luf,"tden","loc.tden")
setnames(luf,"elev50","loc.elev")
#add 50m LU to CV data (left join by station)
setkey(m1.2004.cv,stn)
setkey(luf,stn)
m1.2004.cv.loc <- merge(m1.2004.cv, luf, all.x = T)
#m1.2004.cv.loc<-na.omit(m1.2004.cv.loc)
#create residual variable: observed minus CV prediction
m1.2004.cv.loc$res.m1<-m1.2004.cv.loc$PM10-m1.2004.cv.loc$pred.m1.cv
#The GAM model: smooth terms of local land-use explain the CV residual
gam.out<-gam(res.m1~s(loc.tden)+s(tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m1.2004.cv.loc)
#plot(bp.model.ps)
#summary(bp.model.ps)
## reg : combined prediction = CV prediction + local residual correction
m1.2004.cv.loc$pred.m1.loc <-predict(gam.out)
m1.2004.cv.loc$pred.m1.both <- m1.2004.cv.loc$pred.m1.cv + m1.2004.cv.loc$pred.m1.loc
res[res$year=="2004", 'm1cv.loc.R2'] <- print(summary(lm(PM10~pred.m1.both,data=m1.2004.cv.loc))$r.squared)
res[res$year=="2004", 'm1cv.loc.I'] <-print(summary(lm(PM10~pred.m1.both,data=m1.2004.cv.loc))$coef[1,1])
res[res$year=="2004", 'm1cv.loc.I.se'] <-print(summary(lm(PM10~pred.m1.both,data=m1.2004.cv.loc))$coef[1,2])
res[res$year=="2004", 'm1cv.loc.S'] <-print(summary(lm(PM10~pred.m1.both,data=m1.2004.cv.loc))$coef[2,1])
res[res$year=="2004", 'm1cv.loc.S.se'] <-print(summary(lm(PM10~pred.m1.both,data=m1.2004.cv.loc))$coef[2,2])
#RMSPE
#NOTE(review): reuses m1.fit.2004.cv (the non-loc fit), so m1cv.loc.PE duplicates
#m1cv.PE instead of measuring pred.m1.both -- looks like a copy-paste bug; confirm
res[res$year=="2004", 'm1cv.loc.PE'] <- print(rmse(residuals(m1.fit.2004.cv)))
#spatial
#NOTE(review): barpred again averages pred.m1 rather than pred.m1.both -- confirm
spatial2004.cv.loc<-m1.2004.cv.loc %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2004.cv.loc.s <- lm(barpm ~ barpred, data=spatial2004.cv.loc)
res[res$year=="2004", 'm1cv.loc.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2004.cv.loc))$r.squared)
res[res$year=="2004", 'm1cv.loc.PE.s'] <- print(rmse(residuals(m1.fit.2004.cv.loc.s)))
#temporal
tempo2004.loc.cv<-left_join(m1.2004.cv.loc,spatial2004.cv.loc)
tempo2004.loc.cv$delpm <-tempo2004.loc.cv$PM10-tempo2004.loc.cv$barpm
tempo2004.loc.cv$delpred <-tempo2004.loc.cv$pred.m1.both-tempo2004.loc.cv$barpred
mod_temporal.loc.cv <- lm(delpm ~ delpred, data=tempo2004.loc.cv)
res[res$year=="2004", 'm1cv.loc.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2004.loc.cv))$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2004.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
saveRDS(m1.2004.cv.loc,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2004.predCV.rds")
###############
#MOD2 : stage 2 -- predict PM10 on the full AOD grid (all grid-days with AOD,
#presumably without requiring a co-located monitor; verify against the builder script)
###############
m2.2004<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2004.rds")
#z-score the same continuous predictors used by the stage-1 formula; each
#<var>.s column is scale(<var>), added by reference to m2.2004
vars.to.scale <- c("elev", "tden", "pden", "dist2A1", "dist2water",
                   "dist2rail", "Dist2road", "ndvi", "MeanPbl",
                   "p_ind", "p_for", "p_farm", "p_dos", "p_dev", "p_os",
                   "tempa", "WDa", "WSa", "RHa", "Raina", "NO2a")
m2.2004[, (paste0(vars.to.scale, ".s")) := lapply(.SD, scale),
        .SDcols = vars.to.scale]
#generate stage-2 predictions from the fitted stage-1 mixed model
m2.2004[, pred.m2 := predict(object=m1.fit.2004,newdata=m2.2004,allow.new.levels=TRUE,re.form=NULL)]
describe(m2.2004$pred.m2)
#drop implausible predictions: keep 0 < pred.m2 < 1500 ug/m3
#(original comment was garbled by terminal escape residue: "implossible valuesOA[24~")
m2.2004 <- m2.2004[pred.m2 > 0.00000000000001 , ]
m2.2004 <- m2.2004[pred.m2 < 1500 , ]
saveRDS(m2.2004,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2004.pred2.rds")
#-------------->prepare for mod3 (gap filling for grid-days without AOD)
#bimon = two-month period index 1..6 derived from month m
m2.2004[, bimon := (m + 1) %/% 2]
setkey(m2.2004,day, aodid)
m2.2004<-m2.2004[!is.na(meanPM10)]
#run the lmer part regressing stage 2 pred Vs mean pm
#in israel check per month, also check 30km band and other methods for meanpm
#random intercept+slope per grid cell; optim optimizer for convergence
m2.smooth = lme(pred.m2 ~ meanPM10,random = list(aodid= ~1 + meanPM10),control=lmeControl(opt = "optim"), data= m2.2004 )
#xm2.smooth = lmer(pred.m2 ~ meanPM10+(1+ meanPM10|aodid), data= m2.2004 )
#correlate to see everything from mod2 and the mpm works
m2.2004[, pred.t31 := predict(m2.smooth)]
m2.2004[, resid := residuals(m2.smooth)]
res[res$year=="2004", 'm3.t31'] <- print(summary(lm(pred.m2~pred.t31,data=m2.2004))$r.squared)
#split the files to the separate bi monthly datsets
T2004_bimon1 <- subset(m2.2004 ,m2.2004$bimon == "1")
T2004_bimon2 <- subset(m2.2004 ,m2.2004$bimon == "2")
T2004_bimon3 <- subset(m2.2004 ,m2.2004$bimon == "3")
T2004_bimon4 <- subset(m2.2004 ,m2.2004$bimon == "4")
T2004_bimon5 <- subset(m2.2004 ,m2.2004$bimon == "5")
T2004_bimon6 <- subset(m2.2004 ,m2.2004$bimon == "6")
#run the separate splines (smooth) for x and y for each bimon
#whats the default band (distance) that the spline goes out and uses
fit2_1 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2004_bimon1 )
fit2_2 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2004_bimon2 )
fit2_3 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2004_bimon3 )
fit2_4 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2004_bimon4 )
fit2_5 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2004_bimon5 )
fit2_6 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2004_bimon6 )
#get the predicted-fitted (remove the spatial residual surface from pred.t31)
Xpred_1 <- (T2004_bimon1$pred.t31 - fit2_1$fitted)
Xpred_2 <- (T2004_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (T2004_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (T2004_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (T2004_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (T2004_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file (concatenation relies on bimon subsets preserving row order)
m2.2004$pred.t32 <- c( Xpred_1,Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.2004,day, aodid)
#rerun the lme on the predictions including the spatial spline (smooth)
Final_pred_2004 <- lme(pred.t32 ~ meanPM10 ,random = list(aodid= ~1 + meanPM10 ),control=lmeControl(opt = "optim"),data= m2.2004 )
m2.2004[, pred.t33 := predict(Final_pred_2004)]
#check correlations
res[res$year=="2004", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.2004))$r.squared)
#------------------------>>>
#import mod3 (all grid-days, including those with no AOD retrieval)
data.m3 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2004.rds")
#for PM10: keep id/coords/date/meanPM10 columns by position
data.m3 <- data.m3[,c(1,2,5,29:32,52,53),with=FALSE]
data.m3[, bimon := (m + 1) %/% 2]
setkey(data.m3,day, aodid)
data.m3<-data.m3[!is.na(meanPM10)]
#generate m.3 initial pred from the calibration lme (meanPM10 -> PM10 scale)
data.m3$pred.m3.mix <- predict(Final_pred_2004,data.m3)
#create unique grid (one row per aodid with mean coordinates)
ugrid <-data.m3 %>%
  group_by(aodid) %>%
  summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE))
#### PREDICT Gam part
#split back into bimons to include the gam prediction in final prediction
data.m3_bimon1 <- data.m3[bimon == 1, ]
data.m3_bimon2 <- data.m3[bimon == 2, ]
data.m3_bimon3 <- data.m3[bimon == 3, ]
data.m3_bimon4 <- data.m3[bimon == 4, ]
data.m3_bimon5 <- data.m3[bimon == 5, ]
data.m3_bimon6 <- data.m3[bimon == 6, ]
#addin unique grid to each bimon
uniq_gid_bimon1 <- ugrid
uniq_gid_bimon2 <- ugrid
uniq_gid_bimon3 <- ugrid
uniq_gid_bimon4 <- ugrid
uniq_gid_bimon5 <- ugrid
uniq_gid_bimon6 <- ugrid
#get predictions for Bimon residual surfaces at every grid cell
uniq_gid_bimon1$gpred <- predict.gam(fit2_1,uniq_gid_bimon1)
uniq_gid_bimon2$gpred <- predict.gam(fit2_2,uniq_gid_bimon2)
uniq_gid_bimon3$gpred <- predict.gam(fit2_3,uniq_gid_bimon3)
uniq_gid_bimon4$gpred <- predict.gam(fit2_4,uniq_gid_bimon4)
uniq_gid_bimon5$gpred <- predict.gam(fit2_5,uniq_gid_bimon5)
uniq_gid_bimon6$gpred <- predict.gam(fit2_6,uniq_gid_bimon6)
#merge things back togheter
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> merges: attach gpred to each bimon by aodid
setkey(uniq_gid_bimon1,aodid)
setkey(data.m3_bimon1,aodid)
data.m3_bimon1 <- merge(data.m3_bimon1, uniq_gid_bimon1[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon2,aodid)
setkey(data.m3_bimon2,aodid)
data.m3_bimon2 <- merge(data.m3_bimon2, uniq_gid_bimon2[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon3,aodid)
setkey(data.m3_bimon3,aodid)
data.m3_bimon3 <- merge(data.m3_bimon3, uniq_gid_bimon3[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon4,aodid)
setkey(data.m3_bimon4,aodid)
data.m3_bimon4 <- merge(data.m3_bimon4, uniq_gid_bimon4[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon5,aodid)
setkey(data.m3_bimon5,aodid)
data.m3_bimon5 <- merge(data.m3_bimon5, uniq_gid_bimon5[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon6,aodid)
setkey(data.m3_bimon6,aodid)
data.m3_bimon6 <- merge(data.m3_bimon6, uniq_gid_bimon6[,list(aodid,gpred)], all.x = T)
#reattach all parts
mod3 <- rbind(data.m3_bimon1,data.m3_bimon2,data.m3_bimon3,data.m3_bimon4,data.m3_bimon5,data.m3_bimon6)
# create pred.m3 = calibration prediction + spatial GAM residual term
mod3$pred.m3 <-mod3$pred.m3.mix+mod3$gpred
#hist(mod3$pred.m3)
#describe(mod3$pred.m3)
#drop rows with negative predictions
#NOTE(review): original comment said "recode negative into zero" but rows are removed
mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2004.pred.rds")
#clean workspace: keep only mod3, res, rmse
keep(mod3,res,rmse, sure=TRUE)
gc()
#########################
#prepare for m3.R2 : evaluate stage-3 predictions against monitored PM10
#########################
#load mod1 (station-day data with stage-1 predictions)
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2004.pred.rds")
mod1[,aodid:= paste(mod1$long_aod,mod1$lat_aod,sep="-")]
mod1<-mod1[,c("aodid","day","PM10","pred.m1","stn"),with=FALSE]
#R2.m3 : merge stage-3 predictions onto station-day observations
setkey(mod3,day,aodid)
setkey(mod1,day,aodid)
m1.2004 <- merge(mod1,mod3[, list(day,aodid,pred.m3)], all.x = T)
m3.fit.2004<- summary(lm(PM10~pred.m3,data=m1.2004))
res[res$year=="2004", 'm3.R2'] <- print(summary(lm(PM10~pred.m3,data=m1.2004))$r.squared)
#RMSPE
res[res$year=="2004", 'm3.PE'] <- print(rmse(residuals(m3.fit.2004)))
#spatial R2: station means of observed vs predicted
###to check
spatial2004<-m1.2004 %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m3, na.rm=TRUE))
m1.fit.2004.spat<- lm(barpm ~ barpred, data=spatial2004)
res[res$year=="2004", 'm3.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2004))$r.squared)
res[res$year=="2004", 'm3.PE.s'] <- print(rmse(residuals(m1.fit.2004.spat)))
#temporal R2: deviations from station means
tempo2004<-left_join(m1.2004,spatial2004)
tempo2004$delpm <-tempo2004$PM10-tempo2004$barpm
tempo2004$delpred <-tempo2004$pred.m3-tempo2004$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2004)
res[res$year=="2004", 'm3.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2004))$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2004.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
#########################
#import mod2 (full-grid stage-2 predictions)
mod2<- readRDS( "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2004.pred2.rds")
mod2<-mod2[,c("aodid","day","pred.m2"),with=FALSE]
#----------------> store the best available prediction per grid-day
mod3best <- mod3[, list(aodid, x_aod_ITM, y_aod_ITM, day, pred.m3)]
setkey(mod3best, day, aodid)
setkey(mod2, day, aodid)
mod3best <- merge(mod3best, mod2[,list(aodid, day, pred.m2)], all.x = T)
#reload mod1 (station-day observations + stage-1 predictions)
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2004.pred.rds")
mod1$aodid<-paste(mod1$long_aod,mod1$lat_aod,sep="-")
mod1<-mod1[,c("aodid","day","PM10","pred.m1"),with=FALSE]
setkey(mod1,day,aodid)
mod3best <- merge(mod3best, mod1, all.x = T)
#bestpred priority: m1 (station-based) > m2 (AOD-day) > m3 (gap-filled)
mod3best[,bestpred := pred.m3]
mod3best[!is.na(pred.m2),bestpred := pred.m2]
mod3best[!is.na(pred.m1),bestpred := pred.m1]
#save
saveRDS(mod3best,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2004.FINAL.rds")
#save per-grid-cell long-term summary for GIS
write.csv(mod3best[, list(LTPM = mean(bestpred, na.rm = T),
                         predvariance = var(bestpred, na.rm = T),
                         predmin = min(bestpred, na.rm = T),
                         predmax = max(bestpred, na.rm = T),
                         npred = sum(!is.na(bestpred)),
                         npred.m1 = sum(!is.na(pred.m1)),
                         npred.m2 = sum(!is.na(pred.m2)),
                         npred.m3 = sum(!is.na(pred.m3)),
                         x_aod_ITM = x_aod_ITM[1], y_aod_ITM = y_aod_ITM[1]),by=aodid], "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/LTPM3.AQ.2004.csv", row.names = F)
#wrap up 2004: keep only the results table, then free memory
keep(res, sure=TRUE)
gc()
###############
#LIBS (reloaded after the keep() wipe of the 2004 section)
###############
library(lme4);library(reshape);library(foreign) ;library(ggplot2);library(plyr);library(data.table);library(reshape2);library(Hmisc);library(mgcv);library(gdata);library(car);library(dplyr);library(ggmap);library(broom);library(splines)
#sourcing helper snippets: splitdf() (CV splits) and rmse()
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
#if needed load res table
res<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
### import data: station-day dataset for the 2005 stage-1 model
m1.2005 <-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2005.rds")
################# clean BAD STN PM10 and check if improved model?
#per-station PM10~aod regression; stations with near-zero R2 are flagged as bad
raWDaf <- ddply(m1.2005, c( "stn"),
                function(x) {
                  mod1 <- lm(PM10 ~ aod, data=x)
                  data.frame(R2 = round(summary(mod1)$r.squared, 5),
                             nsamps = length(summary(mod1)$resid))
                })
raWDaf
raWDaf<-as.data.table(raWDaf)
bad<- raWDaf[R2< 0.01]
bad[,badid := paste(stn,sep="-")]
#################BAD STN id on the main table (same construction as in `bad`)
m1.2005[,badid := paste(stn,sep="-")]
####Take out bad stations
m1.2005 <- m1.2005[!(m1.2005$badid %in% bad$badid), ]
#scale vars: z-score every continuous predictor in place; each new column
#<var>.s holds scale(<var>) (centered, divided by its SD), added by reference
vars.to.scale <- c("elev", "tden", "pden", "dist2A1", "dist2water",
                   "dist2rail", "Dist2road", "ndvi", "MeanPbl",
                   "p_ind", "p_for", "p_farm", "p_dos", "p_dev", "p_os",
                   "tempa", "WDa", "WSa", "RHa", "Raina", "NO2a")
m1.2005[, (paste0(vars.to.scale, ".s")) := lapply(.SD, scale),
        .SDcols = vars.to.scale]
#stage-1 mixed model: PM10 on AOD with temporal/spatial/land-use covariates and
#a random AOD slope+intercept nested as day/region (same spec as 2004)
m1.formula <- as.formula(PM10~ aod
+tempa.s+WDa.s+WSa.s+Dust+MeanPbl.s #temporal
+elev.s+tden.s+pden.s+Dist2road.s+ndvi.s #spatial
+p_os.s #+p_dev.s+p_dos.s+p_farm.s+p_for.s+p_ind.s #land use
#+aod*Dust #interactions
+(1+aod|day/reg_num)) #+(1|stn) !!! stn screws up mod3
#full fit (normwt = observation weights column)
m1.fit.2005 <- lmer(m1.formula,data=m1.2005,weights=normwt)
m1.2005$pred.m1 <- predict(m1.fit.2005)
res[res$year=="2005", 'm1.R2'] <- print(summary(lm(PM10~pred.m1,data=m1.2005))$r.squared)
#RMSPE
res[res$year=="2005", 'm1.PE'] <- print(rmse(residuals(m1.fit.2005)))
#spatial R2: station means of observed vs predicted
###to check
spatial2005<-m1.2005 %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2005.spat<- lm(barpm ~ barpred, data=spatial2005)
res[res$year=="2005", 'm1.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2005))$r.squared)
res[res$year=="2005", 'm1.PE.s'] <- print(rmse(residuals(m1.fit.2005.spat)))
#temporal R2: deviations from station means
tempo2005<-left_join(m1.2005,spatial2005)
tempo2005$delpm <-tempo2005$PM10-tempo2005$barpm
tempo2005$delpred <-tempo2005$pred.m1-tempo2005$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2005)
res[res$year=="2005", 'm1.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2005))$r.squared)
saveRDS(m1.2005,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2005.pred.rds")
#---------------->>>> CV: 10 random train/test splits (splitdf from CV_splits.r);
#each iteration refits the stage-1 lmer on the train set and predicts the held-out
#test set (re.form=NULL keeps random effects; allow.new.levels handles unseen days)
#s1
splits_s1 <- splitdf(m1.2005)
test_s1 <- splits_s1$testset
train_s1 <- splits_s1$trainset
out_train_s1 <- lmer(m1.formula,data = train_s1,weights=normwt)
test_s1$pred.m1.cv <- predict(object=out_train_s1 ,newdata=test_s1,allow.new.levels=TRUE,re.form=NULL )
test_s1$iter<-"s1"
#s2
splits_s2 <- splitdf(m1.2005)
test_s2 <- splits_s2$testset
train_s2 <- splits_s2$trainset
out_train_s2 <- lmer(m1.formula,data = train_s2,weights=normwt)
test_s2$pred.m1.cv <- predict(object=out_train_s2 ,newdata=test_s2,allow.new.levels=TRUE,re.form=NULL )
test_s2$iter<-"s2"
#s3
splits_s3 <- splitdf(m1.2005)
test_s3 <- splits_s3$testset
train_s3 <- splits_s3$trainset
out_train_s3 <- lmer(m1.formula,data = train_s3,weights=normwt)
test_s3$pred.m1.cv <- predict(object=out_train_s3 ,newdata=test_s3,allow.new.levels=TRUE,re.form=NULL )
test_s3$iter<-"s3"
#s4
splits_s4 <- splitdf(m1.2005)
test_s4 <- splits_s4$testset
train_s4 <- splits_s4$trainset
out_train_s4 <- lmer(m1.formula,data = train_s4,weights=normwt)
test_s4$pred.m1.cv <- predict(object=out_train_s4 ,newdata=test_s4,allow.new.levels=TRUE,re.form=NULL )
test_s4$iter<-"s4"
#s5
splits_s5 <- splitdf(m1.2005)
test_s5 <- splits_s5$testset
train_s5 <- splits_s5$trainset
out_train_s5 <- lmer(m1.formula,data = train_s5,weights=normwt)
test_s5$pred.m1.cv <- predict(object=out_train_s5 ,newdata=test_s5,allow.new.levels=TRUE,re.form=NULL )
test_s5$iter<-"s5"
#s6
splits_s6 <- splitdf(m1.2005)
test_s6 <- splits_s6$testset
train_s6 <- splits_s6$trainset
out_train_s6 <- lmer(m1.formula,data = train_s6,weights=normwt)
test_s6$pred.m1.cv <- predict(object=out_train_s6 ,newdata=test_s6,allow.new.levels=TRUE,re.form=NULL )
test_s6$iter<-"s6"
#s7
splits_s7 <- splitdf(m1.2005)
test_s7 <- splits_s7$testset
train_s7 <- splits_s7$trainset
out_train_s7 <- lmer(m1.formula,data = train_s7,weights=normwt)
test_s7$pred.m1.cv <- predict(object=out_train_s7 ,newdata=test_s7,allow.new.levels=TRUE,re.form=NULL )
test_s7$iter<-"s7"
#s8
splits_s8 <- splitdf(m1.2005)
test_s8 <- splits_s8$testset
train_s8 <- splits_s8$trainset
out_train_s8 <- lmer(m1.formula,data = train_s8,weights=normwt)
test_s8$pred.m1.cv <- predict(object=out_train_s8 ,newdata=test_s8,allow.new.levels=TRUE,re.form=NULL )
test_s8$iter<-"s8"
#s9
splits_s9 <- splitdf(m1.2005)
test_s9 <- splits_s9$testset
train_s9 <- splits_s9$trainset
out_train_s9 <- lmer(m1.formula,data = train_s9,weights=normwt)
test_s9$pred.m1.cv <- predict(object=out_train_s9 ,newdata=test_s9,allow.new.levels=TRUE,re.form=NULL )
test_s9$iter<-"s9"
#s10
splits_s10 <- splitdf(m1.2005)
test_s10 <- splits_s10$testset
train_s10 <- splits_s10$trainset
out_train_s10 <- lmer(m1.formula,data = train_s10,weights=normwt)
test_s10$pred.m1.cv <- predict(object=out_train_s10 ,newdata=test_s10,allow.new.levels=TRUE,re.form=NULL )
test_s10$iter<-"s10"
#BIND the 10 held-out test sets into 1 dataset of out-of-sample predictions
m1.2005.cv<- data.table(rbind(test_s1,test_s2,test_s3,test_s4,test_s5,test_s6,test_s7,test_s8,test_s9, test_s10))
# cleanup (remove from WS) objects from CV
rm(list = ls(pattern = "train_|test_"))
#table updates
m1.fit.2005.cv<-lm(PM10~pred.m1.cv,data=m1.2005.cv)
res[res$year=="2005", 'm1cv.R2'] <- print(summary(lm(PM10~pred.m1.cv,data=m1.2005.cv))$r.squared)
res[res$year=="2005", 'm1cv.I'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2005.cv))$coef[1,1])
res[res$year=="2005", 'm1cv.I.se'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2005.cv))$coef[1,2])
res[res$year=="2005", 'm1cv.S'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2005.cv))$coef[2,1])
res[res$year=="2005", 'm1cv.S.se'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2005.cv))$coef[2,2])
#RMSPE
res[res$year=="2005", 'm1cv.PE'] <- print(rmse(residuals(m1.fit.2005.cv)))
#spatial
spatial2005.cv<-m1.2005.cv %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2005.cv.s <- lm(barpm ~ barpred, data=spatial2005.cv)
res[res$year=="2005", 'm1cv.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2005.cv))$r.squared)
res[res$year=="2005", 'm1cv.PE.s'] <- print(rmse(residuals(m1.fit.2005.cv.s)))
#temporal
tempo2005.cv<-left_join(m1.2005.cv,spatial2005.cv)
tempo2005.cv$delpm <-tempo2005.cv$PM10-tempo2005.cv$barpm
tempo2005.cv$delpred <-tempo2005.cv$pred.m1.cv-tempo2005.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempo2005.cv)
res[res$year=="2005", 'm1cv.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2005.cv))$r.squared)
#-------->>> loc stage
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
setnames(luf,"tden","loc.tden")
setnames(luf,"elev50","loc.elev")
#add 50m LU to CV data
setkey(m1.2005.cv,stn)
setkey(luf,stn)
m1.2005.cv.loc <- merge(m1.2005.cv, luf, all.x = T)
#m1.2005.cv.loc<-na.omit(m1.2005.cv.loc)
#create residual mp3 variable
m1.2005.cv.loc$res.m1<-m1.2005.cv.loc$PM10-m1.2005.cv.loc$pred.m1.cv
#The GAM model
gam.out<-gam(res.m1~s(loc.tden)+s(tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m1.2005.cv.loc)
#plot(bp.model.ps)
#summary(bp.model.ps)
## reg
m1.2005.cv.loc$pred.m1.loc <-predict(gam.out)
m1.2005.cv.loc$pred.m1.both <- m1.2005.cv.loc$pred.m1.cv + m1.2005.cv.loc$pred.m1.loc
res[res$year=="2005", 'm1cv.loc.R2'] <- print(summary(lm(PM10~pred.m1.both,data=m1.2005.cv.loc))$r.squared)
res[res$year=="2005", 'm1cv.loc.I'] <-print(summary(lm(PM10~pred.m1.both,data=m1.2005.cv.loc))$coef[1,1])
res[res$year=="2005", 'm1cv.loc.I.se'] <-print(summary(lm(PM10~pred.m1.both,data=m1.2005.cv.loc))$coef[1,2])
res[res$year=="2005", 'm1cv.loc.S'] <-print(summary(lm(PM10~pred.m1.both,data=m1.2005.cv.loc))$coef[2,1])
res[res$year=="2005", 'm1cv.loc.S.se'] <-print(summary(lm(PM10~pred.m1.both,data=m1.2005.cv.loc))$coef[2,2])
#RMSPE
res[res$year=="2005", 'm1cv.loc.PE'] <- print(rmse(residuals(m1.fit.2005.cv)))
#spatial
spatial2005.cv.loc<-m1.2005.cv.loc %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2005.cv.loc.s <- lm(barpm ~ barpred, data=spatial2005.cv.loc)
res[res$year=="2005", 'm1cv.loc.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2005.cv.loc))$r.squared)
res[res$year=="2005", 'm1cv.loc.PE.s'] <- print(rmse(residuals(m1.fit.2005.cv.loc.s)))
# Temporal skill of the combined (CV + local GAM) prediction, then persist the
# results table and the CV dataset as a midpoint checkpoint.
tempo2005.loc.cv <- left_join(m1.2005.cv.loc, spatial2005.cv.loc)
tempo2005.loc.cv$delpm <- tempo2005.loc.cv$PM10 - tempo2005.loc.cv$barpm
tempo2005.loc.cv$delpred <- tempo2005.loc.cv$pred.m1.both - tempo2005.loc.cv$barpred
mod_temporal.loc.cv <- lm(delpm ~ delpred, data = tempo2005.loc.cv)
res[res$year == "2005", "m1cv.loc.R2.t"] <- print(summary(mod_temporal.loc.cv)$r.squared)
# Midpoint saves: per-year results file, rolling all-years file, CV data.
work.dir <- "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR"
saveRDS(res, file.path(work.dir, "res.AQ.2005.rds"))
saveRDS(res, file.path(work.dir, "resALL.AQ.rds"))
saveRDS(m1.2005.cv.loc, file.path(work.dir, "mod1.AQ.2005.predCV.rds"))
###############
#MOD2
###############
# Stage 2 for 2005: predict PM10 over the full AOD grid with the stage-1
# mixed model (m1.fit.2005), using the same standardized covariates.
m2.2005 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2005.rds")
# Standardize every covariate used in m1.formula; one .s column per variable,
# created in the same order as the original unrolled assignments.
vars.to.scale <- c("elev", "tden", "pden", "dist2A1", "dist2water",
                   "dist2rail", "Dist2road", "ndvi", "MeanPbl", "p_ind",
                   "p_for", "p_farm", "p_dos", "p_dev", "p_os", "tempa",
                   "WDa", "WSa", "RHa", "Raina", "NO2a")
for (v in vars.to.scale) {
  m2.2005[, (paste0(v, ".s")) := scale(get(v))]
}
# Predict on the grid; allow.new.levels gives population-level estimates for
# day/region combinations not seen during fitting.
m2.2005[, pred.m2 := predict(object = m1.fit.2005, newdata = m2.2005, allow.new.levels = TRUE, re.form = NULL)]
describe(m2.2005$pred.m2)
# Drop impossible predictions (non-positive or absurdly high).
m2.2005 <- m2.2005[pred.m2 > 0.00000000000001, ]
m2.2005 <- m2.2005[pred.m2 < 1500, ]
saveRDS(m2.2005, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2005.pred2.rds")
#-------------->prepare for mod3
# Bi-monthly period index (1..6) derived from calendar month m; separate
# spatial residual surfaces are fitted per bimon below.
m2.2005[, bimon := (m + 1) %/% 2]
setkey(m2.2005,day, aodid)
# meanPM10 (regional mean PM) is the stage-3 predictor; drop rows lacking it.
m2.2005<-m2.2005[!is.na(meanPM10)]
#run the lmer part regressing stage 2 pred Vs mean pm
#in israel check per month, also check 30km band and other methods for meanpm
m2.smooth = lme(pred.m2 ~ meanPM10,random = list(aodid= ~1 + meanPM10),control=lmeControl(opt = "optim"), data= m2.2005 )
#xm2.smooth = lmer(pred.m2 ~ meanPM10+(1+ meanPM10|aodid), data= m2.2005 )
#correlate to see everything from mod2 and the mpm works
m2.2005[, pred.t31 := predict(m2.smooth)]
m2.2005[, resid := residuals(m2.smooth)]
res[res$year=="2005", 'm3.t31'] <- print(summary(lm(pred.m2~pred.t31,data=m2.2005))$r.squared)
#split the files to the separate bi monthly datsets
T2005_bimon1 <- subset(m2.2005 ,m2.2005$bimon == "1")
T2005_bimon2 <- subset(m2.2005 ,m2.2005$bimon == "2")
T2005_bimon3 <- subset(m2.2005 ,m2.2005$bimon == "3")
T2005_bimon4 <- subset(m2.2005 ,m2.2005$bimon == "4")
T2005_bimon5 <- subset(m2.2005 ,m2.2005$bimon == "5")
T2005_bimon6 <- subset(m2.2005 ,m2.2005$bimon == "6")
#run the separate splines (smooth) for x and y for each bimon
#whats the default band (distance) that the spline goes out and uses
fit2_1 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2005_bimon1 )
fit2_2 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2005_bimon2 )
fit2_3 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2005_bimon3 )
fit2_4 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2005_bimon4 )
fit2_5 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2005_bimon5 )
fit2_6 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2005_bimon6 )
#get the predicted-fitted
Xpred_1 <- (T2005_bimon1$pred.t31 - fit2_1$fitted)
Xpred_2 <- (T2005_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (T2005_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (T2005_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (T2005_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (T2005_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file
# NOTE(review): this concatenation assumes rows of m2.2005 sorted by
# (day, aodid) are also grouped by bimon in order 1..6 -- that holds only if
# 'day' sorts chronologically within the year; confirm upstream.
m2.2005$pred.t32 <- c( Xpred_1,Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.2005,day, aodid)
#rerun the lme on the predictions including the spatial spline (smooth)
Final_pred_2005 <- lme(pred.t32 ~ meanPM10 ,random = list(aodid= ~1 + meanPM10 ),control=lmeControl(opt = "optim"),data= m2.2005 )
m2.2005[, pred.t33 := predict(Final_pred_2005)]
#check correlations
res[res$year=="2005", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.2005))$r.squared)
#------------------------>>>
#import mod3
# Stage 3: gap-fill grid-days without AOD using Final_pred_2005 (regional mean
# PM mixed model) plus the bi-monthly spatial residual surfaces (fit2_1..6).
data.m3 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2005.rds")
#for PM10
# NOTE(review): column selection by numeric position is fragile -- confirm
# these indices still match the saved mod3 file's layout.
data.m3 <- data.m3[,c(1,2,5,29:32,52,53),with=FALSE]
data.m3[, bimon := (m + 1) %/% 2]
setkey(data.m3,day, aodid)
data.m3<-data.m3[!is.na(meanPM10)]
#generate m.3 initial pred
data.m3$pred.m3.mix <- predict(Final_pred_2005,data.m3)
#create unique grid
# One row per AOD cell with its (averaged, effectively constant) coordinates.
ugrid <-data.m3 %>%
group_by(aodid) %>%
summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE))
#### PREDICT Gam part
#split back into bimons to include the gam prediction in final prediction
data.m3_bimon1 <- data.m3[bimon == 1, ]
data.m3_bimon2 <- data.m3[bimon == 2, ]
data.m3_bimon3 <- data.m3[bimon == 3, ]
data.m3_bimon4 <- data.m3[bimon == 4, ]
data.m3_bimon5 <- data.m3[bimon == 5, ]
data.m3_bimon6 <- data.m3[bimon == 6, ]
#addin unique grid to each bimon
uniq_gid_bimon1 <- ugrid
uniq_gid_bimon2 <- ugrid
uniq_gid_bimon3 <- ugrid
uniq_gid_bimon4 <- ugrid
uniq_gid_bimon5 <- ugrid
uniq_gid_bimon6 <- ugrid
#get predictions for Bimon residuals
# Evaluate each bi-monthly residual surface (fitted in the mod2 section) at
# every unique grid cell.
uniq_gid_bimon1$gpred <- predict.gam(fit2_1,uniq_gid_bimon1)
uniq_gid_bimon2$gpred <- predict.gam(fit2_2,uniq_gid_bimon2)
uniq_gid_bimon3$gpred <- predict.gam(fit2_3,uniq_gid_bimon3)
uniq_gid_bimon4$gpred <- predict.gam(fit2_4,uniq_gid_bimon4)
uniq_gid_bimon5$gpred <- predict.gam(fit2_5,uniq_gid_bimon5)
uniq_gid_bimon6$gpred <- predict.gam(fit2_6,uniq_gid_bimon6)
#merge things back togheter
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> merges
setkey(uniq_gid_bimon1,aodid)
setkey(data.m3_bimon1,aodid)
data.m3_bimon1 <- merge(data.m3_bimon1, uniq_gid_bimon1[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon2,aodid)
setkey(data.m3_bimon2,aodid)
data.m3_bimon2 <- merge(data.m3_bimon2, uniq_gid_bimon2[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon3,aodid)
setkey(data.m3_bimon3,aodid)
data.m3_bimon3 <- merge(data.m3_bimon3, uniq_gid_bimon3[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon4,aodid)
setkey(data.m3_bimon4,aodid)
data.m3_bimon4 <- merge(data.m3_bimon4, uniq_gid_bimon4[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon5,aodid)
setkey(data.m3_bimon5,aodid)
data.m3_bimon5 <- merge(data.m3_bimon5, uniq_gid_bimon5[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon6,aodid)
setkey(data.m3_bimon6,aodid)
data.m3_bimon6 <- merge(data.m3_bimon6, uniq_gid_bimon6[,list(aodid,gpred)], all.x = T)
#reattach all parts
mod3 <- rbind(data.m3_bimon1,data.m3_bimon2,data.m3_bimon3,data.m3_bimon4,data.m3_bimon5,data.m3_bimon6)
# create pred.m3
# Final stage-3 prediction = mixed-model part + spatial residual surface.
mod3$pred.m3 <-mod3$pred.m3.mix+mod3$gpred
#hist(mod3$pred.m3)
#describe(mod3$pred.m3)
# Drop negative predictions. NOTE(review): rows are filtered out, not recoded
# to zero as the original comment claimed.
mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2005.pred.rds")
#clean
# Free the workspace, keeping only mod3, the results table and rmse().
keep(mod3,res,rmse, sure=TRUE)
gc()
#########################
#prepare for m3.R2
#########################
#load mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2005.pred.rds")
mod1[,aodid:= paste(mod1$long_aod,mod1$lat_aod,sep="-")]
mod1<-mod1[,c("aodid","day","PM10","pred.m1","stn"),with=FALSE]
#R2.m3
# Evaluate mod3 against monitored PM10 by joining the gap-filled predictions
# onto the station records.
setkey(mod3,day,aodid)
setkey(mod1,day,aodid)
m1.2005 <- merge(mod1,mod3[, list(day,aodid,pred.m3)], all.x = T)
# m3.fit.2005 is a summary.lm object; residuals() below extracts $residuals.
m3.fit.2005<- summary(lm(PM10~pred.m3,data=m1.2005))
res[res$year=="2005", 'm3.R2'] <- print(summary(lm(PM10~pred.m3,data=m1.2005))$r.squared)
#RMSPE
res[res$year=="2005", 'm3.PE'] <- print(rmse(residuals(m3.fit.2005)))
#spatial
###to check
# Spatial skill: station long-term mean observed vs predicted PM10.
spatial2005<-m1.2005 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m3, na.rm=TRUE))
m1.fit.2005.spat<- lm(barpm ~ barpred, data=spatial2005)
res[res$year=="2005", 'm3.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2005))$r.squared)
res[res$year=="2005", 'm3.PE.s'] <- print(rmse(residuals(m1.fit.2005.spat)))
#temporal
# Temporal skill: daily deviations from each station's long-term mean.
tempo2005<-left_join(m1.2005,spatial2005)
tempo2005$delpm <-tempo2005$PM10-tempo2005$barpm
tempo2005$delpred <-tempo2005$pred.m3-tempo2005$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2005)
res[res$year=="2005", 'm3.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2005))$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2005.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
#########################
#import mod2
mod2<- readRDS( "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2005.pred2.rds")
mod2<-mod2[,c("aodid","day","pred.m2"),with=FALSE]
#----------------> store the best available
# Best-available prediction per grid-day: start from mod3 (gap-filled), then
# overwrite with mod2 where present, then with mod1 where present -- i.e.
# priority order mod1 > mod2 > mod3.
mod3best <- mod3[, list(aodid, x_aod_ITM, y_aod_ITM, day, pred.m3)]
setkey(mod3best, day, aodid)
setkey(mod2, day, aodid)
mod3best <- merge(mod3best, mod2[,list(aodid, day, pred.m2)], all.x = T)
#reload mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2005.pred.rds")
mod1$aodid<-paste(mod1$long_aod,mod1$lat_aod,sep="-")
mod1<-mod1[,c("aodid","day","PM10","pred.m1"),with=FALSE]
setkey(mod1,day,aodid)
mod3best <- merge(mod3best, mod1, all.x = T)
mod3best[,bestpred := pred.m3]
mod3best[!is.na(pred.m2),bestpred := pred.m2]
mod3best[!is.na(pred.m1),bestpred := pred.m1]
#save
saveRDS(mod3best,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2005.FINAL.rds")
#save for GIS
# Long-term per-cell summary of the best prediction, exported for GIS mapping.
write.csv(mod3best[, list(LTPM = mean(bestpred, na.rm = T),
predvariance = var(bestpred, na.rm = T),
predmin = min(bestpred, na.rm = T),
predmax = max(bestpred, na.rm = T),
npred = sum(!is.na(bestpred)),
npred.m1 = sum(!is.na(pred.m1)),
npred.m2 = sum(!is.na(pred.m2)),
npred.m3 = sum(!is.na(pred.m3)),
x_aod_ITM = x_aod_ITM[1], y_aod_ITM = y_aod_ITM[1]),by=aodid], "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/LTPM3.AQ.2005.csv", row.names = F)
# Reset the workspace for the next year, keeping only the results table.
keep(res, sure=TRUE)
# no-op: stray c() call, presumably left over from editing
c()
###############
#LIBS
###############
# Start of the 2006 section. Packages are re-attached after the keep() reset
# (keep() removes objects, not packages, but the script reloads defensively).
library(lme4);library(reshape);library(foreign) ;library(ggplot2);library(plyr);library(data.table);library(reshape2);library(Hmisc);library(mgcv);library(gdata);library(car);library(dplyr);library(ggmap);library(broom);library(splines)
#sourcing
# Helper snippets: presumably define splitdf() (CV splitting) and rmse() used
# throughout -- confirm against the sourced files.
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
#if needed load res table
#res<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
### import data
m1.2006 <-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2006.rds")
################# clean BAD STN PM10 and check if improved model?
# Per-station screening: regress PM10 on AOD for each monitor; stations whose
# raw PM10~aod R2 falls below 0.01 are treated as unreliable and removed.
raWDaf <- ddply(m1.2006, c( "stn"),
function(x) {
mod1 <- lm(PM10 ~ aod, data=x)
data.frame(R2 = round(summary(mod1)$r.squared, 5),
nsamps = length(summary(mod1)$resid))
})
raWDaf
raWDaf<-as.data.table(raWDaf)
bad<- raWDaf[R2< 0.01]
# paste(stn, sep = "-") with a single argument is effectively as.character(stn).
bad[,badid := paste(stn,sep="-")]
#################BAD STN
m1.2006[,badid := paste(stn,sep="-")]
####Take out bad stations
m1.2006 <- m1.2006[!(m1.2006$badid %in% bad$badid), ]
#scale vars
# Standardize every covariate used in the stage-1 formula; one .s column per
# variable, created in the same order as the original unrolled assignments.
vars.to.scale <- c("elev", "tden", "pden", "dist2A1", "dist2water",
                   "dist2rail", "Dist2road", "ndvi", "MeanPbl", "p_ind",
                   "p_for", "p_farm", "p_dos", "p_dev", "p_os", "tempa",
                   "WDa", "WSa", "RHa", "Raina", "NO2a")
for (v in vars.to.scale) {
  m1.2006[, (paste0(v, ".s")) := scale(get(v))]
}
# Stage-1 mixed-model formula: PM10 on AOD with temporal, spatial and land-use
# covariates, plus random intercepts and AOD slopes nested as day/region.
m1.formula <- as.formula(PM10~ aod
+tempa.s+WDa.s+WSa.s+Dust+MeanPbl.s #temporal
+elev.s+tden.s+pden.s+Dist2road.s+ndvi.s #spatial
+p_os.s #+p_dev.s+p_dos.s+p_farm.s+p_for.s+p_ind.s #land use
#+aod*Dust #interactions
+(1+aod|day/reg_num)) #+(1|stn) !!! stn screws up mod3
#full fit
m1.fit.2006 <- lmer(m1.formula,data=m1.2006,weights=normwt)
# In-sample prediction and overall fit statistics for the results table.
m1.2006$pred.m1 <- predict(m1.fit.2006)
res[res$year=="2006", 'm1.R2'] <- print(summary(lm(PM10~pred.m1,data=m1.2006))$r.squared)
#RMSPE
res[res$year=="2006", 'm1.PE'] <- print(rmse(residuals(m1.fit.2006)))
#spatial
###to check
# Spatial skill: station long-term mean observed vs predicted PM10.
spatial2006<-m1.2006 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2006.spat<- lm(barpm ~ barpred, data=spatial2006)
res[res$year=="2006", 'm1.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2006))$r.squared)
res[res$year=="2006", 'm1.PE.s'] <- print(rmse(residuals(m1.fit.2006.spat)))
#temporal
# Temporal skill: daily deviations from each station's long-term mean.
tempo2006<-left_join(m1.2006,spatial2006)
tempo2006$delpm <-tempo2006$PM10-tempo2006$barpm
tempo2006$delpred <-tempo2006$pred.m1-tempo2006$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2006)
res[res$year=="2006", 'm1.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2006))$r.squared)
saveRDS(m1.2006,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2006.pred.rds")
#---------------->>>> CV
# Ten-round random-split cross-validation of the 2006 stage-1 model. Each
# round: split, refit the lmer on the training half, predict the held-out
# rows (allow.new.levels so unseen day/region levels fall back to population
# estimates). splitdf() is called ten times in the same order as the original
# unrolled code, so the RNG stream -- and hence the splits -- is unchanged.
# Temp names match the "train_|test_" pattern used by the cleanup rm() later.
cv_test_parts <- vector("list", 10)
for (k in seq_len(10)) {
  split_k <- splitdf(m1.2006)
  test_k <- split_k$testset
  train_k <- split_k$trainset
  out_train_k <- lmer(m1.formula, data = train_k, weights = normwt)
  test_k$pred.m1.cv <- predict(object = out_train_k, newdata = test_k, allow.new.levels = TRUE, re.form = NULL)
  test_k$iter <- paste0("s", k)
  cv_test_parts[[k]] <- test_k
}
#BIND 1 dataset
m1.2006.cv <- data.table(do.call(rbind, cv_test_parts))
# cleanup (remove from WS) objects from CV
rm(list = ls(pattern = "train_|test_"))
# Overall CV skill: regress observed PM10 on the held-out predictions; record
# R2, intercept/slope (with SEs) and RMSPE. The summary is computed once and
# reused instead of refitting the identical lm() for every statistic.
m1.fit.2006.cv <- lm(PM10 ~ pred.m1.cv, data = m1.2006.cv)
sum.2006.cv <- summary(m1.fit.2006.cv)
res[res$year == "2006", "m1cv.R2"] <- print(sum.2006.cv$r.squared)
res[res$year == "2006", "m1cv.I"] <- print(sum.2006.cv$coef[1, 1])
res[res$year == "2006", "m1cv.I.se"] <- print(sum.2006.cv$coef[1, 2])
res[res$year == "2006", "m1cv.S"] <- print(sum.2006.cv$coef[2, 1])
res[res$year == "2006", "m1cv.S.se"] <- print(sum.2006.cv$coef[2, 2])
#RMSPE
res[res$year == "2006", "m1cv.PE"] <- print(rmse(residuals(m1.fit.2006.cv)))
#spatial
# Station-level (spatial) CV skill.
# BUG FIX: the original averaged pred.m1 (the full-model in-sample prediction
# carried over from m1.2006) instead of the held-out pred.m1.cv, which both
# inflated the spatial CV R2 and made barpred inconsistent with the temporal
# step below (which subtracts it from pred.m1.cv).
spatial2006.cv<-m1.2006.cv %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.cv, na.rm=TRUE))
m1.fit.2006.cv.s <- lm(barpm ~ barpred, data=spatial2006.cv)
res[res$year=="2006", 'm1cv.R2.s'] <- print(summary(m1.fit.2006.cv.s)$r.squared)
res[res$year=="2006", 'm1cv.PE.s'] <- print(rmse(residuals(m1.fit.2006.cv.s)))
#temporal
# Temporal CV skill: daily deviations from station means.
tempo2006.cv<-left_join(m1.2006.cv,spatial2006.cv)
tempo2006.cv$delpm <-tempo2006.cv$PM10-tempo2006.cv$barpm
tempo2006.cv$delpred <-tempo2006.cv$pred.m1.cv-tempo2006.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempo2006.cv)
res[res$year=="2006", 'm1cv.R2.t'] <- print(summary(mod_temporal.cv)$r.squared)
#-------->>> loc stage
# Local (50 m) land-use stage: attach station-level local covariates to the
# CV predictions and form the stage-1 CV residual for the local GAM below.
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
# Rename to avoid clashing with the grid-scale tden/elev columns already
# present in the CV data.
setnames(luf,"tden","loc.tden")
setnames(luf,"elev50","loc.elev")
#add 50m LU to CV data
setkey(m1.2006.cv,stn)
setkey(luf,stn)
m1.2006.cv.loc <- merge(m1.2006.cv, luf, all.x = T)
#m1.2006.cv.loc<-na.omit(m1.2006.cv.loc)
# Stage-1 CV residual: observed PM10 minus the held-out (CV) prediction.
m1.2006.cv.loc$res.m1<-m1.2006.cv.loc$PM10-m1.2006.cv.loc$pred.m1.cv
# Local-stage GAM for 2006 CV: model the stage-1 CV residuals with smooths of
# local traffic density, PBL/wind interactions, local land use, elevation and
# distance terms, then add the GAM prediction back onto the CV prediction.
gam.out<-gam(res.m1~s(loc.tden)+s(tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m1.2006.cv.loc)
## reg
m1.2006.cv.loc$pred.m1.loc <- predict(gam.out)
# Combined prediction = held-out stage-1 prediction + local GAM correction.
m1.2006.cv.loc$pred.m1.both <- m1.2006.cv.loc$pred.m1.cv + m1.2006.cv.loc$pred.m1.loc
# Fit the evaluation regression once and reuse it (the original refitted the
# identical lm() five times to pull out individual statistics).
m1.fit.2006.cv.loc <- lm(PM10 ~ pred.m1.both, data = m1.2006.cv.loc)
sum.2006.cv.loc <- summary(m1.fit.2006.cv.loc)
res[res$year=="2006", 'm1cv.loc.R2'] <- print(sum.2006.cv.loc$r.squared)
res[res$year=="2006", 'm1cv.loc.I'] <- print(sum.2006.cv.loc$coef[1,1])
res[res$year=="2006", 'm1cv.loc.I.se'] <- print(sum.2006.cv.loc$coef[1,2])
res[res$year=="2006", 'm1cv.loc.S'] <- print(sum.2006.cv.loc$coef[2,1])
res[res$year=="2006", 'm1cv.loc.S.se'] <- print(sum.2006.cv.loc$coef[2,2])
#RMSPE
# BUG FIX: the original stored rmse(residuals(m1.fit.2006.cv)) here, i.e. the
# RMSPE of the *stage-1* CV model, so 'm1cv.loc.PE' never reflected the local
# stage. Use the residuals of the combined (CV + local GAM) model instead.
res[res$year=="2006", 'm1cv.loc.PE'] <- print(rmse(residuals(m1.fit.2006.cv.loc)))
#spatial
# Station-level (spatial) skill of the combined local-stage prediction.
# BUG FIX: the original averaged pred.m1 (the full-model in-sample prediction)
# instead of pred.m1.both, so the 'loc' spatial R2/PE never used the local GAM
# stage and was inconsistent with the temporal step below (which uses
# pred.m1.both).
spatial2006.cv.loc<-m1.2006.cv.loc %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.both, na.rm=TRUE))
m1.fit.2006.cv.loc.s <- lm(barpm ~ barpred, data=spatial2006.cv.loc)
res[res$year=="2006", 'm1cv.loc.R2.s'] <- print(summary(m1.fit.2006.cv.loc.s)$r.squared)
res[res$year=="2006", 'm1cv.loc.PE.s'] <- print(rmse(residuals(m1.fit.2006.cv.loc.s)))
# Temporal skill of the combined (CV + local GAM) prediction, then persist the
# results table and the CV dataset as a midpoint checkpoint.
tempo2006.loc.cv <- left_join(m1.2006.cv.loc, spatial2006.cv.loc)
tempo2006.loc.cv$delpm <- tempo2006.loc.cv$PM10 - tempo2006.loc.cv$barpm
tempo2006.loc.cv$delpred <- tempo2006.loc.cv$pred.m1.both - tempo2006.loc.cv$barpred
mod_temporal.loc.cv <- lm(delpm ~ delpred, data = tempo2006.loc.cv)
res[res$year == "2006", "m1cv.loc.R2.t"] <- print(summary(mod_temporal.loc.cv)$r.squared)
# Midpoint saves: per-year results file, rolling all-years file, CV data.
work.dir <- "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR"
saveRDS(res, file.path(work.dir, "res.AQ.2006.rds"))
saveRDS(res, file.path(work.dir, "resALL.AQ.rds"))
saveRDS(m1.2006.cv.loc, file.path(work.dir, "mod1.AQ.2006.predCV.rds"))
###############
#MOD2
###############
# Stage 2 for 2006: predict PM10 over the full AOD grid with the stage-1
# mixed model (m1.fit.2006), using the same standardized covariates.
m2.2006 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2006.rds")
# Standardize every covariate used in m1.formula; one .s column per variable,
# created in the same order as the original unrolled assignments.
vars.to.scale <- c("elev", "tden", "pden", "dist2A1", "dist2water",
                   "dist2rail", "Dist2road", "ndvi", "MeanPbl", "p_ind",
                   "p_for", "p_farm", "p_dos", "p_dev", "p_os", "tempa",
                   "WDa", "WSa", "RHa", "Raina", "NO2a")
for (v in vars.to.scale) {
  m2.2006[, (paste0(v, ".s")) := scale(get(v))]
}
# Predict on the grid; allow.new.levels gives population-level estimates for
# day/region combinations not seen during fitting.
m2.2006[, pred.m2 := predict(object = m1.fit.2006, newdata = m2.2006, allow.new.levels = TRUE, re.form = NULL)]
describe(m2.2006$pred.m2)
# Drop impossible predictions (non-positive or absurdly high).
m2.2006 <- m2.2006[pred.m2 > 0.00000000000001, ]
m2.2006 <- m2.2006[pred.m2 < 1500, ]
saveRDS(m2.2006, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2006.pred2.rds")
#-------------->prepare for mod3
# Bi-monthly period index (1..6) derived from calendar month m; separate
# spatial residual surfaces are fitted per bimon below.
m2.2006[, bimon := (m + 1) %/% 2]
setkey(m2.2006,day, aodid)
# meanPM10 (regional mean PM) is the stage-3 predictor; drop rows lacking it.
m2.2006<-m2.2006[!is.na(meanPM10)]
#run the lmer part regressing stage 2 pred Vs mean pm
#in israel check per month, also check 30km band and other methods for meanpm
m2.smooth = lme(pred.m2 ~ meanPM10,random = list(aodid= ~1 + meanPM10),control=lmeControl(opt = "optim"), data= m2.2006 )
#xm2.smooth = lmer(pred.m2 ~ meanPM10+(1+ meanPM10|aodid), data= m2.2006 )
#correlate to see everything from mod2 and the mpm works
m2.2006[, pred.t31 := predict(m2.smooth)]
m2.2006[, resid := residuals(m2.smooth)]
res[res$year=="2006", 'm3.t31'] <- print(summary(lm(pred.m2~pred.t31,data=m2.2006))$r.squared)
#split the files to the separate bi monthly datsets
T2006_bimon1 <- subset(m2.2006 ,m2.2006$bimon == "1")
T2006_bimon2 <- subset(m2.2006 ,m2.2006$bimon == "2")
T2006_bimon3 <- subset(m2.2006 ,m2.2006$bimon == "3")
T2006_bimon4 <- subset(m2.2006 ,m2.2006$bimon == "4")
T2006_bimon5 <- subset(m2.2006 ,m2.2006$bimon == "5")
T2006_bimon6 <- subset(m2.2006 ,m2.2006$bimon == "6")
#run the separate splines (smooth) for x and y for each bimon
#whats the default band (distance) that the spline goes out and uses
fit2_1 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2006_bimon1 )
fit2_2 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2006_bimon2 )
fit2_3 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2006_bimon3 )
fit2_4 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2006_bimon4 )
fit2_5 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2006_bimon5 )
fit2_6 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2006_bimon6 )
#get the predicted-fitted
Xpred_1 <- (T2006_bimon1$pred.t31 - fit2_1$fitted)
Xpred_2 <- (T2006_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (T2006_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (T2006_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (T2006_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (T2006_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file
# NOTE(review): this concatenation assumes rows of m2.2006 sorted by
# (day, aodid) are also grouped by bimon in order 1..6 -- that holds only if
# 'day' sorts chronologically within the year; confirm upstream.
m2.2006$pred.t32 <- c( Xpred_1,Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.2006,day, aodid)
#rerun the lme on the predictions including the spatial spline (smooth)
Final_pred_2006 <- lme(pred.t32 ~ meanPM10 ,random = list(aodid= ~1 + meanPM10 ),control=lmeControl(opt = "optim"),data= m2.2006 )
m2.2006[, pred.t33 := predict(Final_pred_2006)]
#check correlations
res[res$year=="2006", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.2006))$r.squared)
#------------------------>>>
#import mod3
# Stage 3: gap-fill grid-days without AOD using Final_pred_2006 (regional mean
# PM mixed model) plus the bi-monthly spatial residual surfaces (fit2_1..6).
data.m3 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2006.rds")
#for PM10
# NOTE(review): column selection by numeric position is fragile -- confirm
# these indices still match the saved mod3 file's layout.
data.m3 <- data.m3[,c(1,2,5,29:32,52,53),with=FALSE]
data.m3[, bimon := (m + 1) %/% 2]
setkey(data.m3,day, aodid)
data.m3<-data.m3[!is.na(meanPM10)]
#generate m.3 initial pred
data.m3$pred.m3.mix <- predict(Final_pred_2006,data.m3)
#create unique grid
# One row per AOD cell with its (averaged, effectively constant) coordinates.
ugrid <-data.m3 %>%
group_by(aodid) %>%
summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE))
#### PREDICT Gam part
#split back into bimons to include the gam prediction in final prediction
data.m3_bimon1 <- data.m3[bimon == 1, ]
data.m3_bimon2 <- data.m3[bimon == 2, ]
data.m3_bimon3 <- data.m3[bimon == 3, ]
data.m3_bimon4 <- data.m3[bimon == 4, ]
data.m3_bimon5 <- data.m3[bimon == 5, ]
data.m3_bimon6 <- data.m3[bimon == 6, ]
#addin unique grid to each bimon
uniq_gid_bimon1 <- ugrid
uniq_gid_bimon2 <- ugrid
uniq_gid_bimon3 <- ugrid
uniq_gid_bimon4 <- ugrid
uniq_gid_bimon5 <- ugrid
uniq_gid_bimon6 <- ugrid
#get predictions for Bimon residuals
# Evaluate each bi-monthly residual surface (fitted in the mod2 section) at
# every unique grid cell.
uniq_gid_bimon1$gpred <- predict.gam(fit2_1,uniq_gid_bimon1)
uniq_gid_bimon2$gpred <- predict.gam(fit2_2,uniq_gid_bimon2)
uniq_gid_bimon3$gpred <- predict.gam(fit2_3,uniq_gid_bimon3)
uniq_gid_bimon4$gpred <- predict.gam(fit2_4,uniq_gid_bimon4)
uniq_gid_bimon5$gpred <- predict.gam(fit2_5,uniq_gid_bimon5)
uniq_gid_bimon6$gpred <- predict.gam(fit2_6,uniq_gid_bimon6)
#merge things back togheter
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> merges
setkey(uniq_gid_bimon1,aodid)
setkey(data.m3_bimon1,aodid)
data.m3_bimon1 <- merge(data.m3_bimon1, uniq_gid_bimon1[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon2,aodid)
setkey(data.m3_bimon2,aodid)
data.m3_bimon2 <- merge(data.m3_bimon2, uniq_gid_bimon2[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon3,aodid)
setkey(data.m3_bimon3,aodid)
data.m3_bimon3 <- merge(data.m3_bimon3, uniq_gid_bimon3[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon4,aodid)
setkey(data.m3_bimon4,aodid)
data.m3_bimon4 <- merge(data.m3_bimon4, uniq_gid_bimon4[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon5,aodid)
setkey(data.m3_bimon5,aodid)
data.m3_bimon5 <- merge(data.m3_bimon5, uniq_gid_bimon5[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon6,aodid)
setkey(data.m3_bimon6,aodid)
data.m3_bimon6 <- merge(data.m3_bimon6, uniq_gid_bimon6[,list(aodid,gpred)], all.x = T)
#reattach all parts
mod3 <- rbind(data.m3_bimon1,data.m3_bimon2,data.m3_bimon3,data.m3_bimon4,data.m3_bimon5,data.m3_bimon6)
# create pred.m3
# Final stage-3 prediction = mixed-model part + spatial residual surface.
mod3$pred.m3 <-mod3$pred.m3.mix+mod3$gpred
#hist(mod3$pred.m3)
#describe(mod3$pred.m3)
# Drop negative predictions. NOTE(review): rows are filtered out, not recoded
# to zero as the original comment claimed.
mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2006.pred.rds")
#clean
# Free the workspace, keeping only mod3, the results table and rmse().
keep(mod3,res,rmse, sure=TRUE)
gc()
#########################
#prepare for m3.R2
#########################
#load mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2006.pred.rds")
mod1[,aodid:= paste(mod1$long_aod,mod1$lat_aod,sep="-")]
mod1<-mod1[,c("aodid","day","PM10","pred.m1","stn"),with=FALSE]
#R2.m3
# Evaluate mod3 against monitored PM10 by joining the gap-filled predictions
# onto the station records.
setkey(mod3,day,aodid)
setkey(mod1,day,aodid)
m1.2006 <- merge(mod1,mod3[, list(day,aodid,pred.m3)], all.x = T)
# m3.fit.2006 is a summary.lm object; residuals() below extracts $residuals.
m3.fit.2006<- summary(lm(PM10~pred.m3,data=m1.2006))
res[res$year=="2006", 'm3.R2'] <- print(summary(lm(PM10~pred.m3,data=m1.2006))$r.squared)
#RMSPE
res[res$year=="2006", 'm3.PE'] <- print(rmse(residuals(m3.fit.2006)))
#spatial
###to check
# Spatial skill: station long-term mean observed vs predicted PM10.
spatial2006<-m1.2006 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m3, na.rm=TRUE))
m1.fit.2006.spat<- lm(barpm ~ barpred, data=spatial2006)
res[res$year=="2006", 'm3.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2006))$r.squared)
res[res$year=="2006", 'm3.PE.s'] <- print(rmse(residuals(m1.fit.2006.spat)))
#temporal
# Temporal skill: daily deviations from each station's long-term mean.
tempo2006<-left_join(m1.2006,spatial2006)
tempo2006$delpm <-tempo2006$PM10-tempo2006$barpm
tempo2006$delpred <-tempo2006$pred.m3-tempo2006$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2006)
res[res$year=="2006", 'm3.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2006))$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2006.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
#########################
#import mod2
mod2<- readRDS( "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2006.pred2.rds")
mod2<-mod2[,c("aodid","day","pred.m2"),with=FALSE]
#----------------> store the best available
# Best-available prediction per grid-day: start from mod3 (gap-filled), then
# overwrite with mod2 where present, then with mod1 where present -- i.e.
# priority order mod1 > mod2 > mod3.
mod3best <- mod3[, list(aodid, x_aod_ITM, y_aod_ITM, day, pred.m3)]
setkey(mod3best, day, aodid)
setkey(mod2, day, aodid)
mod3best <- merge(mod3best, mod2[,list(aodid, day, pred.m2)], all.x = T)
#reload mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2006.pred.rds")
mod1$aodid<-paste(mod1$long_aod,mod1$lat_aod,sep="-")
mod1<-mod1[,c("aodid","day","PM10","pred.m1"),with=FALSE]
setkey(mod1,day,aodid)
mod3best <- merge(mod3best, mod1, all.x = T)
mod3best[,bestpred := pred.m3]
mod3best[!is.na(pred.m2),bestpred := pred.m2]
mod3best[!is.na(pred.m1),bestpred := pred.m1]
#save
saveRDS(mod3best,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2006.FINAL.rds")
#save for GIS
# Long-term per-cell summary of the best prediction, exported for GIS mapping.
write.csv(mod3best[, list(LTPM = mean(bestpred, na.rm = T),
predvariance = var(bestpred, na.rm = T),
predmin = min(bestpred, na.rm = T),
predmax = max(bestpred, na.rm = T),
npred = sum(!is.na(bestpred)),
npred.m1 = sum(!is.na(pred.m1)),
npred.m2 = sum(!is.na(pred.m2)),
npred.m3 = sum(!is.na(pred.m3)),
x_aod_ITM = x_aod_ITM[1], y_aod_ITM = y_aod_ITM[1]),by=aodid], "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/LTPM3.AQ.2006.csv", row.names = F)
# Reset the workspace for the next year, keeping only the results table.
keep(res, sure=TRUE)
# no-op: stray c() call, presumably left over from editing
c()
###############
#LIBS
###############
# Start of the 2007 section. Packages are re-attached after the keep() reset
# (keep() removes objects, not packages, but the script reloads defensively).
library(lme4);library(reshape);library(foreign) ;library(ggplot2);library(plyr);library(data.table);library(reshape2);library(Hmisc);library(mgcv);library(gdata);library(car);library(dplyr);library(ggmap);library(broom);library(splines)
#sourcing
# Helper snippets: presumably define splitdf() (CV splitting) and rmse() used
# throughout -- confirm against the sourced files.
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
#if needed load res table
#res<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
### import data
m1.2007 <-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2007.rds")
################# clean BAD STN PM10 and check if improved model?
# Per-station screening: regress PM10 on AOD for each monitor; stations whose
# raw PM10~aod R2 falls below 0.01 are treated as unreliable and removed.
raWDaf <- ddply(m1.2007, c( "stn"),
function(x) {
mod1 <- lm(PM10 ~ aod, data=x)
data.frame(R2 = round(summary(mod1)$r.squared, 5),
nsamps = length(summary(mod1)$resid))
})
raWDaf
raWDaf<-as.data.table(raWDaf)
bad<- raWDaf[R2< 0.01]
# paste(stn, sep = "-") with a single argument is effectively as.character(stn).
bad[,badid := paste(stn,sep="-")]
#################BAD STN
m1.2007[,badid := paste(stn,sep="-")]
####Take out bad stations
m1.2007 <- m1.2007[!(m1.2007$badid %in% bad$badid), ]
#scale vars
# z-score each covariate into a matching "<name>.s" column — same 21
# columns as the original one-assignment-per-variable version, done in a
# single data.table assignment.
sc.vars <- c("elev", "tden", "pden", "dist2A1", "dist2water", "dist2rail",
             "Dist2road", "ndvi", "MeanPbl", "p_ind", "p_for", "p_farm",
             "p_dos", "p_dev", "p_os", "tempa", "WDa", "WSa", "RHa",
             "Raina", "NO2a")
m1.2007[, (paste0(sc.vars, ".s")) := lapply(.SD, scale), .SDcols = sc.vars]
# stage-1 mixed model: station PM10 on AOD plus scaled temporal/spatial/
# land-use covariates, with a random intercept+AOD slope nested day/region
m1.formula <- as.formula(PM10~ aod
+tempa.s+WDa.s+WSa.s+Dust+MeanPbl.s #temporal
+elev.s+tden.s+pden.s+Dist2road.s+ndvi.s #spatial
+p_os.s #+p_dev.s+p_dos.s+p_farm.s+p_for.s+p_ind.s #land use
#+aod*Dust #interactions
+(1+aod|day/reg_num)) #+(1|stn) !!! stn screws up mod3
#full fit
# normwt is a weights column in m1.2007; rmse() sourced from rmspe.r
m1.fit.2007 <- lmer(m1.formula,data=m1.2007,weights=normwt)
m1.2007$pred.m1 <- predict(m1.fit.2007)
res[res$year=="2007", 'm1.R2'] <- print(summary(lm(PM10~pred.m1,data=m1.2007))$r.squared)
#RMSPE
res[res$year=="2007", 'm1.PE'] <- print(rmse(residuals(m1.fit.2007)))
#spatial
###to check
# spatial R2: station-level means of observed vs predicted
spatial2007<-m1.2007 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2007.spat<- lm(barpm ~ barpred, data=spatial2007)
res[res$year=="2007", 'm1.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2007))$r.squared)
res[res$year=="2007", 'm1.PE.s'] <- print(rmse(residuals(m1.fit.2007.spat)))
#temporal
# temporal R2: daily deviations from the station means
tempo2007<-left_join(m1.2007,spatial2007)
tempo2007$delpm <-tempo2007$PM10-tempo2007$barpm
tempo2007$delpred <-tempo2007$pred.m1-tempo2007$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2007)
res[res$year=="2007", 'm1.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2007))$r.squared)
saveRDS(m1.2007,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2007.pred.rds")
#---------------->>>> CV
# Repeated random-split cross-validation (10 rounds).  Each round draws a
# fresh train/test partition with splitdf() (sourced from CV_splits.r),
# refits the stage-1 mixed model on the training half, and predicts the
# held-out half.  Collapses the original ten copy-pasted s1..s10 blocks.
cv_round_2007 <- function(i) {
  splits <- splitdf(m1.2007)
  fit <- lmer(m1.formula, data = splits$trainset, weights = normwt)
  tst <- splits$testset
  # re.form = NULL keeps the fitted random effects; allow.new.levels covers
  # day/reg_num combinations absent from the training half
  tst$pred.m1.cv <- predict(object = fit, newdata = tst,
                            allow.new.levels = TRUE, re.form = NULL)
  tst$iter <- paste0("s", i)
  tst
}
#BIND 1 dataset
m1.2007.cv <- data.table(do.call(rbind, lapply(1:10, cv_round_2007)))
# cleanup (remove from WS) objects from CV
rm(list = ls(pattern = "train_|test_"))
#table updates
# regression of observed PM10 on the CV prediction; fit once and reuse the
# summary (the original refit the identical lm five times)
m1.fit.2007.cv<-lm(PM10~pred.m1.cv,data=m1.2007.cv)
sum.2007.cv <- summary(m1.fit.2007.cv)
res[res$year=="2007", 'm1cv.R2'] <- print(sum.2007.cv$r.squared)
res[res$year=="2007", 'm1cv.I'] <- print(sum.2007.cv$coef[1,1])
res[res$year=="2007", 'm1cv.I.se'] <- print(sum.2007.cv$coef[1,2])
res[res$year=="2007", 'm1cv.S'] <- print(sum.2007.cv$coef[2,1])
res[res$year=="2007", 'm1cv.S.se'] <- print(sum.2007.cv$coef[2,2])
#RMSPE
res[res$year=="2007", 'm1cv.PE'] <- print(rmse(residuals(m1.fit.2007.cv)))
#spatial
# FIX: the original averaged pred.m1 (the full-fit, non-CV prediction)
# here; the CV spatial statistics should use the held-out prediction
# pred.m1.cv, as the temporal block below already does.
spatial2007.cv <- m1.2007.cv %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm = TRUE),
            barpred = mean(pred.m1.cv, na.rm = TRUE))
m1.fit.2007.cv.s <- lm(barpm ~ barpred, data = spatial2007.cv)
res[res$year=="2007", 'm1cv.R2.s'] <- print(summary(m1.fit.2007.cv.s)$r.squared)
res[res$year=="2007", 'm1cv.PE.s'] <- print(rmse(residuals(m1.fit.2007.cv.s)))
#temporal
# daily deviations of observation and CV prediction from the station means
tempo2007.cv <- left_join(m1.2007.cv, spatial2007.cv)
tempo2007.cv$delpm <- tempo2007.cv$PM10 - tempo2007.cv$barpm
tempo2007.cv$delpred <- tempo2007.cv$pred.m1.cv - tempo2007.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data = tempo2007.cv)
res[res$year=="2007", 'm1cv.R2.t'] <- print(summary(mod_temporal.cv)$r.squared)
#-------->>> loc stage
# local (50m) land-use refinement: model the CV residual with smooths of
# fine-scale predictors, then add the fitted correction to the prediction
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
setnames(luf,"tden","loc.tden")
setnames(luf,"elev50","loc.elev")
#add 50m LU to CV data
setkey(m1.2007.cv,stn)
setkey(luf,stn)
m1.2007.cv.loc <- merge(m1.2007.cv, luf, all.x = T)
#m1.2007.cv.loc<-na.omit(m1.2007.cv.loc)
#create residual mp3 variable
# stage-1 CV residual to be explained by the local predictors
m1.2007.cv.loc$res.m1<-m1.2007.cv.loc$PM10-m1.2007.cv.loc$pred.m1.cv
#The GAM model
# smooths of local traffic density, PBL/wind interactions, local land use,
# elevation and distances (dA1, dsea presumably road/sea distances — confirm)
gam.out<-gam(res.m1~s(loc.tden)+s(tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m1.2007.cv.loc)
#plot(bp.model.ps)
#summary(bp.model.ps)
## reg
m1.2007.cv.loc$pred.m1.loc <-predict(gam.out)
# combined prediction = CV prediction + local residual correction
m1.2007.cv.loc$pred.m1.both <- m1.2007.cv.loc$pred.m1.cv + m1.2007.cv.loc$pred.m1.loc
# regression of observed PM10 on the combined (CV + local GAM) prediction;
# fit once and reuse the summary (the original refit the identical lm five
# times)
m1.fit.2007.loc <- lm(PM10 ~ pred.m1.both, data = m1.2007.cv.loc)
sum.2007.loc <- summary(m1.fit.2007.loc)
res[res$year=="2007", 'm1cv.loc.R2'] <- print(sum.2007.loc$r.squared)
res[res$year=="2007", 'm1cv.loc.I'] <- print(sum.2007.loc$coef[1,1])
res[res$year=="2007", 'm1cv.loc.I.se'] <- print(sum.2007.loc$coef[1,2])
res[res$year=="2007", 'm1cv.loc.S'] <- print(sum.2007.loc$coef[2,1])
res[res$year=="2007", 'm1cv.loc.S.se'] <- print(sum.2007.loc$coef[2,2])
#RMSPE
# FIX: the original reported rmse(residuals(m1.fit.2007.cv)) here — the
# stage-1 CV error, duplicating m1cv.PE.  Use the loc-stage fit instead.
res[res$year=="2007", 'm1cv.loc.PE'] <- print(rmse(residuals(m1.fit.2007.loc)))
#spatial
# FIX: the original averaged pred.m1 here; the loc-stage spatial
# statistics should use the combined prediction pred.m1.both, as the
# temporal block below already does.
spatial2007.cv.loc <- m1.2007.cv.loc %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm = TRUE),
            barpred = mean(pred.m1.both, na.rm = TRUE))
m1.fit.2007.cv.loc.s <- lm(barpm ~ barpred, data = spatial2007.cv.loc)
res[res$year=="2007", 'm1cv.loc.R2.s'] <- print(summary(m1.fit.2007.cv.loc.s)$r.squared)
res[res$year=="2007", 'm1cv.loc.PE.s'] <- print(rmse(residuals(m1.fit.2007.cv.loc.s)))
#temporal
tempo2007.loc.cv <- left_join(m1.2007.cv.loc, spatial2007.cv.loc)
tempo2007.loc.cv$delpm <- tempo2007.loc.cv$PM10 - tempo2007.loc.cv$barpm
tempo2007.loc.cv$delpred <- tempo2007.loc.cv$pred.m1.both - tempo2007.loc.cv$barpred
mod_temporal.loc.cv <- lm(delpm ~ delpred, data = tempo2007.loc.cv)
res[res$year=="2007", 'm1cv.loc.R2.t'] <- print(summary(mod_temporal.loc.cv)$r.squared)
#############save midpoint
# res is written twice: a per-year snapshot and the rolling all-years table
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2007.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
saveRDS(m1.2007.cv.loc,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2007.predCV.rds")
###############
#MOD2
###############
m2.2007<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2007.rds")
# z-score the same covariate set used for the stage-1 model so the fitted
# coefficients can be applied on the full grid-day dataset (same 21
# columns as the original one-assignment-per-variable version)
sc.vars.m2 <- c("elev", "tden", "pden", "dist2A1", "dist2water",
                "dist2rail", "Dist2road", "ndvi", "MeanPbl", "p_ind",
                "p_for", "p_farm", "p_dos", "p_dev", "p_os", "tempa",
                "WDa", "WSa", "RHa", "Raina", "NO2a")
m2.2007[, (paste0(sc.vars.m2, ".s")) := lapply(.SD, scale), .SDcols = sc.vars.m2]
#generate predictions
# stage-2: apply the fitted stage-1 mixed model to the full AOD grid-day
# dataset (re.form = NULL keeps the random effects)
m2.2007[, pred.m2 := predict(object=m1.fit.2007,newdata=m2.2007,allow.new.levels=TRUE,re.form=NULL)]
describe(m2.2007$pred.m2)
#delete impossible values
m2.2007 <- m2.2007[pred.m2 > 0.00000000000001 , ]
m2.2007 <- m2.2007[pred.m2 < 1500 , ]
saveRDS(m2.2007,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2007.pred2.rds")
#-------------->prepare for mod3
# bi-monthly period index 1..6 derived from month m
m2.2007[, bimon := (m + 1) %/% 2]
setkey(m2.2007,day, aodid)
m2.2007<-m2.2007[!is.na(meanPM10)]
#run the lmer part regressing stage 2 pred Vs mean pm
#in israel check per month, also check 30km band and other methods for meanpm
# lme comes from nlme (loaded as a dependency of mgcv)
m2.smooth = lme(pred.m2 ~ meanPM10,random = list(aodid= ~1 + meanPM10),control=lmeControl(opt = "optim"), data= m2.2007 )
#xm2.smooth = lmer(pred.m2 ~ meanPM10+(1+ meanPM10|aodid), data= m2.2007 )
#correlate to see everything from mod2 and the mpm works
m2.2007[, pred.t31 := predict(m2.smooth)]
m2.2007[, resid := residuals(m2.smooth)]
res[res$year=="2007", 'm3.t31'] <- print(summary(lm(pred.m2~pred.t31,data=m2.2007))$r.squared)
#split the files to the separate bi monthly datsets
T2007_bimon1 <- subset(m2.2007 ,m2.2007$bimon == "1")
T2007_bimon2 <- subset(m2.2007 ,m2.2007$bimon == "2")
T2007_bimon3 <- subset(m2.2007 ,m2.2007$bimon == "3")
T2007_bimon4 <- subset(m2.2007 ,m2.2007$bimon == "4")
T2007_bimon5 <- subset(m2.2007 ,m2.2007$bimon == "5")
T2007_bimon6 <- subset(m2.2007 ,m2.2007$bimon == "6")
#run the separate splines (smooth) for x and y for each bimon
#whats the default band (distance) that the spline goes out and uses
# fit2_1..fit2_6 are reused later to predict the residual surface on the
# full mod3 grid — do not remove
fit2_1 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2007_bimon1 )
fit2_2 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2007_bimon2 )
fit2_3 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2007_bimon3 )
fit2_4 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2007_bimon4 )
fit2_5 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2007_bimon5 )
fit2_6 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2007_bimon6 )
#get the predicted-fitted
Xpred_1 <- (T2007_bimon1$pred.t31 - fit2_1$fitted)
Xpred_2 <- (T2007_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (T2007_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (T2007_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (T2007_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (T2007_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file
# NOTE(review): positional assignment — this relies on m2.2007 being keyed
# by (day, aodid) so that rows appear grouped by bimon in 1..6 order
# (assumes `day` sorts chronologically within the year — confirm)
m2.2007$pred.t32 <- c( Xpred_1,Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.2007,day, aodid)
#rerun the lme on the predictions including the spatial spline (smooth)
Final_pred_2007 <- lme(pred.t32 ~ meanPM10 ,random = list(aodid= ~1 + meanPM10 ),control=lmeControl(opt = "optim"),data= m2.2007 )
m2.2007[, pred.t33 := predict(Final_pred_2007)]
#check correlations
res[res$year=="2007", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.2007))$r.squared)
#------------------------>>>
#import mod3
data.m3 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2007.rds")
#for PM10
# column subset by position — assumes a fixed column layout of the mod3
# rds (presumably ids/day, coordinates 29:32, meanPM10 etc. — confirm
# against the file that builds mod3.AQ.2007.rds)
data.m3 <- data.m3[,c(1,2,5,29:32,52,53),with=FALSE]
data.m3[, bimon := (m + 1) %/% 2]
setkey(data.m3,day, aodid)
data.m3<-data.m3[!is.na(meanPM10)]
#generate m.3 initial pred
# mixed-model part of the stage-3 prediction on the full grid
data.m3$pred.m3.mix <- predict(Final_pred_2007,data.m3)
#create unique grid
# one row per aodid with its mean coordinates, used for GAM prediction
ugrid <-data.m3 %>%
group_by(aodid) %>%
summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE))
#### PREDICT Gam part
# For each bi-month b: take that period's rows, predict the bi-monthly
# spatial residual surface fit2_<b> (fitted above on m2.2007) over the
# unique aodid grid, and key-merge the grid-level gpred back onto the
# rows.  Replaces the original six copy-pasted split/predict/merge
# sequences.
bimon_parts <- lapply(1:6, function(b) {
  dpart <- data.m3[bimon == b, ]
  # fresh grid copy per bi-month; $<- copies, so ugrid is untouched
  gpart <- ugrid
  gpart$gpred <- predict.gam(get(paste0("fit2_", b)), gpart)
  setkey(gpart, aodid)
  setkey(dpart, aodid)
  merge(dpart, gpart[, list(aodid, gpred)], all.x = T)
})
#reattach all parts
mod3 <- do.call(rbind, bimon_parts)
# create pred.m3
# final stage-3 prediction = mixed-model mean part + bi-monthly GAM surface
mod3$pred.m3 <-mod3$pred.m3.mix+mod3$gpred
#hist(mod3$pred.m3)
#describe(mod3$pred.m3)
# drop negative predictions (NOTE: rows are removed here, not recoded to
# zero as the original comment claimed)
mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2007.pred.rds")
#clean
# keep() (gdata) drops everything from the workspace except mod3, res, rmse
keep(mod3,res,rmse, sure=TRUE)
gc()
#########################
#prepare for m3.R2
#########################
#load mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2007.pred.rds")
mod1[,aodid:= paste(mod1$long_aod,mod1$lat_aod,sep="-")]
mod1<-mod1[,c("aodid","day","PM10","pred.m1","stn"),with=FALSE]
#R2.m3
# merge stage-3 predictions onto the station observations by (day, aodid)
setkey(mod3,day,aodid)
setkey(mod1,day,aodid)
m1.2007 <- merge(mod1,mod3[, list(day,aodid,pred.m3)], all.x = T)
# NOTE: m3.fit.2007 is a summary.lm object; residuals() below extracts its
# $residuals component
m3.fit.2007<- summary(lm(PM10~pred.m3,data=m1.2007))
res[res$year=="2007", 'm3.R2'] <- print(summary(lm(PM10~pred.m3,data=m1.2007))$r.squared)
#RMSPE
res[res$year=="2007", 'm3.PE'] <- print(rmse(residuals(m3.fit.2007)))
#spatial
###to check
# spatial R2: station-level means of observed vs stage-3 predicted
spatial2007<-m1.2007 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m3, na.rm=TRUE))
m1.fit.2007.spat<- lm(barpm ~ barpred, data=spatial2007)
res[res$year=="2007", 'm3.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2007))$r.squared)
res[res$year=="2007", 'm3.PE.s'] <- print(rmse(residuals(m1.fit.2007.spat)))
#temporal
# temporal R2: daily deviations from the station means
tempo2007<-left_join(m1.2007,spatial2007)
tempo2007$delpm <-tempo2007$PM10-tempo2007$barpm
tempo2007$delpred <-tempo2007$pred.m3-tempo2007$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2007)
res[res$year=="2007", 'm3.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2007))$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2007.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
#########################
#import mod2
mod2<- readRDS( "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2007.pred2.rds")
mod2<-mod2[,c("aodid","day","pred.m2"),with=FALSE]
#----------------> store the best available
# per (aodid, day): stage-3 everywhere, overridden by stage-2 where
# available, overridden by stage-1 where a monitor-matched prediction exists
mod3best <- mod3[, list(aodid, x_aod_ITM, y_aod_ITM, day, pred.m3)]
setkey(mod3best, day, aodid)
setkey(mod2, day, aodid)
mod3best <- merge(mod3best, mod2[,list(aodid, day, pred.m2)], all.x = T)
#reload mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2007.pred.rds")
mod1$aodid<-paste(mod1$long_aod,mod1$lat_aod,sep="-")
mod1<-mod1[,c("aodid","day","PM10","pred.m1"),with=FALSE]
setkey(mod1,day,aodid)
mod3best <- merge(mod3best, mod1, all.x = T)
mod3best[,bestpred := pred.m3]
mod3best[!is.na(pred.m2),bestpred := pred.m2]
mod3best[!is.na(pred.m1),bestpred := pred.m1]
#save
saveRDS(mod3best,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2007.FINAL.rds")
#save for GIS
# long-term mean/spread of bestpred per grid cell plus per-stage counts
# and ITM coordinates, for mapping
write.csv(mod3best[, list(LTPM = mean(bestpred, na.rm = T),
                          predvariance = var(bestpred, na.rm = T),
                          predmin = min(bestpred, na.rm = T),
                          predmax = max(bestpred, na.rm = T),
                          npred = sum(!is.na(bestpred)),
                          npred.m1 = sum(!is.na(pred.m1)),
                          npred.m2 = sum(!is.na(pred.m2)),
                          npred.m3 = sum(!is.na(pred.m3)),
                          x_aod_ITM = x_aod_ITM[1], y_aod_ITM = y_aod_ITM[1]),by=aodid], "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/LTPM3.AQ.2007.csv", row.names = F)
# keep() (gdata) wipes the workspace except res; c() is a no-op leftover
keep(res, sure=TRUE)
c()
###############
#LIBS
###############
# 2008 run of the same three-stage pipeline (identical to the 2007 section)
library(lme4);library(reshape);library(foreign) ;library(ggplot2);library(plyr);library(data.table);library(reshape2);library(Hmisc);library(mgcv);library(gdata);library(car);library(dplyr);library(ggmap);library(broom);library(splines)
#sourcing
# splitdf() (random train/test split) and rmse() come from these snippets
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
#if needed load res table
#res<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
### import data
m1.2008 <-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2008.rds")
################# clean BAD STN PM10 and check if improved model?
# per-station univariate PM10 ~ AOD regression; stations with R2 < 0.01
# are flagged as bad monitors and dropped below
raWDaf <- ddply(m1.2008, c( "stn"),
function(x) {
mod1 <- lm(PM10 ~ aod, data=x)
data.frame(R2 = round(summary(mod1)$r.squared, 5),
nsamps = length(summary(mod1)$resid))
})
raWDaf
raWDaf<-as.data.table(raWDaf)
bad<- raWDaf[R2< 0.01]
# paste(stn, sep="-") is effectively as.character(stn); used as the id key
bad[,badid := paste(stn,sep="-")]
#################BAD STN
m1.2008[,badid := paste(stn,sep="-")]
####Take out bad stations
m1.2008 <- m1.2008[!(m1.2008$badid %in% bad$badid), ]
#scale vars
# z-score each covariate into a matching "<name>.s" column — same 21
# columns as the original one-assignment-per-variable version, done in a
# single data.table assignment.
sc.vars.08 <- c("elev", "tden", "pden", "dist2A1", "dist2water",
                "dist2rail", "Dist2road", "ndvi", "MeanPbl", "p_ind",
                "p_for", "p_farm", "p_dos", "p_dev", "p_os", "tempa",
                "WDa", "WSa", "RHa", "Raina", "NO2a")
m1.2008[, (paste0(sc.vars.08, ".s")) := lapply(.SD, scale), .SDcols = sc.vars.08]
# stage-1 mixed model: station PM10 on AOD plus scaled temporal/spatial/
# land-use covariates, with a random intercept+AOD slope nested day/region
m1.formula <- as.formula(PM10~ aod
+tempa.s+WDa.s+WSa.s+Dust+MeanPbl.s #temporal
+elev.s+tden.s+pden.s+Dist2road.s+ndvi.s #spatial
+p_os.s #+p_dev.s+p_dos.s+p_farm.s+p_for.s+p_ind.s #land use
#+aod*Dust #interactions
+(1+aod|day/reg_num)) #+(1|stn) !!! stn screws up mod3
#full fit
# normwt is a weights column in m1.2008; rmse() sourced from rmspe.r
m1.fit.2008 <- lmer(m1.formula,data=m1.2008,weights=normwt)
m1.2008$pred.m1 <- predict(m1.fit.2008)
res[res$year=="2008", 'm1.R2'] <- print(summary(lm(PM10~pred.m1,data=m1.2008))$r.squared)
#RMSPE
res[res$year=="2008", 'm1.PE'] <- print(rmse(residuals(m1.fit.2008)))
#spatial
###to check
# spatial R2: station-level means of observed vs predicted
spatial2008<-m1.2008 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2008.spat<- lm(barpm ~ barpred, data=spatial2008)
res[res$year=="2008", 'm1.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2008))$r.squared)
res[res$year=="2008", 'm1.PE.s'] <- print(rmse(residuals(m1.fit.2008.spat)))
#temporal
# temporal R2: daily deviations from the station means
tempo2008<-left_join(m1.2008,spatial2008)
tempo2008$delpm <-tempo2008$PM10-tempo2008$barpm
tempo2008$delpred <-tempo2008$pred.m1-tempo2008$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2008)
res[res$year=="2008", 'm1.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2008))$r.squared)
saveRDS(m1.2008,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2008.pred.rds")
#---------------->>>> CV
# Repeated random-split cross-validation (10 rounds).  Each round draws a
# fresh train/test partition with splitdf() (sourced from CV_splits.r),
# refits the stage-1 mixed model on the training half, and predicts the
# held-out half.  Collapses the original ten copy-pasted s1..s10 blocks.
cv_round_2008 <- function(i) {
  splits <- splitdf(m1.2008)
  fit <- lmer(m1.formula, data = splits$trainset, weights = normwt)
  tst <- splits$testset
  # re.form = NULL keeps the fitted random effects; allow.new.levels covers
  # day/reg_num combinations absent from the training half
  tst$pred.m1.cv <- predict(object = fit, newdata = tst,
                            allow.new.levels = TRUE, re.form = NULL)
  tst$iter <- paste0("s", i)
  tst
}
#BIND 1 dataset
m1.2008.cv <- data.table(do.call(rbind, lapply(1:10, cv_round_2008)))
# cleanup (remove from WS) objects from CV
rm(list = ls(pattern = "train_|test_"))
#table updates
# regression of observed PM10 on the CV prediction; fit once and reuse the
# summary (the original refit the identical lm five times)
m1.fit.2008.cv<-lm(PM10~pred.m1.cv,data=m1.2008.cv)
sum.2008.cv <- summary(m1.fit.2008.cv)
res[res$year=="2008", 'm1cv.R2'] <- print(sum.2008.cv$r.squared)
res[res$year=="2008", 'm1cv.I'] <- print(sum.2008.cv$coef[1,1])
res[res$year=="2008", 'm1cv.I.se'] <- print(sum.2008.cv$coef[1,2])
res[res$year=="2008", 'm1cv.S'] <- print(sum.2008.cv$coef[2,1])
res[res$year=="2008", 'm1cv.S.se'] <- print(sum.2008.cv$coef[2,2])
#RMSPE
res[res$year=="2008", 'm1cv.PE'] <- print(rmse(residuals(m1.fit.2008.cv)))
#spatial
# FIX: the original averaged pred.m1 (the full-fit, non-CV prediction)
# here; the CV spatial statistics should use the held-out prediction
# pred.m1.cv, as the temporal block below already does.
spatial2008.cv <- m1.2008.cv %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm = TRUE),
            barpred = mean(pred.m1.cv, na.rm = TRUE))
m1.fit.2008.cv.s <- lm(barpm ~ barpred, data = spatial2008.cv)
res[res$year=="2008", 'm1cv.R2.s'] <- print(summary(m1.fit.2008.cv.s)$r.squared)
res[res$year=="2008", 'm1cv.PE.s'] <- print(rmse(residuals(m1.fit.2008.cv.s)))
#temporal
# daily deviations of observation and CV prediction from the station means
tempo2008.cv <- left_join(m1.2008.cv, spatial2008.cv)
tempo2008.cv$delpm <- tempo2008.cv$PM10 - tempo2008.cv$barpm
tempo2008.cv$delpred <- tempo2008.cv$pred.m1.cv - tempo2008.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data = tempo2008.cv)
res[res$year=="2008", 'm1cv.R2.t'] <- print(summary(mod_temporal.cv)$r.squared)
#-------->>> loc stage
# local (50m) land-use refinement: model the CV residual with smooths of
# fine-scale predictors, then add the fitted correction to the prediction
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
setnames(luf,"tden","loc.tden")
setnames(luf,"elev50","loc.elev")
#add 50m LU to CV data
setkey(m1.2008.cv,stn)
setkey(luf,stn)
m1.2008.cv.loc <- merge(m1.2008.cv, luf, all.x = T)
#m1.2008.cv.loc<-na.omit(m1.2008.cv.loc)
#create residual mp3 variable
# stage-1 CV residual to be explained by the local predictors
m1.2008.cv.loc$res.m1<-m1.2008.cv.loc$PM10-m1.2008.cv.loc$pred.m1.cv
#The GAM model
# smooths of local traffic density, PBL/wind interactions, local land use,
# elevation and distances (dA1, dsea presumably road/sea distances — confirm)
gam.out<-gam(res.m1~s(loc.tden)+s(tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m1.2008.cv.loc)
#plot(bp.model.ps)
#summary(bp.model.ps)
## reg
m1.2008.cv.loc$pred.m1.loc <-predict(gam.out)
# combined prediction = CV prediction + local residual correction
m1.2008.cv.loc$pred.m1.both <- m1.2008.cv.loc$pred.m1.cv + m1.2008.cv.loc$pred.m1.loc
# regression of observed PM10 on the combined (CV + local GAM) prediction;
# fit once and reuse the summary (the original refit the identical lm five
# times)
m1.fit.2008.loc <- lm(PM10 ~ pred.m1.both, data = m1.2008.cv.loc)
sum.2008.loc <- summary(m1.fit.2008.loc)
res[res$year=="2008", 'm1cv.loc.R2'] <- print(sum.2008.loc$r.squared)
res[res$year=="2008", 'm1cv.loc.I'] <- print(sum.2008.loc$coef[1,1])
res[res$year=="2008", 'm1cv.loc.I.se'] <- print(sum.2008.loc$coef[1,2])
res[res$year=="2008", 'm1cv.loc.S'] <- print(sum.2008.loc$coef[2,1])
res[res$year=="2008", 'm1cv.loc.S.se'] <- print(sum.2008.loc$coef[2,2])
#RMSPE
# FIX: the original reported rmse(residuals(m1.fit.2008.cv)) here — the
# stage-1 CV error, duplicating m1cv.PE.  Use the loc-stage fit instead.
res[res$year=="2008", 'm1cv.loc.PE'] <- print(rmse(residuals(m1.fit.2008.loc)))
#spatial
# FIX: the original averaged pred.m1 here; the loc-stage spatial
# statistics should use the combined prediction pred.m1.both, as the
# temporal block below already does.
spatial2008.cv.loc <- m1.2008.cv.loc %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm = TRUE),
            barpred = mean(pred.m1.both, na.rm = TRUE))
m1.fit.2008.cv.loc.s <- lm(barpm ~ barpred, data = spatial2008.cv.loc)
res[res$year=="2008", 'm1cv.loc.R2.s'] <- print(summary(m1.fit.2008.cv.loc.s)$r.squared)
res[res$year=="2008", 'm1cv.loc.PE.s'] <- print(rmse(residuals(m1.fit.2008.cv.loc.s)))
#temporal
tempo2008.loc.cv <- left_join(m1.2008.cv.loc, spatial2008.cv.loc)
tempo2008.loc.cv$delpm <- tempo2008.loc.cv$PM10 - tempo2008.loc.cv$barpm
tempo2008.loc.cv$delpred <- tempo2008.loc.cv$pred.m1.both - tempo2008.loc.cv$barpred
mod_temporal.loc.cv <- lm(delpm ~ delpred, data = tempo2008.loc.cv)
res[res$year=="2008", 'm1cv.loc.R2.t'] <- print(summary(mod_temporal.loc.cv)$r.squared)
#############save midpoint
# res is written twice: a per-year snapshot and the rolling all-years table
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2008.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
saveRDS(m1.2008.cv.loc,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2008.predCV.rds")
###############
#MOD2
###############
m2.2008<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2008.rds")
# z-score the same covariate set used for the stage-1 model so the fitted
# coefficients can be applied on the full grid-day dataset (same 21
# columns as the original one-assignment-per-variable version)
sc.vars.m2.08 <- c("elev", "tden", "pden", "dist2A1", "dist2water",
                   "dist2rail", "Dist2road", "ndvi", "MeanPbl", "p_ind",
                   "p_for", "p_farm", "p_dos", "p_dev", "p_os", "tempa",
                   "WDa", "WSa", "RHa", "Raina", "NO2a")
m2.2008[, (paste0(sc.vars.m2.08, ".s")) := lapply(.SD, scale), .SDcols = sc.vars.m2.08]
#generate predictions
# stage-2: apply the fitted stage-1 mixed model to the full AOD grid-day
# dataset (re.form = NULL keeps the random effects)
m2.2008[, pred.m2 := predict(object=m1.fit.2008,newdata=m2.2008,allow.new.levels=TRUE,re.form=NULL)]
describe(m2.2008$pred.m2)
#delete impossible values
m2.2008 <- m2.2008[pred.m2 > 0.00000000000001 , ]
m2.2008 <- m2.2008[pred.m2 < 1500 , ]
saveRDS(m2.2008,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2008.pred2.rds")
#-------------->prepare for mod3
# bi-monthly period index 1..6 derived from month m
m2.2008[, bimon := (m + 1) %/% 2]
setkey(m2.2008,day, aodid)
m2.2008<-m2.2008[!is.na(meanPM10)]
#run the lmer part regressing stage 2 pred Vs mean pm
#in israel check per month, also check 30km band and other methods for meanpm
# lme comes from nlme (loaded as a dependency of mgcv)
m2.smooth = lme(pred.m2 ~ meanPM10,random = list(aodid= ~1 + meanPM10),control=lmeControl(opt = "optim"), data= m2.2008 )
#xm2.smooth = lmer(pred.m2 ~ meanPM10+(1+ meanPM10|aodid), data= m2.2008 )
#correlate to see everything from mod2 and the mpm works
m2.2008[, pred.t31 := predict(m2.smooth)]
m2.2008[, resid := residuals(m2.smooth)]
res[res$year=="2008", 'm3.t31'] <- print(summary(lm(pred.m2~pred.t31,data=m2.2008))$r.squared)
#split the files to the separate bi monthly datsets
T2008_bimon1 <- subset(m2.2008 ,m2.2008$bimon == "1")
T2008_bimon2 <- subset(m2.2008 ,m2.2008$bimon == "2")
T2008_bimon3 <- subset(m2.2008 ,m2.2008$bimon == "3")
T2008_bimon4 <- subset(m2.2008 ,m2.2008$bimon == "4")
T2008_bimon5 <- subset(m2.2008 ,m2.2008$bimon == "5")
T2008_bimon6 <- subset(m2.2008 ,m2.2008$bimon == "6")
#run the separate splines (smooth) for x and y for each bimon
#whats the default band (distance) that the spline goes out and uses
# fit2_1..fit2_6 are reused later to predict the residual surface on the
# full mod3 grid — do not remove
fit2_1 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2008_bimon1 )
fit2_2 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2008_bimon2 )
fit2_3 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2008_bimon3 )
fit2_4 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2008_bimon4 )
fit2_5 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2008_bimon5 )
fit2_6 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2008_bimon6 )
#get the predicted-fitted
Xpred_1 <- (T2008_bimon1$pred.t31 - fit2_1$fitted)
Xpred_2 <- (T2008_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (T2008_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (T2008_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (T2008_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (T2008_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file
# NOTE(review): positional assignment — this relies on m2.2008 being keyed
# by (day, aodid) so that rows appear grouped by bimon in 1..6 order
# (assumes `day` sorts chronologically within the year — confirm)
m2.2008$pred.t32 <- c( Xpred_1,Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.2008,day, aodid)
#rerun the lme on the predictions including the spatial spline (smooth)
Final_pred_2008 <- lme(pred.t32 ~ meanPM10 ,random = list(aodid= ~1 + meanPM10 ),control=lmeControl(opt = "optim"),data= m2.2008 )
m2.2008[, pred.t33 := predict(Final_pred_2008)]
#check correlations
res[res$year=="2008", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.2008))$r.squared)
#------------------------>>>
#import mod3 (full-domain grid-day dataset for 2008)
data.m3 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2008.rds")
#for PM10
#NOTE(review): columns selected by position; assumes the mod3 RDS layout
#(ids, day, coords, meanPM10, month) is stable -- confirm if the file changes
data.m3 <- data.m3[,c(1,2,5,29:32,52,53),with=FALSE]
data.m3[, bimon := (m + 1) %/% 2]
setkey(data.m3,day, aodid)
data.m3<-data.m3[!is.na(meanPM10)]
#generate m.3 initial pred from the stage-3 mixed model
data.m3$pred.m3.mix <- predict(Final_pred_2008,data.m3)
#create unique grid: one row per aodid with its coordinates
ugrid <-data.m3 %>%
group_by(aodid) %>%
summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE))
#### PREDICT Gam part
#split back into bimons to include the gam prediction in final prediction
data.m3_bimon1 <- data.m3[bimon == 1, ]
data.m3_bimon2 <- data.m3[bimon == 2, ]
data.m3_bimon3 <- data.m3[bimon == 3, ]
data.m3_bimon4 <- data.m3[bimon == 4, ]
data.m3_bimon5 <- data.m3[bimon == 5, ]
data.m3_bimon6 <- data.m3[bimon == 6, ]
#addin unique grid to each bimon
uniq_gid_bimon1 <- ugrid
uniq_gid_bimon2 <- ugrid
uniq_gid_bimon3 <- ugrid
uniq_gid_bimon4 <- ugrid
uniq_gid_bimon5 <- ugrid
uniq_gid_bimon6 <- ugrid
#get predictions for Bimon residuals: each period's spatial surface is
#evaluated once per grid cell (it only depends on the ITM coordinates)
uniq_gid_bimon1$gpred <- predict.gam(fit2_1,uniq_gid_bimon1)
uniq_gid_bimon2$gpred <- predict.gam(fit2_2,uniq_gid_bimon2)
uniq_gid_bimon3$gpred <- predict.gam(fit2_3,uniq_gid_bimon3)
uniq_gid_bimon4$gpred <- predict.gam(fit2_4,uniq_gid_bimon4)
uniq_gid_bimon5$gpred <- predict.gam(fit2_5,uniq_gid_bimon5)
uniq_gid_bimon6$gpred <- predict.gam(fit2_6,uniq_gid_bimon6)
#merge things back together
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> merges
#attach the per-cell gam surface (gpred) to each bi-monthly slice by aodid
setkey(uniq_gid_bimon1,aodid)
setkey(data.m3_bimon1,aodid)
data.m3_bimon1 <- merge(data.m3_bimon1, uniq_gid_bimon1[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon2,aodid)
setkey(data.m3_bimon2,aodid)
data.m3_bimon2 <- merge(data.m3_bimon2, uniq_gid_bimon2[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon3,aodid)
setkey(data.m3_bimon3,aodid)
data.m3_bimon3 <- merge(data.m3_bimon3, uniq_gid_bimon3[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon4,aodid)
setkey(data.m3_bimon4,aodid)
data.m3_bimon4 <- merge(data.m3_bimon4, uniq_gid_bimon4[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon5,aodid)
setkey(data.m3_bimon5,aodid)
data.m3_bimon5 <- merge(data.m3_bimon5, uniq_gid_bimon5[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon6,aodid)
setkey(data.m3_bimon6,aodid)
data.m3_bimon6 <- merge(data.m3_bimon6, uniq_gid_bimon6[,list(aodid,gpred)], all.x = T)
#reattach all parts
mod3 <- rbind(data.m3_bimon1,data.m3_bimon2,data.m3_bimon3,data.m3_bimon4,data.m3_bimon5,data.m3_bimon6)
# create pred.m3: mixed-model prediction plus the spatial residual surface
mod3$pred.m3 <-mod3$pred.m3.mix+mod3$gpred
#hist(mod3$pred.m3)
#describe(mod3$pred.m3)
#drop rows with negative predictions (keeps pred.m3 >= 0 only)
mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2008.pred.rds")
#clean workspace, keeping only what the next stage needs
keep(mod3,res,rmse, sure=TRUE)
gc()
#########################
#prepare for m3.R2
#########################
#load mod1 (station-day calibration data with stage-1 predictions)
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2008.pred.rds")
#rebuild the grid-cell id used as the join key
mod1[,aodid:= paste(mod1$long_aod,mod1$lat_aod,sep="-")]
mod1<-mod1[,c("aodid","day","PM10","pred.m1","stn"),with=FALSE]
#R2.m3: join stage-3 predictions onto the station observations
setkey(mod3,day,aodid)
setkey(mod1,day,aodid)
m1.2008 <- merge(mod1,mod3[, list(day,aodid,pred.m3)], all.x = T)
#-------- Model-3 performance (year 2008) --------
# Fit PM10 ~ pred.m3 once and reuse the summary; the original refit the
# identical lm for every statistic extracted. Values are unchanged.
m3.fit.2008 <- summary(lm(PM10 ~ pred.m3, data = m1.2008))
res[res$year=="2008", 'm3.R2'] <- print(m3.fit.2008$r.squared)
#RMSPE of the overall model-3 fit
res[res$year=="2008", 'm3.PE'] <- print(rmse(residuals(m3.fit.2008)))
#spatial component: station-level means of observed vs predicted
spatial2008 <- m1.2008 %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m3, na.rm=TRUE))
m1.fit.2008.spat <- lm(barpm ~ barpred, data=spatial2008)
res[res$year=="2008", 'm3.R2.s'] <- print(summary(m1.fit.2008.spat)$r.squared)
res[res$year=="2008", 'm3.PE.s'] <- print(rmse(residuals(m1.fit.2008.spat)))
#temporal component: deviations of obs/pred from the station means
tempo2008 <- left_join(m1.2008, spatial2008)
tempo2008$delpm <- tempo2008$PM10 - tempo2008$barpm
tempo2008$delpred <- tempo2008$pred.m3 - tempo2008$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2008)
res[res$year=="2008", 'm3.R2.t'] <- print(summary(mod_temporal)$r.squared)
#############save midpoint (per-year copy and rolling all-years copy)
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2008.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
#########################
#import mod2 (stage-2 grid predictions)
mod2<- readRDS( "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2008.pred2.rds")
mod2<-mod2[,c("aodid","day","pred.m2"),with=FALSE]
#----------------> store the best available prediction per cell/day
mod3best <- mod3[, list(aodid, x_aod_ITM, y_aod_ITM, day, pred.m3)]
setkey(mod3best, day, aodid)
setkey(mod2, day, aodid)
mod3best <- merge(mod3best, mod2[,list(aodid, day, pred.m2)], all.x = T)
#reload mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2008.pred.rds")
mod1$aodid<-paste(mod1$long_aod,mod1$lat_aod,sep="-")
mod1<-mod1[,c("aodid","day","PM10","pred.m1"),with=FALSE]
setkey(mod1,day,aodid)
mod3best <- merge(mod3best, mod1, all.x = T)
#fallback chain: start with m3, overwrite with m2, then m1 where available
mod3best[,bestpred := pred.m3]
mod3best[!is.na(pred.m2),bestpred := pred.m2]
mod3best[!is.na(pred.m1),bestpred := pred.m1]
#save
saveRDS(mod3best,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2008.FINAL.rds")
#save for GIS: per-cell long-term summary of the best available prediction
write.csv(mod3best[, list(LTPM = mean(bestpred, na.rm = T),
                          predvariance = var(bestpred, na.rm = T),
                          predmin = min(bestpred, na.rm = T),
                          predmax = max(bestpred, na.rm = T),
                          npred = sum(!is.na(bestpred)),
                          npred.m1 = sum(!is.na(pred.m1)),
                          npred.m2 = sum(!is.na(pred.m2)),
                          npred.m3 = sum(!is.na(pred.m3)),
                          x_aod_ITM = x_aod_ITM[1], y_aod_ITM = y_aod_ITM[1]), by=aodid], "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/LTPM3.AQ.2008.csv", row.names = F)
#drop everything except the results table before starting the next year
keep(res, sure=TRUE)
# BUG FIX: was `c()` (a no-op); the pattern used elsewhere in this script is
# keep(...) followed by gc() to reclaim memory.
gc()
###############
#LIBS
###############
library(lme4);library(reshape);library(foreign) ;library(ggplot2);library(plyr);library(data.table);library(reshape2);library(Hmisc);library(mgcv);library(gdata);library(car);library(dplyr);library(ggmap);library(broom);library(splines)
#sourcing helpers: splitdf() (random train/test split) and rmse()
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
#if needed load res table
#res<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
### import data: stage-1 calibration dataset for 2009
m1.2009 <-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2009.rds")
################# clean BAD STN PM10 and check if improved model?
#per-station R2 of a simple PM10 ~ AOD regression
raWDaf <- ddply(m1.2009, c( "stn"),
function(x) {
mod1 <- lm(PM10 ~ aod, data=x)
data.frame(R2 = round(summary(mod1)$r.squared, 5),
nsamps = length(summary(mod1)$resid))
})
raWDaf
raWDaf<-as.data.table(raWDaf)
#stations whose AOD-PM10 relation is essentially noise (R2 < 0.01)
bad<- raWDaf[R2< 0.01]
bad[,badid := paste(stn,sep="-")]
#################BAD STN
m1.2009[,badid := paste(stn,sep="-")]
####Take out bad stations
m1.2009 <- m1.2009[!(m1.2009$badid %in% bad$badid), ]
#scale vars: z-score standardize continuous predictors (".s" suffix)
m1.2009[,elev.s:= scale(elev)]
m1.2009[,tden.s:= scale(tden)]
m1.2009[,pden.s:= scale(pden)]
m1.2009[,dist2A1.s:= scale(dist2A1)]
m1.2009[,dist2water.s:= scale(dist2water)]
m1.2009[,dist2rail.s:= scale(dist2rail)]
m1.2009[,Dist2road.s:= scale(Dist2road)]
m1.2009[,ndvi.s:= scale(ndvi)]
m1.2009[,MeanPbl.s:= scale(MeanPbl)]
m1.2009[,p_ind.s:= scale(p_ind)]
m1.2009[,p_for.s:= scale(p_for)]
m1.2009[,p_farm.s:= scale(p_farm)]
m1.2009[,p_dos.s:= scale(p_dos)]
m1.2009[,p_dev.s:= scale(p_dev)]
m1.2009[,p_os.s:= scale(p_os)]
m1.2009[,tempa.s:= scale(tempa)]
m1.2009[,WDa.s:= scale(WDa)]
m1.2009[,WSa.s:= scale(WSa)]
m1.2009[,RHa.s:= scale(RHa)]
m1.2009[,Raina.s:= scale(Raina)]
m1.2009[,NO2a.s:= scale(NO2a)]
#stage-1 mixed model: PM10 on AOD with temporal/spatial/land-use covariates
#and a random AOD slope nested within day/region
m1.formula <- as.formula(PM10~ aod
+tempa.s+WDa.s+WSa.s+Dust+MeanPbl.s #temporal
+elev.s+tden.s+pden.s+Dist2road.s+ndvi.s #spatial
+p_os.s #+p_dev.s+p_dos.s+p_farm.s+p_for.s+p_ind.s #land use
#+aod*Dust #interactions
+(1+aod|day/reg_num)) #+(1|stn) !!! stn screws up mod3
#full fit (normwt supplies the observation weights)
m1.fit.2009 <- lmer(m1.formula,data=m1.2009,weights=normwt)
m1.2009$pred.m1 <- predict(m1.fit.2009)
res[res$year=="2009", 'm1.R2'] <- print(summary(lm(PM10~pred.m1,data=m1.2009))$r.squared)
#RMSPE
res[res$year=="2009", 'm1.PE'] <- print(rmse(residuals(m1.fit.2009)))
#spatial performance: station-level means of observed vs predicted
###to check
spatial2009<-m1.2009 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2009.spat<- lm(barpm ~ barpred, data=spatial2009)
res[res$year=="2009", 'm1.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2009))$r.squared)
res[res$year=="2009", 'm1.PE.s'] <- print(rmse(residuals(m1.fit.2009.spat)))
#temporal performance: deviations of obs/pred from the station means
tempo2009<-left_join(m1.2009,spatial2009)
tempo2009$delpm <-tempo2009$PM10-tempo2009$barpm
tempo2009$delpred <-tempo2009$pred.m1-tempo2009$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2009)
res[res$year=="2009", 'm1.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2009))$r.squared)
saveRDS(m1.2009,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2009.pred.rds")
#---------------->>>> CV
# Repeated random-split cross validation (10 iterations) for 2009.
# Each iteration: random train/test split (splitdf, from CV_splits.r),
# refit model 1 on the training set, predict the held-out set allowing
# random-effect levels unseen in training (allow.new.levels=TRUE).
# Replaces ten hand-copied s1..s10 blocks; the order of random draws and
# model fits is unchanged, so results are identical to the original.
cv_tests <- vector("list", 10L)
for (i in seq_len(10L)) {
  splits_i <- splitdf(m1.2009)
  test_i <- splits_i$testset
  train_i <- splits_i$trainset
  out_train_i <- lmer(m1.formula, data = train_i, weights = normwt)
  test_i$pred.m1.cv <- predict(object = out_train_i, newdata = test_i,
                               allow.new.levels = TRUE, re.form = NULL)
  test_i$iter <- paste0("s", i)   # fold label "s1".."s10"
  cv_tests[[i]] <- test_i
}
#BIND 1 dataset
m1.2009.cv <- data.table(do.call(rbind, cv_tests))
rm(cv_tests)
# cleanup (remove from WS) objects from CV
rm(list = ls(pattern = "train_|test_"))
#table updates
# Fit the observed-vs-CV-predicted regression once and reuse its summary;
# the original refit the identical lm five times. Values are unchanged.
m1.fit.2009.cv <- lm(PM10 ~ pred.m1.cv, data = m1.2009.cv)
sum.m1.cv <- summary(m1.fit.2009.cv)
res[res$year=="2009", 'm1cv.R2'] <- print(sum.m1.cv$r.squared)
res[res$year=="2009", 'm1cv.I'] <- print(sum.m1.cv$coef[1,1])
res[res$year=="2009", 'm1cv.I.se'] <- print(sum.m1.cv$coef[1,2])
res[res$year=="2009", 'm1cv.S'] <- print(sum.m1.cv$coef[2,1])
res[res$year=="2009", 'm1cv.S.se'] <- print(sum.m1.cv$coef[2,2])
#RMSPE
res[res$year=="2009", 'm1cv.PE'] <- print(rmse(residuals(m1.fit.2009.cv)))
#spatial CV performance: station-level means of observed vs predicted
# BUG FIX: barpred previously averaged pred.m1 (the full-data in-sample
# prediction carried along in the CV table) instead of the cross-validated
# prediction pred.m1.cv, overstating the spatial CV R2.
spatial2009.cv <- m1.2009.cv %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.cv, na.rm=TRUE))
m1.fit.2009.cv.s <- lm(barpm ~ barpred, data=spatial2009.cv)
res[res$year=="2009", 'm1cv.R2.s'] <- print(summary(m1.fit.2009.cv.s)$r.squared)
res[res$year=="2009", 'm1cv.PE.s'] <- print(rmse(residuals(m1.fit.2009.cv.s)))
#temporal CV performance: deviations of obs/CV-pred from station means
tempo2009.cv<-left_join(m1.2009.cv,spatial2009.cv)
tempo2009.cv$delpm <-tempo2009.cv$PM10-tempo2009.cv$barpm
tempo2009.cv$delpred <-tempo2009.cv$pred.m1.cv-tempo2009.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempo2009.cv)
res[res$year=="2009", 'm1cv.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2009.cv))$r.squared)
#-------->>> loc stage: add 50 m local land-use covariates and model the
#CV residual with a GAM
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
setnames(luf,"tden","loc.tden")
setnames(luf,"elev50","loc.elev")
#add 50m LU to CV data (joined by station id)
setkey(m1.2009.cv,stn)
setkey(luf,stn)
m1.2009.cv.loc <- merge(m1.2009.cv, luf, all.x = T)
#m1.2009.cv.loc<-na.omit(m1.2009.cv.loc)
#create residual variable: observed PM10 minus the CV prediction
m1.2009.cv.loc$res.m1<-m1.2009.cv.loc$PM10-m1.2009.cv.loc$pred.m1.cv
#The GAM model on the residual
#NOTE(review): if any GAM covariate contains NAs, gam() drops those rows and
#predict(gam.out) below will be shorter than nrow(m1.2009.cv.loc), breaking
#the column assignment -- the commented na.omit above hints at this; verify.
gam.out<-gam(res.m1~s(loc.tden)+s(tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m1.2009.cv.loc)
#plot(bp.model.ps)
#summary(bp.model.ps)
## reg: combined prediction = CV prediction + local GAM correction
m1.2009.cv.loc$pred.m1.loc <-predict(gam.out)
m1.2009.cv.loc$pred.m1.both <- m1.2009.cv.loc$pred.m1.cv + m1.2009.cv.loc$pred.m1.loc
# Performance of CV prediction + local (GAM) correction.
# Fit once and reuse the summary; the original refit the identical lm five
# times.
m1.fit.2009.cv.loc <- lm(PM10 ~ pred.m1.both, data = m1.2009.cv.loc)
sum.m1.cv.loc <- summary(m1.fit.2009.cv.loc)
res[res$year=="2009", 'm1cv.loc.R2'] <- print(sum.m1.cv.loc$r.squared)
res[res$year=="2009", 'm1cv.loc.I'] <- print(sum.m1.cv.loc$coef[1,1])
res[res$year=="2009", 'm1cv.loc.I.se'] <- print(sum.m1.cv.loc$coef[1,2])
res[res$year=="2009", 'm1cv.loc.S'] <- print(sum.m1.cv.loc$coef[2,1])
res[res$year=="2009", 'm1cv.loc.S.se'] <- print(sum.m1.cv.loc$coef[2,2])
#RMSPE
# BUG FIX: previously reported rmse(residuals(m1.fit.2009.cv)) -- the
# non-local CV fit -- so the local-stage PE duplicated m1cv.PE. Use the
# local-stage model's residuals instead.
res[res$year=="2009", 'm1cv.loc.PE'] <- print(rmse(residuals(m1.fit.2009.cv.loc)))
#spatial performance of the local stage: station-level means
# BUG FIX: barpred previously averaged pred.m1 (the stage-1 in-sample
# prediction); the local-stage spatial statistics should use the combined
# CV+local prediction pred.m1.both.
spatial2009.cv.loc <- m1.2009.cv.loc %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.both, na.rm=TRUE))
m1.fit.2009.cv.loc.s <- lm(barpm ~ barpred, data=spatial2009.cv.loc)
res[res$year=="2009", 'm1cv.loc.R2.s'] <- print(summary(m1.fit.2009.cv.loc.s)$r.squared)
res[res$year=="2009", 'm1cv.loc.PE.s'] <- print(rmse(residuals(m1.fit.2009.cv.loc.s)))
#temporal performance of the local stage: deviations from station means
tempo2009.loc.cv<-left_join(m1.2009.cv.loc,spatial2009.cv.loc)
tempo2009.loc.cv$delpm <-tempo2009.loc.cv$PM10-tempo2009.loc.cv$barpm
tempo2009.loc.cv$delpred <-tempo2009.loc.cv$pred.m1.both-tempo2009.loc.cv$barpred
mod_temporal.loc.cv <- lm(delpm ~ delpred, data=tempo2009.loc.cv)
res[res$year=="2009", 'm1cv.loc.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2009.loc.cv))$r.squared)
#############save midpoint (per-year and rolling all-years result tables,
#plus the CV dataset with local predictions)
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2009.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
saveRDS(m1.2009.cv.loc,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2009.predCV.rds")
###############
#MOD2: stage-2 (2009) -- predict PM10 over the full AOD grid
###############
m2.2009<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2009.rds")
#z-score standardize continuous predictors (same ".s" set as stage 1)
m2.2009[,elev.s:= scale(elev)]
m2.2009[,tden.s:= scale(tden)]
m2.2009[,pden.s:= scale(pden)]
m2.2009[,dist2A1.s:= scale(dist2A1)]
m2.2009[,dist2water.s:= scale(dist2water)]
m2.2009[,dist2rail.s:= scale(dist2rail)]
m2.2009[,Dist2road.s:= scale(Dist2road)]
m2.2009[,ndvi.s:= scale(ndvi)]
m2.2009[,MeanPbl.s:= scale(MeanPbl)]
m2.2009[,p_ind.s:= scale(p_ind)]
m2.2009[,p_for.s:= scale(p_for)]
m2.2009[,p_farm.s:= scale(p_farm)]
m2.2009[,p_dos.s:= scale(p_dos)]
m2.2009[,p_dev.s:= scale(p_dev)]
m2.2009[,p_os.s:= scale(p_os)]
m2.2009[,tempa.s:= scale(tempa)]
m2.2009[,WDa.s:= scale(WDa)]
m2.2009[,WSa.s:= scale(WSa)]
m2.2009[,RHa.s:= scale(RHa)]
m2.2009[,Raina.s:= scale(Raina)]
m2.2009[,NO2a.s:= scale(NO2a)]
#generate predictions from the stage-1 mixed model (m1.fit.2009)
m2.2009[, pred.m2 := predict(object=m1.fit.2009,newdata=m2.2009,allow.new.levels=TRUE,re.form=NULL)]
describe(m2.2009$pred.m2)
#delete impossible values: keep only predictions in (0, 1500)
m2.2009 <- m2.2009[pred.m2 > 0.00000000000001 , ]
m2.2009 <- m2.2009[pred.m2 < 1500 , ]
saveRDS(m2.2009,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2009.pred2.rds")
#-------------->prepare for mod3
#bimon = bi-monthly period index (1..6) derived from calendar month m
m2.2009[, bimon := (m + 1) %/% 2]
setkey(m2.2009,day, aodid)
m2.2009<-m2.2009[!is.na(meanPM10)]
#run the lmer part regressing stage 2 pred Vs mean pm
#in israel check per month, also check 30km band and other methods for meanpm
#random intercept + meanPM10 slope per grid cell (aodid)
m2.smooth = lme(pred.m2 ~ meanPM10,random = list(aodid= ~1 + meanPM10),control=lmeControl(opt = "optim"), data= m2.2009 )
#xm2.smooth = lmer(pred.m2 ~ meanPM10+(1+ meanPM10|aodid), data= m2.2009 )
#correlate to see everything from mod2 and the mpm works
m2.2009[, pred.t31 := predict(m2.smooth)]
m2.2009[, resid := residuals(m2.smooth)]
res[res$year=="2009", 'm3.t31'] <- print(summary(lm(pred.m2~pred.t31,data=m2.2009))$r.squared)
#split the files to the separate bi-monthly datasets
T2009_bimon1 <- subset(m2.2009 ,m2.2009$bimon == "1")
T2009_bimon2 <- subset(m2.2009 ,m2.2009$bimon == "2")
T2009_bimon3 <- subset(m2.2009 ,m2.2009$bimon == "3")
T2009_bimon4 <- subset(m2.2009 ,m2.2009$bimon == "4")
T2009_bimon5 <- subset(m2.2009 ,m2.2009$bimon == "5")
T2009_bimon6 <- subset(m2.2009 ,m2.2009$bimon == "6")
#run the separate splines (smooth) for x and y for each bimon
#whats the default band (distance) that the spline goes out and uses
#smooth of the lme residuals over ITM coordinates, fitted per period
fit2_1 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2009_bimon1 )
fit2_2 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2009_bimon2 )
fit2_3 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2009_bimon3 )
fit2_4 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2009_bimon4 )
fit2_5 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2009_bimon5 )
fit2_6 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2009_bimon6 )
#get the predicted-fitted (subtract the spatial residual surface)
Xpred_1 <- (T2009_bimon1$pred.t31 - fit2_1$fitted)
Xpred_2 <- (T2009_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (T2009_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (T2009_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (T2009_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (T2009_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file
#NOTE(review): this concatenation assumes m2.2009 rows are contiguous and in
#ascending bimon order; the table is keyed by (day, aodid), so days -- and
#hence bi-monthly periods -- are ascending. Verify if the key ever changes.
m2.2009$pred.t32 <- c( Xpred_1,Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.2009,day, aodid)
#rerun the lme on the predictions including the spatial spline (smooth)
Final_pred_2009 <- lme(pred.t32 ~ meanPM10 ,random = list(aodid= ~1 + meanPM10 ),control=lmeControl(opt = "optim"),data= m2.2009 )
m2.2009[, pred.t33 := predict(Final_pred_2009)]
#check correlations
res[res$year=="2009", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.2009))$r.squared)
#------------------------>>>
#import mod3 (full-domain grid-day dataset for 2009)
data.m3 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2009.rds")
#for PM10
#NOTE(review): columns selected by position; assumes the mod3 RDS layout
#(ids, day, coords, meanPM10, month) is stable -- confirm if the file changes
data.m3 <- data.m3[,c(1,2,5,29:32,52,53),with=FALSE]
data.m3[, bimon := (m + 1) %/% 2]
setkey(data.m3,day, aodid)
data.m3<-data.m3[!is.na(meanPM10)]
#generate m.3 initial pred from the stage-3 mixed model
data.m3$pred.m3.mix <- predict(Final_pred_2009,data.m3)
#create unique grid: one row per aodid with its coordinates
ugrid <-data.m3 %>%
group_by(aodid) %>%
summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE))
#### PREDICT Gam part
#split back into bimons to include the gam prediction in final prediction
data.m3_bimon1 <- data.m3[bimon == 1, ]
data.m3_bimon2 <- data.m3[bimon == 2, ]
data.m3_bimon3 <- data.m3[bimon == 3, ]
data.m3_bimon4 <- data.m3[bimon == 4, ]
data.m3_bimon5 <- data.m3[bimon == 5, ]
data.m3_bimon6 <- data.m3[bimon == 6, ]
#addin unique grid to each bimon
uniq_gid_bimon1 <- ugrid
uniq_gid_bimon2 <- ugrid
uniq_gid_bimon3 <- ugrid
uniq_gid_bimon4 <- ugrid
uniq_gid_bimon5 <- ugrid
uniq_gid_bimon6 <- ugrid
#get predictions for Bimon residuals: each period's spatial surface is
#evaluated once per grid cell (it only depends on the ITM coordinates)
uniq_gid_bimon1$gpred <- predict.gam(fit2_1,uniq_gid_bimon1)
uniq_gid_bimon2$gpred <- predict.gam(fit2_2,uniq_gid_bimon2)
uniq_gid_bimon3$gpred <- predict.gam(fit2_3,uniq_gid_bimon3)
uniq_gid_bimon4$gpred <- predict.gam(fit2_4,uniq_gid_bimon4)
uniq_gid_bimon5$gpred <- predict.gam(fit2_5,uniq_gid_bimon5)
uniq_gid_bimon6$gpred <- predict.gam(fit2_6,uniq_gid_bimon6)
#merge things back together
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> merges
#attach the per-cell gam surface (gpred) to each bi-monthly slice by aodid
setkey(uniq_gid_bimon1,aodid)
setkey(data.m3_bimon1,aodid)
data.m3_bimon1 <- merge(data.m3_bimon1, uniq_gid_bimon1[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon2,aodid)
setkey(data.m3_bimon2,aodid)
data.m3_bimon2 <- merge(data.m3_bimon2, uniq_gid_bimon2[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon3,aodid)
setkey(data.m3_bimon3,aodid)
data.m3_bimon3 <- merge(data.m3_bimon3, uniq_gid_bimon3[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon4,aodid)
setkey(data.m3_bimon4,aodid)
data.m3_bimon4 <- merge(data.m3_bimon4, uniq_gid_bimon4[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon5,aodid)
setkey(data.m3_bimon5,aodid)
data.m3_bimon5 <- merge(data.m3_bimon5, uniq_gid_bimon5[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon6,aodid)
setkey(data.m3_bimon6,aodid)
data.m3_bimon6 <- merge(data.m3_bimon6, uniq_gid_bimon6[,list(aodid,gpred)], all.x = T)
#reattach all parts
mod3 <- rbind(data.m3_bimon1,data.m3_bimon2,data.m3_bimon3,data.m3_bimon4,data.m3_bimon5,data.m3_bimon6)
# create pred.m3: mixed-model prediction plus the spatial residual surface
mod3$pred.m3 <-mod3$pred.m3.mix+mod3$gpred
#hist(mod3$pred.m3)
#describe(mod3$pred.m3)
#drop rows with negative predictions (keeps pred.m3 >= 0 only)
mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2009.pred.rds")
#clean workspace, keeping only what the next stage needs
keep(mod3,res,rmse, sure=TRUE)
gc()
#########################
#prepare for m3.R2
#########################
#load mod1 (station-day calibration data with stage-1 predictions)
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2009.pred.rds")
#rebuild the grid-cell id used as the join key
mod1[,aodid:= paste(mod1$long_aod,mod1$lat_aod,sep="-")]
mod1<-mod1[,c("aodid","day","PM10","pred.m1","stn"),with=FALSE]
#R2.m3: join stage-3 predictions onto the station observations
setkey(mod3,day,aodid)
setkey(mod1,day,aodid)
m1.2009 <- merge(mod1,mod3[, list(day,aodid,pred.m3)], all.x = T)
#-------- Model-3 performance (year 2009) --------
# Fit PM10 ~ pred.m3 once and reuse the summary; the original refit the
# identical lm for every statistic extracted. Values are unchanged.
m3.fit.2009 <- summary(lm(PM10 ~ pred.m3, data = m1.2009))
res[res$year=="2009", 'm3.R2'] <- print(m3.fit.2009$r.squared)
#RMSPE of the overall model-3 fit
res[res$year=="2009", 'm3.PE'] <- print(rmse(residuals(m3.fit.2009)))
#spatial component: station-level means of observed vs predicted
spatial2009 <- m1.2009 %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m3, na.rm=TRUE))
m1.fit.2009.spat <- lm(barpm ~ barpred, data=spatial2009)
res[res$year=="2009", 'm3.R2.s'] <- print(summary(m1.fit.2009.spat)$r.squared)
res[res$year=="2009", 'm3.PE.s'] <- print(rmse(residuals(m1.fit.2009.spat)))
#temporal component: deviations of obs/pred from the station means
tempo2009 <- left_join(m1.2009, spatial2009)
tempo2009$delpm <- tempo2009$PM10 - tempo2009$barpm
tempo2009$delpred <- tempo2009$pred.m3 - tempo2009$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2009)
res[res$year=="2009", 'm3.R2.t'] <- print(summary(mod_temporal)$r.squared)
#############save midpoint (per-year copy and rolling all-years copy)
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2009.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
#########################
#import mod2 (stage-2 grid predictions)
mod2<- readRDS( "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2009.pred2.rds")
mod2<-mod2[,c("aodid","day","pred.m2"),with=FALSE]
#----------------> store the best available prediction per cell/day
mod3best <- mod3[, list(aodid, x_aod_ITM, y_aod_ITM, day, pred.m3)]
setkey(mod3best, day, aodid)
setkey(mod2, day, aodid)
mod3best <- merge(mod3best, mod2[,list(aodid, day, pred.m2)], all.x = T)
#reload mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2009.pred.rds")
mod1$aodid<-paste(mod1$long_aod,mod1$lat_aod,sep="-")
mod1<-mod1[,c("aodid","day","PM10","pred.m1"),with=FALSE]
setkey(mod1,day,aodid)
mod3best <- merge(mod3best, mod1, all.x = T)
#fallback chain: start with m3, overwrite with m2, then m1 where available
mod3best[,bestpred := pred.m3]
mod3best[!is.na(pred.m2),bestpred := pred.m2]
mod3best[!is.na(pred.m1),bestpred := pred.m1]
#save
saveRDS(mod3best,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2009.FINAL.rds")
#save for GIS: per-cell long-term summary of the best available prediction
write.csv(mod3best[, list(LTPM = mean(bestpred, na.rm = T),
                          predvariance = var(bestpred, na.rm = T),
                          predmin = min(bestpred, na.rm = T),
                          predmax = max(bestpred, na.rm = T),
                          npred = sum(!is.na(bestpred)),
                          npred.m1 = sum(!is.na(pred.m1)),
                          npred.m2 = sum(!is.na(pred.m2)),
                          npred.m3 = sum(!is.na(pred.m3)),
                          x_aod_ITM = x_aod_ITM[1], y_aod_ITM = y_aod_ITM[1]), by=aodid], "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/LTPM3.AQ.2009.csv", row.names = F)
#drop everything except the results table before starting the next year
keep(res, sure=TRUE)
# BUG FIX: was `c()` (a no-op); the pattern used elsewhere in this script is
# keep(...) followed by gc() to reclaim memory.
gc()
###############
#LIBS
###############
library(lme4);library(reshape);library(foreign) ;library(ggplot2);library(plyr);library(data.table);library(reshape2);library(Hmisc);library(mgcv);library(gdata);library(car);library(dplyr);library(ggmap);library(broom);library(splines)
#sourcing helpers: splitdf() (random train/test split) and rmse()
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
#if needed load res table
#res<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
### import data: stage-1 calibration dataset for 2010
m1.2010 <-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2010.rds")
################# clean BAD STN PM10 and check if improved model?
#per-station R2 of a simple PM10 ~ AOD regression
raWDaf <- ddply(m1.2010, c( "stn"),
function(x) {
mod1 <- lm(PM10 ~ aod, data=x)
data.frame(R2 = round(summary(mod1)$r.squared, 5),
nsamps = length(summary(mod1)$resid))
})
raWDaf
raWDaf<-as.data.table(raWDaf)
#stations whose AOD-PM10 relation is essentially noise (R2 < 0.01)
bad<- raWDaf[R2< 0.01]
bad[,badid := paste(stn,sep="-")]
#################BAD STN
m1.2010[,badid := paste(stn,sep="-")]
####Take out bad stations
m1.2010 <- m1.2010[!(m1.2010$badid %in% bad$badid), ]
#scale vars: z-score standardize continuous predictors (".s" suffix)
m1.2010[,elev.s:= scale(elev)]
m1.2010[,tden.s:= scale(tden)]
m1.2010[,pden.s:= scale(pden)]
m1.2010[,dist2A1.s:= scale(dist2A1)]
m1.2010[,dist2water.s:= scale(dist2water)]
m1.2010[,dist2rail.s:= scale(dist2rail)]
m1.2010[,Dist2road.s:= scale(Dist2road)]
m1.2010[,ndvi.s:= scale(ndvi)]
m1.2010[,MeanPbl.s:= scale(MeanPbl)]
m1.2010[,p_ind.s:= scale(p_ind)]
m1.2010[,p_for.s:= scale(p_for)]
m1.2010[,p_farm.s:= scale(p_farm)]
m1.2010[,p_dos.s:= scale(p_dos)]
m1.2010[,p_dev.s:= scale(p_dev)]
m1.2010[,p_os.s:= scale(p_os)]
m1.2010[,tempa.s:= scale(tempa)]
m1.2010[,WDa.s:= scale(WDa)]
m1.2010[,WSa.s:= scale(WSa)]
m1.2010[,RHa.s:= scale(RHa)]
m1.2010[,Raina.s:= scale(Raina)]
m1.2010[,NO2a.s:= scale(NO2a)]
#stage-1 mixed model: PM10 on AOD with temporal/spatial/land-use covariates
#and a random AOD slope nested within day/region
m1.formula <- as.formula(PM10~ aod
+tempa.s+WDa.s+WSa.s+Dust+MeanPbl.s #temporal
+elev.s+tden.s+pden.s+Dist2road.s+ndvi.s #spatial
+p_os.s #+p_dev.s+p_dos.s+p_farm.s+p_for.s+p_ind.s #land use
#+aod*Dust #interactions
+(1+aod|day/reg_num)) #+(1|stn) !!! stn screws up mod3
#full fit (normwt supplies the observation weights)
m1.fit.2010 <- lmer(m1.formula,data=m1.2010,weights=normwt)
m1.2010$pred.m1 <- predict(m1.fit.2010)
res[res$year=="2010", 'm1.R2'] <- print(summary(lm(PM10~pred.m1,data=m1.2010))$r.squared)
#RMSPE
res[res$year=="2010", 'm1.PE'] <- print(rmse(residuals(m1.fit.2010)))
#spatial performance: station-level means of observed vs predicted
###to check
spatial2010<-m1.2010 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2010.spat<- lm(barpm ~ barpred, data=spatial2010)
res[res$year=="2010", 'm1.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2010))$r.squared)
res[res$year=="2010", 'm1.PE.s'] <- print(rmse(residuals(m1.fit.2010.spat)))
#temporal performance: deviations of obs/pred from the station means
tempo2010<-left_join(m1.2010,spatial2010)
tempo2010$delpm <-tempo2010$PM10-tempo2010$barpm
tempo2010$delpred <-tempo2010$pred.m1-tempo2010$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2010)
res[res$year=="2010", 'm1.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2010))$r.squared)
saveRDS(m1.2010,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2010.pred.rds")
#---------------->>>> CV
# Repeated random-split cross validation (10 iterations) for 2010.
# Each iteration: random train/test split (splitdf, from CV_splits.r),
# refit model 1 on the training set, predict the held-out set allowing
# random-effect levels unseen in training (allow.new.levels=TRUE).
# Replaces ten hand-copied s1..s10 blocks; the order of random draws and
# model fits is unchanged, so results are identical to the original.
cv_tests <- vector("list", 10L)
for (i in seq_len(10L)) {
  splits_i <- splitdf(m1.2010)
  test_i <- splits_i$testset
  train_i <- splits_i$trainset
  out_train_i <- lmer(m1.formula, data = train_i, weights = normwt)
  test_i$pred.m1.cv <- predict(object = out_train_i, newdata = test_i,
                               allow.new.levels = TRUE, re.form = NULL)
  test_i$iter <- paste0("s", i)   # fold label "s1".."s10"
  cv_tests[[i]] <- test_i
}
#BIND 1 dataset
m1.2010.cv <- data.table(do.call(rbind, cv_tests))
rm(cv_tests)
# cleanup (remove from WS) objects from CV
rm(list = ls(pattern = "train_|test_"))
#table updates
# Cross-validated stage-1 performance (2010): overall, spatial and temporal.
m1.fit.2010.cv<-lm(PM10~pred.m1.cv,data=m1.2010.cv)
# compute the summary once and reuse it (was refit five times)
cv.sum <- summary(m1.fit.2010.cv)
res[res$year=="2010", 'm1cv.R2'] <- print(cv.sum$r.squared)
res[res$year=="2010", 'm1cv.I'] <- print(cv.sum$coef[1,1])
res[res$year=="2010", 'm1cv.I.se'] <- print(cv.sum$coef[1,2])
res[res$year=="2010", 'm1cv.S'] <- print(cv.sum$coef[2,1])
res[res$year=="2010", 'm1cv.S.se'] <- print(cv.sum$coef[2,2])
#RMSPE
res[res$year=="2010", 'm1cv.PE'] <- print(rmse(residuals(m1.fit.2010.cv)))
#spatial
# BUG FIX: the spatial CV R2 previously averaged the full-model prediction
# (pred.m1); cross-validated performance must use the held-out prediction
# pred.m1.cv, otherwise the spatial CV R2 is inflated.
spatial2010.cv<-m1.2010.cv %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.cv, na.rm=TRUE))
m1.fit.2010.cv.s <- lm(barpm ~ barpred, data=spatial2010.cv)
res[res$year=="2010", 'm1cv.R2.s'] <- print(summary(m1.fit.2010.cv.s)$r.squared)
res[res$year=="2010", 'm1cv.PE.s'] <- print(rmse(residuals(m1.fit.2010.cv.s)))
#temporal
# deviations from station means, CV predictions vs observations
tempo2010.cv<-left_join(m1.2010.cv,spatial2010.cv)
tempo2010.cv$delpm <-tempo2010.cv$PM10-tempo2010.cv$barpm
tempo2010.cv$delpred <-tempo2010.cv$pred.m1.cv-tempo2010.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempo2010.cv)
res[res$year=="2010", 'm1cv.R2.t'] <- print(summary(mod_temporal.cv)$r.squared)
#-------->>> loc stage
# Local (50 m) land-use correction: model the CV residual with a GAM of
# fine-scale covariates and add its prediction back to the CV prediction.
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
setnames(luf,"tden","loc.tden")
setnames(luf,"elev50","loc.elev")
#add 50m LU to CV data
setkey(m1.2010.cv,stn)
setkey(luf,stn)
m1.2010.cv.loc <- merge(m1.2010.cv, luf, all.x = T)
#m1.2010.cv.loc<-na.omit(m1.2010.cv.loc)
# residual of the CV prediction (observed minus held-out prediction)
m1.2010.cv.loc$res.m1<-m1.2010.cv.loc$PM10-m1.2010.cv.loc$pred.m1.cv
# The GAM model: smooths of local land-use / meteorology on the residual.
# NOTE(review): predict(gam.out) below assumes local.csv covers every station;
# if the merge introduced NAs, gam() drops rows and the fitted vector would be
# shorter than the data — confirm coverage.
gam.out<-gam(res.m1~s(loc.tden)+s(tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m1.2010.cv.loc)
## combined prediction = CV prediction + local GAM correction
m1.2010.cv.loc$pred.m1.loc <-predict(gam.out)
m1.2010.cv.loc$pred.m1.both <- m1.2010.cv.loc$pred.m1.cv + m1.2010.cv.loc$pred.m1.loc
# fit once and reuse for the table entries (was refit five times)
m1.fit.2010.cv.loc <- lm(PM10~pred.m1.both,data=m1.2010.cv.loc)
cv.loc.sum <- summary(m1.fit.2010.cv.loc)
res[res$year=="2010", 'm1cv.loc.R2'] <- print(cv.loc.sum$r.squared)
res[res$year=="2010", 'm1cv.loc.I'] <- print(cv.loc.sum$coef[1,1])
res[res$year=="2010", 'm1cv.loc.I.se'] <- print(cv.loc.sum$coef[1,2])
res[res$year=="2010", 'm1cv.loc.S'] <- print(cv.loc.sum$coef[2,1])
res[res$year=="2010", 'm1cv.loc.S.se'] <- print(cv.loc.sum$coef[2,2])
#RMSPE
# BUG FIX: previously reported residuals of m1.fit.2010.cv (the non-loc CV
# fit), so the loc PE duplicated m1cv.PE; use the loc-corrected fit.
res[res$year=="2010", 'm1cv.loc.PE'] <- print(rmse(residuals(m1.fit.2010.cv.loc)))
#spatial
# BUG FIX: average the combined prediction pred.m1.both (was pred.m1, the
# full-model prediction, which ignores both the CV split and the loc stage).
spatial2010.cv.loc<-m1.2010.cv.loc %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.both, na.rm=TRUE))
m1.fit.2010.cv.loc.s <- lm(barpm ~ barpred, data=spatial2010.cv.loc)
res[res$year=="2010", 'm1cv.loc.R2.s'] <- print(summary(m1.fit.2010.cv.loc.s)$r.squared)
res[res$year=="2010", 'm1cv.loc.PE.s'] <- print(rmse(residuals(m1.fit.2010.cv.loc.s)))
#temporal
tempo2010.loc.cv<-left_join(m1.2010.cv.loc,spatial2010.cv.loc)
tempo2010.loc.cv$delpm <-tempo2010.loc.cv$PM10-tempo2010.loc.cv$barpm
tempo2010.loc.cv$delpred <-tempo2010.loc.cv$pred.m1.both-tempo2010.loc.cv$barpred
mod_temporal.loc.cv <- lm(delpm ~ delpred, data=tempo2010.loc.cv)
res[res$year=="2010", 'm1cv.loc.R2.t'] <- print(summary(mod_temporal.loc.cv)$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2010.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
saveRDS(m1.2010.cv.loc,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2010.predCV.rds")
###############
#MOD2
###############
# Stage 2 (2010): predict PM10 on the full AOD grid-day dataset using the
# stage-1 mixed model fitted on the monitor-collocated data.
m2.2010<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2010.rds")
# z-score the predictors, same set and order as stage 1; scale() keeps its
# one-column-matrix output exactly as the per-column assignments did
scale_vars <- c("elev","tden","pden","dist2A1","dist2water","dist2rail",
                "Dist2road","ndvi","MeanPbl","p_ind","p_for","p_farm",
                "p_dos","p_dev","p_os","tempa","WDa","WSa","RHa","Raina","NO2a")
m2.2010[, (paste0(scale_vars, ".s")) := lapply(.SD, scale), .SDcols = scale_vars]
#generate predictions
m2.2010[, pred.m2 := predict(object=m1.fit.2010,newdata=m2.2010,allow.new.levels=TRUE,re.form=NULL)]
describe(m2.2010$pred.m2)
# drop physically impossible values (non-positive or absurdly high PM10)
m2.2010 <- m2.2010[pred.m2 > 0.00000000000001 , ]
m2.2010 <- m2.2010[pred.m2 < 1500 , ]
saveRDS(m2.2010,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2010.pred2.rds")
#-------------->prepare for mod3
# Stage 3 prep (2010): calibrate stage-2 predictions against the regional
# mean PM10 (meanPM10) with a cell-level random-slope model, then model the
# residual spatially per two-month period.
# bimon = two-month period index 1..6 derived from month m (Jan/Feb -> 1, ...)
m2.2010[, bimon := (m + 1) %/% 2]
setkey(m2.2010,day, aodid)
m2.2010<-m2.2010[!is.na(meanPM10)]
#run the lmer part regressing stage 2 pred Vs mean pm
#in israel check per month, also check 30km band and other methods for meanpm
# random intercept + slope of meanPM10 per grid cell (aodid)
m2.smooth = lme(pred.m2 ~ meanPM10,random = list(aodid= ~1 + meanPM10),control=lmeControl(opt = "optim"), data= m2.2010 )
#xm2.smooth = lmer(pred.m2 ~ meanPM10+(1+ meanPM10|aodid), data= m2.2010 )
#correlate to see everything from mod2 and the mpm works
m2.2010[, pred.t31 := predict(m2.smooth)]
m2.2010[, resid := residuals(m2.smooth)]
res[res$year=="2010", 'm3.t31'] <- print(summary(lm(pred.m2~pred.t31,data=m2.2010))$r.squared)
#split the files to the separate bi monthly datsets
T2010_bimon1 <- subset(m2.2010 ,m2.2010$bimon == "1")
T2010_bimon2 <- subset(m2.2010 ,m2.2010$bimon == "2")
T2010_bimon3 <- subset(m2.2010 ,m2.2010$bimon == "3")
T2010_bimon4 <- subset(m2.2010 ,m2.2010$bimon == "4")
T2010_bimon5 <- subset(m2.2010 ,m2.2010$bimon == "5")
T2010_bimon6 <- subset(m2.2010 ,m2.2010$bimon == "6")
#run the separate splines (smooth) for x and y for each bimon
#whats the default band (distance) that the spline goes out and uses
# per-period 2-D thin-plate smooth of the lme residual over ITM coordinates
fit2_1 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2010_bimon1 )
fit2_2 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2010_bimon2 )
fit2_3 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2010_bimon3 )
fit2_4 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2010_bimon4 )
fit2_5 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2010_bimon5 )
fit2_6 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2010_bimon6 )
#get the predicted-fitted
Xpred_1 <- (T2010_bimon1$pred.t31 - fit2_1$fitted)
Xpred_2 <- (T2010_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (T2010_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (T2010_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (T2010_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (T2010_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file
# NOTE: this concatenation relies on m2.2010 being keyed by (day, aodid):
# chronological order keeps the six bimonthly blocks contiguous and in
# order, so c(Xpred_1..6) lines up row-for-row with m2.2010.
m2.2010$pred.t32 <- c( Xpred_1,Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.2010,day, aodid)
#rerun the lme on the predictions including the spatial spline (smooth)
Final_pred_2010 <- lme(pred.t32 ~ meanPM10 ,random = list(aodid= ~1 + meanPM10 ),control=lmeControl(opt = "optim"),data= m2.2010 )
m2.2010[, pred.t33 := predict(Final_pred_2010)]
#check correlations
res[res$year=="2010", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.2010))$r.squared)
#------------------------>>>
#import mod3
# Stage-3 target dataset: all grid-days (including days with no AOD)
data.m3 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2010.rds")
#for PM10
# NOTE(review): column selection by position is fragile — assumes the saved
# mod3 file's column order; verify c(1,2,5,29:32,52,53) matches the schema.
data.m3 <- data.m3[,c(1,2,5,29:32,52,53),with=FALSE]
data.m3[, bimon := (m + 1) %/% 2]
setkey(data.m3,day, aodid)
data.m3<-data.m3[!is.na(meanPM10)]
#generate m.3 initial pred
# mixed-model part of the stage-3 prediction from the calibrated lme
data.m3$pred.m3.mix <- predict(Final_pred_2010,data.m3)
#create unique grid
# one row per grid cell with its (averaged, i.e. constant) coordinates
ugrid <-data.m3 %>%
group_by(aodid) %>%
summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE))
#### PREDICT Gam part
#split back into bimons to include the gam prediction in final prediction
data.m3_bimon1 <- data.m3[bimon == 1, ]
data.m3_bimon2 <- data.m3[bimon == 2, ]
data.m3_bimon3 <- data.m3[bimon == 3, ]
data.m3_bimon4 <- data.m3[bimon == 4, ]
data.m3_bimon5 <- data.m3[bimon == 5, ]
data.m3_bimon6 <- data.m3[bimon == 6, ]
#addin unique grid to each bimon
uniq_gid_bimon1 <- ugrid
uniq_gid_bimon2 <- ugrid
uniq_gid_bimon3 <- ugrid
uniq_gid_bimon4 <- ugrid
uniq_gid_bimon5 <- ugrid
uniq_gid_bimon6 <- ugrid
#get predictions for Bimon residuals
# evaluate each bimonthly residual surface at every grid cell
uniq_gid_bimon1$gpred <- predict.gam(fit2_1,uniq_gid_bimon1)
uniq_gid_bimon2$gpred <- predict.gam(fit2_2,uniq_gid_bimon2)
uniq_gid_bimon3$gpred <- predict.gam(fit2_3,uniq_gid_bimon3)
uniq_gid_bimon4$gpred <- predict.gam(fit2_4,uniq_gid_bimon4)
uniq_gid_bimon5$gpred <- predict.gam(fit2_5,uniq_gid_bimon5)
uniq_gid_bimon6$gpred <- predict.gam(fit2_6,uniq_gid_bimon6)
# Merge the bimonthly GAM surface predictions back onto each bimonthly slice,
# then assemble the final stage-3 prediction.
for (b in 1:6) {
  dd <- get(paste0("data.m3_bimon", b))
  uu <- get(paste0("uniq_gid_bimon", b))
  setkey(uu, aodid)
  setkey(dd, aodid)
  assign(paste0("data.m3_bimon", b),
         merge(dd, uu[, list(aodid, gpred)], all.x = TRUE))
}
# reattach all parts into one table
mod3 <- rbind(data.m3_bimon1, data.m3_bimon2, data.m3_bimon3,
              data.m3_bimon4, data.m3_bimon5, data.m3_bimon6)
# final stage-3 prediction = mixed-model part + spatial GAM part
mod3$pred.m3 <- mod3$pred.m3.mix + mod3$gpred
# keep non-negative predictions only (note: rows with pred.m3 < 0 are
# dropped, not recoded to zero)
mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2010.pred.rds")
# clear everything except what the validation step needs (also removes the
# loop temporaries above)
keep(mod3, res, rmse, sure = TRUE)
gc()
#########################
# m3.R2: validate stage-3 predictions against monitor PM10 (2010)
#########################
# reload stage-1 (monitor-collocated) predictions and join stage-3 on
# grid cell + day
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2010.pred.rds")
mod1[,aodid:= paste(mod1$long_aod,mod1$lat_aod,sep="-")]
mod1<-mod1[,c("aodid","day","PM10","pred.m1","stn"),with=FALSE]
#R2.m3
setkey(mod3,day,aodid)
setkey(mod1,day,aodid)
m1.2010 <- merge(mod1,mod3[, list(day,aodid,pred.m3)], all.x = TRUE)
m3.fit.2010<- summary(lm(PM10~pred.m3,data=m1.2010))
# reuse the one summary object instead of refitting the identical lm
res[res$year=="2010", 'm3.R2'] <- print(m3.fit.2010$r.squared)
#RMSPE
res[res$year=="2010", 'm3.PE'] <- print(rmse(residuals(m3.fit.2010)))
#spatial: station long-term means, observed vs predicted
spatial2010<-m1.2010 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m3, na.rm=TRUE))
m1.fit.2010.spat<- lm(barpm ~ barpred, data=spatial2010)
res[res$year=="2010", 'm3.R2.s'] <- print(summary(m1.fit.2010.spat)$r.squared)
res[res$year=="2010", 'm3.PE.s'] <- print(rmse(residuals(m1.fit.2010.spat)))
#temporal: deviations from station means
tempo2010<-left_join(m1.2010,spatial2010)
tempo2010$delpm <- tempo2010$PM10 - tempo2010$barpm
tempo2010$delpred <- tempo2010$pred.m3 - tempo2010$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2010)
res[res$year=="2010", 'm3.R2.t'] <- print(summary(mod_temporal)$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2010.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
#########################
#import mod2
mod2<- readRDS( "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2010.pred2.rds")
mod2<-mod2[,c("aodid","day","pred.m2"),with=FALSE]
#----------------> store the best available
# Best-available prediction hierarchy per grid-day: stage-1 (monitor-
# collocated) overrides stage-2 (AOD-based), which overrides stage-3
# (gap-filled) — see the three := assignments below.
mod3best <- mod3[, list(aodid, x_aod_ITM, y_aod_ITM, day, pred.m3)]
setkey(mod3best, day, aodid)
setkey(mod2, day, aodid)
mod3best <- merge(mod3best, mod2[,list(aodid, day, pred.m2)], all.x = T)
#reload mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2010.pred.rds")
mod1$aodid<-paste(mod1$long_aod,mod1$lat_aod,sep="-")
mod1<-mod1[,c("aodid","day","PM10","pred.m1"),with=FALSE]
setkey(mod1,day,aodid)
mod3best <- merge(mod3best, mod1, all.x = T)
mod3best[,bestpred := pred.m3]
mod3best[!is.na(pred.m2),bestpred := pred.m2]
mod3best[!is.na(pred.m1),bestpred := pred.m1]
#save
saveRDS(mod3best,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2010.FINAL.rds")
#save for GIS: per-cell long-term summaries of the best prediction
write.csv(mod3best[, list(LTPM = mean(bestpred, na.rm = T),
predvariance = var(bestpred, na.rm = T),
predmin = min(bestpred, na.rm = T),
predmax = max(bestpred, na.rm = T),
npred = sum(!is.na(bestpred)),
npred.m1 = sum(!is.na(pred.m1)),
npred.m2 = sum(!is.na(pred.m2)),
npred.m3 = sum(!is.na(pred.m3)),
x_aod_ITM = x_aod_ITM[1], y_aod_ITM = y_aod_ITM[1]),by=aodid], "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/LTPM3.AQ.2010.csv", row.names = F)
keep(res, sure=TRUE)
# BUG FIX: was a stray no-op `c()`; garbage-collect after clearing the
# workspace, matching the other end-of-year sections.
gc()
###############
#LIBS
###############
# Start of the 2011 pipeline: reload packages and helper scripts (the
# previous section's keep(res, sure=TRUE) wiped the workspace).
library(lme4);library(reshape);library(foreign) ;library(ggplot2);library(plyr);library(data.table);library(reshape2);library(Hmisc);library(mgcv);library(gdata);library(car);library(dplyr);library(ggmap);library(broom);library(splines)
#sourcing
# splitdf() (CV splitter) and rmse() (RMSPE helper)
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
#if needed load res table
#res<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
### import data
m1.2011 <-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2011.rds")
################# clean BAD STN PM10 and check if improved model?
# per-station PM10 ~ AOD regression; stations whose R2 falls below 0.01
# are treated as unreliable and removed below
raWDaf <- ddply(m1.2011, c( "stn"),
function(x) {
mod1 <- lm(PM10 ~ aod, data=x)
data.frame(R2 = round(summary(mod1)$r.squared, 5),
nsamps = length(summary(mod1)$resid))
})
raWDaf
raWDaf<-as.data.table(raWDaf)
bad<- raWDaf[R2< 0.01]
# badid is just the station id as character (single-argument paste)
bad[,badid := paste(stn,sep="-")]
#################BAD STN
m1.2011[,badid := paste(stn,sep="-")]
####Take out bad stations
m1.2011 <- m1.2011[!(m1.2011$badid %in% bad$badid), ]
#scale vars
# z-score the stage-1 predictors; scale() keeps its one-column-matrix
# output exactly as the per-column assignments did
scale_vars <- c("elev","tden","pden","dist2A1","dist2water","dist2rail",
                "Dist2road","ndvi","MeanPbl","p_ind","p_for","p_farm",
                "p_dos","p_dev","p_os","tempa","WDa","WSa","RHa","Raina","NO2a")
m1.2011[, (paste0(scale_vars, ".s")) := lapply(.SD, scale), .SDcols = scale_vars]
# Stage-1 mixed model: PM10 on AOD with temporal, spatial and land-use
# covariates and random AOD slopes by day/region.
m1.formula <- as.formula(PM10~ aod
+tempa.s+WDa.s+WSa.s+Dust+MeanPbl.s #temporal
+elev.s+tden.s+pden.s+Dist2road.s+ndvi.s #spatial
+p_os.s #+p_dev.s+p_dos.s+p_farm.s+p_for.s+p_ind.s #land use
#+aod*Dust #interactions
+(1+aod|day/reg_num)) #+(1|stn) !!! stn screws up mod3
#full fit
m1.fit.2011 <- lmer(m1.formula,data=m1.2011,weights=normwt)
m1.2011$pred.m1 <- predict(m1.fit.2011)
# overall fit of predictions vs observations (computed once, reused)
obs.fit.2011 <- summary(lm(PM10 ~ pred.m1, data = m1.2011))
res[res$year=="2011", 'm1.R2'] <- print(obs.fit.2011$r.squared)
#RMSPE
res[res$year=="2011", 'm1.PE'] <- print(rmse(residuals(m1.fit.2011)))
#spatial: station long-term means, observed vs predicted
spatial2011 <- m1.2011 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2011.spat <- lm(barpm ~ barpred, data=spatial2011)
res[res$year=="2011", 'm1.R2.s'] <- print(summary(m1.fit.2011.spat)$r.squared)
res[res$year=="2011", 'm1.PE.s'] <- print(rmse(residuals(m1.fit.2011.spat)))
#temporal: deviations from station means
tempo2011 <- left_join(m1.2011, spatial2011)
tempo2011$delpm <- tempo2011$PM10 - tempo2011$barpm
tempo2011$delpred <- tempo2011$pred.m1 - tempo2011$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2011)
res[res$year=="2011", 'm1.R2.t'] <- print(summary(mod_temporal)$r.squared)
saveRDS(m1.2011,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2011.pred.rds")
#---------------->>>> CV
# 10-fold "random split" cross-validation of the stage-1 mixed model.
# Each fold independently draws a train/test split via splitdf(), refits
# m1.formula on the training subset, and predicts the held-out test subset.
# The loop calls splitdf() in the same sequence as the original ten
# copy-pasted blocks, so the random draws are identical.
cv_folds <- vector("list", 10)
for (i in seq_len(10)) {
  fold <- splitdf(m1.2011)
  fold_train <- fold$trainset
  fold_test <- fold$testset
  fold_fit <- lmer(m1.formula, data = fold_train, weights = normwt)
  # re.form = NULL keeps the random effects; allow.new.levels handles
  # day/region levels absent from the training fold
  fold_test$pred.m1.cv <- predict(object = fold_fit, newdata = fold_test,
                                  allow.new.levels = TRUE, re.form = NULL)
  fold_test$iter <- paste0("s", i)
  cv_folds[[i]] <- fold_test
}
#BIND 1 dataset
m1.2011.cv <- data.table(do.call(rbind, cv_folds))
# cleanup (remove from WS) objects from CV
rm(cv_folds, fold, fold_train, fold_test, fold_fit, i)
#table updates
# Cross-validated stage-1 performance (2011): overall, spatial and temporal.
m1.fit.2011.cv<-lm(PM10~pred.m1.cv,data=m1.2011.cv)
# compute the summary once and reuse it (was refit five times)
cv.sum <- summary(m1.fit.2011.cv)
res[res$year=="2011", 'm1cv.R2'] <- print(cv.sum$r.squared)
res[res$year=="2011", 'm1cv.I'] <- print(cv.sum$coef[1,1])
res[res$year=="2011", 'm1cv.I.se'] <- print(cv.sum$coef[1,2])
res[res$year=="2011", 'm1cv.S'] <- print(cv.sum$coef[2,1])
res[res$year=="2011", 'm1cv.S.se'] <- print(cv.sum$coef[2,2])
#RMSPE
res[res$year=="2011", 'm1cv.PE'] <- print(rmse(residuals(m1.fit.2011.cv)))
#spatial
# BUG FIX: the spatial CV R2 previously averaged the full-model prediction
# (pred.m1); cross-validated performance must use the held-out prediction
# pred.m1.cv, otherwise the spatial CV R2 is inflated.
spatial2011.cv<-m1.2011.cv %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.cv, na.rm=TRUE))
m1.fit.2011.cv.s <- lm(barpm ~ barpred, data=spatial2011.cv)
res[res$year=="2011", 'm1cv.R2.s'] <- print(summary(m1.fit.2011.cv.s)$r.squared)
res[res$year=="2011", 'm1cv.PE.s'] <- print(rmse(residuals(m1.fit.2011.cv.s)))
#temporal
# deviations from station means, CV predictions vs observations
tempo2011.cv<-left_join(m1.2011.cv,spatial2011.cv)
tempo2011.cv$delpm <-tempo2011.cv$PM10-tempo2011.cv$barpm
tempo2011.cv$delpred <-tempo2011.cv$pred.m1.cv-tempo2011.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempo2011.cv)
res[res$year=="2011", 'm1cv.R2.t'] <- print(summary(mod_temporal.cv)$r.squared)
#-------->>> loc stage
# Local (50 m) land-use correction: model the CV residual with a GAM of
# fine-scale covariates and add its prediction back to the CV prediction.
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
setnames(luf,"tden","loc.tden")
setnames(luf,"elev50","loc.elev")
#add 50m LU to CV data
setkey(m1.2011.cv,stn)
setkey(luf,stn)
m1.2011.cv.loc <- merge(m1.2011.cv, luf, all.x = T)
#m1.2011.cv.loc<-na.omit(m1.2011.cv.loc)
# residual of the CV prediction (observed minus held-out prediction)
m1.2011.cv.loc$res.m1<-m1.2011.cv.loc$PM10-m1.2011.cv.loc$pred.m1.cv
# The GAM model: smooths of local land-use / meteorology on the residual.
# NOTE(review): predict(gam.out) below assumes local.csv covers every station;
# if the merge introduced NAs, gam() drops rows and the fitted vector would be
# shorter than the data — confirm coverage.
gam.out<-gam(res.m1~s(loc.tden)+s(tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m1.2011.cv.loc)
## combined prediction = CV prediction + local GAM correction
m1.2011.cv.loc$pred.m1.loc <-predict(gam.out)
m1.2011.cv.loc$pred.m1.both <- m1.2011.cv.loc$pred.m1.cv + m1.2011.cv.loc$pred.m1.loc
# fit once and reuse for the table entries (was refit five times)
m1.fit.2011.cv.loc <- lm(PM10~pred.m1.both,data=m1.2011.cv.loc)
cv.loc.sum <- summary(m1.fit.2011.cv.loc)
res[res$year=="2011", 'm1cv.loc.R2'] <- print(cv.loc.sum$r.squared)
res[res$year=="2011", 'm1cv.loc.I'] <- print(cv.loc.sum$coef[1,1])
res[res$year=="2011", 'm1cv.loc.I.se'] <- print(cv.loc.sum$coef[1,2])
res[res$year=="2011", 'm1cv.loc.S'] <- print(cv.loc.sum$coef[2,1])
res[res$year=="2011", 'm1cv.loc.S.se'] <- print(cv.loc.sum$coef[2,2])
#RMSPE
# BUG FIX: previously reported residuals of m1.fit.2011.cv (the non-loc CV
# fit), so the loc PE duplicated m1cv.PE; use the loc-corrected fit.
res[res$year=="2011", 'm1cv.loc.PE'] <- print(rmse(residuals(m1.fit.2011.cv.loc)))
#spatial
# BUG FIX: average the combined prediction pred.m1.both (was pred.m1, the
# full-model prediction, which ignores both the CV split and the loc stage).
spatial2011.cv.loc<-m1.2011.cv.loc %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.both, na.rm=TRUE))
m1.fit.2011.cv.loc.s <- lm(barpm ~ barpred, data=spatial2011.cv.loc)
res[res$year=="2011", 'm1cv.loc.R2.s'] <- print(summary(m1.fit.2011.cv.loc.s)$r.squared)
res[res$year=="2011", 'm1cv.loc.PE.s'] <- print(rmse(residuals(m1.fit.2011.cv.loc.s)))
#temporal
tempo2011.loc.cv<-left_join(m1.2011.cv.loc,spatial2011.cv.loc)
tempo2011.loc.cv$delpm <-tempo2011.loc.cv$PM10-tempo2011.loc.cv$barpm
tempo2011.loc.cv$delpred <-tempo2011.loc.cv$pred.m1.both-tempo2011.loc.cv$barpred
mod_temporal.loc.cv <- lm(delpm ~ delpred, data=tempo2011.loc.cv)
res[res$year=="2011", 'm1cv.loc.R2.t'] <- print(summary(mod_temporal.loc.cv)$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2011.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
saveRDS(m1.2011.cv.loc,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2011.predCV.rds")
###############
#MOD2
###############
# Stage 2 (2011): predict PM10 on the full AOD grid-day dataset using the
# stage-1 mixed model fitted on the monitor-collocated data.
m2.2011<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2011.rds")
# z-score the predictors, same set and order as stage 1; scale() keeps its
# one-column-matrix output exactly as the per-column assignments did
scale_vars <- c("elev","tden","pden","dist2A1","dist2water","dist2rail",
                "Dist2road","ndvi","MeanPbl","p_ind","p_for","p_farm",
                "p_dos","p_dev","p_os","tempa","WDa","WSa","RHa","Raina","NO2a")
m2.2011[, (paste0(scale_vars, ".s")) := lapply(.SD, scale), .SDcols = scale_vars]
#generate predictions
m2.2011[, pred.m2 := predict(object=m1.fit.2011,newdata=m2.2011,allow.new.levels=TRUE,re.form=NULL)]
describe(m2.2011$pred.m2)
# drop physically impossible values (non-positive or absurdly high PM10)
m2.2011 <- m2.2011[pred.m2 > 0.00000000000001 , ]
m2.2011 <- m2.2011[pred.m2 < 1500 , ]
saveRDS(m2.2011,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2011.pred2.rds")
#-------------->prepare for mod3
# Stage 3 prep (2011): calibrate stage-2 predictions against the regional
# mean PM10 (meanPM10) with a cell-level random-slope model, then model the
# residual spatially per two-month period.
# bimon = two-month period index 1..6 derived from month m (Jan/Feb -> 1, ...)
m2.2011[, bimon := (m + 1) %/% 2]
setkey(m2.2011,day, aodid)
m2.2011<-m2.2011[!is.na(meanPM10)]
#run the lmer part regressing stage 2 pred Vs mean pm
#in israel check per month, also check 30km band and other methods for meanpm
# random intercept + slope of meanPM10 per grid cell (aodid)
m2.smooth = lme(pred.m2 ~ meanPM10,random = list(aodid= ~1 + meanPM10),control=lmeControl(opt = "optim"), data= m2.2011 )
#xm2.smooth = lmer(pred.m2 ~ meanPM10+(1+ meanPM10|aodid), data= m2.2011 )
#correlate to see everything from mod2 and the mpm works
m2.2011[, pred.t31 := predict(m2.smooth)]
m2.2011[, resid := residuals(m2.smooth)]
res[res$year=="2011", 'm3.t31'] <- print(summary(lm(pred.m2~pred.t31,data=m2.2011))$r.squared)
#split the files to the separate bi monthly datsets
T2011_bimon1 <- subset(m2.2011 ,m2.2011$bimon == "1")
T2011_bimon2 <- subset(m2.2011 ,m2.2011$bimon == "2")
T2011_bimon3 <- subset(m2.2011 ,m2.2011$bimon == "3")
T2011_bimon4 <- subset(m2.2011 ,m2.2011$bimon == "4")
T2011_bimon5 <- subset(m2.2011 ,m2.2011$bimon == "5")
T2011_bimon6 <- subset(m2.2011 ,m2.2011$bimon == "6")
#run the separate splines (smooth) for x and y for each bimon
#whats the default band (distance) that the spline goes out and uses
# per-period 2-D thin-plate smooth of the lme residual over ITM coordinates
fit2_1 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2011_bimon1 )
fit2_2 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2011_bimon2 )
fit2_3 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2011_bimon3 )
fit2_4 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2011_bimon4 )
fit2_5 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2011_bimon5 )
fit2_6 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2011_bimon6 )
#get the predicted-fitted
Xpred_1 <- (T2011_bimon1$pred.t31 - fit2_1$fitted)
Xpred_2 <- (T2011_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (T2011_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (T2011_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (T2011_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (T2011_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file
# NOTE: this concatenation relies on m2.2011 being keyed by (day, aodid):
# chronological order keeps the six bimonthly blocks contiguous and in
# order, so c(Xpred_1..6) lines up row-for-row with m2.2011.
m2.2011$pred.t32 <- c( Xpred_1,Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.2011,day, aodid)
#rerun the lme on the predictions including the spatial spline (smooth)
Final_pred_2011 <- lme(pred.t32 ~ meanPM10 ,random = list(aodid= ~1 + meanPM10 ),control=lmeControl(opt = "optim"),data= m2.2011 )
m2.2011[, pred.t33 := predict(Final_pred_2011)]
#check correlations
res[res$year=="2011", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.2011))$r.squared)
#------------------------>>>
#import mod3
# Stage-3 target dataset: all grid-days (including days with no AOD)
data.m3 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2011.rds")
#for PM10
# NOTE(review): column selection by position is fragile — assumes the saved
# mod3 file's column order; verify c(1,2,5,29:32,52,53) matches the schema.
data.m3 <- data.m3[,c(1,2,5,29:32,52,53),with=FALSE]
data.m3[, bimon := (m + 1) %/% 2]
setkey(data.m3,day, aodid)
data.m3<-data.m3[!is.na(meanPM10)]
#generate m.3 initial pred
# mixed-model part of the stage-3 prediction from the calibrated lme
data.m3$pred.m3.mix <- predict(Final_pred_2011,data.m3)
#create unique grid
# one row per grid cell with its (averaged, i.e. constant) coordinates
ugrid <-data.m3 %>%
group_by(aodid) %>%
summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE))
#### PREDICT Gam part
#split back into bimons to include the gam prediction in final prediction
data.m3_bimon1 <- data.m3[bimon == 1, ]
data.m3_bimon2 <- data.m3[bimon == 2, ]
data.m3_bimon3 <- data.m3[bimon == 3, ]
data.m3_bimon4 <- data.m3[bimon == 4, ]
data.m3_bimon5 <- data.m3[bimon == 5, ]
data.m3_bimon6 <- data.m3[bimon == 6, ]
#addin unique grid to each bimon
uniq_gid_bimon1 <- ugrid
uniq_gid_bimon2 <- ugrid
uniq_gid_bimon3 <- ugrid
uniq_gid_bimon4 <- ugrid
uniq_gid_bimon5 <- ugrid
uniq_gid_bimon6 <- ugrid
#get predictions for Bimon residuals
# evaluate each bimonthly residual surface at every grid cell
uniq_gid_bimon1$gpred <- predict.gam(fit2_1,uniq_gid_bimon1)
uniq_gid_bimon2$gpred <- predict.gam(fit2_2,uniq_gid_bimon2)
uniq_gid_bimon3$gpred <- predict.gam(fit2_3,uniq_gid_bimon3)
uniq_gid_bimon4$gpred <- predict.gam(fit2_4,uniq_gid_bimon4)
uniq_gid_bimon5$gpred <- predict.gam(fit2_5,uniq_gid_bimon5)
uniq_gid_bimon6$gpred <- predict.gam(fit2_6,uniq_gid_bimon6)
# Merge the bimonthly GAM surface predictions back onto each bimonthly slice,
# then assemble the final stage-3 prediction.
for (b in 1:6) {
  dd <- get(paste0("data.m3_bimon", b))
  uu <- get(paste0("uniq_gid_bimon", b))
  setkey(uu, aodid)
  setkey(dd, aodid)
  assign(paste0("data.m3_bimon", b),
         merge(dd, uu[, list(aodid, gpred)], all.x = TRUE))
}
# reattach all parts into one table
mod3 <- rbind(data.m3_bimon1, data.m3_bimon2, data.m3_bimon3,
              data.m3_bimon4, data.m3_bimon5, data.m3_bimon6)
# final stage-3 prediction = mixed-model part + spatial GAM part
mod3$pred.m3 <- mod3$pred.m3.mix + mod3$gpred
# keep non-negative predictions only (note: rows with pred.m3 < 0 are
# dropped, not recoded to zero)
mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2011.pred.rds")
# clear everything except what the validation step needs (also removes the
# loop temporaries above)
keep(mod3, res, rmse, sure = TRUE)
gc()
#########################
#prepare for m3.R2
#########################
# Evaluate stage-3 predictions against observed PM10 at monitor cells:
# overall, spatial (between-station) and temporal (within-station) R2/RMSPE.
#load mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2011.pred.rds")
mod1[,aodid:= paste(mod1$long_aod,mod1$lat_aod,sep="-")]
mod1<-mod1[,c("aodid","day","PM10","pred.m1","stn"),with=FALSE]
#R2.m3
setkey(mod3,day,aodid)
setkey(mod1,day,aodid)
m1.2011 <- merge(mod1,mod3[, list(day,aodid,pred.m3)], all.x = T)
# NB: m3.fit.2011 is a summary.lm object; residuals() below extracts its
# $residuals component, which is why rmse(residuals(...)) still works.
m3.fit.2011<- summary(lm(PM10~pred.m3,data=m1.2011))
res[res$year=="2011", 'm3.R2'] <- print(summary(lm(PM10~pred.m3,data=m1.2011))$r.squared)
#RMSPE
res[res$year=="2011", 'm3.PE'] <- print(rmse(residuals(m3.fit.2011)))
#spatial: regress station-mean observed on station-mean predicted
###to check
spatial2011<-m1.2011 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m3, na.rm=TRUE))
m1.fit.2011.spat<- lm(barpm ~ barpred, data=spatial2011)
res[res$year=="2011", 'm3.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2011))$r.squared)
res[res$year=="2011", 'm3.PE.s'] <- print(rmse(residuals(m1.fit.2011.spat)))
#temporal: deviations from the station means
tempo2011<-left_join(m1.2011,spatial2011)
tempo2011$delpm <-tempo2011$PM10-tempo2011$barpm
tempo2011$delpred <-tempo2011$pred.m3-tempo2011$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2011)
res[res$year=="2011", 'm3.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2011))$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2011.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
#########################
#import mod2
# Build the "best available" daily prediction per cell: prefer the stage-1
# (monitor-calibrated) prediction, then stage-2, then stage-3 as fallback.
mod2<- readRDS( "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2011.pred2.rds")
mod2<-mod2[,c("aodid","day","pred.m2"),with=FALSE]
#----------------> store the best available
mod3best <- mod3[, list(aodid, x_aod_ITM, y_aod_ITM, day, pred.m3)]
setkey(mod3best, day, aodid)
setkey(mod2, day, aodid)
mod3best <- merge(mod3best, mod2[,list(aodid, day, pred.m2)], all.x = T)
#reload mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2011.pred.rds")
mod1$aodid<-paste(mod1$long_aod,mod1$lat_aod,sep="-")
mod1<-mod1[,c("aodid","day","PM10","pred.m1"),with=FALSE]
setkey(mod1,day,aodid)
mod3best <- merge(mod3best, mod1, all.x = T)
# default to stage-3 everywhere, then overwrite where better stages exist
mod3best[,bestpred := pred.m3]
mod3best[!is.na(pred.m2),bestpred := pred.m2]
mod3best[!is.na(pred.m1),bestpred := pred.m1]
#save
saveRDS(mod3best,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2011.FINAL.rds")
#save for GIS: long-term mean and coverage counts per grid cell
write.csv(mod3best[, list(LTPM = mean(bestpred, na.rm = T),
predvariance = var(bestpred, na.rm = T),
predmin = min(bestpred, na.rm = T),
predmax = max(bestpred, na.rm = T),
npred = sum(!is.na(bestpred)),
npred.m1 = sum(!is.na(pred.m1)),
npred.m2 = sum(!is.na(pred.m2)),
npred.m3 = sum(!is.na(pred.m3)),
x_aod_ITM = x_aod_ITM[1], y_aod_ITM = y_aod_ITM[1]),by=aodid], "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/LTPM3.AQ.2011.csv", row.names = F)
keep(res, sure=TRUE)
c()
###############
#LIBS
###############
library(lme4);library(reshape);library(foreign) ;library(ggplot2);library(plyr);library(data.table);library(reshape2);library(Hmisc);library(mgcv);library(gdata);library(car);library(dplyr);library(ggmap);library(broom);library(splines)
#sourcing
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
#if needed load res table
#res<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
### import data
m1.2012 <-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2012.rds")
#rescale
# Z-score every covariate in one data.table sweep instead of 21 copy-pasted
# statements; each <var>.s column is scale(<var>), exactly as the original
# per-column assignments produced (same names, same order).
scale.cols <- c("elev","tden","pden","dist2A1","dist2water","dist2rail",
                "Dist2road","ndvi","MeanPbl","p_ind","p_for","p_farm",
                "p_dos","p_dev","p_os","tempa","WDa","WSa","RHa","Raina","NO2a")
m1.2012[, paste0(scale.cols, ".s") := lapply(.SD, scale), .SDcols = scale.cols]
# Stage-1 mixed-model formula: PM10 on AOD with scaled temporal/spatial/land-use
# covariates and a random intercept + random AOD slope nested by day/region.
m1.formula <- as.formula(PM10~ aod
+tempa.s+WDa.s+WSa.s+Dust+MeanPbl.s #temporal
+elev.s+tden.s+pden.s+Dist2road.s+ndvi.s #spatial
+p_os.s #+p_dev.s+p_dos.s+p_farm.s+p_for.s+p_ind.s #land use
#+aod*Dust #interactions
+(1+aod|day/reg_num)) #+(1|stn) !!! stn screws up mod3
#full fit
# Fit the stage-1 model on all 2012 monitor-collocated data; normwt is an
# observation-weight column in m1.2012.
m1.fit.2012 <- lmer(m1.formula,data=m1.2012,weights=normwt)
m1.2012$pred.m1 <- predict(m1.fit.2012)
res[res$year=="2012", 'm1.R2'] <- print(summary(lm(PM10~pred.m1,data=m1.2012))$r.squared)
#RMSPE
res[res$year=="2012", 'm1.PE'] <- print(rmse(residuals(m1.fit.2012)))
#spatial: regress station-mean observed on station-mean predicted
###to check
spatial2012<-m1.2012 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2012.spat<- lm(barpm ~ barpred, data=spatial2012)
res[res$year=="2012", 'm1.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2012))$r.squared)
res[res$year=="2012", 'm1.PE.s'] <- print(rmse(residuals(m1.fit.2012.spat)))
#temporal: deviations from the station means
tempo2012<-left_join(m1.2012,spatial2012)
tempo2012$delpm <-tempo2012$PM10-tempo2012$barpm
tempo2012$delpred <-tempo2012$pred.m1-tempo2012$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2012)
res[res$year=="2012", 'm1.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2012))$r.squared)
saveRDS(m1.2012,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2012.pred.rds")
#---------------->>>> CV
# Ten random train/test splits of the 2012 data. Each iteration refits the
# mixed model on the training half and predicts the held-out half
# (allow.new.levels/re.form=NULL so new random-effect levels are tolerated).
# The ten test sets are stacked into m1.2012.cv, exactly as the original
# hand-unrolled s1..s10 sections produced (same splitdf() call order, so the
# RNG sequence and therefore the splits are unchanged).
cv_tests <- vector("list", 10)
for (i in seq_len(10)) {
  splits_i <- splitdf(m1.2012)
  test_i <- splits_i$testset
  train_i <- splits_i$trainset
  out_train_i <- lmer(m1.formula,data = train_i,weights=normwt)
  test_i$pred.m1.cv <- predict(object=out_train_i ,newdata=test_i,allow.new.levels=TRUE,re.form=NULL )
  test_i$iter <- paste0("s", i)
  cv_tests[[i]] <- test_i
}
#BIND 1 dataset
m1.2012.cv<- data.table(do.call(rbind, cv_tests))
# cleanup (remove from WS) objects from CV
rm(cv_tests, splits_i, test_i, train_i, out_train_i)
#table updates
# CV calibration regression: observed PM10 on CV prediction. coef[1,*] is the
# intercept (estimate, SE); coef[2,*] is the slope (estimate, SE).
m1.fit.2012.cv<-lm(PM10~pred.m1.cv,data=m1.2012.cv)
res[res$year=="2012", 'm1cv.R2'] <- print(summary(lm(PM10~pred.m1.cv,data=m1.2012.cv))$r.squared)
res[res$year=="2012", 'm1cv.I'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2012.cv))$coef[1,1])
res[res$year=="2012", 'm1cv.I.se'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2012.cv))$coef[1,2])
res[res$year=="2012", 'm1cv.S'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2012.cv))$coef[2,1])
res[res$year=="2012", 'm1cv.S.se'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2012.cv))$coef[2,2])
#RMSPE
res[res$year=="2012", 'm1cv.PE'] <- print(rmse(residuals(m1.fit.2012.cv)))
#spatial CV performance: station-level means of observed vs CV-predicted PM10.
# NOTE(review): the original averaged pred.m1 (full-model predictions) here,
# leaking the full fit into the CV spatial R2; this section evaluates the CV
# predictions, so pred.m1.cv is used — consistent with the temporal block.
spatial2012.cv<-m1.2012.cv %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.cv, na.rm=TRUE))
m1.fit.2012.cv.s <- lm(barpm ~ barpred, data=spatial2012.cv)
res[res$year=="2012", 'm1cv.R2.s'] <- print(summary(m1.fit.2012.cv.s)$r.squared)
res[res$year=="2012", 'm1cv.PE.s'] <- print(rmse(residuals(m1.fit.2012.cv.s)))
#temporal CV performance: deviations from the station means
tempo2012.cv<-left_join(m1.2012.cv,spatial2012.cv)
tempo2012.cv$delpm <-tempo2012.cv$PM10-tempo2012.cv$barpm
tempo2012.cv$delpred <-tempo2012.cv$pred.m1.cv-tempo2012.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempo2012.cv)
res[res$year=="2012", 'm1cv.R2.t'] <- print(summary(mod_temporal.cv)$r.squared)
#-------->>> loc stage
# Local (50 m) land-use stage: model the CV residual with smooths of local
# land-use/terrain terms and add the result back to the CV prediction.
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
# rename so local covariates do not collide with the grid-scale ones
setnames(luf,"tden","loc.tden")
setnames(luf,"elev50","loc.elev")
#add 50m LU to CV data
setkey(m1.2012.cv,stn)
setkey(luf,stn)
m1.2012.cv.loc <- merge(m1.2012.cv, luf, all.x = T)
#m1.2012.cv.loc<-na.omit(m1.2012.cv.loc)
#create residual variable: observed minus CV-predicted PM10
m1.2012.cv.loc$res.m1<-m1.2012.cv.loc$PM10-m1.2012.cv.loc$pred.m1.cv
#The GAM model
# loc_p_os, dA1 and dsea are assumed to come from the local.csv file — TODO
# confirm against its columns (not visible here).
gam.out<-gam(res.m1~s(loc.tden)+s(tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m1.2012.cv.loc)
#plot(bp.model.ps)
#summary(bp.model.ps)
## reg
m1.2012.cv.loc$pred.m1.loc <-predict(gam.out)
# combined prediction = CV prediction + local residual correction
m1.2012.cv.loc$pred.m1.both <- m1.2012.cv.loc$pred.m1.cv + m1.2012.cv.loc$pred.m1.loc
# Local-stage CV performance of the combined prediction (pred.m1.both).
# NOTE(review): the original computed m1cv.loc.PE from the residuals of the
# NON-local CV fit and the spatial means from the full-model pred.m1; both
# were copy-paste slips and now use the combined prediction consistently.
m1.fit.2012.cv.loc <- lm(PM10~pred.m1.both,data=m1.2012.cv.loc)
res[res$year=="2012", 'm1cv.loc.R2'] <- print(summary(m1.fit.2012.cv.loc)$r.squared)
res[res$year=="2012", 'm1cv.loc.I'] <-print(summary(m1.fit.2012.cv.loc)$coef[1,1])
res[res$year=="2012", 'm1cv.loc.I.se'] <-print(summary(m1.fit.2012.cv.loc)$coef[1,2])
res[res$year=="2012", 'm1cv.loc.S'] <-print(summary(m1.fit.2012.cv.loc)$coef[2,1])
res[res$year=="2012", 'm1cv.loc.S.se'] <-print(summary(m1.fit.2012.cv.loc)$coef[2,2])
#RMSPE of the local-stage fit
res[res$year=="2012", 'm1cv.loc.PE'] <- print(rmse(residuals(m1.fit.2012.cv.loc)))
#spatial: station-level means of observed vs combined prediction
spatial2012.cv.loc<-m1.2012.cv.loc %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.both, na.rm=TRUE))
m1.fit.2012.cv.loc.s <- lm(barpm ~ barpred, data=spatial2012.cv.loc)
res[res$year=="2012", 'm1cv.loc.R2.s'] <- print(summary(m1.fit.2012.cv.loc.s)$r.squared)
res[res$year=="2012", 'm1cv.loc.PE.s'] <- print(rmse(residuals(m1.fit.2012.cv.loc.s)))
#temporal: deviations from the station means
tempo2012.loc.cv<-left_join(m1.2012.cv.loc,spatial2012.cv.loc)
tempo2012.loc.cv$delpm <-tempo2012.loc.cv$PM10-tempo2012.loc.cv$barpm
tempo2012.loc.cv$delpred <-tempo2012.loc.cv$pred.m1.both-tempo2012.loc.cv$barpred
mod_temporal.loc.cv <- lm(delpm ~ delpred, data=tempo2012.loc.cv)
res[res$year=="2012", 'm1cv.loc.R2.t'] <- print(summary(mod_temporal.loc.cv)$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2012.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
saveRDS(m1.2012.cv.loc,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2012.predCV.rds")
###############
#MOD2
###############
m2.2012<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2012.rds")
# Z-score every covariate in one data.table sweep (replaces 21 copy-pasted
# statements); each <var>.s column is scale(<var>), identical to the
# original per-column assignments.
scale.cols <- c("elev","tden","pden","dist2A1","dist2water","dist2rail",
                "Dist2road","ndvi","MeanPbl","p_ind","p_for","p_farm",
                "p_dos","p_dev","p_os","tempa","WDa","WSa","RHa","Raina","NO2a")
m2.2012[, paste0(scale.cols, ".s") := lapply(.SD, scale), .SDcols = scale.cols]
#generate predictions
# Stage-2: predict the stage-1 mixed model on the full AOD grid (cells without
# monitors); allow.new.levels handles day/region levels unseen in training.
m2.2012[, pred.m2 := predict(object=m1.fit.2012,newdata=m2.2012,allow.new.levels=TRUE,re.form=NULL)]
describe(m2.2012$pred.m2)
# drop physically implausible predictions (non-positive or above 1500)
m2.2012 <- m2.2012[pred.m2 > 0.00000000000001 , ]
m2.2012 <- m2.2012[pred.m2 < 1500 , ]
saveRDS(m2.2012,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2012.pred2.rds")
#-------------->prepare for mod3
# bimon = bi-monthly period 1..6 derived from the month m
m2.2012[, bimon := (m + 1) %/% 2]
setkey(m2.2012,day, aodid)
m2.2012<-m2.2012[!is.na(meanPM10)]
#run the lme part regressing stage 2 pred Vs mean pm
#in israel check per month, also check 30km band and other methods for meanpm
# Random intercept and meanPM10 slope per grid cell (aodid).
m2.smooth = lme(pred.m2 ~ meanPM10,random = list(aodid= ~1 + meanPM10),control=lmeControl(opt = "optim"), data= m2.2012 )
#xm2.smooth = lmer(pred.m2 ~ meanPM10+(1+ meanPM10|aodid), data= m2.2012 )
#correlate to see everything from mod2 and the mpm works
m2.2012[, pred.t31 := predict(m2.smooth)]
m2.2012[, resid := residuals(m2.smooth)]
res[res$year=="2012", 'm3.t31'] <- print(summary(lm(pred.m2~pred.t31,data=m2.2012))$r.squared)
#split the files to the separate bi monthly datsets
T2012_bimon1 <- subset(m2.2012 ,m2.2012$bimon == "1")
T2012_bimon2 <- subset(m2.2012 ,m2.2012$bimon == "2")
T2012_bimon3 <- subset(m2.2012 ,m2.2012$bimon == "3")
T2012_bimon4 <- subset(m2.2012 ,m2.2012$bimon == "4")
T2012_bimon5 <- subset(m2.2012 ,m2.2012$bimon == "5")
T2012_bimon6 <- subset(m2.2012 ,m2.2012$bimon == "6")
#run the separate splines (smooth) for x and y for each bimon
#whats the default band (distance) that the spline goes out and uses
fit2_1 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2012_bimon1 )
fit2_2 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2012_bimon2 )
fit2_3 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2012_bimon3 )
fit2_4 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2012_bimon4 )
fit2_5 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2012_bimon5 )
fit2_6 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2012_bimon6 )
#get the predicted-fitted: lme prediction minus the spatial residual surface
Xpred_1 <- (T2012_bimon1$pred.t31 - fit2_1$fitted)
Xpred_2 <- (T2012_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (T2012_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (T2012_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (T2012_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (T2012_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file
# NOTE(review): this positional assignment assumes m2.2012's rows are ordered
# by bimon — which holds only because the table is keyed by day, and day order
# implies bimon order within the year. Verify if keys ever change.
m2.2012$pred.t32 <- c( Xpred_1,Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.2012,day, aodid)
#rerun the lme on the predictions including the spatial spline (smooth)
Final_pred_2012 <- lme(pred.t32 ~ meanPM10 ,random = list(aodid= ~1 + meanPM10 ),control=lmeControl(opt = "optim"),data= m2.2012 )
m2.2012[, pred.t33 := predict(Final_pred_2012)]
#check correlations
res[res$year=="2012", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.2012))$r.squared)
#------------------------>>>
#import mod3: the full grid/day table (cells and days without AOD retrievals)
data.m3 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2012.rds")
#for PM10
# NOTE(review): positional column selection — fragile if the upstream RDS
# schema ever changes; verify indices against the saved file's columns.
data.m3 <- data.m3[,c(1,2,5,29:32,52,53),with=FALSE]
data.m3[, bimon := (m + 1) %/% 2]
setkey(data.m3,day, aodid)
data.m3<-data.m3[!is.na(meanPM10)]
#generate m.3 initial pred from the final lme (cell-level meanPM10 model)
data.m3$pred.m3.mix <- predict(Final_pred_2012,data.m3)
#create unique grid: one row per cell with its mean coordinates
ugrid <-data.m3 %>%
group_by(aodid) %>%
summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE))
#### PREDICT Gam part
# For each bi-month: predict that period's spatial residual GAM (fit2_*) on
# the unique grid, left-join the surface (gpred) onto the bi-month's rows by
# cell id, and stack the six parts back into one table. Replaces six
# copy-pasted subset/predict/merge stanzas with a single loop producing the
# same `mod3`.
fit2.list <- list(fit2_1, fit2_2, fit2_3, fit2_4, fit2_5, fit2_6)
bimon.parts <- lapply(seq_along(fit2.list), function(b) {
  grid_b <- as.data.table(ugrid)
  grid_b$gpred <- predict.gam(fit2.list[[b]], grid_b)
  part_b <- data.m3[bimon == b, ]
  setkey(grid_b, aodid)
  setkey(part_b, aodid)
  merge(part_b, grid_b[, list(aodid, gpred)], all.x = T)
})
#reattach all parts
mod3 <- do.call(rbind, bimon.parts)
# create pred.m3: mixed-model component plus spatial-residual component
mod3$pred.m3 <-mod3$pred.m3.mix+mod3$gpred
#hist(mod3$pred.m3)
#describe(mod3$pred.m3)
# drop rows with negative predictions (NB: rows are removed, not recoded to 0)
mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2012.pred.rds")
#clean: free everything except the objects the next section needs
keep(mod3,res,rmse, sure=TRUE)
gc()
#########################
#prepare for m3.R2
#########################
# Evaluate stage-3 predictions against observed PM10 at monitor cells.
#load mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2012.pred.rds")
mod1[,aodid:= paste(mod1$long_aod,mod1$lat_aod,sep="-")]
mod1<-mod1[,c("aodid","day","PM10","pred.m1","stn"),with=FALSE]
#R2.m3
setkey(mod3,day,aodid)
setkey(mod1,day,aodid)
m1.2012 <- merge(mod1,mod3[, list(day,aodid,pred.m3)], all.x = T)
# NB: m3.fit.2012 is a summary.lm object; residuals() below extracts its
# $residuals component, which is why rmse(residuals(...)) still works.
m3.fit.2012<- summary(lm(PM10~pred.m3,data=m1.2012))
res[res$year=="2012", 'm3.R2'] <- print(summary(lm(PM10~pred.m3,data=m1.2012))$r.squared)
#RMSPE
res[res$year=="2012", 'm3.PE'] <- print(rmse(residuals(m3.fit.2012)))
#spatial: regress station-mean observed on station-mean predicted
###to check
spatial2012<-m1.2012 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m3, na.rm=TRUE))
m1.fit.2012.spat<- lm(barpm ~ barpred, data=spatial2012)
res[res$year=="2012", 'm3.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2012))$r.squared)
res[res$year=="2012", 'm3.PE.s'] <- print(rmse(residuals(m1.fit.2012.spat)))
#temporal: deviations from the station means
tempo2012<-left_join(m1.2012,spatial2012)
tempo2012$delpm <-tempo2012$PM10-tempo2012$barpm
tempo2012$delpred <-tempo2012$pred.m3-tempo2012$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2012)
res[res$year=="2012", 'm3.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2012))$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2012.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
#########################
#import mod2
# Build the "best available" daily prediction per cell: prefer the stage-1
# (monitor-calibrated) prediction, then stage-2, then stage-3 as fallback.
mod2<- readRDS( "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2012.pred2.rds")
mod2<-mod2[,c("aodid","day","pred.m2"),with=FALSE]
#----------------> store the best available
mod3best <- mod3[, list(aodid, x_aod_ITM, y_aod_ITM, day, pred.m3)]
setkey(mod3best, day, aodid)
setkey(mod2, day, aodid)
mod3best <- merge(mod3best, mod2[,list(aodid, day, pred.m2)], all.x = T)
#reload mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2012.pred.rds")
mod1$aodid<-paste(mod1$long_aod,mod1$lat_aod,sep="-")
mod1<-mod1[,c("aodid","day","PM10","pred.m1"),with=FALSE]
setkey(mod1,day,aodid)
mod3best <- merge(mod3best, mod1, all.x = T)
# default to stage-3 everywhere, then overwrite where better stages exist
mod3best[,bestpred := pred.m3]
mod3best[!is.na(pred.m2),bestpred := pred.m2]
mod3best[!is.na(pred.m1),bestpred := pred.m1]
#save
saveRDS(mod3best,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2012.FINAL.rds")
#save for GIS: long-term mean and coverage counts per grid cell
write.csv(mod3best[, list(LTPM = mean(bestpred, na.rm = T),
predvariance = var(bestpred, na.rm = T),
predmin = min(bestpred, na.rm = T),
predmax = max(bestpred, na.rm = T),
npred = sum(!is.na(bestpred)),
npred.m1 = sum(!is.na(pred.m1)),
npred.m2 = sum(!is.na(pred.m2)),
npred.m3 = sum(!is.na(pred.m3)),
x_aod_ITM = x_aod_ITM[1], y_aod_ITM = y_aod_ITM[1]),by=aodid], "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/LTPM3.AQ.2012.csv", row.names = F)
keep(res, sure=TRUE)
| /Uni/Projects/code/P046.Israel_MAIAC/CNNEW/PM25_allM.r | no_license | zeltak/org | R | false | false | 207,141 | r | ###############
#LIBS
###############
library(lme4);library(reshape);library(foreign) ;library(ggplot2);library(plyr);library(data.table);library(reshape2);library(Hmisc);library(mgcv);library(gdata);library(car);library(dplyr);library(ggmap);library(broom);library(splines)
#sourcing
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
#if needed load res table
#res<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
#-------------------->> RES TABLE
# One row per year (2003-2012), one column per performance metric filled in
# as each year's section runs.
res <- matrix(nrow=10, ncol=45)
res <- data.frame(res)
colnames(res) <- c(
"year"
,"m1.R2","m1.PE","m1.R2.s","m1.R2.t","m1.PE.s" #full model
,"m1cv.R2","m1cv.I","m1cv.I.se","m1cv.S","m1cv.S.se","m1cv.PE","m1cv.R2.s","m1cv.R2.t","m1cv.PE.s" #mod1 CV
,"m1cv.loc.R2","m1cv.loc.I","m1cv.loc.I.se","m1cv.loc.S","m1cv.loc.S.se","m1cv.loc.PE","m1cv.loc.PE.s","m1cv.loc.R2.s","m1cv.loc.R2.t"#loc m1
,"m2.R2" #mod2
,"m3.t31","m3.t33" #mod3 tests
,"m3.R2","m3.PE","m3.R2.s","m3.R2.t","m3.PE.s"#mod3
# NOTE(review): 13 duplicate "XX" placeholder columns — res[, 'XX'] would be
# ambiguous; harmless as long as they stay unused.
,"XX","XX","XX","XX","XX","XX","XX","XX","XX","XX","XX","XX","XX" )
res$year <- c(2003:2012)
### import data
m1.2003 <-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2003.rds")
# NOTE(review): the next two lines look like the truncated tail of a construct
# (presumably a per-station loop that built raWDaf, a table of per-station
# R2) that is not visible in this chunk — confirm against the full file.
})
raWDaf
raWDaf<-as.data.table(raWDaf)
# stations whose per-station R2 is below 0.01 are flagged as bad
bad<- raWDaf[R2< 0.01]
# paste(stn,sep="-") with a single argument is just as.character(stn)
bad[,badid := paste(stn,sep="-")]
#################BAD STN
m1.2003[,badid := paste(stn,sep="-")]
####Take out bad stations
m1.2003 <- m1.2003[!(m1.2003$badid %in% bad$badid), ]
#scale vars
# Z-score every covariate in one data.table sweep (replaces 21 copy-pasted
# statements); each <var>.s column is scale(<var>), identical to the
# original per-column assignments.
scale.cols <- c("elev","tden","pden","dist2A1","dist2water","dist2rail",
                "Dist2road","ndvi","MeanPbl","p_ind","p_for","p_farm",
                "p_dos","p_dev","p_os","tempa","WDa","WSa","RHa","Raina","NO2a")
m1.2003[, paste0(scale.cols, ".s") := lapply(.SD, scale), .SDcols = scale.cols]
# Stage-1 mixed-model formula: PM10 on AOD with scaled temporal/spatial/land-use
# covariates and a random intercept + random AOD slope nested by day/region.
m1.formula <- as.formula(PM10~ aod
+tempa.s+WDa.s+WSa.s+Dust+MeanPbl.s #temporal
+elev.s+tden.s+pden.s+Dist2road.s+ndvi.s #spatial
+p_os.s #+p_dev.s+p_dos.s+p_farm.s+p_for.s+p_ind.s #land use
#+aod*Dust #interactions
+(1+aod|day/reg_num)) #+(1|stn) !!! stn screws up mod3
#full fit
# Fit the stage-1 model on all 2003 monitor-collocated data; normwt is an
# observation-weight column in m1.2003.
m1.fit.2003 <- lmer(m1.formula,data=m1.2003,weights=normwt)
m1.2003$pred.m1 <- predict(m1.fit.2003)
res[res$year=="2003", 'm1.R2'] <- print(summary(lm(PM10~pred.m1,data=m1.2003))$r.squared)
#RMSPE
res[res$year=="2003", 'm1.PE'] <- print(rmse(residuals(m1.fit.2003)))
#spatial: regress station-mean observed on station-mean predicted
###to check
spatial2003<-m1.2003 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2003.spat<- lm(barpm ~ barpred, data=spatial2003)
res[res$year=="2003", 'm1.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2003))$r.squared)
res[res$year=="2003", 'm1.PE.s'] <- print(rmse(residuals(m1.fit.2003.spat)))
#temporal: deviations from the station means
tempo2003<-left_join(m1.2003,spatial2003)
tempo2003$delpm <-tempo2003$PM10-tempo2003$barpm
tempo2003$delpred <-tempo2003$pred.m1-tempo2003$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2003)
res[res$year=="2003", 'm1.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2003))$r.squared)
saveRDS(m1.2003,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2003.pred.rds")
#---------------->>>> CV
# Ten random train/test splits of the 2003 data. Each iteration refits the
# mixed model on the training half and predicts the held-out half
# (allow.new.levels/re.form=NULL so new random-effect levels are tolerated).
# The ten test sets are stacked into m1.2003.cv, exactly as the original
# hand-unrolled s1..s10 sections produced (same splitdf() call order, so the
# RNG sequence and therefore the splits are unchanged).
cv_tests <- vector("list", 10)
for (i in seq_len(10)) {
  splits_i <- splitdf(m1.2003)
  test_i <- splits_i$testset
  train_i <- splits_i$trainset
  out_train_i <- lmer(m1.formula,data = train_i,weights=normwt)
  test_i$pred.m1.cv <- predict(object=out_train_i ,newdata=test_i,allow.new.levels=TRUE,re.form=NULL )
  test_i$iter <- paste0("s", i)
  cv_tests[[i]] <- test_i
}
#BIND 1 dataset
m1.2003.cv<- data.table(do.call(rbind, cv_tests))
# cleanup (remove from WS) objects from CV
rm(cv_tests, splits_i, test_i, train_i, out_train_i)
#table updates
# CV calibration regression: observed PM10 on CV prediction. coef[1,*] is the
# intercept (estimate, SE); coef[2,*] is the slope (estimate, SE).
m1.fit.2003.cv<-lm(PM10~pred.m1.cv,data=m1.2003.cv)
res[res$year=="2003", 'm1cv.R2'] <- print(summary(lm(PM10~pred.m1.cv,data=m1.2003.cv))$r.squared)
res[res$year=="2003", 'm1cv.I'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2003.cv))$coef[1,1])
res[res$year=="2003", 'm1cv.I.se'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2003.cv))$coef[1,2])
res[res$year=="2003", 'm1cv.S'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2003.cv))$coef[2,1])
res[res$year=="2003", 'm1cv.S.se'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2003.cv))$coef[2,2])
#RMSPE
res[res$year=="2003", 'm1cv.PE'] <- print(rmse(residuals(m1.fit.2003.cv)))
#spatial CV performance: station-level means of observed vs CV-predicted PM10.
# NOTE(review): the original averaged pred.m1 (full-model predictions) here,
# leaking the full fit into the CV spatial R2; this section evaluates the CV
# predictions, so pred.m1.cv is used — consistent with the temporal block.
spatial2003.cv<-m1.2003.cv %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.cv, na.rm=TRUE))
m1.fit.2003.cv.s <- lm(barpm ~ barpred, data=spatial2003.cv)
res[res$year=="2003", 'm1cv.R2.s'] <- print(summary(m1.fit.2003.cv.s)$r.squared)
res[res$year=="2003", 'm1cv.PE.s'] <- print(rmse(residuals(m1.fit.2003.cv.s)))
#temporal CV performance: deviations from the station means
tempo2003.cv<-left_join(m1.2003.cv,spatial2003.cv)
tempo2003.cv$delpm <-tempo2003.cv$PM10-tempo2003.cv$barpm
tempo2003.cv$delpred <-tempo2003.cv$pred.m1.cv-tempo2003.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempo2003.cv)
res[res$year=="2003", 'm1cv.R2.t'] <- print(summary(mod_temporal.cv)$r.squared)
#-------->>> loc stage
# Local (50 m) land-use stage: model the CV residual with smooths of local
# land-use/terrain terms and add the result back to the CV prediction.
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
# rename so local covariates do not collide with the grid-scale ones
setnames(luf,"tden","loc.tden")
setnames(luf,"elev50","loc.elev")
#add 50m LU to CV data
setkey(m1.2003.cv,stn)
setkey(luf,stn)
m1.2003.cv.loc <- merge(m1.2003.cv, luf, all.x = T)
#m1.2003.cv.loc<-na.omit(m1.2003.cv.loc)
#create residual variable: observed minus CV-predicted PM10
m1.2003.cv.loc$res.m1<-m1.2003.cv.loc$PM10-m1.2003.cv.loc$pred.m1.cv
#The GAM model
# loc_p_os, dA1 and dsea are assumed to come from the local.csv file — TODO
# confirm against its columns (not visible here).
gam.out<-gam(res.m1~s(loc.tden)+s(tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m1.2003.cv.loc)
#plot(bp.model.ps)
#summary(bp.model.ps)
## reg
m1.2003.cv.loc$pred.m1.loc <-predict(gam.out)
# combined prediction = CV prediction + local residual correction
m1.2003.cv.loc$pred.m1.both <- m1.2003.cv.loc$pred.m1.cv + m1.2003.cv.loc$pred.m1.loc
# Local-stage CV performance of the combined prediction (pred.m1.both).
# NOTE(review): the original computed m1cv.loc.PE from the residuals of the
# NON-local CV fit and the spatial means from the full-model pred.m1; both
# were copy-paste slips and now use the combined prediction consistently.
m1.fit.2003.cv.loc <- lm(PM10~pred.m1.both,data=m1.2003.cv.loc)
res[res$year=="2003", 'm1cv.loc.R2'] <- print(summary(m1.fit.2003.cv.loc)$r.squared)
res[res$year=="2003", 'm1cv.loc.I'] <-print(summary(m1.fit.2003.cv.loc)$coef[1,1])
res[res$year=="2003", 'm1cv.loc.I.se'] <-print(summary(m1.fit.2003.cv.loc)$coef[1,2])
res[res$year=="2003", 'm1cv.loc.S'] <-print(summary(m1.fit.2003.cv.loc)$coef[2,1])
res[res$year=="2003", 'm1cv.loc.S.se'] <-print(summary(m1.fit.2003.cv.loc)$coef[2,2])
#RMSPE of the local-stage fit
res[res$year=="2003", 'm1cv.loc.PE'] <- print(rmse(residuals(m1.fit.2003.cv.loc)))
#spatial: station-level means of observed vs combined prediction
spatial2003.cv.loc<-m1.2003.cv.loc %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.both, na.rm=TRUE))
m1.fit.2003.cv.loc.s <- lm(barpm ~ barpred, data=spatial2003.cv.loc)
res[res$year=="2003", 'm1cv.loc.R2.s'] <- print(summary(m1.fit.2003.cv.loc.s)$r.squared)
res[res$year=="2003", 'm1cv.loc.PE.s'] <- print(rmse(residuals(m1.fit.2003.cv.loc.s)))
#temporal: deviations from the station means
tempo2003.loc.cv<-left_join(m1.2003.cv.loc,spatial2003.cv.loc)
tempo2003.loc.cv$delpm <-tempo2003.loc.cv$PM10-tempo2003.loc.cv$barpm
tempo2003.loc.cv$delpred <-tempo2003.loc.cv$pred.m1.both-tempo2003.loc.cv$barpred
mod_temporal.loc.cv <- lm(delpm ~ delpred, data=tempo2003.loc.cv)
res[res$year=="2003", 'm1cv.loc.R2.t'] <- print(summary(mod_temporal.loc.cv)$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2003.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
saveRDS(m1.2003.cv.loc,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2003.predCV.rds")
###############
#MOD2: predict PM10 over the full AOD grid (cells without monitors) using the stage-1 mixed model
###############
m2.2003<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2003.rds")
# Standardize predictors. NOTE(review): scale() uses the mod2 data's own mean/sd, which
# differ from the mod1 training data's scaling — confirm this is intended.
m2.2003[,elev.s:= scale(elev)]
m2.2003[,tden.s:= scale(tden)]
m2.2003[,pden.s:= scale(pden)]
m2.2003[,dist2A1.s:= scale(dist2A1)]
m2.2003[,dist2water.s:= scale(dist2water)]
m2.2003[,dist2rail.s:= scale(dist2rail)]
m2.2003[,Dist2road.s:= scale(Dist2road)]
m2.2003[,ndvi.s:= scale(ndvi)]
m2.2003[,MeanPbl.s:= scale(MeanPbl)]
m2.2003[,p_ind.s:= scale(p_ind)]
m2.2003[,p_for.s:= scale(p_for)]
m2.2003[,p_farm.s:= scale(p_farm)]
m2.2003[,p_dos.s:= scale(p_dos)]
m2.2003[,p_dev.s:= scale(p_dev)]
m2.2003[,p_os.s:= scale(p_os)]
m2.2003[,tempa.s:= scale(tempa)]
m2.2003[,WDa.s:= scale(WDa)]
m2.2003[,WSa.s:= scale(WSa)]
m2.2003[,RHa.s:= scale(RHa)]
m2.2003[,Raina.s:= scale(Raina)]
m2.2003[,NO2a.s:= scale(NO2a)]
#generate predictions from the stage-1 fit (m1.fit.2003 must exist in the workspace)
m2.2003[, pred.m2 := predict(object=m1.fit.2003,newdata=m2.2003,allow.new.levels=TRUE,re.form=NULL)]
describe(m2.2003$pred.m2)
#delete impossible values: keep only strictly positive predictions below 1500
m2.2003 <- m2.2003[pred.m2 > 0.00000000000001 , ]
m2.2003 <- m2.2003[pred.m2 < 1500 , ]
saveRDS(m2.2003,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2003.pred2.rds")
#-------------->prepare for mod3: bimon = bi-monthly period index (1..6) from month m
m2.2003[, bimon := (m + 1) %/% 2]
setkey(m2.2003,day, aodid)
m2.2003<-m2.2003[!is.na(meanPM10)]
#run the lme part regressing stage 2 pred Vs mean pm, with random intercept+slope per grid cell
#in israel check per month, also check 30km band and other methods for meanpm
m2.smooth = lme(pred.m2 ~ meanPM10,random = list(aodid= ~1 + meanPM10),control=lmeControl(opt = "optim"), data= m2.2003 )
#xm2.smooth = lmer(pred.m2 ~ meanPM10+(1+ meanPM10|aodid), data= m2.2003 )
#correlate to see everything from mod2 and the mpm works
m2.2003[, pred.t31 := predict(m2.smooth)]
m2.2003[, resid := residuals(m2.smooth)]
res[res$year=="2003", 'm3.t31'] <- print(summary(lm(pred.m2~pred.t31,data=m2.2003))$r.squared)
#split the files to the separate bi monthly datasets
T2003_bimon1 <- subset(m2.2003 ,m2.2003$bimon == "1")
T2003_bimon2 <- subset(m2.2003 ,m2.2003$bimon == "2")
T2003_bimon3 <- subset(m2.2003 ,m2.2003$bimon == "3")
T2003_bimon4 <- subset(m2.2003 ,m2.2003$bimon == "4")
T2003_bimon5 <- subset(m2.2003 ,m2.2003$bimon == "5")
T2003_bimon6 <- subset(m2.2003 ,m2.2003$bimon == "6")
#run a separate spatial spline (smooth of x,y) on the lme residuals for each bimon
#whats the default band (distance) that the spline goes out and uses
fit2_1 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2003_bimon1 )
fit2_2 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2003_bimon2 )
fit2_3 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2003_bimon3 )
fit2_4 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2003_bimon4 )
fit2_5 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2003_bimon5 )
fit2_6 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2003_bimon6 )
#subtract the fitted spatial residual from the lme prediction, per bimon
Xpred_1 <- (T2003_bimon1$pred.t31 - fit2_1$fitted)
Xpred_2 <- (T2003_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (T2003_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (T2003_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (T2003_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (T2003_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file; relies on the bimon subsets preserving the keyed (day, aodid) order
m2.2003$pred.t32 <- c( Xpred_1,Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.2003,day, aodid)
#rerun the lme on the predictions including the spatial spline (smooth)
Final_pred_2003 <- lme(pred.t32 ~ meanPM10 ,random = list(aodid= ~1 + meanPM10 ),control=lmeControl(opt = "optim"),data= m2.2003 )
m2.2003[, pred.t33 := predict(Final_pred_2003)]
#check correlations
res[res$year=="2003", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.2003))$r.squared)
#------------------------>>>
#import mod3: the full space-time grid to be filled with stage-3 predictions
data.m3 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2003.rds")
#for PM10: keep only the columns needed (selection by position — fragile if the RDS schema changes)
data.m3 <- data.m3[,c(1,2,5,29:32,52,53),with=FALSE]
data.m3[, bimon := (m + 1) %/% 2]
setkey(data.m3,day, aodid)
data.m3<-data.m3[!is.na(meanPM10)]
#generate m.3 initial pred from the final lme (uses each cell's random effects where available)
data.m3$pred.m3.mix <- predict(Final_pred_2003,data.m3)
#create unique grid: one row per AOD cell with its mean coordinates
ugrid <-data.m3 %>%
group_by(aodid) %>%
summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE))
#### PREDICT Gam part
#split back into bimons to include the gam prediction in final prediction
data.m3_bimon1 <- data.m3[bimon == 1, ]
data.m3_bimon2 <- data.m3[bimon == 2, ]
data.m3_bimon3 <- data.m3[bimon == 3, ]
data.m3_bimon4 <- data.m3[bimon == 4, ]
data.m3_bimon5 <- data.m3[bimon == 5, ]
data.m3_bimon6 <- data.m3[bimon == 6, ]
#addin unique grid to each bimon
uniq_gid_bimon1 <- ugrid
uniq_gid_bimon2 <- ugrid
uniq_gid_bimon3 <- ugrid
uniq_gid_bimon4 <- ugrid
uniq_gid_bimon5 <- ugrid
uniq_gid_bimon6 <- ugrid
#get predictions for Bimon residuals: evaluate each bimon's spatial spline at every grid cell
uniq_gid_bimon1$gpred <- predict.gam(fit2_1,uniq_gid_bimon1)
uniq_gid_bimon2$gpred <- predict.gam(fit2_2,uniq_gid_bimon2)
uniq_gid_bimon3$gpred <- predict.gam(fit2_3,uniq_gid_bimon3)
uniq_gid_bimon4$gpred <- predict.gam(fit2_4,uniq_gid_bimon4)
uniq_gid_bimon5$gpred <- predict.gam(fit2_5,uniq_gid_bimon5)
uniq_gid_bimon6$gpred <- predict.gam(fit2_6,uniq_gid_bimon6)
#merge things back together: attach gpred to each bimon's rows by aodid
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> merges
setkey(uniq_gid_bimon1,aodid)
setkey(data.m3_bimon1,aodid)
data.m3_bimon1 <- merge(data.m3_bimon1, uniq_gid_bimon1[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon2,aodid)
setkey(data.m3_bimon2,aodid)
data.m3_bimon2 <- merge(data.m3_bimon2, uniq_gid_bimon2[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon3,aodid)
setkey(data.m3_bimon3,aodid)
data.m3_bimon3 <- merge(data.m3_bimon3, uniq_gid_bimon3[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon4,aodid)
setkey(data.m3_bimon4,aodid)
data.m3_bimon4 <- merge(data.m3_bimon4, uniq_gid_bimon4[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon5,aodid)
setkey(data.m3_bimon5,aodid)
data.m3_bimon5 <- merge(data.m3_bimon5, uniq_gid_bimon5[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon6,aodid)
setkey(data.m3_bimon6,aodid)
data.m3_bimon6 <- merge(data.m3_bimon6, uniq_gid_bimon6[,list(aodid,gpred)], all.x = T)
#reattach all parts
mod3 <- rbind(data.m3_bimon1,data.m3_bimon2,data.m3_bimon3,data.m3_bimon4,data.m3_bimon5,data.m3_bimon6)
# create pred.m3 = lme prediction + spatial spline prediction
mod3$pred.m3 <-mod3$pred.m3.mix+mod3$gpred
#hist(mod3$pred.m3)
#describe(mod3$pred.m3)
# NOTE(review): comment said "recode negative into zero" but the code DROPS negative rows.
mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2003.pred.rds")
#clean: gdata::keep drops everything from the workspace except the listed objects
keep(mod3,res,rmse, sure=TRUE)
gc()
#########################
#prepare for m3.R2: evaluate stage-3 predictions against monitor observations
#########################
#load mod1 (monitor-day observations with stage-1 predictions)
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2003.pred.rds")
mod1[,aodid:= paste(mod1$long_aod,mod1$lat_aod,sep="-")]
mod1<-mod1[,c("aodid","day","PM10","pred.m1","stn"),with=FALSE]
#R2.m3: join the stage-3 grid predictions onto monitor-days by (day, aodid)
setkey(mod3,day,aodid)
setkey(mod1,day,aodid)
m1.2003 <- merge(mod1,mod3[, list(day,aodid,pred.m3)], all.x = T)
# m3.fit.2003 is a summary.lm object; residuals() below pulls its stored residuals
m3.fit.2003<- summary(lm(PM10~pred.m3,data=m1.2003))
res[res$year=="2003", 'm3.R2'] <- print(summary(lm(PM10~pred.m3,data=m1.2003))$r.squared)
#RMSPE
res[res$year=="2003", 'm3.PE'] <- print(rmse(residuals(m3.fit.2003)))
#spatial: station-level long-term means
###to check
spatial2003<-m1.2003 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m3, na.rm=TRUE))
m1.fit.2003.spat<- lm(barpm ~ barpred, data=spatial2003)
res[res$year=="2003", 'm3.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2003))$r.squared)
res[res$year=="2003", 'm3.PE.s'] <- print(rmse(residuals(m1.fit.2003.spat)))
#temporal: daily deviations from station means
tempo2003<-left_join(m1.2003,spatial2003)
tempo2003$delpm <-tempo2003$PM10-tempo2003$barpm
tempo2003$delpred <-tempo2003$pred.m3-tempo2003$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2003)
res[res$year=="2003", 'm3.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2003))$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2003.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
#########################
#import mod2
mod2<- readRDS( "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2003.pred2.rds")
mod2<-mod2[,c("aodid","day","pred.m2"),with=FALSE]
#----------------> store the best available prediction per cell-day:
# base = pred.m3 (gap-filled), overwritten by pred.m2 (AOD available), overwritten by pred.m1 (monitor-calibrated)
mod3best <- mod3[, list(aodid, x_aod_ITM, y_aod_ITM, day, pred.m3)]
setkey(mod3best, day, aodid)
setkey(mod2, day, aodid)
mod3best <- merge(mod3best, mod2[,list(aodid, day, pred.m2)], all.x = T)
#reload mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2003.pred.rds")
mod1$aodid<-paste(mod1$long_aod,mod1$lat_aod,sep="-")
mod1<-mod1[,c("aodid","day","PM10","pred.m1"),with=FALSE]
setkey(mod1,day,aodid)
mod3best <- merge(mod3best, mod1, all.x = T)
mod3best[,bestpred := pred.m3]
mod3best[!is.na(pred.m2),bestpred := pred.m2]
mod3best[!is.na(pred.m1),bestpred := pred.m1]
#save
saveRDS(mod3best,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2003.FINAL.rds")
#save for GIS: per-cell annual summary (long-term mean, spread, and coverage counts per stage)
write.csv(mod3best[, list(LTPM = mean(bestpred, na.rm = T),
predvariance = var(bestpred, na.rm = T),
predmin = min(bestpred, na.rm = T),
predmax = max(bestpred, na.rm = T),
npred = sum(!is.na(bestpred)),
npred.m1 = sum(!is.na(pred.m1)),
npred.m2 = sum(!is.na(pred.m2)),
npred.m3 = sum(!is.na(pred.m3)),
x_aod_ITM = x_aod_ITM[1], y_aod_ITM = y_aod_ITM[1]),by=aodid], "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/LTPM3.AQ.2003.csv", row.names = F)
# End of the 2003 section: drop everything except the results table, then reload
# libraries/snippets for the 2004 run (mirrors the block at the end of the 2004 section).
keep(res, sure=TRUE)
# Fix: was a bare `c()` — a no-op typo for gc(); free memory after dropping the large objects.
gc()
###############
#LIBS
###############
# Fix: `LIBS` was a bare symbol, which raises "object 'LIBS' not found" and halts the
# script; it is a comment header everywhere else in this file.
library(lme4);library(reshape);library(foreign) ;library(ggplot2);library(plyr);library(data.table);library(reshape2);library(Hmisc);library(mgcv);library(gdata);library(car);library(dplyr);library(ggmap);library(broom);library(splines)
#sourcing helper snippets: splitdf() for CV splits, rmse() for prediction error
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
#if needed load res table
res<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
### import data
m1.2004 <-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2004.rds")
################# clean BAD STN PM10 and check if improved model?
# Per-station PM10~AOD regression; stations whose R2 < 0.01 are flagged as "bad" and removed.
raWDaf <- ddply(m1.2004, c( "stn"),
function(x) {
mod1 <- lm(PM10 ~ aod, data=x)
data.frame(R2 = round(summary(mod1)$r.squared, 5),
nsamps = length(summary(mod1)$resid))
})
raWDaf
raWDaf<-as.data.table(raWDaf)
bad<- raWDaf[R2< 0.01]
# NOTE(review): paste(stn, sep="-") with a single argument is just as.character(stn).
bad[,badid := paste(stn,sep="-")]
#################BAD STN
m1.2004[,badid := paste(stn,sep="-")]
####Take out bad stations
m1.2004 <- m1.2004[!(m1.2004$badid %in% bad$badid), ]
# Standardize (z-score) every continuous predictor; each gets a ".s"-suffixed scaled copy.
vars.to.scale <- c("elev","tden","pden","dist2A1","dist2water","dist2rail",
                   "Dist2road","ndvi","MeanPbl","p_ind","p_for","p_farm",
                   "p_dos","p_dev","p_os","tempa","WDa","WSa","RHa","Raina","NO2a")
for (v in vars.to.scale) {
  m1.2004[, paste0(v, ".s") := scale(get(v))]
}
# Stage-1 mixed model: PM10 on AOD with scaled temporal/spatial/land-use covariates and
# random intercept+AOD slope nested within day/region.
m1.formula <- as.formula(PM10~ aod
+tempa.s+WDa.s+WSa.s+Dust+MeanPbl.s #temporal
+elev.s+tden.s+pden.s+Dist2road.s+ndvi.s #spatial
+p_os.s #+p_dev.s+p_dos.s+p_farm.s+p_for.s+p_ind.s #land use
#+aod*Dust #interactions
+(1+aod|day/reg_num)) #+(1|stn) !!! stn screws up mod3
#full fit (weights column normwt comes from the data)
m1.fit.2004 <- lmer(m1.formula,data=m1.2004,weights=normwt)
m1.2004$pred.m1 <- predict(m1.fit.2004)
res[res$year=="2004", 'm1.R2'] <- print(summary(lm(PM10~pred.m1,data=m1.2004))$r.squared)
#RMSPE
res[res$year=="2004", 'm1.PE'] <- print(rmse(residuals(m1.fit.2004)))
#spatial: station-level means of observed vs predicted
###to check
spatial2004<-m1.2004 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2004.spat<- lm(barpm ~ barpred, data=spatial2004)
res[res$year=="2004", 'm1.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2004))$r.squared)
res[res$year=="2004", 'm1.PE.s'] <- print(rmse(residuals(m1.fit.2004.spat)))
#temporal: daily deviations from station means
tempo2004<-left_join(m1.2004,spatial2004)
tempo2004$delpm <-tempo2004$PM10-tempo2004$barpm
tempo2004$delpred <-tempo2004$pred.m1-tempo2004$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2004)
res[res$year=="2004", 'm1.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2004))$r.squared)
saveRDS(m1.2004,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2004.pred.rds")
#---------------->>>> CV: ten random train/test splits; refit the stage-1 model on each
# training set and predict the held-out set (out-of-sample). The splitdf() calls are made
# in the same order as before, so the random splits are identical.
cv.parts <- vector("list", 10)
for (i in seq_len(10)) {
  sp <- splitdf(m1.2004)
  cv.test <- sp$testset
  cv.fit <- lmer(m1.formula, data = sp$trainset, weights = normwt)
  cv.test$pred.m1.cv <- predict(object = cv.fit, newdata = cv.test,
                                allow.new.levels = TRUE, re.form = NULL)
  cv.test$iter <- paste0("s", i)
  cv.parts[[i]] <- cv.test
}
#BIND 1 dataset: stack all ten held-out sets
m1.2004.cv <- data.table(do.call(rbind, cv.parts))
# cleanup (remove from WS) objects from CV
rm(list = ls(pattern = "train_|test_"))
#table updates: overall CV R2, intercept/slope (with SEs) of observed~predicted
m1.fit.2004.cv<-lm(PM10~pred.m1.cv,data=m1.2004.cv)
res[res$year=="2004", 'm1cv.R2'] <- print(summary(lm(PM10~pred.m1.cv,data=m1.2004.cv))$r.squared)
res[res$year=="2004", 'm1cv.I'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2004.cv))$coef[1,1])
res[res$year=="2004", 'm1cv.I.se'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2004.cv))$coef[1,2])
res[res$year=="2004", 'm1cv.S'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2004.cv))$coef[2,1])
res[res$year=="2004", 'm1cv.S.se'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2004.cv))$coef[2,2])
#RMSPE
res[res$year=="2004", 'm1cv.PE'] <- print(rmse(residuals(m1.fit.2004.cv)))
#spatial
spatial2004.cv<-m1.2004.cv %>%
group_by(stn) %>%
# NOTE(review): barpred averages pred.m1 (full-model, in-sample) rather than pred.m1.cv — confirm intended.
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2004.cv.s <- lm(barpm ~ barpred, data=spatial2004.cv)
res[res$year=="2004", 'm1cv.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2004.cv))$r.squared)
res[res$year=="2004", 'm1cv.PE.s'] <- print(rmse(residuals(m1.fit.2004.cv.s)))
#temporal
tempo2004.cv<-left_join(m1.2004.cv,spatial2004.cv)
tempo2004.cv$delpm <-tempo2004.cv$PM10-tempo2004.cv$barpm
tempo2004.cv$delpred <-tempo2004.cv$pred.m1.cv-tempo2004.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempo2004.cv)
res[res$year=="2004", 'm1cv.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2004.cv))$r.squared)
#-------->>> loc stage: add 50m local land-use covariates and model the CV residual with a GAM
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
setnames(luf,"tden","loc.tden")
setnames(luf,"elev50","loc.elev")
#add 50m LU to CV data (note: setkey re-sorts m1.2004.cv by stn)
setkey(m1.2004.cv,stn)
setkey(luf,stn)
m1.2004.cv.loc <- merge(m1.2004.cv, luf, all.x = T)
#m1.2004.cv.loc<-na.omit(m1.2004.cv.loc)
#residual of the CV prediction; this is the response of the local-stage GAM
m1.2004.cv.loc$res.m1<-m1.2004.cv.loc$PM10-m1.2004.cv.loc$pred.m1.cv
#The GAM model: smooth terms of local traffic density, PBL/wind interactions and local land use
gam.out<-gam(res.m1~s(loc.tden)+s(tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m1.2004.cv.loc)
#plot(bp.model.ps)
#summary(bp.model.ps)
## reg: combined prediction = CV prediction + GAM residual prediction
m1.2004.cv.loc$pred.m1.loc <-predict(gam.out)
m1.2004.cv.loc$pred.m1.both <- m1.2004.cv.loc$pred.m1.cv + m1.2004.cv.loc$pred.m1.loc
res[res$year=="2004", 'm1cv.loc.R2'] <- print(summary(lm(PM10~pred.m1.both,data=m1.2004.cv.loc))$r.squared)
res[res$year=="2004", 'm1cv.loc.I'] <-print(summary(lm(PM10~pred.m1.both,data=m1.2004.cv.loc))$coef[1,1])
res[res$year=="2004", 'm1cv.loc.I.se'] <-print(summary(lm(PM10~pred.m1.both,data=m1.2004.cv.loc))$coef[1,2])
res[res$year=="2004", 'm1cv.loc.S'] <-print(summary(lm(PM10~pred.m1.both,data=m1.2004.cv.loc))$coef[2,1])
res[res$year=="2004", 'm1cv.loc.S.se'] <-print(summary(lm(PM10~pred.m1.both,data=m1.2004.cv.loc))$coef[2,2])
#RMSPE
# NOTE(review): reuses residuals of m1.fit.2004.cv (the NON-loc CV fit), so m1cv.loc.PE
# equals m1cv.PE; a fit of PM10~pred.m1.both seems intended here — confirm.
res[res$year=="2004", 'm1cv.loc.PE'] <- print(rmse(residuals(m1.fit.2004.cv)))
#spatial
spatial2004.cv.loc<-m1.2004.cv.loc %>%
group_by(stn) %>%
# NOTE(review): barpred averages pred.m1, not pred.m1.both — confirm intended.
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2004.cv.loc.s <- lm(barpm ~ barpred, data=spatial2004.cv.loc)
res[res$year=="2004", 'm1cv.loc.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2004.cv.loc))$r.squared)
res[res$year=="2004", 'm1cv.loc.PE.s'] <- print(rmse(residuals(m1.fit.2004.cv.loc.s)))
#temporal
tempo2004.loc.cv<-left_join(m1.2004.cv.loc,spatial2004.cv.loc)
tempo2004.loc.cv$delpm <-tempo2004.loc.cv$PM10-tempo2004.loc.cv$barpm
tempo2004.loc.cv$delpred <-tempo2004.loc.cv$pred.m1.both-tempo2004.loc.cv$barpred
mod_temporal.loc.cv <- lm(delpm ~ delpred, data=tempo2004.loc.cv)
res[res$year=="2004", 'm1cv.loc.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2004.loc.cv))$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2004.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
saveRDS(m1.2004.cv.loc,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2004.predCV.rds")
###############
#MOD2: predict PM10 over the full AOD grid (cells without monitors) using the stage-1 mixed model
###############
m2.2004<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2004.rds")
# Standardize predictors. NOTE(review): scale() uses the mod2 data's own mean/sd, which
# differ from the mod1 training data's scaling — confirm this is intended.
m2.2004[,elev.s:= scale(elev)]
m2.2004[,tden.s:= scale(tden)]
m2.2004[,pden.s:= scale(pden)]
m2.2004[,dist2A1.s:= scale(dist2A1)]
m2.2004[,dist2water.s:= scale(dist2water)]
m2.2004[,dist2rail.s:= scale(dist2rail)]
m2.2004[,Dist2road.s:= scale(Dist2road)]
m2.2004[,ndvi.s:= scale(ndvi)]
m2.2004[,MeanPbl.s:= scale(MeanPbl)]
m2.2004[,p_ind.s:= scale(p_ind)]
m2.2004[,p_for.s:= scale(p_for)]
m2.2004[,p_farm.s:= scale(p_farm)]
m2.2004[,p_dos.s:= scale(p_dos)]
m2.2004[,p_dev.s:= scale(p_dev)]
m2.2004[,p_os.s:= scale(p_os)]
m2.2004[,tempa.s:= scale(tempa)]
m2.2004[,WDa.s:= scale(WDa)]
m2.2004[,WSa.s:= scale(WSa)]
m2.2004[,RHa.s:= scale(RHa)]
m2.2004[,Raina.s:= scale(Raina)]
m2.2004[,NO2a.s:= scale(NO2a)]
#generate predictions from the stage-1 fit (m1.fit.2004 must exist in the workspace)
m2.2004[, pred.m2 := predict(object=m1.fit.2004,newdata=m2.2004,allow.new.levels=TRUE,re.form=NULL)]
describe(m2.2004$pred.m2)
#delete impossible values: keep only strictly positive predictions below 1500
m2.2004 <- m2.2004[pred.m2 > 0.00000000000001 , ]
m2.2004 <- m2.2004[pred.m2 < 1500 , ]
saveRDS(m2.2004,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2004.pred2.rds")
#-------------->prepare for mod3: bimon = bi-monthly period index (1..6) from month m
m2.2004[, bimon := (m + 1) %/% 2]
setkey(m2.2004,day, aodid)
m2.2004<-m2.2004[!is.na(meanPM10)]
#run the lme part regressing stage 2 pred Vs mean pm, with random intercept+slope per grid cell
#in israel check per month, also check 30km band and other methods for meanpm
m2.smooth = lme(pred.m2 ~ meanPM10,random = list(aodid= ~1 + meanPM10),control=lmeControl(opt = "optim"), data= m2.2004 )
#xm2.smooth = lmer(pred.m2 ~ meanPM10+(1+ meanPM10|aodid), data= m2.2004 )
#correlate to see everything from mod2 and the mpm works
m2.2004[, pred.t31 := predict(m2.smooth)]
m2.2004[, resid := residuals(m2.smooth)]
res[res$year=="2004", 'm3.t31'] <- print(summary(lm(pred.m2~pred.t31,data=m2.2004))$r.squared)
#split the files to the separate bi monthly datasets
T2004_bimon1 <- subset(m2.2004 ,m2.2004$bimon == "1")
T2004_bimon2 <- subset(m2.2004 ,m2.2004$bimon == "2")
T2004_bimon3 <- subset(m2.2004 ,m2.2004$bimon == "3")
T2004_bimon4 <- subset(m2.2004 ,m2.2004$bimon == "4")
T2004_bimon5 <- subset(m2.2004 ,m2.2004$bimon == "5")
T2004_bimon6 <- subset(m2.2004 ,m2.2004$bimon == "6")
#run a separate spatial spline (smooth of x,y) on the lme residuals for each bimon
#whats the default band (distance) that the spline goes out and uses
fit2_1 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2004_bimon1 )
fit2_2 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2004_bimon2 )
fit2_3 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2004_bimon3 )
fit2_4 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2004_bimon4 )
fit2_5 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2004_bimon5 )
fit2_6 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2004_bimon6 )
#subtract the fitted spatial residual from the lme prediction, per bimon
Xpred_1 <- (T2004_bimon1$pred.t31 - fit2_1$fitted)
Xpred_2 <- (T2004_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (T2004_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (T2004_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (T2004_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (T2004_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file; relies on the bimon subsets preserving the keyed (day, aodid) order
m2.2004$pred.t32 <- c( Xpred_1,Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.2004,day, aodid)
#rerun the lme on the predictions including the spatial spline (smooth)
Final_pred_2004 <- lme(pred.t32 ~ meanPM10 ,random = list(aodid= ~1 + meanPM10 ),control=lmeControl(opt = "optim"),data= m2.2004 )
m2.2004[, pred.t33 := predict(Final_pred_2004)]
#check correlations
res[res$year=="2004", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.2004))$r.squared)
#------------------------>>>
#import mod3: the full space-time grid to be filled with stage-3 predictions
data.m3 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2004.rds")
#for PM10: keep only the columns needed (selection by position — fragile if the RDS schema changes)
data.m3 <- data.m3[,c(1,2,5,29:32,52,53),with=FALSE]
data.m3[, bimon := (m + 1) %/% 2]
setkey(data.m3,day, aodid)
data.m3<-data.m3[!is.na(meanPM10)]
#generate m.3 initial pred from the final lme (uses each cell's random effects where available)
data.m3$pred.m3.mix <- predict(Final_pred_2004,data.m3)
#create unique grid: one row per AOD cell with its mean coordinates
ugrid <-data.m3 %>%
group_by(aodid) %>%
summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE))
#### PREDICT Gam part
#split back into bimons to include the gam prediction in final prediction
data.m3_bimon1 <- data.m3[bimon == 1, ]
data.m3_bimon2 <- data.m3[bimon == 2, ]
data.m3_bimon3 <- data.m3[bimon == 3, ]
data.m3_bimon4 <- data.m3[bimon == 4, ]
data.m3_bimon5 <- data.m3[bimon == 5, ]
data.m3_bimon6 <- data.m3[bimon == 6, ]
#addin unique grid to each bimon
uniq_gid_bimon1 <- ugrid
uniq_gid_bimon2 <- ugrid
uniq_gid_bimon3 <- ugrid
uniq_gid_bimon4 <- ugrid
uniq_gid_bimon5 <- ugrid
uniq_gid_bimon6 <- ugrid
#get predictions for Bimon residuals: evaluate each bimon's spatial spline at every grid cell
uniq_gid_bimon1$gpred <- predict.gam(fit2_1,uniq_gid_bimon1)
uniq_gid_bimon2$gpred <- predict.gam(fit2_2,uniq_gid_bimon2)
uniq_gid_bimon3$gpred <- predict.gam(fit2_3,uniq_gid_bimon3)
uniq_gid_bimon4$gpred <- predict.gam(fit2_4,uniq_gid_bimon4)
uniq_gid_bimon5$gpred <- predict.gam(fit2_5,uniq_gid_bimon5)
uniq_gid_bimon6$gpred <- predict.gam(fit2_6,uniq_gid_bimon6)
#merge things back together: attach gpred to each bimon's rows by aodid
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> merges
setkey(uniq_gid_bimon1,aodid)
setkey(data.m3_bimon1,aodid)
data.m3_bimon1 <- merge(data.m3_bimon1, uniq_gid_bimon1[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon2,aodid)
setkey(data.m3_bimon2,aodid)
data.m3_bimon2 <- merge(data.m3_bimon2, uniq_gid_bimon2[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon3,aodid)
setkey(data.m3_bimon3,aodid)
data.m3_bimon3 <- merge(data.m3_bimon3, uniq_gid_bimon3[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon4,aodid)
setkey(data.m3_bimon4,aodid)
data.m3_bimon4 <- merge(data.m3_bimon4, uniq_gid_bimon4[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon5,aodid)
setkey(data.m3_bimon5,aodid)
data.m3_bimon5 <- merge(data.m3_bimon5, uniq_gid_bimon5[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon6,aodid)
setkey(data.m3_bimon6,aodid)
data.m3_bimon6 <- merge(data.m3_bimon6, uniq_gid_bimon6[,list(aodid,gpred)], all.x = T)
#reattach all parts
mod3 <- rbind(data.m3_bimon1,data.m3_bimon2,data.m3_bimon3,data.m3_bimon4,data.m3_bimon5,data.m3_bimon6)
# create pred.m3 = lme prediction + spatial spline prediction
mod3$pred.m3 <-mod3$pred.m3.mix+mod3$gpred
#hist(mod3$pred.m3)
#describe(mod3$pred.m3)
# NOTE(review): comment said "recode negative into zero" but the code DROPS negative rows.
mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2004.pred.rds")
#clean: gdata::keep drops everything from the workspace except the listed objects
keep(mod3,res,rmse, sure=TRUE)
gc()
#########################
#prepare for m3.R2: evaluate stage-3 predictions against monitor observations
#########################
#load mod1 (monitor-day observations with stage-1 predictions)
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2004.pred.rds")
mod1[,aodid:= paste(mod1$long_aod,mod1$lat_aod,sep="-")]
mod1<-mod1[,c("aodid","day","PM10","pred.m1","stn"),with=FALSE]
#R2.m3: join the stage-3 grid predictions onto monitor-days by (day, aodid)
setkey(mod3,day,aodid)
setkey(mod1,day,aodid)
m1.2004 <- merge(mod1,mod3[, list(day,aodid,pred.m3)], all.x = T)
# m3.fit.2004 is a summary.lm object; residuals() below pulls its stored residuals
m3.fit.2004<- summary(lm(PM10~pred.m3,data=m1.2004))
res[res$year=="2004", 'm3.R2'] <- print(summary(lm(PM10~pred.m3,data=m1.2004))$r.squared)
#RMSPE
res[res$year=="2004", 'm3.PE'] <- print(rmse(residuals(m3.fit.2004)))
#spatial: station-level long-term means
###to check
spatial2004<-m1.2004 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m3, na.rm=TRUE))
m1.fit.2004.spat<- lm(barpm ~ barpred, data=spatial2004)
res[res$year=="2004", 'm3.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2004))$r.squared)
res[res$year=="2004", 'm3.PE.s'] <- print(rmse(residuals(m1.fit.2004.spat)))
#temporal: daily deviations from station means
tempo2004<-left_join(m1.2004,spatial2004)
tempo2004$delpm <-tempo2004$PM10-tempo2004$barpm
tempo2004$delpred <-tempo2004$pred.m3-tempo2004$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2004)
res[res$year=="2004", 'm3.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2004))$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2004.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
#########################
#import mod2
mod2<- readRDS( "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2004.pred2.rds")
mod2<-mod2[,c("aodid","day","pred.m2"),with=FALSE]
#----------------> store the best available prediction per cell-day:
# base = pred.m3 (gap-filled), overwritten by pred.m2 (AOD available), overwritten by pred.m1 (monitor-calibrated)
mod3best <- mod3[, list(aodid, x_aod_ITM, y_aod_ITM, day, pred.m3)]
setkey(mod3best, day, aodid)
setkey(mod2, day, aodid)
mod3best <- merge(mod3best, mod2[,list(aodid, day, pred.m2)], all.x = T)
#reload mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2004.pred.rds")
mod1$aodid<-paste(mod1$long_aod,mod1$lat_aod,sep="-")
mod1<-mod1[,c("aodid","day","PM10","pred.m1"),with=FALSE]
setkey(mod1,day,aodid)
mod3best <- merge(mod3best, mod1, all.x = T)
mod3best[,bestpred := pred.m3]
mod3best[!is.na(pred.m2),bestpred := pred.m2]
mod3best[!is.na(pred.m1),bestpred := pred.m1]
#save
saveRDS(mod3best,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2004.FINAL.rds")
#save for GIS: per-cell annual summary (long-term mean, spread, and coverage counts per stage)
write.csv(mod3best[, list(LTPM = mean(bestpred, na.rm = T),
predvariance = var(bestpred, na.rm = T),
predmin = min(bestpred, na.rm = T),
predmax = max(bestpred, na.rm = T),
npred = sum(!is.na(bestpred)),
npred.m1 = sum(!is.na(pred.m1)),
npred.m2 = sum(!is.na(pred.m2)),
npred.m3 = sum(!is.na(pred.m3)),
x_aod_ITM = x_aod_ITM[1], y_aod_ITM = y_aod_ITM[1]),by=aodid], "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/LTPM3.AQ.2004.csv", row.names = F)
keep(res, sure=TRUE)
gc()
###############
#LIBS: reload libraries and helper snippets for the 2005 run
###############
library(lme4);library(reshape);library(foreign) ;library(ggplot2);library(plyr);library(data.table);library(reshape2);library(Hmisc);library(mgcv);library(gdata);library(car);library(dplyr);library(ggmap);library(broom);library(splines)
#sourcing helper snippets: splitdf() for CV splits, rmse() for prediction error
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
#if needed load res table
res<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
### import data
m1.2005 <-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2005.rds")
################# clean BAD STN PM10 and check if improved model?
raWDaf <- ddply(m1.2005, c( "stn"),
function(x) {
mod1 <- lm(PM10 ~ aod, data=x)
data.frame(R2 = round(summary(mod1)$r.squared, 5),
nsamps = length(summary(mod1)$resid))
})
raWDaf
raWDaf<-as.data.table(raWDaf)
bad<- raWDaf[R2< 0.01]
bad[,badid := paste(stn,sep="-")]
#################BAD STN
m1.2005[,badid := paste(stn,sep="-")]
####Take out bad stations
m1.2005 <- m1.2005[!(m1.2005$badid %in% bad$badid), ]
#scale vars
# Z-standardize the predictor set, writing each as "<name>.s".
# One lapply/.SD pass creates the same columns, in the same order,
# as writing 21 separate `:=` statements.
vars.to.scale <- c("elev", "tden", "pden", "dist2A1", "dist2water",
                   "dist2rail", "Dist2road", "ndvi", "MeanPbl", "p_ind",
                   "p_for", "p_farm", "p_dos", "p_dev", "p_os", "tempa",
                   "WDa", "WSa", "RHa", "Raina", "NO2a")
m1.2005[, (paste0(vars.to.scale, ".s")) := lapply(.SD, scale), .SDcols = vars.to.scale]
# Stage-1 mixed model: PM10 ~ AOD + temporal + spatial + land-use terms,
# with random intercepts and AOD slopes for day nested in region.
m1.formula <- as.formula(PM10~ aod
                         +tempa.s+WDa.s+WSa.s+Dust+MeanPbl.s #temporal
                         +elev.s+tden.s+pden.s+Dist2road.s+ndvi.s #spatial
                         +p_os.s #+p_dev.s+p_dos.s+p_farm.s+p_for.s+p_ind.s #land use
                         #+aod*Dust #interactions
                         +(1+aod|day/reg_num)) #+(1|stn) !!! stn screws up mod3
#full fit (normwt = observation-weights column in the data)
m1.fit.2005 <- lmer(m1.formula,data=m1.2005,weights=normwt)
m1.2005$pred.m1 <- predict(m1.fit.2005)
res[res$year=="2005", 'm1.R2'] <- print(summary(lm(PM10~pred.m1,data=m1.2005))$r.squared)
#RMSPE
res[res$year=="2005", 'm1.PE'] <- print(rmse(residuals(m1.fit.2005)))
#spatial R2: station-level means of observed vs predicted
###to check
spatial2005<-m1.2005 %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2005.spat<- lm(barpm ~ barpred, data=spatial2005)
res[res$year=="2005", 'm1.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2005))$r.squared)
res[res$year=="2005", 'm1.PE.s'] <- print(rmse(residuals(m1.fit.2005.spat)))
#temporal R2: within-station deviations from the station means
tempo2005<-left_join(m1.2005,spatial2005)
tempo2005$delpm <-tempo2005$PM10-tempo2005$barpm
tempo2005$delpred <-tempo2005$pred.m1-tempo2005$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2005)
res[res$year=="2005", 'm1.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2005))$r.squared)
saveRDS(m1.2005,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2005.pred.rds")
#---------------->>>> CV
# Ten random train/test splits via splitdf(); refit the stage-1 model on each
# training set and predict the held-out set. Replaces the former ten
# copy-pasted s1..s10 sections with one loop -- identical behavior: same
# call order (so the same RNG stream), same "s1".."s10" iter labels,
# same row order in the bound result.
cv.list <- vector("list", 10)
for (i in seq_len(10)) {
  splits_i <- splitdf(m1.2005)
  test_i <- splits_i$testset
  train_i <- splits_i$trainset
  out_train_i <- lmer(m1.formula, data = train_i, weights = normwt)
  test_i$pred.m1.cv <- predict(object = out_train_i, newdata = test_i,
                               allow.new.levels = TRUE, re.form = NULL)
  test_i$iter <- paste0("s", i)
  cv.list[[i]] <- test_i
}
#BIND 1 dataset
m1.2005.cv <- data.table(do.call(rbind, cv.list))
# cleanup (remove from WS) objects from CV
rm(cv.list, splits_i, test_i, train_i, out_train_i)
#table updates
# Fit the CV calibration regression (observed ~ CV-predicted) once and
# reuse its summary -- the original refit the identical lm() five times.
m1.fit.2005.cv <- lm(PM10 ~ pred.m1.cv, data = m1.2005.cv)
sum.m1.cv <- summary(m1.fit.2005.cv)
res[res$year=="2005", 'm1cv.R2'] <- print(sum.m1.cv$r.squared)
res[res$year=="2005", 'm1cv.I'] <- print(sum.m1.cv$coef[1,1])
res[res$year=="2005", 'm1cv.I.se'] <- print(sum.m1.cv$coef[1,2])
res[res$year=="2005", 'm1cv.S'] <- print(sum.m1.cv$coef[2,1])
res[res$year=="2005", 'm1cv.S.se'] <- print(sum.m1.cv$coef[2,2])
#RMSPE
res[res$year=="2005", 'm1cv.PE'] <- print(rmse(residuals(m1.fit.2005.cv)))
#spatial
# FIX: station means must be taken over the CV predictions (pred.m1.cv);
# the original averaged the full-fit pred.m1, so the "CV" spatial R2/RMSPE
# was not actually cross-validated (the temporal part below already used
# pred.m1.cv, confirming the copy-paste slip).
spatial2005.cv <- m1.2005.cv %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.cv, na.rm=TRUE))
m1.fit.2005.cv.s <- lm(barpm ~ barpred, data=spatial2005.cv)
res[res$year=="2005", 'm1cv.R2.s'] <- print(summary(m1.fit.2005.cv.s)$r.squared)
res[res$year=="2005", 'm1cv.PE.s'] <- print(rmse(residuals(m1.fit.2005.cv.s)))
#temporal: within-station deviations from the station means
tempo2005.cv <- left_join(m1.2005.cv, spatial2005.cv)
tempo2005.cv$delpm <- tempo2005.cv$PM10 - tempo2005.cv$barpm
tempo2005.cv$delpred <- tempo2005.cv$pred.m1.cv - tempo2005.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempo2005.cv)
res[res$year=="2005", 'm1cv.R2.t'] <- print(summary(mod_temporal.cv)$r.squared)
#-------->>> loc stage: attach 50 m local land-use covariates (by station)
# and model the stage-1 CV residual with a GAM of local terms.
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
setnames(luf,"tden","loc.tden")
setnames(luf,"elev50","loc.elev")
#add 50m LU to CV data
setkey(m1.2005.cv,stn)
setkey(luf,stn)
m1.2005.cv.loc <- merge(m1.2005.cv, luf, all.x = T)
#m1.2005.cv.loc<-na.omit(m1.2005.cv.loc)
#residual variable: observed PM10 minus stage-1 CV prediction
m1.2005.cv.loc$res.m1<-m1.2005.cv.loc$PM10-m1.2005.cv.loc$pred.m1.cv
#The GAM model for the residual: smooths of local traffic density (alone and
#interacting with PBL / wind speed), local open space, local elevation,
#distance to A1 road and distance to sea
gam.out<-gam(res.m1~s(loc.tden)+s(tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m1.2005.cv.loc)
#plot(bp.model.ps)
#summary(bp.model.ps)
## reg
# Combined prediction = stage-1 CV prediction + local GAM adjustment.
m1.2005.cv.loc$pred.m1.loc <- predict(gam.out)
m1.2005.cv.loc$pred.m1.both <- m1.2005.cv.loc$pred.m1.cv + m1.2005.cv.loc$pred.m1.loc
# Fit the calibration lm once (the original refit it five times).
m1.fit.2005.cv.loc <- lm(PM10 ~ pred.m1.both, data = m1.2005.cv.loc)
sum.loc <- summary(m1.fit.2005.cv.loc)
res[res$year=="2005", 'm1cv.loc.R2'] <- print(sum.loc$r.squared)
res[res$year=="2005", 'm1cv.loc.I'] <- print(sum.loc$coef[1,1])
res[res$year=="2005", 'm1cv.loc.I.se'] <- print(sum.loc$coef[1,2])
res[res$year=="2005", 'm1cv.loc.S'] <- print(sum.loc$coef[2,1])
res[res$year=="2005", 'm1cv.loc.S.se'] <- print(sum.loc$coef[2,2])
#RMSPE
# FIX: use the local-stage model's residuals; the original reused
# m1.fit.2005.cv, so m1cv.loc.PE silently duplicated m1cv.PE.
res[res$year=="2005", 'm1cv.loc.PE'] <- print(rmse(residuals(m1.fit.2005.cv.loc)))
#spatial
# FIX: average the combined prediction (pred.m1.both); the original
# averaged pred.m1, so the loc-stage spatial stats ignored both the CV
# refits and the local GAM adjustment (the temporal part below already
# used pred.m1.both, confirming the copy-paste slip).
spatial2005.cv.loc <- m1.2005.cv.loc %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.both, na.rm=TRUE))
m1.fit.2005.cv.loc.s <- lm(barpm ~ barpred, data=spatial2005.cv.loc)
res[res$year=="2005", 'm1cv.loc.R2.s'] <- print(summary(m1.fit.2005.cv.loc.s)$r.squared)
res[res$year=="2005", 'm1cv.loc.PE.s'] <- print(rmse(residuals(m1.fit.2005.cv.loc.s)))
#temporal: within-station deviations from the station means
tempo2005.loc.cv <- left_join(m1.2005.cv.loc, spatial2005.cv.loc)
tempo2005.loc.cv$delpm <- tempo2005.loc.cv$PM10 - tempo2005.loc.cv$barpm
tempo2005.loc.cv$delpred <- tempo2005.loc.cv$pred.m1.both - tempo2005.loc.cv$barpred
mod_temporal.loc.cv <- lm(delpm ~ delpred, data=tempo2005.loc.cv)
res[res$year=="2005", 'm1cv.loc.R2.t'] <- print(summary(mod_temporal.loc.cv)$r.squared)
#############save midpoint: results table (twice: yearly + rolling) and the
# CV dataset with local covariates/predictions
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2005.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
saveRDS(m1.2005.cv.loc,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2005.predCV.rds")
###############
#MOD2: predict the stage-1 model over the full AOD grid (2005)
###############
m2.2005<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2005.rds")
# Z-standardize the same predictor set used by the stage-1 model,
# written as "<name>.s" columns (single lapply/.SD pass instead of
# 21 separate `:=` statements; same columns, same order).
vars.to.scale <- c("elev", "tden", "pden", "dist2A1", "dist2water",
                   "dist2rail", "Dist2road", "ndvi", "MeanPbl", "p_ind",
                   "p_for", "p_farm", "p_dos", "p_dev", "p_os", "tempa",
                   "WDa", "WSa", "RHa", "Raina", "NO2a")
m2.2005[, (paste0(vars.to.scale, ".s")) := lapply(.SD, scale), .SDcols = vars.to.scale]
#generate predictions over the full grid (random effects included via
#re.form=NULL; unseen levels allowed)
m2.2005[, pred.m2 := predict(object=m1.fit.2005,newdata=m2.2005,allow.new.levels=TRUE,re.form=NULL)]
describe(m2.2005$pred.m2)
#delete impossible values (non-positive or implausibly high predictions)
m2.2005 <- m2.2005[pred.m2 > 0.00000000000001 , ]
m2.2005 <- m2.2005[pred.m2 < 1500 , ]
saveRDS(m2.2005,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2005.pred2.rds")
#-------------->prepare for mod3
# bimon = bi-monthly period 1..6 derived from month m (Jan/Feb -> 1, etc.)
m2.2005[, bimon := (m + 1) %/% 2]
setkey(m2.2005,day, aodid)
m2.2005<-m2.2005[!is.na(meanPM10)]
#run the lme part regressing stage 2 pred Vs mean pm
#in israel check per month, also check 30km band and other methods for meanpm
# (lme/lmeControl come from nlme, attached as a dependency of mgcv)
m2.smooth = lme(pred.m2 ~ meanPM10,random = list(aodid= ~1 + meanPM10),control=lmeControl(opt = "optim"), data= m2.2005 )
#xm2.smooth = lmer(pred.m2 ~ meanPM10+(1+ meanPM10|aodid), data= m2.2005 )
#correlate to see everything from mod2 and the mpm works
m2.2005[, pred.t31 := predict(m2.smooth)]
m2.2005[, resid := residuals(m2.smooth)]
res[res$year=="2005", 'm3.t31'] <- print(summary(lm(pred.m2~pred.t31,data=m2.2005))$r.squared)
#split the files to the separate bi-monthly datasets
T2005_bimon1 <- subset(m2.2005 ,m2.2005$bimon == "1")
T2005_bimon2 <- subset(m2.2005 ,m2.2005$bimon == "2")
T2005_bimon3 <- subset(m2.2005 ,m2.2005$bimon == "3")
T2005_bimon4 <- subset(m2.2005 ,m2.2005$bimon == "4")
T2005_bimon5 <- subset(m2.2005 ,m2.2005$bimon == "5")
T2005_bimon6 <- subset(m2.2005 ,m2.2005$bimon == "6")
#run the separate splines (smooth) for x and y for each bimon
#whats the default band (distance) that the spline goes out and uses
fit2_1 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2005_bimon1 )
fit2_2 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2005_bimon2 )
fit2_3 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2005_bimon3 )
fit2_4 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2005_bimon4 )
fit2_5 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2005_bimon5 )
fit2_6 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2005_bimon6 )
#get the predicted-fitted: subtract the spatially-smoothed residual surface
Xpred_1 <- (T2005_bimon1$pred.t31 - fit2_1$fitted)
Xpred_2 <- (T2005_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (T2005_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (T2005_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (T2005_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (T2005_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file
# NOTE(review): this assumes the keyed (day, aodid) row order equals the
# concatenation of the six bimon subsets in order 1..6 -- true when day
# sorts chronologically across the year. Verify if day's type changes.
m2.2005$pred.t32 <- c( Xpred_1,Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.2005,day, aodid)
#rerun the lme on the predictions including the spatial spline (smooth)
Final_pred_2005 <- lme(pred.t32 ~ meanPM10 ,random = list(aodid= ~1 + meanPM10 ),control=lmeControl(opt = "optim"),data= m2.2005 )
m2.2005[, pred.t33 := predict(Final_pred_2005)]
#check correlations
res[res$year=="2005", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.2005))$r.squared)
#------------------------>>>
#import mod3: full-grid, all-days dataset for 2005
data.m3 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2005.rds")
#for PM10: keep id/day/coords/meanPM10 columns, selected by position
data.m3 <- data.m3[,c(1,2,5,29:32,52,53),with=FALSE]
data.m3[, bimon := (m + 1) %/% 2]
setkey(data.m3,day, aodid)
data.m3<-data.m3[!is.na(meanPM10)]
#generate m.3 initial pred from the final calibration lme
data.m3$pred.m3.mix <- predict(Final_pred_2005,data.m3)
#create unique grid: one row per aodid with its (mean) coordinates
ugrid <-data.m3 %>%
  group_by(aodid) %>%
  summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE))
#### PREDICT Gam part
# For each bi-month: slice data.m3, predict that bi-month's spatial GAM
# residual surface on the unique grid, and merge gpred back by aodid.
# Replaces six copy-pasted split/predict/merge sections with one loop;
# behavior is identical (same per-bimon keyed merge, same bimon 1..6
# row order in the bound result).
gam.fits <- list(fit2_1, fit2_2, fit2_3, fit2_4, fit2_5, fit2_6)
m3.parts <- vector("list", 6)
for (b in seq_len(6)) {
  part.b <- data.m3[bimon == b, ]
  grid.b <- ugrid
  grid.b$gpred <- predict.gam(gam.fits[[b]], grid.b)
  setkey(grid.b, aodid)
  setkey(part.b, aodid)
  m3.parts[[b]] <- merge(part.b, grid.b[, list(aodid, gpred)], all.x = T)
}
#reattach all parts
mod3 <- do.call(rbind, m3.parts)
rm(gam.fits, m3.parts, part.b, grid.b)
# create pred.m3: lme calibration prediction + bi-monthly spatial GAM residual
mod3$pred.m3 <-mod3$pred.m3.mix+mod3$gpred
#hist(mod3$pred.m3)
#describe(mod3$pred.m3)
# drop grid-days with negative predictions
# (NOTE: rows are removed, not recoded to zero as the old comment claimed)
mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2005.pred.rds")
#clean workspace, keeping only what the validation below needs
keep(mod3,res,rmse, sure=TRUE)
gc()
#########################
#prepare for m3.R2
#########################
#load mod1 (stage-1 predictions at monitors) and rebuild the aodid key
mod1 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2005.pred.rds")
mod1[, aodid := paste(long_aod, lat_aod, sep="-")]
mod1 <- mod1[, c("aodid","day","PM10","pred.m1","stn"), with=FALSE]
#R2.m3: join stage-3 predictions onto stage-1 observations by (day, aodid)
setkey(mod3, day, aodid)
setkey(mod1, day, aodid)
m1.2005 <- merge(mod1, mod3[, list(day,aodid,pred.m3)], all.x = T)
# Fit the validation lm once -- the original refit the identical model
# and called residuals() on a summary.lm object.
m3.lm.2005 <- lm(PM10 ~ pred.m3, data = m1.2005)
m3.fit.2005 <- summary(m3.lm.2005)
res[res$year=="2005", 'm3.R2'] <- print(m3.fit.2005$r.squared)
#RMSPE
res[res$year=="2005", 'm3.PE'] <- print(rmse(residuals(m3.lm.2005)))
#spatial R2: station-level means of observed PM10 vs stage-3 predictions
###to check
spatial2005<-m1.2005 %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m3, na.rm=TRUE))
m1.fit.2005.spat<- lm(barpm ~ barpred, data=spatial2005)
res[res$year=="2005", 'm3.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2005))$r.squared)
res[res$year=="2005", 'm3.PE.s'] <- print(rmse(residuals(m1.fit.2005.spat)))
#temporal R2: within-station deviations from the station means
tempo2005<-left_join(m1.2005,spatial2005)
tempo2005$delpm <-tempo2005$PM10-tempo2005$barpm
tempo2005$delpred <-tempo2005$pred.m3-tempo2005$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2005)
res[res$year=="2005", 'm3.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2005))$r.squared)
#############save midpoint (yearly + rolling results tables)
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2005.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
#########################
#import mod2 (stage-2 grid predictions, 2005); keep only join keys + prediction
mod2<- readRDS( "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2005.pred2.rds")
mod2<-mod2[,c("aodid","day","pred.m2"),with=FALSE]
#----------------> store the best available prediction per grid-cell/day
mod3best <- mod3[, list(aodid, x_aod_ITM, y_aod_ITM, day, pred.m3)]
setkey(mod3best, day, aodid)
setkey(mod2, day, aodid)
mod3best <- merge(mod3best, mod2[,list(aodid, day, pred.m2)], all.x = T)
#reload mod1 (stage-1 predictions at monitor locations)
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2005.pred.rds")
mod1$aodid<-paste(mod1$long_aod,mod1$lat_aod,sep="-")
mod1<-mod1[,c("aodid","day","PM10","pred.m1"),with=FALSE]
setkey(mod1,day,aodid)
mod3best <- merge(mod3best, mod1, all.x = T)
# bestpred hierarchy: start from mod3, overwrite with mod2 where available,
# then with mod1 where available (i.e. mod1 > mod2 > mod3).
mod3best[,bestpred := pred.m3]
mod3best[!is.na(pred.m2),bestpred := pred.m2]
mod3best[!is.na(pred.m1),bestpred := pred.m1]
#save
saveRDS(mod3best,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2005.FINAL.rds")
#save per-cell long-term summaries (means, variance, counts) for GIS mapping
write.csv(mod3best[, list(LTPM = mean(bestpred, na.rm = T),
                          predvariance = var(bestpred, na.rm = T),
                          predmin = min(bestpred, na.rm = T),
                          predmax = max(bestpred, na.rm = T),
                          npred = sum(!is.na(bestpred)),
                          npred.m1 = sum(!is.na(pred.m1)),
                          npred.m2 = sum(!is.na(pred.m2)),
                          npred.m3 = sum(!is.na(pred.m3)),
                          x_aod_ITM = x_aod_ITM[1], y_aod_ITM = y_aod_ITM[1]),by=aodid], "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/LTPM3.AQ.2005.csv", row.names = F)
# Drop everything except res before the 2006 section starts.
keep(res, sure=TRUE)
# FIX: was a bare `c()` -- a no-op typo for gc() (cf. the parallel 2004
# section, which calls gc() after keep()).
gc()
###############
#LIBS
###############
library(lme4);library(reshape);library(foreign) ;library(ggplot2);library(plyr);library(data.table);library(reshape2);library(Hmisc);library(mgcv);library(gdata);library(car);library(dplyr);library(ggmap);library(broom);library(splines)
#sourcing helpers: splitdf() (random train/test split) and rmse()
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
#if needed load res table (res normally persists from the 2005 section's keep())
#res<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
### import data: stage-1 (monitor-matched AOD) dataset for 2006
m1.2006 <-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2006.rds")
################# clean BAD STN PM10 and check if improved model?
# Per-station PM10 ~ AOD regression; stations with R2 < 0.01 are dropped below.
raWDaf <- ddply(m1.2006, c( "stn"),
                function(x) {
                  mod1 <- lm(PM10 ~ aod, data=x)
                  data.frame(R2 = round(summary(mod1)$r.squared, 5),
                             nsamps = length(summary(mod1)$resid))
                })
raWDaf
raWDaf<-as.data.table(raWDaf)
bad<- raWDaf[R2< 0.01]
# NOTE(review): paste(stn, sep="-") with a single argument is just stn,
# so badid is simply the station id.
bad[,badid := paste(stn,sep="-")]
#################BAD STN
m1.2006[,badid := paste(stn,sep="-")]
####Take out bad stations
m1.2006 <- m1.2006[!(m1.2006$badid %in% bad$badid), ]
#scale vars
# Z-standardize the predictor set, writing each as "<name>.s".
# One lapply/.SD pass creates the same columns, in the same order,
# as writing 21 separate `:=` statements.
vars.to.scale <- c("elev", "tden", "pden", "dist2A1", "dist2water",
                   "dist2rail", "Dist2road", "ndvi", "MeanPbl", "p_ind",
                   "p_for", "p_farm", "p_dos", "p_dev", "p_os", "tempa",
                   "WDa", "WSa", "RHa", "Raina", "NO2a")
m1.2006[, (paste0(vars.to.scale, ".s")) := lapply(.SD, scale), .SDcols = vars.to.scale]
# Stage-1 mixed model: PM10 ~ AOD + temporal + spatial + land-use terms,
# with random intercepts and AOD slopes for day nested in region.
m1.formula <- as.formula(PM10~ aod
                         +tempa.s+WDa.s+WSa.s+Dust+MeanPbl.s #temporal
                         +elev.s+tden.s+pden.s+Dist2road.s+ndvi.s #spatial
                         +p_os.s #+p_dev.s+p_dos.s+p_farm.s+p_for.s+p_ind.s #land use
                         #+aod*Dust #interactions
                         +(1+aod|day/reg_num)) #+(1|stn) !!! stn screws up mod3
#full fit (normwt = observation-weights column in the data)
m1.fit.2006 <- lmer(m1.formula,data=m1.2006,weights=normwt)
m1.2006$pred.m1 <- predict(m1.fit.2006)
res[res$year=="2006", 'm1.R2'] <- print(summary(lm(PM10~pred.m1,data=m1.2006))$r.squared)
#RMSPE
res[res$year=="2006", 'm1.PE'] <- print(rmse(residuals(m1.fit.2006)))
#spatial R2: station-level means of observed vs predicted
###to check
spatial2006<-m1.2006 %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2006.spat<- lm(barpm ~ barpred, data=spatial2006)
res[res$year=="2006", 'm1.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2006))$r.squared)
res[res$year=="2006", 'm1.PE.s'] <- print(rmse(residuals(m1.fit.2006.spat)))
#temporal R2: within-station deviations from the station means
tempo2006<-left_join(m1.2006,spatial2006)
tempo2006$delpm <-tempo2006$PM10-tempo2006$barpm
tempo2006$delpred <-tempo2006$pred.m1-tempo2006$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2006)
res[res$year=="2006", 'm1.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2006))$r.squared)
saveRDS(m1.2006,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2006.pred.rds")
#---------------->>>> CV
# Ten random train/test splits via splitdf(); refit the stage-1 model on each
# training set and predict the held-out set. Replaces the former ten
# copy-pasted s1..s10 sections with one loop -- identical behavior: same
# call order (so the same RNG stream), same "s1".."s10" iter labels,
# same row order in the bound result.
cv.list <- vector("list", 10)
for (i in seq_len(10)) {
  splits_i <- splitdf(m1.2006)
  test_i <- splits_i$testset
  train_i <- splits_i$trainset
  out_train_i <- lmer(m1.formula, data = train_i, weights = normwt)
  test_i$pred.m1.cv <- predict(object = out_train_i, newdata = test_i,
                               allow.new.levels = TRUE, re.form = NULL)
  test_i$iter <- paste0("s", i)
  cv.list[[i]] <- test_i
}
#BIND 1 dataset
m1.2006.cv <- data.table(do.call(rbind, cv.list))
# cleanup (remove from WS) objects from CV
rm(cv.list, splits_i, test_i, train_i, out_train_i)
#table updates
# Fit the CV calibration regression (observed ~ CV-predicted) once and
# reuse its summary -- the original refit the identical lm() five times.
m1.fit.2006.cv <- lm(PM10 ~ pred.m1.cv, data = m1.2006.cv)
sum.m1.cv <- summary(m1.fit.2006.cv)
res[res$year=="2006", 'm1cv.R2'] <- print(sum.m1.cv$r.squared)
res[res$year=="2006", 'm1cv.I'] <- print(sum.m1.cv$coef[1,1])
res[res$year=="2006", 'm1cv.I.se'] <- print(sum.m1.cv$coef[1,2])
res[res$year=="2006", 'm1cv.S'] <- print(sum.m1.cv$coef[2,1])
res[res$year=="2006", 'm1cv.S.se'] <- print(sum.m1.cv$coef[2,2])
#RMSPE
res[res$year=="2006", 'm1cv.PE'] <- print(rmse(residuals(m1.fit.2006.cv)))
#spatial
# FIX: station means must be taken over the CV predictions (pred.m1.cv);
# the original averaged the full-fit pred.m1, so the "CV" spatial R2/RMSPE
# was not actually cross-validated (the temporal part below already used
# pred.m1.cv, confirming the copy-paste slip).
spatial2006.cv <- m1.2006.cv %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.cv, na.rm=TRUE))
m1.fit.2006.cv.s <- lm(barpm ~ barpred, data=spatial2006.cv)
res[res$year=="2006", 'm1cv.R2.s'] <- print(summary(m1.fit.2006.cv.s)$r.squared)
res[res$year=="2006", 'm1cv.PE.s'] <- print(rmse(residuals(m1.fit.2006.cv.s)))
#temporal: within-station deviations from the station means
tempo2006.cv <- left_join(m1.2006.cv, spatial2006.cv)
tempo2006.cv$delpm <- tempo2006.cv$PM10 - tempo2006.cv$barpm
tempo2006.cv$delpred <- tempo2006.cv$pred.m1.cv - tempo2006.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempo2006.cv)
res[res$year=="2006", 'm1cv.R2.t'] <- print(summary(mod_temporal.cv)$r.squared)
#-------->>> loc stage: attach 50 m local land-use covariates (by station)
# and model the stage-1 CV residual with a GAM of local terms.
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
setnames(luf,"tden","loc.tden")
setnames(luf,"elev50","loc.elev")
#add 50m LU to CV data
setkey(m1.2006.cv,stn)
setkey(luf,stn)
m1.2006.cv.loc <- merge(m1.2006.cv, luf, all.x = T)
#m1.2006.cv.loc<-na.omit(m1.2006.cv.loc)
#residual variable: observed PM10 minus stage-1 CV prediction
m1.2006.cv.loc$res.m1<-m1.2006.cv.loc$PM10-m1.2006.cv.loc$pred.m1.cv
#The GAM model for the residual: smooths of local traffic density (alone and
#interacting with PBL / wind speed), local open space, local elevation,
#distance to A1 road and distance to sea
gam.out<-gam(res.m1~s(loc.tden)+s(tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m1.2006.cv.loc)
#plot(bp.model.ps)
#summary(bp.model.ps)
## reg
# Combined prediction = stage-1 CV prediction + local GAM adjustment.
m1.2006.cv.loc$pred.m1.loc <- predict(gam.out)
m1.2006.cv.loc$pred.m1.both <- m1.2006.cv.loc$pred.m1.cv + m1.2006.cv.loc$pred.m1.loc
# Fit the calibration lm once (the original refit it five times).
m1.fit.2006.cv.loc <- lm(PM10 ~ pred.m1.both, data = m1.2006.cv.loc)
sum.loc <- summary(m1.fit.2006.cv.loc)
res[res$year=="2006", 'm1cv.loc.R2'] <- print(sum.loc$r.squared)
res[res$year=="2006", 'm1cv.loc.I'] <- print(sum.loc$coef[1,1])
res[res$year=="2006", 'm1cv.loc.I.se'] <- print(sum.loc$coef[1,2])
res[res$year=="2006", 'm1cv.loc.S'] <- print(sum.loc$coef[2,1])
res[res$year=="2006", 'm1cv.loc.S.se'] <- print(sum.loc$coef[2,2])
#RMSPE
# FIX: use the local-stage model's residuals; the original reused
# m1.fit.2006.cv, so m1cv.loc.PE silently duplicated m1cv.PE.
res[res$year=="2006", 'm1cv.loc.PE'] <- print(rmse(residuals(m1.fit.2006.cv.loc)))
#spatial
# FIX: average the combined prediction (pred.m1.both); the original
# averaged pred.m1, so the loc-stage spatial stats ignored both the CV
# refits and the local GAM adjustment (the temporal part below already
# used pred.m1.both, confirming the copy-paste slip).
spatial2006.cv.loc <- m1.2006.cv.loc %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.both, na.rm=TRUE))
m1.fit.2006.cv.loc.s <- lm(barpm ~ barpred, data=spatial2006.cv.loc)
res[res$year=="2006", 'm1cv.loc.R2.s'] <- print(summary(m1.fit.2006.cv.loc.s)$r.squared)
res[res$year=="2006", 'm1cv.loc.PE.s'] <- print(rmse(residuals(m1.fit.2006.cv.loc.s)))
#temporal: within-station deviations from the station means
tempo2006.loc.cv <- left_join(m1.2006.cv.loc, spatial2006.cv.loc)
tempo2006.loc.cv$delpm <- tempo2006.loc.cv$PM10 - tempo2006.loc.cv$barpm
tempo2006.loc.cv$delpred <- tempo2006.loc.cv$pred.m1.both - tempo2006.loc.cv$barpred
mod_temporal.loc.cv <- lm(delpm ~ delpred, data=tempo2006.loc.cv)
res[res$year=="2006", 'm1cv.loc.R2.t'] <- print(summary(mod_temporal.loc.cv)$r.squared)
#############save midpoint: results table (twice: yearly + rolling) and the
# CV dataset with local covariates/predictions
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2006.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
saveRDS(m1.2006.cv.loc,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2006.predCV.rds")
###############
#MOD2: predict the stage-1 model over the full AOD grid (2006)
###############
m2.2006<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2006.rds")
# Z-standardize the same predictor set used by the stage-1 model,
# written as "<name>.s" columns (single lapply/.SD pass instead of
# 21 separate `:=` statements; same columns, same order).
vars.to.scale <- c("elev", "tden", "pden", "dist2A1", "dist2water",
                   "dist2rail", "Dist2road", "ndvi", "MeanPbl", "p_ind",
                   "p_for", "p_farm", "p_dos", "p_dev", "p_os", "tempa",
                   "WDa", "WSa", "RHa", "Raina", "NO2a")
m2.2006[, (paste0(vars.to.scale, ".s")) := lapply(.SD, scale), .SDcols = vars.to.scale]
#generate predictions
m2.2006[, pred.m2 := predict(object=m1.fit.2006,newdata=m2.2006,allow.new.levels=TRUE,re.form=NULL)]
describe(m2.2006$pred.m2)
#delete implossible valuesOA[24~
m2.2006 <- m2.2006[pred.m2 > 0.00000000000001 , ]
m2.2006 <- m2.2006[pred.m2 < 1500 , ]
saveRDS(m2.2006,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2006.pred2.rds")
#-------------->prepare for mod3
# Bi-monthly period index: months 1-2 -> 1, 3-4 -> 2, ..., 11-12 -> 6.
m2.2006[, bimon := (m + 1) %/% 2]
# Key/sort by (day, aodid). Day ascending also makes bimon ascending, which
# the positional re-merge of the bi-monthly subsets below relies on.
setkey(m2.2006,day, aodid)
m2.2006<-m2.2006[!is.na(meanPM10)]
#run the lmer part regressing stage 2 pred Vs mean pm
#in israel check per month, also check 30km band and other methods for meanpm
# Calibrate grid predictions against the regional mean PM10, with a random
# intercept and meanPM10 slope per AOD cell.
m2.smooth = lme(pred.m2 ~ meanPM10,random = list(aodid= ~1 + meanPM10),control=lmeControl(opt = "optim"), data= m2.2006 )
#xm2.smooth = lmer(pred.m2 ~ meanPM10+(1+ meanPM10|aodid), data= m2.2006 )
#correlate to see everything from mod2 and the mpm works
m2.2006[, pred.t31 := predict(m2.smooth)]
m2.2006[, resid := residuals(m2.smooth)]
res[res$year=="2006", 'm3.t31'] <- print(summary(lm(pred.m2~pred.t31,data=m2.2006))$r.squared)
#split the files to the separate bi monthly datsets
T2006_bimon1 <- subset(m2.2006 ,m2.2006$bimon == "1")
T2006_bimon2 <- subset(m2.2006 ,m2.2006$bimon == "2")
T2006_bimon3 <- subset(m2.2006 ,m2.2006$bimon == "3")
T2006_bimon4 <- subset(m2.2006 ,m2.2006$bimon == "4")
T2006_bimon5 <- subset(m2.2006 ,m2.2006$bimon == "5")
T2006_bimon6 <- subset(m2.2006 ,m2.2006$bimon == "6")
#run the separate splines (smooth) for x and y for each bimon
#whats the default band (distance) that the spline goes out and uses
# One thin-plate smooth of the lme residual over ITM coordinates per bi-month.
fit2_1 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2006_bimon1 )
fit2_2 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2006_bimon2 )
fit2_3 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2006_bimon3 )
fit2_4 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2006_bimon4 )
fit2_5 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2006_bimon5 )
fit2_6 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2006_bimon6 )
#get the predicted-fitted
# Remove the fitted spatial residual surface from the calibrated prediction.
Xpred_1 <- (T2006_bimon1$pred.t31 - fit2_1$fitted)
Xpred_2 <- (T2006_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (T2006_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (T2006_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (T2006_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (T2006_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file
# Positional alignment: m2.2006 is keyed by (day, aodid), so rows are grouped
# in ascending bimon order and concatenating the six subsets matches row order.
m2.2006$pred.t32 <- c( Xpred_1,Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.2006,day, aodid)
#rerun the lme on the predictions including the spatial spline (smooth)
Final_pred_2006 <- lme(pred.t32 ~ meanPM10 ,random = list(aodid= ~1 + meanPM10 ),control=lmeControl(opt = "optim"),data= m2.2006 )
m2.2006[, pred.t33 := predict(Final_pred_2006)]
#check correlations
res[res$year=="2006", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.2006))$r.squared)
#------------------------>>>
#import mod3
data.m3 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2006.rds")
#for PM10
# Positional column subset — depends on the stored column order of the RDS.
data.m3 <- data.m3[,c(1,2,5,29:32,52,53),with=FALSE]
data.m3[, bimon := (m + 1) %/% 2]
setkey(data.m3,day, aodid)
data.m3<-data.m3[!is.na(meanPM10)]
#generate m.3 initial pred
# Apply the stage-2 calibration model to the full mod3 grid.
data.m3$pred.m3.mix <- predict(Final_pred_2006,data.m3)
#create unique grid
# One row per AOD cell with its mean coordinates (lat/long and ITM).
ugrid <-data.m3 %>%
group_by(aodid) %>%
summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE))
#### PREDICT Gam part
#split back into bimons to include the gam prediction in final prediction
data.m3_bimon1 <- data.m3[bimon == 1, ]
data.m3_bimon2 <- data.m3[bimon == 2, ]
data.m3_bimon3 <- data.m3[bimon == 3, ]
data.m3_bimon4 <- data.m3[bimon == 4, ]
data.m3_bimon5 <- data.m3[bimon == 5, ]
data.m3_bimon6 <- data.m3[bimon == 6, ]
#addin unique grid to each bimon
# Plain assignments; ugrid itself stays untouched because the later `$gpred <-`
# assigns to a copy (copy-on-modify).
uniq_gid_bimon1 <- ugrid
uniq_gid_bimon2 <- ugrid
uniq_gid_bimon3 <- ugrid
uniq_gid_bimon4 <- ugrid
uniq_gid_bimon5 <- ugrid
uniq_gid_bimon6 <- ugrid
#get predictions for Bimon residuals
# Evaluate each bi-month's spatial residual smooth on the unique grid.
uniq_gid_bimon1$gpred <- predict.gam(fit2_1,uniq_gid_bimon1)
uniq_gid_bimon2$gpred <- predict.gam(fit2_2,uniq_gid_bimon2)
uniq_gid_bimon3$gpred <- predict.gam(fit2_3,uniq_gid_bimon3)
uniq_gid_bimon4$gpred <- predict.gam(fit2_4,uniq_gid_bimon4)
uniq_gid_bimon5$gpred <- predict.gam(fit2_5,uniq_gid_bimon5)
uniq_gid_bimon6$gpred <- predict.gam(fit2_6,uniq_gid_bimon6)
#merge things back together
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> merges
# Left-join each bi-monthly grid's gpred onto the matching daily data by aodid.
setkey(uniq_gid_bimon1,aodid)
setkey(data.m3_bimon1,aodid)
data.m3_bimon1 <- merge(data.m3_bimon1, uniq_gid_bimon1[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon2,aodid)
setkey(data.m3_bimon2,aodid)
data.m3_bimon2 <- merge(data.m3_bimon2, uniq_gid_bimon2[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon3,aodid)
setkey(data.m3_bimon3,aodid)
data.m3_bimon3 <- merge(data.m3_bimon3, uniq_gid_bimon3[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon4,aodid)
setkey(data.m3_bimon4,aodid)
data.m3_bimon4 <- merge(data.m3_bimon4, uniq_gid_bimon4[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon5,aodid)
setkey(data.m3_bimon5,aodid)
data.m3_bimon5 <- merge(data.m3_bimon5, uniq_gid_bimon5[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon6,aodid)
setkey(data.m3_bimon6,aodid)
data.m3_bimon6 <- merge(data.m3_bimon6, uniq_gid_bimon6[,list(aodid,gpred)], all.x = T)
#reattach all parts
mod3 <- rbind(data.m3_bimon1,data.m3_bimon2,data.m3_bimon3,data.m3_bimon4,data.m3_bimon5,data.m3_bimon6)
# create pred.m3: calibrated lme prediction plus the bi-monthly spatial GAM term
mod3$pred.m3 <-mod3$pred.m3.mix+mod3$gpred
#hist(mod3$pred.m3)
#describe(mod3$pred.m3)
# Drop negative predictions (rows are removed, not recoded to zero).
mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2006.pred.rds")
#clean
# gdata::keep — remove every workspace object except those listed.
keep(mod3,res,rmse, sure=TRUE)
gc()
#########################
#prepare for m3.R2
#########################
#load mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2006.pred.rds")
mod1[,aodid:= paste(mod1$long_aod,mod1$lat_aod,sep="-")]
mod1<-mod1[,c("aodid","day","PM10","pred.m1","stn"),with=FALSE]
#R2.m3
# Join stage-3 predictions onto station observations to validate against PM10.
setkey(mod3,day,aodid)
setkey(mod1,day,aodid)
m1.2006 <- merge(mod1,mod3[, list(day,aodid,pred.m3)], all.x = T)
m3.fit.2006<- summary(lm(PM10~pred.m3,data=m1.2006))
res[res$year=="2006", 'm3.R2'] <- print(summary(lm(PM10~pred.m3,data=m1.2006))$r.squared)
#RMSPE
# residuals() on a summary.lm works: the summary object stores $residuals.
res[res$year=="2006", 'm3.PE'] <- print(rmse(residuals(m3.fit.2006)))
#spatial
###to check
# Station-level means: spatial (between-station) component of the validation.
spatial2006<-m1.2006 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m3, na.rm=TRUE))
m1.fit.2006.spat<- lm(barpm ~ barpred, data=spatial2006)
res[res$year=="2006", 'm3.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2006))$r.squared)
res[res$year=="2006", 'm3.PE.s'] <- print(rmse(residuals(m1.fit.2006.spat)))
#temporal
# Deviations from station means: temporal (within-station) component.
tempo2006<-left_join(m1.2006,spatial2006)
tempo2006$delpm <-tempo2006$PM10-tempo2006$barpm
tempo2006$delpred <-tempo2006$pred.m3-tempo2006$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2006)
res[res$year=="2006", 'm3.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2006))$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2006.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
#########################
#import mod2
mod2<- readRDS( "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2006.pred2.rds")
mod2<-mod2[,c("aodid","day","pred.m2"),with=FALSE]
#----------------> store the best available
# bestpred priority: pred.m1 (stations) > pred.m2 (grid w/ AOD) > pred.m3 (gap-filled).
mod3best <- mod3[, list(aodid, x_aod_ITM, y_aod_ITM, day, pred.m3)]
setkey(mod3best, day, aodid)
setkey(mod2, day, aodid)
mod3best <- merge(mod3best, mod2[,list(aodid, day, pred.m2)], all.x = T)
#reload mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2006.pred.rds")
mod1$aodid<-paste(mod1$long_aod,mod1$lat_aod,sep="-")
mod1<-mod1[,c("aodid","day","PM10","pred.m1"),with=FALSE]
setkey(mod1,day,aodid)
mod3best <- merge(mod3best, mod1, all.x = T)
mod3best[,bestpred := pred.m3]
mod3best[!is.na(pred.m2),bestpred := pred.m2]
mod3best[!is.na(pred.m1),bestpred := pred.m1]
#save
saveRDS(mod3best,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2006.FINAL.rds")
#save for GIS
# Per-cell long-term summary of the best prediction plus source counts.
write.csv(mod3best[, list(LTPM = mean(bestpred, na.rm = T),
predvariance = var(bestpred, na.rm = T),
predmin = min(bestpred, na.rm = T),
predmax = max(bestpred, na.rm = T),
npred = sum(!is.na(bestpred)),
npred.m1 = sum(!is.na(pred.m1)),
npred.m2 = sum(!is.na(pred.m2)),
npred.m3 = sum(!is.na(pred.m3)),
x_aod_ITM = x_aod_ITM[1], y_aod_ITM = y_aod_ITM[1]),by=aodid], "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/LTPM3.AQ.2006.csv", row.names = F)
keep(res, sure=TRUE)
# NOTE(review): stray no-op `c()` below — possibly a typo for gc(); left as-is.
c()
###############
#LIBS
###############
# Reload packages after the keep() wipe; semicolon-chained on one line by design.
library(lme4);library(reshape);library(foreign) ;library(ggplot2);library(plyr);library(data.table);library(reshape2);library(Hmisc);library(mgcv);library(gdata);library(car);library(dplyr);library(ggmap);library(broom);library(splines)
#sourcing
# Helpers: splitdf() (random train/test split) and rmse().
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
#if needed load res table
#res<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
### import data
m1.2007 <-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2007.rds")
################# clean BAD STN PM10 and check if improved model?
# Per-station PM10~AOD regression; stations with R2 < 0.01 are flagged as bad.
raWDaf <- ddply(m1.2007, c( "stn"),
function(x) {
mod1 <- lm(PM10 ~ aod, data=x)
data.frame(R2 = round(summary(mod1)$r.squared, 5),
nsamps = length(summary(mod1)$resid))
})
raWDaf
raWDaf<-as.data.table(raWDaf)
bad<- raWDaf[R2< 0.01]
# badid is just the station id (paste() with one argument and sep is a no-op).
bad[,badid := paste(stn,sep="-")]
#################BAD STN
m1.2007[,badid := paste(stn,sep="-")]
####Take out bad stations
m1.2007 <- m1.2007[!(m1.2007$badid %in% bad$badid), ]
#scale vars
# Z-score every raw predictor into a matching ".s" column. One loop replaces
# the 21 near-identical scale() assignments; variable order is kept so the
# resulting column order is identical.
for (v in c("elev", "tden", "pden", "dist2A1", "dist2water", "dist2rail",
            "Dist2road", "ndvi", "MeanPbl", "p_ind", "p_for", "p_farm",
            "p_dos", "p_dev", "p_os", "tempa", "WDa", "WSa", "RHa",
            "Raina", "NO2a")) {
  m1.2007[, paste0(v, ".s") := scale(get(v))]
}
# Stage-1 mixed model: PM10 on AOD plus covariates, with a random intercept and
# AOD slope nested in day/region.
m1.formula <- as.formula(PM10~ aod
+tempa.s+WDa.s+WSa.s+Dust+MeanPbl.s #temporal
+elev.s+tden.s+pden.s+Dist2road.s+ndvi.s #spatial
+p_os.s #+p_dev.s+p_dos.s+p_farm.s+p_for.s+p_ind.s #land use
#+aod*Dust #interactions
+(1+aod|day/reg_num)) #+(1|stn) !!! stn screws up mod3
#full fit
m1.fit.2007 <- lmer(m1.formula,data=m1.2007,weights=normwt)
m1.2007$pred.m1 <- predict(m1.fit.2007)
res[res$year=="2007", 'm1.R2'] <- print(summary(lm(PM10~pred.m1,data=m1.2007))$r.squared)
#RMSPE
res[res$year=="2007", 'm1.PE'] <- print(rmse(residuals(m1.fit.2007)))
#spatial
###to check
# Station-level means: spatial (between-station) component of model fit.
spatial2007<-m1.2007 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2007.spat<- lm(barpm ~ barpred, data=spatial2007)
res[res$year=="2007", 'm1.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2007))$r.squared)
res[res$year=="2007", 'm1.PE.s'] <- print(rmse(residuals(m1.fit.2007.spat)))
#temporal
# Deviations from station means: temporal (within-station) component.
tempo2007<-left_join(m1.2007,spatial2007)
tempo2007$delpm <-tempo2007$PM10-tempo2007$barpm
tempo2007$delpred <-tempo2007$pred.m1-tempo2007$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2007)
res[res$year=="2007", 'm1.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2007))$r.squared)
saveRDS(m1.2007,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2007.pred.rds")
#---------------->>>> CV
# Ten repeated random train/test splits. Each round refits the stage-1 mixed
# model on the training rows and predicts the held-out rows. The loop makes
# exactly the same sequence of splitdf()/lmer()/predict() calls as the original
# unrolled s1..s10 blocks, so the RNG stream and results are unchanged.
cv_rounds <- vector("list", 10)
for (i in 1:10) {
  spl <- splitdf(m1.2007)
  fit_cv <- lmer(m1.formula, data = spl$trainset, weights = normwt)
  held <- spl$testset
  held$pred.m1.cv <- predict(object = fit_cv, newdata = held,
                             allow.new.levels = TRUE, re.form = NULL)
  held$iter <- paste0("s", i)
  cv_rounds[[i]] <- held
}
#BIND 1 dataset
m1.2007.cv <- data.table(do.call(rbind, cv_rounds))
rm(cv_rounds, spl, fit_cv, held, i)
# cleanup (remove from WS) objects from CV
rm(list = ls(pattern = "train_|test_"))
#table updates
# Fit the CV calibration lm ONCE and reuse its summary. The original refit the
# identical lm five separate times just to pull out R2 and coefficients; the
# printed values and res[] assignments are unchanged.
m1.fit.2007.cv <- lm(PM10 ~ pred.m1.cv, data = m1.2007.cv)
sum.m1.cv <- summary(m1.fit.2007.cv)
res[res$year=="2007", 'm1cv.R2'] <- print(sum.m1.cv$r.squared)
res[res$year=="2007", 'm1cv.I'] <- print(sum.m1.cv$coef[1,1])
res[res$year=="2007", 'm1cv.I.se'] <- print(sum.m1.cv$coef[1,2])
res[res$year=="2007", 'm1cv.S'] <- print(sum.m1.cv$coef[2,1])
res[res$year=="2007", 'm1cv.S.se'] <- print(sum.m1.cv$coef[2,2])
#RMSPE
res[res$year=="2007", 'm1cv.PE'] <- print(rmse(residuals(m1.fit.2007.cv)))
#spatial
# Station-level means of observed vs CV-predicted PM10.
# FIX: the original averaged pred.m1 (full-model predictions) here; this is a
# CV diagnostic, so the held-out predictions pred.m1.cv are the right column
# (the temporal part below already used pred.m1.cv).
spatial2007.cv <- m1.2007.cv %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.cv, na.rm=TRUE))
m1.fit.2007.cv.s <- lm(barpm ~ barpred, data=spatial2007.cv)
res[res$year=="2007", 'm1cv.R2.s'] <- print(summary(m1.fit.2007.cv.s)$r.squared)
res[res$year=="2007", 'm1cv.PE.s'] <- print(rmse(residuals(m1.fit.2007.cv.s)))
#temporal
# Deviations from station means: temporal (within-station) CV component.
tempo2007.cv<-left_join(m1.2007.cv,spatial2007.cv)
tempo2007.cv$delpm <-tempo2007.cv$PM10-tempo2007.cv$barpm
tempo2007.cv$delpred <-tempo2007.cv$pred.m1.cv-tempo2007.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempo2007.cv)
res[res$year=="2007", 'm1cv.R2.t'] <- print(summary(mod_temporal.cv)$r.squared)
#-------->>> loc stage
# Local (50 m) land-use stage: model the CV residual with a GAM on local covariates.
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
setnames(luf,"tden","loc.tden")
setnames(luf,"elev50","loc.elev")
#add 50m LU to CV data
setkey(m1.2007.cv,stn)
setkey(luf,stn)
m1.2007.cv.loc <- merge(m1.2007.cv, luf, all.x = T)
#m1.2007.cv.loc<-na.omit(m1.2007.cv.loc)
#create residual variable: observed minus the CV prediction
m1.2007.cv.loc$res.m1<-m1.2007.cv.loc$PM10-m1.2007.cv.loc$pred.m1.cv
#The GAM model
# NOTE(review): assumes columns loc_p_os, dA1 and dsea come from local.csv — verify.
gam.out<-gam(res.m1~s(loc.tden)+s(tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m1.2007.cv.loc)
#plot(bp.model.ps)
#summary(bp.model.ps)
## reg
m1.2007.cv.loc$pred.m1.loc <-predict(gam.out)
# Combined prediction: CV mixed-model prediction + local GAM residual correction.
m1.2007.cv.loc$pred.m1.both <- m1.2007.cv.loc$pred.m1.cv + m1.2007.cv.loc$pred.m1.loc
res[res$year=="2007", 'm1cv.loc.R2'] <- print(summary(lm(PM10~pred.m1.both,data=m1.2007.cv.loc))$r.squared)
res[res$year=="2007", 'm1cv.loc.I'] <-print(summary(lm(PM10~pred.m1.both,data=m1.2007.cv.loc))$coef[1,1])
res[res$year=="2007", 'm1cv.loc.I.se'] <-print(summary(lm(PM10~pred.m1.both,data=m1.2007.cv.loc))$coef[1,2])
res[res$year=="2007", 'm1cv.loc.S'] <-print(summary(lm(PM10~pred.m1.both,data=m1.2007.cv.loc))$coef[2,1])
res[res$year=="2007", 'm1cv.loc.S.se'] <-print(summary(lm(PM10~pred.m1.both,data=m1.2007.cv.loc))$coef[2,2])
#RMSPE
# NOTE(review): this reuses residuals of the pre-GAM CV lm (m1.fit.2007.cv), not
# the loc-corrected model — looks like a copy-paste; confirm the intended metric.
res[res$year=="2007", 'm1cv.loc.PE'] <- print(rmse(residuals(m1.fit.2007.cv)))
#spatial
# Station-level means of observed vs loc-corrected predictions.
# FIX: the original averaged pred.m1 (full-model predictions) here; this block
# evaluates the combined CV+local prediction, so pred.m1.both is the right
# column (the temporal part below already used pred.m1.both).
spatial2007.cv.loc<-m1.2007.cv.loc %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.both, na.rm=TRUE))
m1.fit.2007.cv.loc.s <- lm(barpm ~ barpred, data=spatial2007.cv.loc)
res[res$year=="2007", 'm1cv.loc.R2.s'] <- print(summary(m1.fit.2007.cv.loc.s)$r.squared)
res[res$year=="2007", 'm1cv.loc.PE.s'] <- print(rmse(residuals(m1.fit.2007.cv.loc.s)))
#temporal
# Deviations from station means: temporal (within-station) component.
tempo2007.loc.cv<-left_join(m1.2007.cv.loc,spatial2007.cv.loc)
tempo2007.loc.cv$delpm <-tempo2007.loc.cv$PM10-tempo2007.loc.cv$barpm
tempo2007.loc.cv$delpred <-tempo2007.loc.cv$pred.m1.both-tempo2007.loc.cv$barpred
mod_temporal.loc.cv <- lm(delpm ~ delpred, data=tempo2007.loc.cv)
res[res$year=="2007", 'm1cv.loc.R2.t'] <- print(summary(mod_temporal.loc.cv)$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2007.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
saveRDS(m1.2007.cv.loc,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2007.predCV.rds")
###############
#MOD2
###############
# Load the 2007 full-grid (stage-2) dataset and standardize predictors.
m2.2007 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2007.rds")
# Z-score every raw predictor into a matching ".s" column (same result and
# column order as the original one-line-per-variable scale() assignments).
for (v in c("elev", "tden", "pden", "dist2A1", "dist2water", "dist2rail",
            "Dist2road", "ndvi", "MeanPbl", "p_ind", "p_for", "p_farm",
            "p_dos", "p_dev", "p_os", "tempa", "WDa", "WSa", "RHa",
            "Raina", "NO2a")) {
  m2.2007[, paste0(v, ".s") := scale(get(v))]
}
#generate predictions
# Predict PM10 over the grid with the 2007 stage-1 mixed model; re.form=NULL
# uses all fitted random effects, allow.new.levels handles unseen grouping levels.
m2.2007[, pred.m2 := predict(object=m1.fit.2007,newdata=m2.2007,allow.new.levels=TRUE,re.form=NULL)]
describe(m2.2007$pred.m2)
#delete impossible values (near-zero/negative and extreme predictions)
m2.2007 <- m2.2007[pred.m2 > 0.00000000000001 , ]
m2.2007 <- m2.2007[pred.m2 < 1500 , ]
saveRDS(m2.2007,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2007.pred2.rds")
#-------------->prepare for mod3
# Bi-monthly period index: months 1-2 -> 1, ..., 11-12 -> 6.
m2.2007[, bimon := (m + 1) %/% 2]
# Key by (day, aodid); day ascending also makes bimon ascending, which the
# positional re-merge of the bi-monthly subsets below relies on.
setkey(m2.2007,day, aodid)
m2.2007<-m2.2007[!is.na(meanPM10)]
#run the lmer part regressing stage 2 pred Vs mean pm
#in israel check per month, also check 30km band and other methods for meanpm
m2.smooth = lme(pred.m2 ~ meanPM10,random = list(aodid= ~1 + meanPM10),control=lmeControl(opt = "optim"), data= m2.2007 )
#xm2.smooth = lmer(pred.m2 ~ meanPM10+(1+ meanPM10|aodid), data= m2.2007 )
#correlate to see everything from mod2 and the mpm works
m2.2007[, pred.t31 := predict(m2.smooth)]
m2.2007[, resid := residuals(m2.smooth)]
res[res$year=="2007", 'm3.t31'] <- print(summary(lm(pred.m2~pred.t31,data=m2.2007))$r.squared)
#split the files to the separate bi monthly datsets
T2007_bimon1 <- subset(m2.2007 ,m2.2007$bimon == "1")
T2007_bimon2 <- subset(m2.2007 ,m2.2007$bimon == "2")
T2007_bimon3 <- subset(m2.2007 ,m2.2007$bimon == "3")
T2007_bimon4 <- subset(m2.2007 ,m2.2007$bimon == "4")
T2007_bimon5 <- subset(m2.2007 ,m2.2007$bimon == "5")
T2007_bimon6 <- subset(m2.2007 ,m2.2007$bimon == "6")
#run the separate splines (smooth) for x and y for each bimon
#whats the default band (distance) that the spline goes out and uses
# One thin-plate smooth of the lme residual over ITM coordinates per bi-month.
fit2_1 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2007_bimon1 )
fit2_2 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2007_bimon2 )
fit2_3 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2007_bimon3 )
fit2_4 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2007_bimon4 )
fit2_5 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2007_bimon5 )
fit2_6 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2007_bimon6 )
#get the predicted-fitted
Xpred_1 <- (T2007_bimon1$pred.t31 - fit2_1$fitted)
Xpred_2 <- (T2007_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (T2007_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (T2007_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (T2007_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (T2007_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file
# Positional alignment relies on m2.2007 being keyed by (day, aodid), so rows
# are grouped in ascending bimon order matching the concatenated subsets.
m2.2007$pred.t32 <- c( Xpred_1,Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.2007,day, aodid)
#rerun the lme on the predictions including the spatial spline (smooth)
Final_pred_2007 <- lme(pred.t32 ~ meanPM10 ,random = list(aodid= ~1 + meanPM10 ),control=lmeControl(opt = "optim"),data= m2.2007 )
m2.2007[, pred.t33 := predict(Final_pred_2007)]
#check correlations
res[res$year=="2007", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.2007))$r.squared)
#------------------------>>>
#import mod3
data.m3 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2007.rds")
#for PM10
# Positional column subset — depends on the stored column order of the RDS.
data.m3 <- data.m3[,c(1,2,5,29:32,52,53),with=FALSE]
data.m3[, bimon := (m + 1) %/% 2]
setkey(data.m3,day, aodid)
data.m3<-data.m3[!is.na(meanPM10)]
#generate m.3 initial pred
# Apply the stage-2 calibration model to the full mod3 grid.
data.m3$pred.m3.mix <- predict(Final_pred_2007,data.m3)
#create unique grid
# One row per AOD cell with its mean coordinates (lat/long and ITM).
ugrid <-data.m3 %>%
group_by(aodid) %>%
summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE))
#### PREDICT Gam part
#split back into bimons to include the gam prediction in final prediction
data.m3_bimon1 <- data.m3[bimon == 1, ]
data.m3_bimon2 <- data.m3[bimon == 2, ]
data.m3_bimon3 <- data.m3[bimon == 3, ]
data.m3_bimon4 <- data.m3[bimon == 4, ]
data.m3_bimon5 <- data.m3[bimon == 5, ]
data.m3_bimon6 <- data.m3[bimon == 6, ]
#addin unique grid to each bimon
# Plain assignments; ugrid itself stays untouched because the later `$gpred <-`
# assigns to a copy (copy-on-modify).
uniq_gid_bimon1 <- ugrid
uniq_gid_bimon2 <- ugrid
uniq_gid_bimon3 <- ugrid
uniq_gid_bimon4 <- ugrid
uniq_gid_bimon5 <- ugrid
uniq_gid_bimon6 <- ugrid
#get predictions for Bimon residuals
# Evaluate each bi-month's spatial residual smooth on the unique grid.
uniq_gid_bimon1$gpred <- predict.gam(fit2_1,uniq_gid_bimon1)
uniq_gid_bimon2$gpred <- predict.gam(fit2_2,uniq_gid_bimon2)
uniq_gid_bimon3$gpred <- predict.gam(fit2_3,uniq_gid_bimon3)
uniq_gid_bimon4$gpred <- predict.gam(fit2_4,uniq_gid_bimon4)
uniq_gid_bimon5$gpred <- predict.gam(fit2_5,uniq_gid_bimon5)
uniq_gid_bimon6$gpred <- predict.gam(fit2_6,uniq_gid_bimon6)
#merge things back together
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> merges
# Left-join each bi-monthly grid's gpred onto the matching daily data by aodid.
setkey(uniq_gid_bimon1,aodid)
setkey(data.m3_bimon1,aodid)
data.m3_bimon1 <- merge(data.m3_bimon1, uniq_gid_bimon1[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon2,aodid)
setkey(data.m3_bimon2,aodid)
data.m3_bimon2 <- merge(data.m3_bimon2, uniq_gid_bimon2[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon3,aodid)
setkey(data.m3_bimon3,aodid)
data.m3_bimon3 <- merge(data.m3_bimon3, uniq_gid_bimon3[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon4,aodid)
setkey(data.m3_bimon4,aodid)
data.m3_bimon4 <- merge(data.m3_bimon4, uniq_gid_bimon4[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon5,aodid)
setkey(data.m3_bimon5,aodid)
data.m3_bimon5 <- merge(data.m3_bimon5, uniq_gid_bimon5[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon6,aodid)
setkey(data.m3_bimon6,aodid)
data.m3_bimon6 <- merge(data.m3_bimon6, uniq_gid_bimon6[,list(aodid,gpred)], all.x = T)
#reattach all parts
mod3 <- rbind(data.m3_bimon1,data.m3_bimon2,data.m3_bimon3,data.m3_bimon4,data.m3_bimon5,data.m3_bimon6)
# create pred.m3: calibrated lme prediction plus the bi-monthly spatial GAM term
mod3$pred.m3 <-mod3$pred.m3.mix+mod3$gpred
#hist(mod3$pred.m3)
#describe(mod3$pred.m3)
# Drop negative predictions (rows are removed, not recoded to zero).
mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2007.pred.rds")
#clean
# gdata::keep — remove every workspace object except those listed.
keep(mod3,res,rmse, sure=TRUE)
gc()
#########################
#prepare for m3.R2
#########################
#load mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2007.pred.rds")
mod1[,aodid:= paste(mod1$long_aod,mod1$lat_aod,sep="-")]
mod1<-mod1[,c("aodid","day","PM10","pred.m1","stn"),with=FALSE]
#R2.m3
# Join stage-3 predictions onto station observations to validate against PM10.
setkey(mod3,day,aodid)
setkey(mod1,day,aodid)
m1.2007 <- merge(mod1,mod3[, list(day,aodid,pred.m3)], all.x = T)
m3.fit.2007<- summary(lm(PM10~pred.m3,data=m1.2007))
res[res$year=="2007", 'm3.R2'] <- print(summary(lm(PM10~pred.m3,data=m1.2007))$r.squared)
#RMSPE
# residuals() on a summary.lm works: the summary object stores $residuals.
res[res$year=="2007", 'm3.PE'] <- print(rmse(residuals(m3.fit.2007)))
#spatial
###to check
# Station-level means: spatial (between-station) component of the validation.
spatial2007<-m1.2007 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m3, na.rm=TRUE))
m1.fit.2007.spat<- lm(barpm ~ barpred, data=spatial2007)
res[res$year=="2007", 'm3.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2007))$r.squared)
res[res$year=="2007", 'm3.PE.s'] <- print(rmse(residuals(m1.fit.2007.spat)))
#temporal
# Deviations from station means: temporal (within-station) component.
tempo2007<-left_join(m1.2007,spatial2007)
tempo2007$delpm <-tempo2007$PM10-tempo2007$barpm
tempo2007$delpred <-tempo2007$pred.m3-tempo2007$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2007)
res[res$year=="2007", 'm3.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2007))$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2007.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
#########################
#import mod2
mod2<- readRDS( "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2007.pred2.rds")
mod2<-mod2[,c("aodid","day","pred.m2"),with=FALSE]
#----------------> store the best available
# bestpred priority: pred.m1 (stations) > pred.m2 (grid w/ AOD) > pred.m3 (gap-filled).
mod3best <- mod3[, list(aodid, x_aod_ITM, y_aod_ITM, day, pred.m3)]
setkey(mod3best, day, aodid)
setkey(mod2, day, aodid)
mod3best <- merge(mod3best, mod2[,list(aodid, day, pred.m2)], all.x = T)
#reload mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2007.pred.rds")
mod1$aodid<-paste(mod1$long_aod,mod1$lat_aod,sep="-")
mod1<-mod1[,c("aodid","day","PM10","pred.m1"),with=FALSE]
setkey(mod1,day,aodid)
mod3best <- merge(mod3best, mod1, all.x = T)
mod3best[,bestpred := pred.m3]
mod3best[!is.na(pred.m2),bestpred := pred.m2]
mod3best[!is.na(pred.m1),bestpred := pred.m1]
#save
saveRDS(mod3best,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2007.FINAL.rds")
#save for GIS
# Per-cell long-term summary of the best prediction plus source counts.
write.csv(mod3best[, list(LTPM = mean(bestpred, na.rm = T),
predvariance = var(bestpred, na.rm = T),
predmin = min(bestpred, na.rm = T),
predmax = max(bestpred, na.rm = T),
npred = sum(!is.na(bestpred)),
npred.m1 = sum(!is.na(pred.m1)),
npred.m2 = sum(!is.na(pred.m2)),
npred.m3 = sum(!is.na(pred.m3)),
x_aod_ITM = x_aod_ITM[1], y_aod_ITM = y_aod_ITM[1]),by=aodid], "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/LTPM3.AQ.2007.csv", row.names = F)
keep(res, sure=TRUE)
# NOTE(review): stray no-op `c()` below — possibly a typo for gc(); left as-is.
c()
###############
#LIBS
###############
# Reload packages after the keep() wipe; semicolon-chained on one line by design.
library(lme4);library(reshape);library(foreign) ;library(ggplot2);library(plyr);library(data.table);library(reshape2);library(Hmisc);library(mgcv);library(gdata);library(car);library(dplyr);library(ggmap);library(broom);library(splines)
#sourcing
# Helpers: splitdf() (random train/test split) and rmse().
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
#if needed load res table
#res<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
### import data
m1.2008 <-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2008.rds")
################# clean BAD STN PM10 and check if improved model?
# Per-station PM10~AOD regression; stations with R2 < 0.01 are flagged as bad.
raWDaf <- ddply(m1.2008, c( "stn"),
function(x) {
mod1 <- lm(PM10 ~ aod, data=x)
data.frame(R2 = round(summary(mod1)$r.squared, 5),
nsamps = length(summary(mod1)$resid))
})
raWDaf
raWDaf<-as.data.table(raWDaf)
bad<- raWDaf[R2< 0.01]
# badid is just the station id (paste() with one argument and sep is a no-op).
bad[,badid := paste(stn,sep="-")]
#################BAD STN
m1.2008[,badid := paste(stn,sep="-")]
####Take out bad stations
m1.2008 <- m1.2008[!(m1.2008$badid %in% bad$badid), ]
#scale vars
# Z-score every raw predictor into a matching ".s" column. One loop replaces
# the 21 near-identical scale() assignments; variable order is kept so the
# resulting column order is identical.
for (v in c("elev", "tden", "pden", "dist2A1", "dist2water", "dist2rail",
            "Dist2road", "ndvi", "MeanPbl", "p_ind", "p_for", "p_farm",
            "p_dos", "p_dev", "p_os", "tempa", "WDa", "WSa", "RHa",
            "Raina", "NO2a")) {
  m1.2008[, paste0(v, ".s") := scale(get(v))]
}
# Stage-1 mixed model: PM10 on AOD plus covariates, with a random intercept and
# AOD slope nested in day/region.
m1.formula <- as.formula(PM10~ aod
+tempa.s+WDa.s+WSa.s+Dust+MeanPbl.s #temporal
+elev.s+tden.s+pden.s+Dist2road.s+ndvi.s #spatial
+p_os.s #+p_dev.s+p_dos.s+p_farm.s+p_for.s+p_ind.s #land use
#+aod*Dust #interactions
+(1+aod|day/reg_num)) #+(1|stn) !!! stn screws up mod3
#full fit
m1.fit.2008 <- lmer(m1.formula,data=m1.2008,weights=normwt)
m1.2008$pred.m1 <- predict(m1.fit.2008)
res[res$year=="2008", 'm1.R2'] <- print(summary(lm(PM10~pred.m1,data=m1.2008))$r.squared)
#RMSPE
res[res$year=="2008", 'm1.PE'] <- print(rmse(residuals(m1.fit.2008)))
#spatial
###to check
# Station-level means: spatial (between-station) component of model fit.
spatial2008<-m1.2008 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2008.spat<- lm(barpm ~ barpred, data=spatial2008)
res[res$year=="2008", 'm1.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2008))$r.squared)
res[res$year=="2008", 'm1.PE.s'] <- print(rmse(residuals(m1.fit.2008.spat)))
#temporal
# Deviations from station means: temporal (within-station) component.
tempo2008<-left_join(m1.2008,spatial2008)
tempo2008$delpm <-tempo2008$PM10-tempo2008$barpm
tempo2008$delpred <-tempo2008$pred.m1-tempo2008$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2008)
res[res$year=="2008", 'm1.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2008))$r.squared)
saveRDS(m1.2008,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2008.pred.rds")
#---------------->>>> CV
# Cross-validation: 10 independent random train/test splits (splitdf() comes
# from the sourced CV_splits.r). For each split the stage-1 model is refit on
# the training set and predicted on the held-out test set; re.form=NULL keeps
# the random effects and allow.new.levels=TRUE falls back to fixed effects
# for day/region levels absent from the training data. Folds are stacked
# into m1.2008.cv below.
# NOTE(review): kept as 10 explicit stanzas rather than a loop because the
# train_*/test_*/out_train_* globals are later removed by name pattern.
#s1
splits_s1 <- splitdf(m1.2008)
test_s1 <- splits_s1$testset
train_s1 <- splits_s1$trainset
out_train_s1 <- lmer(m1.formula,data = train_s1,weights=normwt)
test_s1$pred.m1.cv <- predict(object=out_train_s1 ,newdata=test_s1,allow.new.levels=TRUE,re.form=NULL )
test_s1$iter<-"s1"
#s2
splits_s2 <- splitdf(m1.2008)
test_s2 <- splits_s2$testset
train_s2 <- splits_s2$trainset
out_train_s2 <- lmer(m1.formula,data = train_s2,weights=normwt)
test_s2$pred.m1.cv <- predict(object=out_train_s2 ,newdata=test_s2,allow.new.levels=TRUE,re.form=NULL )
test_s2$iter<-"s2"
#s3
splits_s3 <- splitdf(m1.2008)
test_s3 <- splits_s3$testset
train_s3 <- splits_s3$trainset
out_train_s3 <- lmer(m1.formula,data = train_s3,weights=normwt)
test_s3$pred.m1.cv <- predict(object=out_train_s3 ,newdata=test_s3,allow.new.levels=TRUE,re.form=NULL )
test_s3$iter<-"s3"
#s4
splits_s4 <- splitdf(m1.2008)
test_s4 <- splits_s4$testset
train_s4 <- splits_s4$trainset
out_train_s4 <- lmer(m1.formula,data = train_s4,weights=normwt)
test_s4$pred.m1.cv <- predict(object=out_train_s4 ,newdata=test_s4,allow.new.levels=TRUE,re.form=NULL )
test_s4$iter<-"s4"
#s5
splits_s5 <- splitdf(m1.2008)
test_s5 <- splits_s5$testset
train_s5 <- splits_s5$trainset
out_train_s5 <- lmer(m1.formula,data = train_s5,weights=normwt)
test_s5$pred.m1.cv <- predict(object=out_train_s5 ,newdata=test_s5,allow.new.levels=TRUE,re.form=NULL )
test_s5$iter<-"s5"
#s6
splits_s6 <- splitdf(m1.2008)
test_s6 <- splits_s6$testset
train_s6 <- splits_s6$trainset
out_train_s6 <- lmer(m1.formula,data = train_s6,weights=normwt)
test_s6$pred.m1.cv <- predict(object=out_train_s6 ,newdata=test_s6,allow.new.levels=TRUE,re.form=NULL )
test_s6$iter<-"s6"
#s7
splits_s7 <- splitdf(m1.2008)
test_s7 <- splits_s7$testset
train_s7 <- splits_s7$trainset
out_train_s7 <- lmer(m1.formula,data = train_s7,weights=normwt)
test_s7$pred.m1.cv <- predict(object=out_train_s7 ,newdata=test_s7,allow.new.levels=TRUE,re.form=NULL )
test_s7$iter<-"s7"
#s8
splits_s8 <- splitdf(m1.2008)
test_s8 <- splits_s8$testset
train_s8 <- splits_s8$trainset
out_train_s8 <- lmer(m1.formula,data = train_s8,weights=normwt)
test_s8$pred.m1.cv <- predict(object=out_train_s8 ,newdata=test_s8,allow.new.levels=TRUE,re.form=NULL )
test_s8$iter<-"s8"
#s9
splits_s9 <- splitdf(m1.2008)
test_s9 <- splits_s9$testset
train_s9 <- splits_s9$trainset
out_train_s9 <- lmer(m1.formula,data = train_s9,weights=normwt)
test_s9$pred.m1.cv <- predict(object=out_train_s9 ,newdata=test_s9,allow.new.levels=TRUE,re.form=NULL )
test_s9$iter<-"s9"
#s10
splits_s10 <- splitdf(m1.2008)
test_s10 <- splits_s10$testset
train_s10 <- splits_s10$trainset
out_train_s10 <- lmer(m1.formula,data = train_s10,weights=normwt)
test_s10$pred.m1.cv <- predict(object=out_train_s10 ,newdata=test_s10,allow.new.levels=TRUE,re.form=NULL )
test_s10$iter<-"s10"
#BIND 1 dataset
m1.2008.cv<- data.table(rbind(test_s1,test_s2,test_s3,test_s4,test_s5,test_s6,test_s7,test_s8,test_s9, test_s10))
# cleanup (remove from WS) objects from CV
# NOTE: the pattern also matches out_train_s*, so the fitted fold models
# are removed here too (splits_s* survive until the later keep()).
rm(list = ls(pattern = "train_|test_"))
#table updates
# CV (held-out) performance for 2008. Fit observed ~ CV-predicted once and
# reuse the summary (the original refit the identical lm for every extracted
# statistic).
m1.fit.2008.cv<-lm(PM10~pred.m1.cv,data=m1.2008.cv)
m1.sum.2008.cv <- summary(m1.fit.2008.cv)
res[res$year=="2008", 'm1cv.R2'] <- print(m1.sum.2008.cv$r.squared)
res[res$year=="2008", 'm1cv.I'] <-print(m1.sum.2008.cv$coef[1,1])
res[res$year=="2008", 'm1cv.I.se'] <-print(m1.sum.2008.cv$coef[1,2])
res[res$year=="2008", 'm1cv.S'] <-print(m1.sum.2008.cv$coef[2,1])
res[res$year=="2008", 'm1cv.S.se'] <-print(m1.sum.2008.cv$coef[2,2])
#RMSPE
res[res$year=="2008", 'm1cv.PE'] <- print(rmse(residuals(m1.fit.2008.cv)))
#spatial
# Station-level means of observed vs CV-predicted PM10.
# BUG FIX: the original averaged pred.m1 (in-sample full-model predictions);
# the CV spatial R2 must be based on the held-out predictions pred.m1.cv.
spatial2008.cv<-m1.2008.cv %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.cv, na.rm=TRUE))
m1.fit.2008.cv.s <- lm(barpm ~ barpred, data=spatial2008.cv)
res[res$year=="2008", 'm1cv.R2.s'] <- print(summary(m1.fit.2008.cv.s)$r.squared)
res[res$year=="2008", 'm1cv.PE.s'] <- print(rmse(residuals(m1.fit.2008.cv.s)))
#temporal
# Daily deviations from station means, observed vs CV-predicted.
tempo2008.cv<-left_join(m1.2008.cv,spatial2008.cv)
tempo2008.cv$delpm <-tempo2008.cv$PM10-tempo2008.cv$barpm
tempo2008.cv$delpred <-tempo2008.cv$pred.m1.cv-tempo2008.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempo2008.cv)
res[res$year=="2008", 'm1cv.R2.t'] <- print(summary(mod_temporal.cv)$r.squared)
#-------->>> loc stage
# Local (50m) land-use stage: model the stage-1 CV residual with a GAM on
# local covariates and add it back to the CV prediction (pred.m1.both).
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
setnames(luf,"tden","loc.tden")
setnames(luf,"elev50","loc.elev")
#add 50m LU to CV data
setkey(m1.2008.cv,stn)
setkey(luf,stn)
m1.2008.cv.loc <- merge(m1.2008.cv, luf, all.x = T)
#m1.2008.cv.loc<-na.omit(m1.2008.cv.loc)
#create residual mp3 variable
m1.2008.cv.loc$res.m1<-m1.2008.cv.loc$PM10-m1.2008.cv.loc$pred.m1.cv
#The GAM model
gam.out<-gam(res.m1~s(loc.tden)+s(tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m1.2008.cv.loc)
#plot(bp.model.ps)
#summary(bp.model.ps)
## reg
m1.2008.cv.loc$pred.m1.loc <-predict(gam.out)
m1.2008.cv.loc$pred.m1.both <- m1.2008.cv.loc$pred.m1.cv + m1.2008.cv.loc$pred.m1.loc
# Fit observed ~ combined (CV + local GAM) prediction once; reuse below.
m1.fit.2008.cv.loc <- lm(PM10~pred.m1.both,data=m1.2008.cv.loc)
m1.sum.2008.cv.loc <- summary(m1.fit.2008.cv.loc)
res[res$year=="2008", 'm1cv.loc.R2'] <- print(m1.sum.2008.cv.loc$r.squared)
res[res$year=="2008", 'm1cv.loc.I'] <-print(m1.sum.2008.cv.loc$coef[1,1])
res[res$year=="2008", 'm1cv.loc.I.se'] <-print(m1.sum.2008.cv.loc$coef[1,2])
res[res$year=="2008", 'm1cv.loc.S'] <-print(m1.sum.2008.cv.loc$coef[2,1])
res[res$year=="2008", 'm1cv.loc.S.se'] <-print(m1.sum.2008.cv.loc$coef[2,2])
#RMSPE
# BUG FIX: the original reported rmse(residuals(m1.fit.2008.cv)), i.e. the
# pre-loc CV model; the loc-stage RMSPE must come from the loc-stage fit.
res[res$year=="2008", 'm1cv.loc.PE'] <- print(rmse(residuals(m1.fit.2008.cv.loc)))
#spatial
# BUG FIX: average the combined prediction pred.m1.both (the original
# averaged the in-sample pred.m1).
spatial2008.cv.loc<-m1.2008.cv.loc %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.both, na.rm=TRUE))
m1.fit.2008.cv.loc.s <- lm(barpm ~ barpred, data=spatial2008.cv.loc)
res[res$year=="2008", 'm1cv.loc.R2.s'] <- print(summary(m1.fit.2008.cv.loc.s)$r.squared)
res[res$year=="2008", 'm1cv.loc.PE.s'] <- print(rmse(residuals(m1.fit.2008.cv.loc.s)))
#temporal
tempo2008.loc.cv<-left_join(m1.2008.cv.loc,spatial2008.cv.loc)
tempo2008.loc.cv$delpm <-tempo2008.loc.cv$PM10-tempo2008.loc.cv$barpm
tempo2008.loc.cv$delpred <-tempo2008.loc.cv$pred.m1.both-tempo2008.loc.cv$barpred
mod_temporal.loc.cv <- lm(delpm ~ delpred, data=tempo2008.loc.cv)
res[res$year=="2008", 'm1cv.loc.R2.t'] <- print(summary(mod_temporal.loc.cv)$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2008.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
saveRDS(m1.2008.cv.loc,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2008.predCV.rds")
###############
#MOD2
###############
# Load the 2008 stage-2 (full AOD grid) dataset and standardize the same
# predictor set used by the stage-1 formula. Each <var>.s column holds
# scale(<var>); a single data.table call replaces the former one assignment
# per column (identical column order and values).
m2.2008<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2008.rds")
m2.vars.2008 <- c("elev","tden","pden","dist2A1","dist2water","dist2rail",
"Dist2road","ndvi","MeanPbl","p_ind","p_for","p_farm","p_dos","p_dev",
"p_os","tempa","WDa","WSa","RHa","Raina","NO2a")
m2.2008[, (paste0(m2.vars.2008, ".s")) := lapply(.SD, scale), .SDcols = m2.vars.2008]
#generate predictions
# Predict PM10 across the grid with the stage-1 mixed model; random effects
# are used where the day/region level exists, fixed effects otherwise.
m2.2008[, pred.m2 := predict(object=m1.fit.2008,newdata=m2.2008,allow.new.levels=TRUE,re.form=NULL)]
describe(m2.2008$pred.m2)
# drop implausible predictions (non-positive or extreme values)
m2.2008 <- m2.2008[pred.m2 > 0.00000000000001 , ]
m2.2008 <- m2.2008[pred.m2 < 1500 , ]
saveRDS(m2.2008,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2008.pred2.rds")
#-------------->prepare for mod3
# bimonthly index from month m: 1-2 -> 1, 3-4 -> 2, ..., 11-12 -> 6
m2.2008[, bimon := (m + 1) %/% 2]
setkey(m2.2008,day, aodid)
m2.2008<-m2.2008[!is.na(meanPM10)]
#run the lmer part regressing stage 2 pred Vs mean pm
#in israel check per month, also check 30km band and other methods for meanpm
# Per-cell (aodid) random intercept+slope calibration of grid predictions
# against the regional mean PM10.
m2.smooth = lme(pred.m2 ~ meanPM10,random = list(aodid= ~1 + meanPM10),control=lmeControl(opt = "optim"), data= m2.2008 )
#xm2.smooth = lmer(pred.m2 ~ meanPM10+(1+ meanPM10|aodid), data= m2.2008 )
#correlate to see everything from mod2 and the mpm works
m2.2008[, pred.t31 := predict(m2.smooth)]
m2.2008[, resid := residuals(m2.smooth)]
res[res$year=="2008", 'm3.t31'] <- print(summary(lm(pred.m2~pred.t31,data=m2.2008))$r.squared)
#split the files to the separate bi monthly datsets
T2008_bimon1 <- subset(m2.2008 ,m2.2008$bimon == "1")
T2008_bimon2 <- subset(m2.2008 ,m2.2008$bimon == "2")
T2008_bimon3 <- subset(m2.2008 ,m2.2008$bimon == "3")
T2008_bimon4 <- subset(m2.2008 ,m2.2008$bimon == "4")
T2008_bimon5 <- subset(m2.2008 ,m2.2008$bimon == "5")
T2008_bimon6 <- subset(m2.2008 ,m2.2008$bimon == "6")
#run the separate splines (smooth) for x and y for each bimon
#whats the default band (distance) that the spline goes out and uses
# Per-bimonth spatial smooth of the lme residuals over ITM coordinates.
# fit2_1..fit2_6 are reused later to predict gpred on the full grid.
fit2_1 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2008_bimon1 )
fit2_2 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2008_bimon2 )
fit2_3 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2008_bimon3 )
fit2_4 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2008_bimon4 )
fit2_5 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2008_bimon5 )
fit2_6 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2008_bimon6 )
#get the predicted-fitted
# Remove the fitted spatial-residual surface from the calibrated prediction.
Xpred_1 <- (T2008_bimon1$pred.t31 - fit2_1$fitted)
Xpred_2 <- (T2008_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (T2008_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (T2008_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (T2008_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (T2008_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file
# NOTE(review): this concatenation assumes m2.2008 rows are ordered by bimon
# blocks (they are, since the day/aodid key sorts days chronologically) --
# confirm if the key ever changes.
m2.2008$pred.t32 <- c( Xpred_1,Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.2008,day, aodid)
#rerun the lme on the predictions including the spatial spline (smooth)
Final_pred_2008 <- lme(pred.t32 ~ meanPM10 ,random = list(aodid= ~1 + meanPM10 ),control=lmeControl(opt = "optim"),data= m2.2008 )
m2.2008[, pred.t33 := predict(Final_pred_2008)]
#check correlations
res[res$year=="2008", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.2008))$r.squared)
#------------------------>>>
#import mod3
# Stage 3 (2008): predict PM10 on all grid-days (including days without AOD)
# from the regional mean PM10 calibration plus the bimonthly spatial smooths.
data.m3 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2008.rds")
#for PM10
# NOTE(review): column selection by position -- fragile if the upstream
# mod3 dataset layout changes; verify indices against the data build.
data.m3 <- data.m3[,c(1,2,5,29:32,52,53),with=FALSE]
data.m3[, bimon := (m + 1) %/% 2]
setkey(data.m3,day, aodid)
data.m3<-data.m3[!is.na(meanPM10)]
#generate m.3 initial pred
data.m3$pred.m3.mix <- predict(Final_pred_2008,data.m3)
#create unique grid
# One row per AOD cell with its mean coordinates, used to evaluate the
# bimonthly spatial smooths once per cell.
ugrid <-data.m3 %>%
group_by(aodid) %>%
summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE))
#### PREDICT Gam part
#split back into bimons to include the gam prediction in final prediction
data.m3_bimon1 <- data.m3[bimon == 1, ]
data.m3_bimon2 <- data.m3[bimon == 2, ]
data.m3_bimon3 <- data.m3[bimon == 3, ]
data.m3_bimon4 <- data.m3[bimon == 4, ]
data.m3_bimon5 <- data.m3[bimon == 5, ]
data.m3_bimon6 <- data.m3[bimon == 6, ]
#addin unique grid to each bimon
uniq_gid_bimon1 <- ugrid
uniq_gid_bimon2 <- ugrid
uniq_gid_bimon3 <- ugrid
uniq_gid_bimon4 <- ugrid
uniq_gid_bimon5 <- ugrid
uniq_gid_bimon6 <- ugrid
#get predictions for Bimon residuals
# gpred = the bimonth-specific spatial residual surface at each cell.
uniq_gid_bimon1$gpred <- predict.gam(fit2_1,uniq_gid_bimon1)
uniq_gid_bimon2$gpred <- predict.gam(fit2_2,uniq_gid_bimon2)
uniq_gid_bimon3$gpred <- predict.gam(fit2_3,uniq_gid_bimon3)
uniq_gid_bimon4$gpred <- predict.gam(fit2_4,uniq_gid_bimon4)
uniq_gid_bimon5$gpred <- predict.gam(fit2_5,uniq_gid_bimon5)
uniq_gid_bimon6$gpred <- predict.gam(fit2_6,uniq_gid_bimon6)
#merge things back togheter
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> merges
setkey(uniq_gid_bimon1,aodid)
setkey(data.m3_bimon1,aodid)
data.m3_bimon1 <- merge(data.m3_bimon1, uniq_gid_bimon1[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon2,aodid)
setkey(data.m3_bimon2,aodid)
data.m3_bimon2 <- merge(data.m3_bimon2, uniq_gid_bimon2[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon3,aodid)
setkey(data.m3_bimon3,aodid)
data.m3_bimon3 <- merge(data.m3_bimon3, uniq_gid_bimon3[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon4,aodid)
setkey(data.m3_bimon4,aodid)
data.m3_bimon4 <- merge(data.m3_bimon4, uniq_gid_bimon4[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon5,aodid)
setkey(data.m3_bimon5,aodid)
data.m3_bimon5 <- merge(data.m3_bimon5, uniq_gid_bimon5[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon6,aodid)
setkey(data.m3_bimon6,aodid)
data.m3_bimon6 <- merge(data.m3_bimon6, uniq_gid_bimon6[,list(aodid,gpred)], all.x = T)
#reattach all parts
mod3 <- rbind(data.m3_bimon1,data.m3_bimon2,data.m3_bimon3,data.m3_bimon4,data.m3_bimon5,data.m3_bimon6)
# create pred.m3
mod3$pred.m3 <-mod3$pred.m3.mix+mod3$gpred
#hist(mod3$pred.m3)
#describe(mod3$pred.m3)
# drop negative predictions
# NOTE(review): despite the original "recode into zero" wording, negative
# rows are removed, not set to zero.
mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2008.pred.rds")
#clean
keep(mod3,res,rmse, sure=TRUE)
gc()
#########################
#prepare for m3.R2
#########################
# Evaluate the stage-3 predictions against station PM10 by merging the
# stage-1 (station) data with the mod3 grid predictions on day+cell.
#load mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2008.pred.rds")
mod1[,aodid:= paste(mod1$long_aod,mod1$lat_aod,sep="-")]
mod1<-mod1[,c("aodid","day","PM10","pred.m1","stn"),with=FALSE]
#R2.m3
setkey(mod3,day,aodid)
setkey(mod1,day,aodid)
m1.2008 <- merge(mod1,mod3[, list(day,aodid,pred.m3)], all.x = T)
# CONSISTENCY FIX: store the lm fit itself (the original stored
# summary(lm(...))), matching every other section, so residuals() below is
# taken from the model object; fitted once and reused.
m3.fit.2008 <- lm(PM10~pred.m3,data=m1.2008)
res[res$year=="2008", 'm3.R2'] <- print(summary(m3.fit.2008)$r.squared)
#RMSPE
res[res$year=="2008", 'm3.PE'] <- print(rmse(residuals(m3.fit.2008)))
#spatial
###to check
spatial2008<-m1.2008 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m3, na.rm=TRUE))
m1.fit.2008.spat<- lm(barpm ~ barpred, data=spatial2008)
res[res$year=="2008", 'm3.R2.s'] <- print(summary(m1.fit.2008.spat)$r.squared)
res[res$year=="2008", 'm3.PE.s'] <- print(rmse(residuals(m1.fit.2008.spat)))
#temporal
tempo2008<-left_join(m1.2008,spatial2008)
tempo2008$delpm <-tempo2008$PM10-tempo2008$barpm
tempo2008$delpred <-tempo2008$pred.m3-tempo2008$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2008)
res[res$year=="2008", 'm3.R2.t'] <- print(summary(mod_temporal)$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2008.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
#########################
#import mod2
# Assemble the "best available" daily prediction per grid cell: stage-3
# everywhere, overridden by stage-2 where AOD existed, overridden by the
# stage-1 calibration where a station observation existed.
mod2<- readRDS( "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2008.pred2.rds")
mod2<-mod2[,c("aodid","day","pred.m2"),with=FALSE]
#----------------> store the best available
mod3best <- mod3[, list(aodid, x_aod_ITM, y_aod_ITM, day, pred.m3)]
setkey(mod3best, day, aodid)
setkey(mod2, day, aodid)
mod3best <- merge(mod3best, mod2[,list(aodid, day, pred.m2)], all.x = T)
#reload mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2008.pred.rds")
mod1$aodid<-paste(mod1$long_aod,mod1$lat_aod,sep="-")
mod1<-mod1[,c("aodid","day","PM10","pred.m1"),with=FALSE]
setkey(mod1,day,aodid)
mod3best <- merge(mod3best, mod1, all.x = T)
# priority: pred.m1 > pred.m2 > pred.m3
mod3best[,bestpred := pred.m3]
mod3best[!is.na(pred.m2),bestpred := pred.m2]
mod3best[!is.na(pred.m1),bestpred := pred.m1]
#save
saveRDS(mod3best,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2008.FINAL.rds")
#save for GIS
# Per-cell long-term summary (mean/var/min/max and per-stage counts).
write.csv(mod3best[, list(LTPM = mean(bestpred, na.rm = T),
predvariance = var(bestpred, na.rm = T),
predmin = min(bestpred, na.rm = T),
predmax = max(bestpred, na.rm = T),
npred = sum(!is.na(bestpred)),
npred.m1 = sum(!is.na(pred.m1)),
npred.m2 = sum(!is.na(pred.m2)),
npred.m3 = sum(!is.na(pred.m3)),
x_aod_ITM = x_aod_ITM[1], y_aod_ITM = y_aod_ITM[1]),by=aodid], "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/LTPM3.AQ.2008.csv", row.names = F)
keep(res, sure=TRUE)
# BUG FIX: the original called c() here, a no-op; gc() was clearly intended,
# matching the keep() + gc() cleanup used after the mod3 save above.
gc()
###############
#LIBS
###############
# 2009 pipeline starts here (workspace was wiped above except res).
library(lme4);library(reshape);library(foreign) ;library(ggplot2);library(plyr);library(data.table);library(reshape2);library(Hmisc);library(mgcv);library(gdata);library(car);library(dplyr);library(ggmap);library(broom);library(splines)
#sourcing
# splitdf() (CV splits) and rmse() helpers
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
#if needed load res table
#res<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
### import data
m1.2009 <-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2009.rds")
################# clean BAD STN PM10 and check if improved model?
# Per-station PM10~AOD regression; stations with R2 < 0.01 are flagged bad.
raWDaf <- ddply(m1.2009, c( "stn"),
function(x) {
mod1 <- lm(PM10 ~ aod, data=x)
data.frame(R2 = round(summary(mod1)$r.squared, 5),
nsamps = length(summary(mod1)$resid))
})
raWDaf
raWDaf<-as.data.table(raWDaf)
bad<- raWDaf[R2< 0.01]
# NOTE(review): paste(stn, sep="-") with a single argument is just
# as.character(stn); the sep has no effect.
bad[,badid := paste(stn,sep="-")]
#################BAD STN
m1.2009[,badid := paste(stn,sep="-")]
####Take out bad stations
m1.2009 <- m1.2009[!(m1.2009$badid %in% bad$badid), ]
#scale vars
# Standardize the stage-1 predictor set in a single data.table call; each
# <var>.s column holds scale(<var>), identical (values, attributes and
# column order) to the former one-assignment-per-column version.
m1.vars.2009 <- c("elev","tden","pden","dist2A1","dist2water","dist2rail",
"Dist2road","ndvi","MeanPbl","p_ind","p_for","p_farm","p_dos","p_dev",
"p_os","tempa","WDa","WSa","RHa","Raina","NO2a")
m1.2009[, (paste0(m1.vars.2009, ".s")) := lapply(.SD, scale), .SDcols = m1.vars.2009]
# Stage-1 formula: AOD plus scaled temporal, spatial and land-use terms,
# with random intercepts + AOD slopes nested day/region.
m1.formula <- as.formula(PM10~ aod
+tempa.s+WDa.s+WSa.s+Dust+MeanPbl.s #temporal
+elev.s+tden.s+pden.s+Dist2road.s+ndvi.s #spatial
+p_os.s #+p_dev.s+p_dos.s+p_farm.s+p_for.s+p_ind.s #land use
#+aod*Dust #interactions
+(1+aod|day/reg_num)) #+(1|stn) !!! stn screws up mod3
#full fit
m1.fit.2009 <- lmer(m1.formula,data=m1.2009,weights=normwt)
m1.2009$pred.m1 <- predict(m1.fit.2009)
# Overall (in-sample) R2 of observed vs fitted PM10.
res[res$year=="2009", 'm1.R2'] <- print(summary(lm(PM10~pred.m1,data=m1.2009))$r.squared)
#RMSPE
res[res$year=="2009", 'm1.PE'] <- print(rmse(residuals(m1.fit.2009)))
#spatial
###to check
# Spatial R2: station-level means of observed vs predicted PM10.
spatial2009<-m1.2009 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2009.spat<- lm(barpm ~ barpred, data=spatial2009)
res[res$year=="2009", 'm1.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2009))$r.squared)
res[res$year=="2009", 'm1.PE.s'] <- print(rmse(residuals(m1.fit.2009.spat)))
#temporal
# Temporal R2: daily deviations from station means.
tempo2009<-left_join(m1.2009,spatial2009)
tempo2009$delpm <-tempo2009$PM10-tempo2009$barpm
tempo2009$delpred <-tempo2009$pred.m1-tempo2009$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2009)
res[res$year=="2009", 'm1.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2009))$r.squared)
saveRDS(m1.2009,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2009.pred.rds")
#---------------->>>> CV
# 10 random train/test splits (splitdf from CV_splits.r); each fold refits
# the stage-1 model on the training set and predicts the held-out test set
# (re.form=NULL keeps random effects; allow.new.levels=TRUE handles unseen
# day/region levels). Folds are stacked into m1.2009.cv below.
#s1
splits_s1 <- splitdf(m1.2009)
test_s1 <- splits_s1$testset
train_s1 <- splits_s1$trainset
out_train_s1 <- lmer(m1.formula,data = train_s1,weights=normwt)
test_s1$pred.m1.cv <- predict(object=out_train_s1 ,newdata=test_s1,allow.new.levels=TRUE,re.form=NULL )
test_s1$iter<-"s1"
#s2
splits_s2 <- splitdf(m1.2009)
test_s2 <- splits_s2$testset
train_s2 <- splits_s2$trainset
out_train_s2 <- lmer(m1.formula,data = train_s2,weights=normwt)
test_s2$pred.m1.cv <- predict(object=out_train_s2 ,newdata=test_s2,allow.new.levels=TRUE,re.form=NULL )
test_s2$iter<-"s2"
#s3
splits_s3 <- splitdf(m1.2009)
test_s3 <- splits_s3$testset
train_s3 <- splits_s3$trainset
out_train_s3 <- lmer(m1.formula,data = train_s3,weights=normwt)
test_s3$pred.m1.cv <- predict(object=out_train_s3 ,newdata=test_s3,allow.new.levels=TRUE,re.form=NULL )
test_s3$iter<-"s3"
#s4
splits_s4 <- splitdf(m1.2009)
test_s4 <- splits_s4$testset
train_s4 <- splits_s4$trainset
out_train_s4 <- lmer(m1.formula,data = train_s4,weights=normwt)
test_s4$pred.m1.cv <- predict(object=out_train_s4 ,newdata=test_s4,allow.new.levels=TRUE,re.form=NULL )
test_s4$iter<-"s4"
#s5
splits_s5 <- splitdf(m1.2009)
test_s5 <- splits_s5$testset
train_s5 <- splits_s5$trainset
out_train_s5 <- lmer(m1.formula,data = train_s5,weights=normwt)
test_s5$pred.m1.cv <- predict(object=out_train_s5 ,newdata=test_s5,allow.new.levels=TRUE,re.form=NULL )
test_s5$iter<-"s5"
#s6
splits_s6 <- splitdf(m1.2009)
test_s6 <- splits_s6$testset
train_s6 <- splits_s6$trainset
out_train_s6 <- lmer(m1.formula,data = train_s6,weights=normwt)
test_s6$pred.m1.cv <- predict(object=out_train_s6 ,newdata=test_s6,allow.new.levels=TRUE,re.form=NULL )
test_s6$iter<-"s6"
#s7
splits_s7 <- splitdf(m1.2009)
test_s7 <- splits_s7$testset
train_s7 <- splits_s7$trainset
out_train_s7 <- lmer(m1.formula,data = train_s7,weights=normwt)
test_s7$pred.m1.cv <- predict(object=out_train_s7 ,newdata=test_s7,allow.new.levels=TRUE,re.form=NULL )
test_s7$iter<-"s7"
#s8
splits_s8 <- splitdf(m1.2009)
test_s8 <- splits_s8$testset
train_s8 <- splits_s8$trainset
out_train_s8 <- lmer(m1.formula,data = train_s8,weights=normwt)
test_s8$pred.m1.cv <- predict(object=out_train_s8 ,newdata=test_s8,allow.new.levels=TRUE,re.form=NULL )
test_s8$iter<-"s8"
#s9
splits_s9 <- splitdf(m1.2009)
test_s9 <- splits_s9$testset
train_s9 <- splits_s9$trainset
out_train_s9 <- lmer(m1.formula,data = train_s9,weights=normwt)
test_s9$pred.m1.cv <- predict(object=out_train_s9 ,newdata=test_s9,allow.new.levels=TRUE,re.form=NULL )
test_s9$iter<-"s9"
#s10
splits_s10 <- splitdf(m1.2009)
test_s10 <- splits_s10$testset
train_s10 <- splits_s10$trainset
out_train_s10 <- lmer(m1.formula,data = train_s10,weights=normwt)
test_s10$pred.m1.cv <- predict(object=out_train_s10 ,newdata=test_s10,allow.new.levels=TRUE,re.form=NULL )
test_s10$iter<-"s10"
#BIND 1 dataset
m1.2009.cv<- data.table(rbind(test_s1,test_s2,test_s3,test_s4,test_s5,test_s6,test_s7,test_s8,test_s9, test_s10))
# cleanup (remove from WS) objects from CV
# NOTE: the pattern also matches out_train_s*, removing the fold models.
rm(list = ls(pattern = "train_|test_"))
#table updates
# CV (held-out) performance for 2009. Fit observed ~ CV-predicted once and
# reuse the summary (the original refit the identical lm for every
# extracted statistic).
m1.fit.2009.cv<-lm(PM10~pred.m1.cv,data=m1.2009.cv)
m1.sum.2009.cv <- summary(m1.fit.2009.cv)
res[res$year=="2009", 'm1cv.R2'] <- print(m1.sum.2009.cv$r.squared)
res[res$year=="2009", 'm1cv.I'] <-print(m1.sum.2009.cv$coef[1,1])
res[res$year=="2009", 'm1cv.I.se'] <-print(m1.sum.2009.cv$coef[1,2])
res[res$year=="2009", 'm1cv.S'] <-print(m1.sum.2009.cv$coef[2,1])
res[res$year=="2009", 'm1cv.S.se'] <-print(m1.sum.2009.cv$coef[2,2])
#RMSPE
res[res$year=="2009", 'm1cv.PE'] <- print(rmse(residuals(m1.fit.2009.cv)))
#spatial
# Station-level means of observed vs CV-predicted PM10.
# BUG FIX: the original averaged pred.m1 (in-sample full-model predictions);
# the CV spatial R2 must be based on the held-out predictions pred.m1.cv.
spatial2009.cv<-m1.2009.cv %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.cv, na.rm=TRUE))
m1.fit.2009.cv.s <- lm(barpm ~ barpred, data=spatial2009.cv)
res[res$year=="2009", 'm1cv.R2.s'] <- print(summary(m1.fit.2009.cv.s)$r.squared)
res[res$year=="2009", 'm1cv.PE.s'] <- print(rmse(residuals(m1.fit.2009.cv.s)))
#temporal
# Daily deviations from station means, observed vs CV-predicted.
tempo2009.cv<-left_join(m1.2009.cv,spatial2009.cv)
tempo2009.cv$delpm <-tempo2009.cv$PM10-tempo2009.cv$barpm
tempo2009.cv$delpred <-tempo2009.cv$pred.m1.cv-tempo2009.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempo2009.cv)
res[res$year=="2009", 'm1cv.R2.t'] <- print(summary(mod_temporal.cv)$r.squared)
#-------->>> loc stage
# Local (50m) land-use stage: model the stage-1 CV residual with a GAM on
# local covariates and add it back to the CV prediction (pred.m1.both).
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
setnames(luf,"tden","loc.tden")
setnames(luf,"elev50","loc.elev")
#add 50m LU to CV data
setkey(m1.2009.cv,stn)
setkey(luf,stn)
m1.2009.cv.loc <- merge(m1.2009.cv, luf, all.x = T)
#m1.2009.cv.loc<-na.omit(m1.2009.cv.loc)
#create residual mp3 variable
m1.2009.cv.loc$res.m1<-m1.2009.cv.loc$PM10-m1.2009.cv.loc$pred.m1.cv
#The GAM model
gam.out<-gam(res.m1~s(loc.tden)+s(tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m1.2009.cv.loc)
#plot(bp.model.ps)
#summary(bp.model.ps)
## reg
m1.2009.cv.loc$pred.m1.loc <-predict(gam.out)
m1.2009.cv.loc$pred.m1.both <- m1.2009.cv.loc$pred.m1.cv + m1.2009.cv.loc$pred.m1.loc
# Fit observed ~ combined (CV + local GAM) prediction once; reuse below.
m1.fit.2009.cv.loc <- lm(PM10~pred.m1.both,data=m1.2009.cv.loc)
m1.sum.2009.cv.loc <- summary(m1.fit.2009.cv.loc)
res[res$year=="2009", 'm1cv.loc.R2'] <- print(m1.sum.2009.cv.loc$r.squared)
res[res$year=="2009", 'm1cv.loc.I'] <-print(m1.sum.2009.cv.loc$coef[1,1])
res[res$year=="2009", 'm1cv.loc.I.se'] <-print(m1.sum.2009.cv.loc$coef[1,2])
res[res$year=="2009", 'm1cv.loc.S'] <-print(m1.sum.2009.cv.loc$coef[2,1])
res[res$year=="2009", 'm1cv.loc.S.se'] <-print(m1.sum.2009.cv.loc$coef[2,2])
#RMSPE
# BUG FIX: the original reported rmse(residuals(m1.fit.2009.cv)), i.e. the
# pre-loc CV model; the loc-stage RMSPE must come from the loc-stage fit.
res[res$year=="2009", 'm1cv.loc.PE'] <- print(rmse(residuals(m1.fit.2009.cv.loc)))
#spatial
# BUG FIX: average the combined prediction pred.m1.both (the original
# averaged the in-sample pred.m1).
spatial2009.cv.loc<-m1.2009.cv.loc %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.both, na.rm=TRUE))
m1.fit.2009.cv.loc.s <- lm(barpm ~ barpred, data=spatial2009.cv.loc)
res[res$year=="2009", 'm1cv.loc.R2.s'] <- print(summary(m1.fit.2009.cv.loc.s)$r.squared)
res[res$year=="2009", 'm1cv.loc.PE.s'] <- print(rmse(residuals(m1.fit.2009.cv.loc.s)))
#temporal
tempo2009.loc.cv<-left_join(m1.2009.cv.loc,spatial2009.cv.loc)
tempo2009.loc.cv$delpm <-tempo2009.loc.cv$PM10-tempo2009.loc.cv$barpm
tempo2009.loc.cv$delpred <-tempo2009.loc.cv$pred.m1.both-tempo2009.loc.cv$barpred
mod_temporal.loc.cv <- lm(delpm ~ delpred, data=tempo2009.loc.cv)
res[res$year=="2009", 'm1cv.loc.R2.t'] <- print(summary(mod_temporal.loc.cv)$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2009.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
saveRDS(m1.2009.cv.loc,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2009.predCV.rds")
###############
#MOD2
###############
# Load the 2009 stage-2 (full AOD grid) dataset and standardize the same
# predictor set used by the stage-1 formula. Each <var>.s column holds
# scale(<var>); a single data.table call replaces the former one assignment
# per column (identical column order and values).
m2.2009<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2009.rds")
m2.vars.2009 <- c("elev","tden","pden","dist2A1","dist2water","dist2rail",
"Dist2road","ndvi","MeanPbl","p_ind","p_for","p_farm","p_dos","p_dev",
"p_os","tempa","WDa","WSa","RHa","Raina","NO2a")
m2.2009[, (paste0(m2.vars.2009, ".s")) := lapply(.SD, scale), .SDcols = m2.vars.2009]
#generate predictions
# Predict PM10 across the grid with the stage-1 mixed model; random effects
# are used where the day/region level exists, fixed effects otherwise.
m2.2009[, pred.m2 := predict(object=m1.fit.2009,newdata=m2.2009,allow.new.levels=TRUE,re.form=NULL)]
describe(m2.2009$pred.m2)
# drop implausible predictions (non-positive or extreme values)
m2.2009 <- m2.2009[pred.m2 > 0.00000000000001 , ]
m2.2009 <- m2.2009[pred.m2 < 1500 , ]
saveRDS(m2.2009,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2009.pred2.rds")
#-------------->prepare for mod3
# bimonthly index from month m: 1-2 -> 1, 3-4 -> 2, ..., 11-12 -> 6
m2.2009[, bimon := (m + 1) %/% 2]
setkey(m2.2009,day, aodid)
m2.2009<-m2.2009[!is.na(meanPM10)]
#run the lmer part regressing stage 2 pred Vs mean pm
#in israel check per month, also check 30km band and other methods for meanpm
# Per-cell (aodid) random intercept+slope calibration of grid predictions
# against the regional mean PM10.
m2.smooth = lme(pred.m2 ~ meanPM10,random = list(aodid= ~1 + meanPM10),control=lmeControl(opt = "optim"), data= m2.2009 )
#xm2.smooth = lmer(pred.m2 ~ meanPM10+(1+ meanPM10|aodid), data= m2.2009 )
#correlate to see everything from mod2 and the mpm works
m2.2009[, pred.t31 := predict(m2.smooth)]
m2.2009[, resid := residuals(m2.smooth)]
res[res$year=="2009", 'm3.t31'] <- print(summary(lm(pred.m2~pred.t31,data=m2.2009))$r.squared)
#split the files to the separate bi monthly datsets
T2009_bimon1 <- subset(m2.2009 ,m2.2009$bimon == "1")
T2009_bimon2 <- subset(m2.2009 ,m2.2009$bimon == "2")
T2009_bimon3 <- subset(m2.2009 ,m2.2009$bimon == "3")
T2009_bimon4 <- subset(m2.2009 ,m2.2009$bimon == "4")
T2009_bimon5 <- subset(m2.2009 ,m2.2009$bimon == "5")
T2009_bimon6 <- subset(m2.2009 ,m2.2009$bimon == "6")
#run the separate splines (smooth) for x and y for each bimon
#whats the default band (distance) that the spline goes out and uses
# Per-bimonth spatial smooth of the lme residuals over ITM coordinates.
# fit2_1..fit2_6 are reused later to predict gpred on the full grid.
fit2_1 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2009_bimon1 )
fit2_2 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2009_bimon2 )
fit2_3 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2009_bimon3 )
fit2_4 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2009_bimon4 )
fit2_5 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2009_bimon5 )
fit2_6 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2009_bimon6 )
#get the predicted-fitted
# Remove the fitted spatial-residual surface from the calibrated prediction.
Xpred_1 <- (T2009_bimon1$pred.t31 - fit2_1$fitted)
Xpred_2 <- (T2009_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (T2009_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (T2009_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (T2009_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (T2009_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file
# NOTE(review): this concatenation assumes m2.2009 rows are ordered by bimon
# blocks (they are, since the day/aodid key sorts days chronologically) --
# confirm if the key ever changes.
m2.2009$pred.t32 <- c( Xpred_1,Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.2009,day, aodid)
#rerun the lme on the predictions including the spatial spline (smooth)
Final_pred_2009 <- lme(pred.t32 ~ meanPM10 ,random = list(aodid= ~1 + meanPM10 ),control=lmeControl(opt = "optim"),data= m2.2009 )
m2.2009[, pred.t33 := predict(Final_pred_2009)]
#check correlations
res[res$year=="2009", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.2009))$r.squared)
#------------------------>>>
#import mod3
# Stage 3 (2009): predict PM10 on all grid-days (including days without AOD)
# from the regional mean PM10 calibration plus the bimonthly spatial smooths.
data.m3 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2009.rds")
#for PM10
# NOTE(review): column selection by position -- fragile if the upstream
# mod3 dataset layout changes; verify indices against the data build.
data.m3 <- data.m3[,c(1,2,5,29:32,52,53),with=FALSE]
data.m3[, bimon := (m + 1) %/% 2]
setkey(data.m3,day, aodid)
data.m3<-data.m3[!is.na(meanPM10)]
#generate m.3 initial pred
data.m3$pred.m3.mix <- predict(Final_pred_2009,data.m3)
#create unique grid
# One row per AOD cell with its mean coordinates, used to evaluate the
# bimonthly spatial smooths once per cell.
ugrid <-data.m3 %>%
group_by(aodid) %>%
summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE))
#### PREDICT Gam part
#split back into bimons to include the gam prediction in final prediction
data.m3_bimon1 <- data.m3[bimon == 1, ]
data.m3_bimon2 <- data.m3[bimon == 2, ]
data.m3_bimon3 <- data.m3[bimon == 3, ]
data.m3_bimon4 <- data.m3[bimon == 4, ]
data.m3_bimon5 <- data.m3[bimon == 5, ]
data.m3_bimon6 <- data.m3[bimon == 6, ]
#addin unique grid to each bimon
uniq_gid_bimon1 <- ugrid
uniq_gid_bimon2 <- ugrid
uniq_gid_bimon3 <- ugrid
uniq_gid_bimon4 <- ugrid
uniq_gid_bimon5 <- ugrid
uniq_gid_bimon6 <- ugrid
#get predictions for Bimon residuals
# gpred = the bimonth-specific spatial residual surface at each cell.
uniq_gid_bimon1$gpred <- predict.gam(fit2_1,uniq_gid_bimon1)
uniq_gid_bimon2$gpred <- predict.gam(fit2_2,uniq_gid_bimon2)
uniq_gid_bimon3$gpred <- predict.gam(fit2_3,uniq_gid_bimon3)
uniq_gid_bimon4$gpred <- predict.gam(fit2_4,uniq_gid_bimon4)
uniq_gid_bimon5$gpred <- predict.gam(fit2_5,uniq_gid_bimon5)
uniq_gid_bimon6$gpred <- predict.gam(fit2_6,uniq_gid_bimon6)
#merge things back togheter
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> merges
setkey(uniq_gid_bimon1,aodid)
setkey(data.m3_bimon1,aodid)
data.m3_bimon1 <- merge(data.m3_bimon1, uniq_gid_bimon1[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon2,aodid)
setkey(data.m3_bimon2,aodid)
data.m3_bimon2 <- merge(data.m3_bimon2, uniq_gid_bimon2[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon3,aodid)
setkey(data.m3_bimon3,aodid)
data.m3_bimon3 <- merge(data.m3_bimon3, uniq_gid_bimon3[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon4,aodid)
setkey(data.m3_bimon4,aodid)
data.m3_bimon4 <- merge(data.m3_bimon4, uniq_gid_bimon4[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon5,aodid)
setkey(data.m3_bimon5,aodid)
data.m3_bimon5 <- merge(data.m3_bimon5, uniq_gid_bimon5[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon6,aodid)
setkey(data.m3_bimon6,aodid)
data.m3_bimon6 <- merge(data.m3_bimon6, uniq_gid_bimon6[,list(aodid,gpred)], all.x = T)
#reattach all parts
mod3 <- rbind(data.m3_bimon1,data.m3_bimon2,data.m3_bimon3,data.m3_bimon4,data.m3_bimon5,data.m3_bimon6)
# create pred.m3
mod3$pred.m3 <-mod3$pred.m3.mix+mod3$gpred
#hist(mod3$pred.m3)
#describe(mod3$pred.m3)
# drop negative predictions
# NOTE(review): despite the original "recode into zero" wording, negative
# rows are removed, not set to zero.
mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2009.pred.rds")
#clean
keep(mod3,res,rmse, sure=TRUE)
gc()
#########################
#prepare for m3.R2
#########################
#load mod1
# Reload the station-day (stage-1) data to validate stage-3 predictions
# against observed PM10 at monitor locations.
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2009.pred.rds")
mod1[,aodid:= paste(mod1$long_aod,mod1$lat_aod,sep="-")]
mod1<-mod1[,c("aodid","day","PM10","pred.m1","stn"),with=FALSE]
#R2.m3
setkey(mod3,day,aodid)
setkey(mod1,day,aodid)
m1.2009 <- merge(mod1,mod3[, list(day,aodid,pred.m3)], all.x = T)
# m3.fit.2009 is a summary.lm object; residuals() below extracts its $residuals.
m3.fit.2009<- summary(lm(PM10~pred.m3,data=m1.2009))
res[res$year=="2009", 'm3.R2'] <- print(summary(lm(PM10~pred.m3,data=m1.2009))$r.squared)
#RMSPE
res[res$year=="2009", 'm3.PE'] <- print(rmse(residuals(m3.fit.2009)))
#spatial
###to check
# Spatial (between-station) accuracy: regress station-mean observed on station-mean predicted.
spatial2009<-m1.2009 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m3, na.rm=TRUE))
m1.fit.2009.spat<- lm(barpm ~ barpred, data=spatial2009)
res[res$year=="2009", 'm3.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2009))$r.squared)
res[res$year=="2009", 'm3.PE.s'] <- print(rmse(residuals(m1.fit.2009.spat)))
#temporal
# Temporal (within-station) accuracy: deviations from the station means.
tempo2009<-left_join(m1.2009,spatial2009)
tempo2009$delpm <-tempo2009$PM10-tempo2009$barpm
tempo2009$delpred <-tempo2009$pred.m3-tempo2009$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2009)
res[res$year=="2009", 'm3.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2009))$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2009.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
#########################
#import mod2
mod2<- readRDS( "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2009.pred2.rds")
mod2<-mod2[,c("aodid","day","pred.m2"),with=FALSE]
#----------------> store the best available
# Best-available prediction per cell/day: stage-3 everywhere, overridden by
# stage-2 where an AOD retrieval exists, overridden by stage-1 at monitors.
mod3best <- mod3[, list(aodid, x_aod_ITM, y_aod_ITM, day, pred.m3)]
setkey(mod3best, day, aodid)
setkey(mod2, day, aodid)
mod3best <- merge(mod3best, mod2[,list(aodid, day, pred.m2)], all.x = T)
#reload mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2009.pred.rds")
mod1$aodid<-paste(mod1$long_aod,mod1$lat_aod,sep="-")
mod1<-mod1[,c("aodid","day","PM10","pred.m1"),with=FALSE]
setkey(mod1,day,aodid)
mod3best <- merge(mod3best, mod1, all.x = T)
mod3best[,bestpred := pred.m3]
mod3best[!is.na(pred.m2),bestpred := pred.m2]
mod3best[!is.na(pred.m1),bestpred := pred.m1]
#save
saveRDS(mod3best,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2009.FINAL.rds")
#save for GIS
# Long-term per-cell summary of the best-available predictions.
write.csv(mod3best[, list(LTPM = mean(bestpred, na.rm = T),
predvariance = var(bestpred, na.rm = T),
predmin = min(bestpred, na.rm = T),
predmax = max(bestpred, na.rm = T),
npred = sum(!is.na(bestpred)),
npred.m1 = sum(!is.na(pred.m1)),
npred.m2 = sum(!is.na(pred.m2)),
npred.m3 = sum(!is.na(pred.m3)),
x_aod_ITM = x_aod_ITM[1], y_aod_ITM = y_aod_ITM[1]),by=aodid], "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/LTPM3.AQ.2009.csv", row.names = F)
# Drop everything from the workspace except the results table (gdata::keep)
# before starting the 2010 section.
keep(res, sure=TRUE)
# BUG FIX: was `c()` — a no-op returning NULL. The parallel cleanup after the
# mod3 save uses `gc()`; do the same here to release memory.
gc()
###############
#LIBS
###############
library(lme4);library(reshape);library(foreign) ;library(ggplot2);library(plyr);library(data.table);library(reshape2);library(Hmisc);library(mgcv);library(gdata);library(car);library(dplyr);library(ggmap);library(broom);library(splines)
#sourcing
# Project helpers: splitdf() (random train/test split) and rmse().
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
#if needed load res table
#res<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
### import data
m1.2010 <-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2010.rds")
################# clean BAD STN PM10 and check if improved model?
# Per-station PM10~AOD regressions; stations with near-zero R2 are treated as bad.
raWDaf <- ddply(m1.2010, c( "stn"),
function(x) {
mod1 <- lm(PM10 ~ aod, data=x)
data.frame(R2 = round(summary(mod1)$r.squared, 5),
nsamps = length(summary(mod1)$resid))
})
raWDaf
raWDaf<-as.data.table(raWDaf)
# Threshold R2 < 0.01 flags stations whose PM10 is unrelated to AOD.
bad<- raWDaf[R2< 0.01]
bad[,badid := paste(stn,sep="-")]
#################BAD STN
m1.2010[,badid := paste(stn,sep="-")]
####Take out bad stations
m1.2010 <- m1.2010[!(m1.2010$badid %in% bad$badid), ]
#scale vars
# Standardize (z-score) every covariate used in the stage-1 mixed-model
# formula, in a single data.table `:=` call. Each new ".s" column is the
# scale() of its source column, exactly as the original one-per-line updates.
m1.2010[, `:=`(
  elev.s       = scale(elev),
  tden.s       = scale(tden),
  pden.s       = scale(pden),
  dist2A1.s    = scale(dist2A1),
  dist2water.s = scale(dist2water),
  dist2rail.s  = scale(dist2rail),
  Dist2road.s  = scale(Dist2road),
  ndvi.s       = scale(ndvi),
  MeanPbl.s    = scale(MeanPbl),
  p_ind.s      = scale(p_ind),
  p_for.s      = scale(p_for),
  p_farm.s     = scale(p_farm),
  p_dos.s      = scale(p_dos),
  p_dev.s      = scale(p_dev),
  p_os.s       = scale(p_os),
  tempa.s      = scale(tempa),
  WDa.s        = scale(WDa),
  WSa.s        = scale(WSa),
  RHa.s        = scale(RHa),
  Raina.s      = scale(Raina),
  NO2a.s       = scale(NO2a)
)]
# Stage-1 calibration formula: PM10 on AOD plus scaled temporal/spatial/land-use
# covariates, with a random intercept+AOD slope nested day/region.
m1.formula <- as.formula(PM10~ aod
+tempa.s+WDa.s+WSa.s+Dust+MeanPbl.s #temporal
+elev.s+tden.s+pden.s+Dist2road.s+ndvi.s #spatial
+p_os.s #+p_dev.s+p_dos.s+p_farm.s+p_for.s+p_ind.s #land use
#+aod*Dust #interactions
+(1+aod|day/reg_num)) #+(1|stn) !!! stn screws up mod3
#full fit
# normwt is a column of m1.2010 used as case weights — TODO confirm its derivation.
m1.fit.2010 <- lmer(m1.formula,data=m1.2010,weights=normwt)
m1.2010$pred.m1 <- predict(m1.fit.2010)
res[res$year=="2010", 'm1.R2'] <- print(summary(lm(PM10~pred.m1,data=m1.2010))$r.squared)
#RMSPE
res[res$year=="2010", 'm1.PE'] <- print(rmse(residuals(m1.fit.2010)))
#spatial
###to check
# Spatial accuracy: station-mean observed vs station-mean predicted.
spatial2010<-m1.2010 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2010.spat<- lm(barpm ~ barpred, data=spatial2010)
res[res$year=="2010", 'm1.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2010))$r.squared)
res[res$year=="2010", 'm1.PE.s'] <- print(rmse(residuals(m1.fit.2010.spat)))
#temporal
# Temporal accuracy: deviations from the station means.
tempo2010<-left_join(m1.2010,spatial2010)
tempo2010$delpm <-tempo2010$PM10-tempo2010$barpm
tempo2010$delpred <-tempo2010$pred.m1-tempo2010$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2010)
res[res$year=="2010", 'm1.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2010))$r.squared)
saveRDS(m1.2010,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2010.pred.rds")
#---------------->>>> CV
# Ten repeated random train/test splits. For each split the stage-1 mixed
# model is refit on the training set and used to predict the held-out set
# (pred.m1.cv). splitdf() is called in the same order as the original
# unrolled s1..s10 stanzas, so the RNG stream — and the splits — are identical.
cv_folds <- vector("list", 10)
for (i in seq_len(10)) {
  fold <- splitdf(m1.2010)
  held_out <- fold$testset
  fold_fit <- lmer(m1.formula, data = fold$trainset, weights = normwt)
  held_out$pred.m1.cv <- predict(object = fold_fit, newdata = held_out,
                                 allow.new.levels = TRUE, re.form = NULL)
  held_out$iter <- paste0("s", i)
  cv_folds[[i]] <- held_out
}
#BIND 1 dataset
m1.2010.cv <- data.table(do.call(rbind, cv_folds))
# cleanup (remove from WS) objects from CV
rm(cv_folds, fold, held_out, fold_fit, i)
#table updates
# Overall CV accuracy: observed PM10 regressed on held-out predictions;
# store R2, intercept/slope (with SEs) and RMSPE in the results table.
m1.fit.2010.cv<-lm(PM10~pred.m1.cv,data=m1.2010.cv)
res[res$year=="2010", 'm1cv.R2'] <- print(summary(lm(PM10~pred.m1.cv,data=m1.2010.cv))$r.squared)
res[res$year=="2010", 'm1cv.I'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2010.cv))$coef[1,1])
res[res$year=="2010", 'm1cv.I.se'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2010.cv))$coef[1,2])
res[res$year=="2010", 'm1cv.S'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2010.cv))$coef[2,1])
res[res$year=="2010", 'm1cv.S.se'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2010.cv))$coef[2,2])
#RMSPE
res[res$year=="2010", 'm1cv.PE'] <- print(rmse(residuals(m1.fit.2010.cv)))
#spatial
# Spatial (between-station) CV accuracy: station means of observed PM10 vs
# station means of the held-out CV predictions.
# BUG FIX: barpred previously averaged pred.m1 — the FULL-data fit carried
# into the test sets — not the held-out pred.m1.cv, so the "CV" spatial
# R2/PE were computed on non-CV predictions.
spatial2010.cv<-m1.2010.cv %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.cv, na.rm=TRUE))
m1.fit.2010.cv.s <- lm(barpm ~ barpred, data=spatial2010.cv)
res[res$year=="2010", 'm1cv.R2.s'] <- print(summary(m1.fit.2010.cv.s)$r.squared)
res[res$year=="2010", 'm1cv.PE.s'] <- print(rmse(residuals(m1.fit.2010.cv.s)))
#temporal
# Temporal (within-station) CV accuracy: deviations of observed and CV-predicted
# PM10 from the station means computed in spatial2010.cv.
tempo2010.cv<-left_join(m1.2010.cv,spatial2010.cv)
tempo2010.cv$delpm <-tempo2010.cv$PM10-tempo2010.cv$barpm
tempo2010.cv$delpred <-tempo2010.cv$pred.m1.cv-tempo2010.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempo2010.cv)
res[res$year=="2010", 'm1cv.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2010.cv))$r.squared)
#-------->>> loc stage
# Local stage: model the CV residuals with 50m-scale land-use covariates via a GAM.
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
# Rename to avoid clashing with the grid-scale tden/elev already in the CV data.
setnames(luf,"tden","loc.tden")
setnames(luf,"elev50","loc.elev")
#add 50m LU to CV data
setkey(m1.2010.cv,stn)
setkey(luf,stn)
m1.2010.cv.loc <- merge(m1.2010.cv, luf, all.x = T)
#m1.2010.cv.loc<-na.omit(m1.2010.cv.loc)
#create residual m1 variable
m1.2010.cv.loc$res.m1<-m1.2010.cv.loc$PM10-m1.2010.cv.loc$pred.m1.cv
#The GAM model
# Smooths of local traffic density (incl. interactions with PBL height and wind
# speed), local open space, local elevation, and distances to highway/sea.
gam.out<-gam(res.m1~s(loc.tden)+s(tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m1.2010.cv.loc)
#plot(bp.model.ps)
#summary(bp.model.ps)
## reg
# Combined prediction = CV prediction + local GAM residual correction.
m1.2010.cv.loc$pred.m1.loc <-predict(gam.out)
m1.2010.cv.loc$pred.m1.both <- m1.2010.cv.loc$pred.m1.cv + m1.2010.cv.loc$pred.m1.loc
# Local-stage CV accuracy: observed PM10 vs the combined (CV + local GAM)
# prediction. Fit once and reuse the summary — the original refit the same
# lm five times.
m1.fit.2010.cv.loc <- lm(PM10~pred.m1.both,data=m1.2010.cv.loc)
sum.2010.cv.loc <- summary(m1.fit.2010.cv.loc)
res[res$year=="2010", 'm1cv.loc.R2'] <- print(sum.2010.cv.loc$r.squared)
res[res$year=="2010", 'm1cv.loc.I'] <-print(sum.2010.cv.loc$coef[1,1])
res[res$year=="2010", 'm1cv.loc.I.se'] <-print(sum.2010.cv.loc$coef[1,2])
res[res$year=="2010", 'm1cv.loc.S'] <-print(sum.2010.cv.loc$coef[2,1])
res[res$year=="2010", 'm1cv.loc.S.se'] <-print(sum.2010.cv.loc$coef[2,2])
#RMSPE
# BUG FIX: was rmse(residuals(m1.fit.2010.cv)) — the model WITHOUT the local
# stage — so m1cv.loc.PE duplicated m1cv.PE. Use the local-stage fit so the
# PE matches the R2 reported above.
res[res$year=="2010", 'm1cv.loc.PE'] <- print(rmse(residuals(m1.fit.2010.cv.loc)))
#spatial
# Spatial accuracy of the local stage.
# BUG FIX: barpred previously averaged pred.m1 (the full-data stage-1
# prediction); the m1cv.loc.* row must evaluate the combined local-stage
# prediction pred.m1.both.
spatial2010.cv.loc<-m1.2010.cv.loc %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.both, na.rm=TRUE))
m1.fit.2010.cv.loc.s <- lm(barpm ~ barpred, data=spatial2010.cv.loc)
res[res$year=="2010", 'm1cv.loc.R2.s'] <- print(summary(m1.fit.2010.cv.loc.s)$r.squared)
res[res$year=="2010", 'm1cv.loc.PE.s'] <- print(rmse(residuals(m1.fit.2010.cv.loc.s)))
#temporal
# Temporal accuracy of the local stage: deviations from station means.
tempo2010.loc.cv<-left_join(m1.2010.cv.loc,spatial2010.cv.loc)
tempo2010.loc.cv$delpm <-tempo2010.loc.cv$PM10-tempo2010.loc.cv$barpm
tempo2010.loc.cv$delpred <-tempo2010.loc.cv$pred.m1.both-tempo2010.loc.cv$barpred
mod_temporal.loc.cv <- lm(delpm ~ delpred, data=tempo2010.loc.cv)
res[res$year=="2010", 'm1cv.loc.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2010.loc.cv))$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2010.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
saveRDS(m1.2010.cv.loc,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2010.predCV.rds")
###############
#MOD2
###############
# Stage-2 input for 2010: AOD grid/day table; standardize the same covariates
# used by the stage-1 formula, in one data.table `:=` call (equivalent to the
# original column-by-column updates).
m2.2010<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2010.rds")
m2.2010[, `:=`(
  elev.s       = scale(elev),
  tden.s       = scale(tden),
  pden.s       = scale(pden),
  dist2A1.s    = scale(dist2A1),
  dist2water.s = scale(dist2water),
  dist2rail.s  = scale(dist2rail),
  Dist2road.s  = scale(Dist2road),
  ndvi.s       = scale(ndvi),
  MeanPbl.s    = scale(MeanPbl),
  p_ind.s      = scale(p_ind),
  p_for.s      = scale(p_for),
  p_farm.s     = scale(p_farm),
  p_dos.s      = scale(p_dos),
  p_dev.s      = scale(p_dev),
  p_os.s       = scale(p_os),
  tempa.s      = scale(tempa),
  WDa.s        = scale(WDa),
  WSa.s        = scale(WSa),
  RHa.s        = scale(RHa),
  Raina.s      = scale(Raina),
  NO2a.s       = scale(NO2a)
)]
#generate predictions
# Stage-2: predict PM10 on the full AOD grid with the stage-1 mixed model.
m2.2010[, pred.m2 := predict(object=m1.fit.2010,newdata=m2.2010,allow.new.levels=TRUE,re.form=NULL)]
describe(m2.2010$pred.m2)
#delete impossible values (non-positive or extreme predictions)
m2.2010 <- m2.2010[pred.m2 > 0.00000000000001 , ]
m2.2010 <- m2.2010[pred.m2 < 1500 , ]
saveRDS(m2.2010,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2010.pred2.rds")
#-------------->prepare for mod3
# Bi-monthly period index: months 1-2 -> 1, ..., 11-12 -> 6.
m2.2010[, bimon := (m + 1) %/% 2]
setkey(m2.2010,day, aodid)
m2.2010<-m2.2010[!is.na(meanPM10)]
#run the lmer part regressing stage 2 pred Vs mean pm
#in israel check per month, also check 30km band and other methods for meanpm
m2.smooth = lme(pred.m2 ~ meanPM10,random = list(aodid= ~1 + meanPM10),control=lmeControl(opt = "optim"), data= m2.2010 )
#xm2.smooth = lmer(pred.m2 ~ meanPM10+(1+ meanPM10|aodid), data= m2.2010 )
#correlate to see everything from mod2 and the mpm works
m2.2010[, pred.t31 := predict(m2.smooth)]
m2.2010[, resid := residuals(m2.smooth)]
res[res$year=="2010", 'm3.t31'] <- print(summary(lm(pred.m2~pred.t31,data=m2.2010))$r.squared)
#split the files to the separate bi monthly datsets
T2010_bimon1 <- subset(m2.2010 ,m2.2010$bimon == "1")
T2010_bimon2 <- subset(m2.2010 ,m2.2010$bimon == "2")
T2010_bimon3 <- subset(m2.2010 ,m2.2010$bimon == "3")
T2010_bimon4 <- subset(m2.2010 ,m2.2010$bimon == "4")
T2010_bimon5 <- subset(m2.2010 ,m2.2010$bimon == "5")
T2010_bimon6 <- subset(m2.2010 ,m2.2010$bimon == "6")
#run the separate splines (smooth) for x and y for each bimon
#whats the default band (distance) that the spline goes out and uses
# Thin-plate smooth of the lme residuals over ITM coordinates, per bi-month.
fit2_1 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2010_bimon1 )
fit2_2 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2010_bimon2 )
fit2_3 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2010_bimon3 )
fit2_4 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2010_bimon4 )
fit2_5 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2010_bimon5 )
fit2_6 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2010_bimon6 )
#get the predicted-fitted
Xpred_1 <- (T2010_bimon1$pred.t31 - fit2_1$fitted)
Xpred_2 <- (T2010_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (T2010_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (T2010_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (T2010_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (T2010_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file
# Relies on the bimon subsets preserving the (day, aodid) key order so the
# concatenation lines up row-for-row with m2.2010.
m2.2010$pred.t32 <- c( Xpred_1,Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.2010,day, aodid)
#rerun the lme on the predictions including the spatial spline (smooth)
# Stage-3 "final" model for 2010: spline-corrected stage-2 predictions (pred.t32)
# regressed on regional mean PM10 with a random intercept+slope per AOD grid cell.
Final_pred_2010 <- lme(pred.t32 ~ meanPM10 ,random = list(aodid= ~1 + meanPM10 ),control=lmeControl(opt = "optim"),data= m2.2010 )
m2.2010[, pred.t33 := predict(Final_pred_2010)]
#check correlations
res[res$year=="2010", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.2010))$r.squared)
#------------------------>>>
#import mod3
# Full-grid (all AOD cells x days) dataset for 2010 used to generate stage-3 predictions.
data.m3 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2010.rds")
#for PM10
# Positional column selection — fragile if the upstream RDS layout changes;
# NOTE(review): confirm these indices match the mod3 file schema.
data.m3 <- data.m3[,c(1,2,5,29:32,52,53),with=FALSE]
data.m3[, bimon := (m + 1) %/% 2]
setkey(data.m3,day, aodid)
data.m3<-data.m3[!is.na(meanPM10)]
#generate m.3 initial pred
data.m3$pred.m3.mix <- predict(Final_pred_2010,data.m3)
#create unique grid
# One row per AOD cell with its mean coordinates — input to the spatial GAM predictions.
ugrid <-data.m3 %>%
group_by(aodid) %>%
summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE))
#### PREDICT Gam part
#split back into bimons to include the gam prediction in final prediction
data.m3_bimon1 <- data.m3[bimon == 1, ]
data.m3_bimon2 <- data.m3[bimon == 2, ]
data.m3_bimon3 <- data.m3[bimon == 3, ]
data.m3_bimon4 <- data.m3[bimon == 4, ]
data.m3_bimon5 <- data.m3[bimon == 5, ]
data.m3_bimon6 <- data.m3[bimon == 6, ]
#addin unique grid to each bimon
uniq_gid_bimon1 <- ugrid
uniq_gid_bimon2 <- ugrid
uniq_gid_bimon3 <- ugrid
uniq_gid_bimon4 <- ugrid
uniq_gid_bimon5 <- ugrid
uniq_gid_bimon6 <- ugrid
#get predictions for Bimon residuals
# fit2_1..fit2_6 are the per-bimonth spatial residual GAMs fitted above.
uniq_gid_bimon1$gpred <- predict.gam(fit2_1,uniq_gid_bimon1)
uniq_gid_bimon2$gpred <- predict.gam(fit2_2,uniq_gid_bimon2)
uniq_gid_bimon3$gpred <- predict.gam(fit2_3,uniq_gid_bimon3)
uniq_gid_bimon4$gpred <- predict.gam(fit2_4,uniq_gid_bimon4)
uniq_gid_bimon5$gpred <- predict.gam(fit2_5,uniq_gid_bimon5)
uniq_gid_bimon6$gpred <- predict.gam(fit2_6,uniq_gid_bimon6)
#merge things back togheter
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> merges
setkey(uniq_gid_bimon1,aodid)
setkey(data.m3_bimon1,aodid)
data.m3_bimon1 <- merge(data.m3_bimon1, uniq_gid_bimon1[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon2,aodid)
setkey(data.m3_bimon2,aodid)
data.m3_bimon2 <- merge(data.m3_bimon2, uniq_gid_bimon2[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon3,aodid)
setkey(data.m3_bimon3,aodid)
data.m3_bimon3 <- merge(data.m3_bimon3, uniq_gid_bimon3[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon4,aodid)
setkey(data.m3_bimon4,aodid)
data.m3_bimon4 <- merge(data.m3_bimon4, uniq_gid_bimon4[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon5,aodid)
setkey(data.m3_bimon5,aodid)
data.m3_bimon5 <- merge(data.m3_bimon5, uniq_gid_bimon5[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon6,aodid)
setkey(data.m3_bimon6,aodid)
data.m3_bimon6 <- merge(data.m3_bimon6, uniq_gid_bimon6[,list(aodid,gpred)], all.x = T)
#reattach all parts
mod3 <- rbind(data.m3_bimon1,data.m3_bimon2,data.m3_bimon3,data.m3_bimon4,data.m3_bimon5,data.m3_bimon6)
# create pred.m3
# Final stage-3 prediction = mixed-model part + spatial GAM residual correction.
mod3$pred.m3 <-mod3$pred.m3.mix+mod3$gpred
#hist(mod3$pred.m3)
#describe(mod3$pred.m3)
#recode negative into zero
# NOTE(review): comment says "recode to zero" but the code DROPS rows with
# negative predictions (keeps pred.m3 >= 0) — confirm which is intended.
mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2010.pred.rds")
#clean
# gdata::keep — remove everything from the workspace except these objects.
keep(mod3,res,rmse, sure=TRUE)
gc()
#########################
#prepare for m3.R2
#########################
#load mod1
# Reload the station-day (stage-1) data to validate stage-3 predictions
# against observed PM10 at monitor locations.
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2010.pred.rds")
mod1[,aodid:= paste(mod1$long_aod,mod1$lat_aod,sep="-")]
mod1<-mod1[,c("aodid","day","PM10","pred.m1","stn"),with=FALSE]
#R2.m3
setkey(mod3,day,aodid)
setkey(mod1,day,aodid)
m1.2010 <- merge(mod1,mod3[, list(day,aodid,pred.m3)], all.x = T)
# m3.fit.2010 is a summary.lm object; residuals() below extracts its $residuals.
m3.fit.2010<- summary(lm(PM10~pred.m3,data=m1.2010))
res[res$year=="2010", 'm3.R2'] <- print(summary(lm(PM10~pred.m3,data=m1.2010))$r.squared)
#RMSPE
res[res$year=="2010", 'm3.PE'] <- print(rmse(residuals(m3.fit.2010)))
#spatial
###to check
# Spatial (between-station) accuracy of stage-3 predictions.
spatial2010<-m1.2010 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m3, na.rm=TRUE))
m1.fit.2010.spat<- lm(barpm ~ barpred, data=spatial2010)
res[res$year=="2010", 'm3.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2010))$r.squared)
res[res$year=="2010", 'm3.PE.s'] <- print(rmse(residuals(m1.fit.2010.spat)))
#temporal
# Temporal (within-station) accuracy: deviations from the station means.
tempo2010<-left_join(m1.2010,spatial2010)
tempo2010$delpm <-tempo2010$PM10-tempo2010$barpm
tempo2010$delpred <-tempo2010$pred.m3-tempo2010$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2010)
res[res$year=="2010", 'm3.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2010))$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2010.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
#########################
#import mod2
mod2<- readRDS( "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2010.pred2.rds")
mod2<-mod2[,c("aodid","day","pred.m2"),with=FALSE]
#----------------> store the best available
# Best-available prediction per cell/day: stage-3 everywhere, overridden by
# stage-2 where an AOD retrieval exists, overridden by stage-1 at monitors.
mod3best <- mod3[, list(aodid, x_aod_ITM, y_aod_ITM, day, pred.m3)]
setkey(mod3best, day, aodid)
setkey(mod2, day, aodid)
mod3best <- merge(mod3best, mod2[,list(aodid, day, pred.m2)], all.x = T)
#reload mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2010.pred.rds")
mod1$aodid<-paste(mod1$long_aod,mod1$lat_aod,sep="-")
mod1<-mod1[,c("aodid","day","PM10","pred.m1"),with=FALSE]
setkey(mod1,day,aodid)
mod3best <- merge(mod3best, mod1, all.x = T)
mod3best[,bestpred := pred.m3]
mod3best[!is.na(pred.m2),bestpred := pred.m2]
mod3best[!is.na(pred.m1),bestpred := pred.m1]
#save
saveRDS(mod3best,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2010.FINAL.rds")
#save for GIS
# Long-term per-cell summary of the best-available predictions.
write.csv(mod3best[, list(LTPM = mean(bestpred, na.rm = T),
predvariance = var(bestpred, na.rm = T),
predmin = min(bestpred, na.rm = T),
predmax = max(bestpred, na.rm = T),
npred = sum(!is.na(bestpred)),
npred.m1 = sum(!is.na(pred.m1)),
npred.m2 = sum(!is.na(pred.m2)),
npred.m3 = sum(!is.na(pred.m3)),
x_aod_ITM = x_aod_ITM[1], y_aod_ITM = y_aod_ITM[1]),by=aodid], "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/LTPM3.AQ.2010.csv", row.names = F)
# Drop everything from the workspace except the results table (gdata::keep)
# before starting the 2011 section.
keep(res, sure=TRUE)
# BUG FIX: was `c()` — a no-op returning NULL. The parallel cleanup after the
# mod3 save uses `gc()`; do the same here to release memory.
gc()
###############
#LIBS
###############
library(lme4);library(reshape);library(foreign) ;library(ggplot2);library(plyr);library(data.table);library(reshape2);library(Hmisc);library(mgcv);library(gdata);library(car);library(dplyr);library(ggmap);library(broom);library(splines)
#sourcing
# Project helpers: splitdf() (random train/test split) and rmse().
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
#if needed load res table
#res<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
### import data
m1.2011 <-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2011.rds")
################# clean BAD STN PM10 and check if improved model?
# Per-station PM10~AOD regressions; stations with near-zero R2 are treated as bad.
raWDaf <- ddply(m1.2011, c( "stn"),
function(x) {
mod1 <- lm(PM10 ~ aod, data=x)
data.frame(R2 = round(summary(mod1)$r.squared, 5),
nsamps = length(summary(mod1)$resid))
})
raWDaf
raWDaf<-as.data.table(raWDaf)
# Threshold R2 < 0.01 flags stations whose PM10 is unrelated to AOD.
bad<- raWDaf[R2< 0.01]
bad[,badid := paste(stn,sep="-")]
#################BAD STN
m1.2011[,badid := paste(stn,sep="-")]
####Take out bad stations
m1.2011 <- m1.2011[!(m1.2011$badid %in% bad$badid), ]
#scale vars
# Standardize (z-score) every covariate used in the stage-1 mixed-model
# formula, in a single data.table `:=` call. Each new ".s" column is the
# scale() of its source column, exactly as the original one-per-line updates.
m1.2011[, `:=`(
  elev.s       = scale(elev),
  tden.s       = scale(tden),
  pden.s       = scale(pden),
  dist2A1.s    = scale(dist2A1),
  dist2water.s = scale(dist2water),
  dist2rail.s  = scale(dist2rail),
  Dist2road.s  = scale(Dist2road),
  ndvi.s       = scale(ndvi),
  MeanPbl.s    = scale(MeanPbl),
  p_ind.s      = scale(p_ind),
  p_for.s      = scale(p_for),
  p_farm.s     = scale(p_farm),
  p_dos.s      = scale(p_dos),
  p_dev.s      = scale(p_dev),
  p_os.s       = scale(p_os),
  tempa.s      = scale(tempa),
  WDa.s        = scale(WDa),
  WSa.s        = scale(WSa),
  RHa.s        = scale(RHa),
  Raina.s      = scale(Raina),
  NO2a.s       = scale(NO2a)
)]
# Stage-1 calibration formula: PM10 on AOD plus scaled temporal/spatial/land-use
# covariates, with a random intercept+AOD slope nested day/region.
m1.formula <- as.formula(PM10~ aod
+tempa.s+WDa.s+WSa.s+Dust+MeanPbl.s #temporal
+elev.s+tden.s+pden.s+Dist2road.s+ndvi.s #spatial
+p_os.s #+p_dev.s+p_dos.s+p_farm.s+p_for.s+p_ind.s #land use
#+aod*Dust #interactions
+(1+aod|day/reg_num)) #+(1|stn) !!! stn screws up mod3
#full fit
# normwt is a column of m1.2011 used as case weights — TODO confirm its derivation.
m1.fit.2011 <- lmer(m1.formula,data=m1.2011,weights=normwt)
m1.2011$pred.m1 <- predict(m1.fit.2011)
res[res$year=="2011", 'm1.R2'] <- print(summary(lm(PM10~pred.m1,data=m1.2011))$r.squared)
#RMSPE
res[res$year=="2011", 'm1.PE'] <- print(rmse(residuals(m1.fit.2011)))
#spatial
###to check
# Spatial accuracy: station-mean observed vs station-mean predicted.
spatial2011<-m1.2011 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2011.spat<- lm(barpm ~ barpred, data=spatial2011)
res[res$year=="2011", 'm1.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2011))$r.squared)
res[res$year=="2011", 'm1.PE.s'] <- print(rmse(residuals(m1.fit.2011.spat)))
#temporal
# Temporal accuracy: deviations from the station means.
tempo2011<-left_join(m1.2011,spatial2011)
tempo2011$delpm <-tempo2011$PM10-tempo2011$barpm
tempo2011$delpred <-tempo2011$pred.m1-tempo2011$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2011)
res[res$year=="2011", 'm1.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2011))$r.squared)
saveRDS(m1.2011,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2011.pred.rds")
#---------------->>>> CV
# Ten repeated random train/test splits. For each split the stage-1 mixed
# model is refit on the training set and used to predict the held-out set
# (pred.m1.cv). splitdf() is called in the same order as the original
# unrolled s1..s10 stanzas, so the RNG stream — and the splits — are identical.
cv_folds <- vector("list", 10)
for (i in seq_len(10)) {
  fold <- splitdf(m1.2011)
  held_out <- fold$testset
  fold_fit <- lmer(m1.formula, data = fold$trainset, weights = normwt)
  held_out$pred.m1.cv <- predict(object = fold_fit, newdata = held_out,
                                 allow.new.levels = TRUE, re.form = NULL)
  held_out$iter <- paste0("s", i)
  cv_folds[[i]] <- held_out
}
#BIND 1 dataset
m1.2011.cv <- data.table(do.call(rbind, cv_folds))
# cleanup (remove from WS) objects from CV
rm(cv_folds, fold, held_out, fold_fit, i)
#table updates
# Overall CV accuracy: observed PM10 regressed on held-out predictions;
# store R2, intercept/slope (with SEs) and RMSPE in the results table.
m1.fit.2011.cv<-lm(PM10~pred.m1.cv,data=m1.2011.cv)
res[res$year=="2011", 'm1cv.R2'] <- print(summary(lm(PM10~pred.m1.cv,data=m1.2011.cv))$r.squared)
res[res$year=="2011", 'm1cv.I'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2011.cv))$coef[1,1])
res[res$year=="2011", 'm1cv.I.se'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2011.cv))$coef[1,2])
res[res$year=="2011", 'm1cv.S'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2011.cv))$coef[2,1])
res[res$year=="2011", 'm1cv.S.se'] <-print(summary(lm(PM10~pred.m1.cv,data=m1.2011.cv))$coef[2,2])
#RMSPE
res[res$year=="2011", 'm1cv.PE'] <- print(rmse(residuals(m1.fit.2011.cv)))
#spatial
# Spatial (between-station) CV accuracy: station means of observed PM10 vs
# station means of the held-out CV predictions.
# BUG FIX: barpred previously averaged pred.m1 — the FULL-data fit carried
# into the test sets — not the held-out pred.m1.cv, so the "CV" spatial
# R2/PE were computed on non-CV predictions.
spatial2011.cv<-m1.2011.cv %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.cv, na.rm=TRUE))
m1.fit.2011.cv.s <- lm(barpm ~ barpred, data=spatial2011.cv)
res[res$year=="2011", 'm1cv.R2.s'] <- print(summary(m1.fit.2011.cv.s)$r.squared)
res[res$year=="2011", 'm1cv.PE.s'] <- print(rmse(residuals(m1.fit.2011.cv.s)))
#temporal
tempo2011.cv<-left_join(m1.2011.cv,spatial2011.cv)
tempo2011.cv$delpm <-tempo2011.cv$PM10-tempo2011.cv$barpm
tempo2011.cv$delpred <-tempo2011.cv$pred.m1.cv-tempo2011.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempo2011.cv)
res[res$year=="2011", 'm1cv.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2011.cv))$r.squared)
#-------->>> local (50 m) land-use stage: model the CV residuals with a GAM
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
setnames(luf,"tden","loc.tden")    # local (50 m) road density
setnames(luf,"elev50","loc.elev")  # local (50 m) elevation
#add 50m LU covariates to the CV data (left join on station id)
setkey(m1.2011.cv,stn)
setkey(luf,stn)
m1.2011.cv.loc <- merge(m1.2011.cv, luf, all.x = T)
#m1.2011.cv.loc<-na.omit(m1.2011.cv.loc)
#residual of the CV prediction, to be explained by local covariates
m1.2011.cv.loc$res.m1<-m1.2011.cv.loc$PM10-m1.2011.cv.loc$pred.m1.cv
#The GAM model on the local land-use terms
gam.out<-gam(res.m1~s(loc.tden)+s(tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m1.2011.cv.loc)
#plot(bp.model.ps)
#summary(bp.model.ps)
## combined prediction = CV prediction + local residual correction
m1.2011.cv.loc$pred.m1.loc <-predict(gam.out)
m1.2011.cv.loc$pred.m1.both <- m1.2011.cv.loc$pred.m1.cv + m1.2011.cv.loc$pred.m1.loc
# Calibration of the combined prediction; fit once, reuse the summary.
m1.fit.2011.cv.loc <- lm(PM10~pred.m1.both,data=m1.2011.cv.loc)
m1.sum.2011.cv.loc <- summary(m1.fit.2011.cv.loc)
res[res$year=="2011", 'm1cv.loc.R2']   <- print(m1.sum.2011.cv.loc$r.squared)
res[res$year=="2011", 'm1cv.loc.I']    <- print(m1.sum.2011.cv.loc$coef[1,1])
res[res$year=="2011", 'm1cv.loc.I.se'] <- print(m1.sum.2011.cv.loc$coef[1,2])
res[res$year=="2011", 'm1cv.loc.S']    <- print(m1.sum.2011.cv.loc$coef[2,1])
res[res$year=="2011", 'm1cv.loc.S.se'] <- print(m1.sum.2011.cv.loc$coef[2,2])
#RMSPE. BUG FIX: previously used rmse(residuals(m1.fit.2011.cv)) -- the plain
#CV fit -- so 'm1cv.loc.PE' could never differ from 'm1cv.PE'. Use the fit of
#the combined (CV + local GAM) prediction instead.
res[res$year=="2011", 'm1cv.loc.PE'] <- print(rmse(residuals(m1.fit.2011.cv.loc)))
#spatial R2 of the combined (CV + local) prediction, at station level.
# BUG FIX: barpred previously averaged pred.m1 (in-sample stage-1 predictions);
# this section evaluates pred.m1.both, as the temporal part below already does.
spatial2011.cv.loc<-m1.2011.cv.loc %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.both, na.rm=TRUE))
m1.fit.2011.cv.loc.s <- lm(barpm ~ barpred, data=spatial2011.cv.loc)
res[res$year=="2011", 'm1cv.loc.R2.s'] <- print(summary(m1.fit.2011.cv.loc.s)$r.squared)
res[res$year=="2011", 'm1cv.loc.PE.s'] <- print(rmse(residuals(m1.fit.2011.cv.loc.s)))
#temporal R2: daily deviations from station means
tempo2011.loc.cv<-left_join(m1.2011.cv.loc,spatial2011.cv.loc)
tempo2011.loc.cv$delpm <-tempo2011.loc.cv$PM10-tempo2011.loc.cv$barpm
tempo2011.loc.cv$delpred <-tempo2011.loc.cv$pred.m1.both-tempo2011.loc.cv$barpred
mod_temporal.loc.cv <- lm(delpm ~ delpred, data=tempo2011.loc.cv)
res[res$year=="2011", 'm1cv.loc.R2.t'] <- print(summary(mod_temporal.loc.cv)$r.squared)
#############save midpoint: results table (per-year and running ALL) + CV data
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2011.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
saveRDS(m1.2011.cv.loc,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2011.predCV.rds")
###############
#MOD2: apply the 2011 stage-1 model to the full grid dataset (grid-days with
#AOD but no monitor), then prepare inputs for the stage-3 model.
###############
m2.2011<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2011.rds")
# Recreate the scaled (z-score) covariates used by m1.formula.
# NOTE(review): scale() standardizes with m2's own mean/sd, not the values used
# when fitting m1.fit.2011 -- confirm this is intended.
m2.2011[,elev.s:= scale(elev)]
m2.2011[,tden.s:= scale(tden)]
m2.2011[,pden.s:= scale(pden)]
m2.2011[,dist2A1.s:= scale(dist2A1)]
m2.2011[,dist2water.s:= scale(dist2water)]
m2.2011[,dist2rail.s:= scale(dist2rail)]
m2.2011[,Dist2road.s:= scale(Dist2road)]
m2.2011[,ndvi.s:= scale(ndvi)]
m2.2011[,MeanPbl.s:= scale(MeanPbl)]
m2.2011[,p_ind.s:= scale(p_ind)]
m2.2011[,p_for.s:= scale(p_for)]
m2.2011[,p_farm.s:= scale(p_farm)]
m2.2011[,p_dos.s:= scale(p_dos)]
m2.2011[,p_dev.s:= scale(p_dev)]
m2.2011[,p_os.s:= scale(p_os)]
m2.2011[,tempa.s:= scale(tempa)]
m2.2011[,WDa.s:= scale(WDa)]
m2.2011[,WSa.s:= scale(WSa)]
m2.2011[,RHa.s:= scale(RHa)]
m2.2011[,Raina.s:= scale(Raina)]
m2.2011[,NO2a.s:= scale(NO2a)]
#generate predictions (random effects kept; unseen grouping levels allowed)
m2.2011[, pred.m2 := predict(object=m1.fit.2011,newdata=m2.2011,allow.new.levels=TRUE,re.form=NULL)]
describe(m2.2011$pred.m2)
#delete impossible values: keep predictions strictly between ~0 and 1500
m2.2011 <- m2.2011[pred.m2 > 0.00000000000001 , ]
m2.2011 <- m2.2011[pred.m2 < 1500 , ]
saveRDS(m2.2011,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2011.pred2.rds")
#-------------->prepare for mod3
m2.2011[, bimon := (m + 1) %/% 2]  # bi-monthly period 1..6 derived from month m
setkey(m2.2011,day, aodid)
m2.2011<-m2.2011[!is.na(meanPM10)]
#run the lmer part regressing stage 2 pred Vs mean pm
#in israel check per month, also check 30km band and other methods for meanpm
# Random intercept+slope of meanPM10 per grid cell (aodid).
# NOTE(review): lme()/lmeControl() come from nlme, which is not library()'d in
# this chunk -- presumably attached earlier in the script; confirm.
m2.smooth = lme(pred.m2 ~ meanPM10,random = list(aodid= ~1 + meanPM10),control=lmeControl(opt = "optim"), data= m2.2011 )
#xm2.smooth = lmer(pred.m2 ~ meanPM10+(1+ meanPM10|aodid), data= m2.2011 )
#correlate to see everything from mod2 and the mpm works
m2.2011[, pred.t31 := predict(m2.smooth)]
m2.2011[, resid := residuals(m2.smooth)]
res[res$year=="2011", 'm3.t31'] <- print(summary(lm(pred.m2~pred.t31,data=m2.2011))$r.squared)
#split the file into the six separate bi-monthly datasets
T2011_bimon1 <- subset(m2.2011 ,m2.2011$bimon == "1")
T2011_bimon2 <- subset(m2.2011 ,m2.2011$bimon == "2")
T2011_bimon3 <- subset(m2.2011 ,m2.2011$bimon == "3")
T2011_bimon4 <- subset(m2.2011 ,m2.2011$bimon == "4")
T2011_bimon5 <- subset(m2.2011 ,m2.2011$bimon == "5")
T2011_bimon6 <- subset(m2.2011 ,m2.2011$bimon == "6")
#run the separate spatial splines (smooth of x,y) on the residuals per bimon
#whats the default band (distance) that the spline goes out and uses
fit2_1 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2011_bimon1 )
fit2_2 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2011_bimon2 )
fit2_3 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2011_bimon3 )
fit2_4 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2011_bimon4 )
fit2_5 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2011_bimon5 )
fit2_6 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2011_bimon6 )
#get the predicted-fitted (spatially detrended prediction per bimon)
Xpred_1 <- (T2011_bimon1$pred.t31 - fit2_1$fitted)
Xpred_2 <- (T2011_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (T2011_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (T2011_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (T2011_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (T2011_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file.
# NOTE(review): this assignment relies on m2.2011 being keyed by (day, aodid),
# so that ascending day implies ascending bimon and the concatenation of the
# six subsets lines up with the row order -- verify if the key ever changes.
m2.2011$pred.t32 <- c( Xpred_1,Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.2011,day, aodid)
#rerun the lme on the predictions including the spatial spline (smooth)
Final_pred_2011 <- lme(pred.t32 ~ meanPM10 ,random = list(aodid= ~1 + meanPM10 ),control=lmeControl(opt = "optim"),data= m2.2011 )
m2.2011[, pred.t33 := predict(Final_pred_2011)]
#check correlation of the final smoothed prediction with the stage-2 prediction
res[res$year=="2011", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.2011))$r.squared)
#------------------------>>>
#import mod3: the full prediction grid (all grid-days, monitors or not)
data.m3 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2011.rds")
#for PM10: keep only the columns needed here (positional selection).
# NOTE(review): column positions 1,2,5,29:32,52,53 are fragile -- confirm they
# still map to aodid/day/coords/meanPM10 if the upstream file changes.
data.m3 <- data.m3[,c(1,2,5,29:32,52,53),with=FALSE]
data.m3[, bimon := (m + 1) %/% 2]  # bi-monthly period 1..6 from month m
setkey(data.m3,day, aodid)
data.m3<-data.m3[!is.na(meanPM10)]
#generate the initial stage-3 prediction from the mixed-effect part
data.m3$pred.m3.mix <- predict(Final_pred_2011,data.m3)
#create unique grid: one row per grid cell with its mean coordinates
ugrid <-data.m3 %>%
group_by(aodid) %>%
summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE))
#### PREDICT Gam part
#split back into bimons to include the gam prediction in the final prediction
data.m3_bimon1 <- data.m3[bimon == 1, ]
data.m3_bimon2 <- data.m3[bimon == 2, ]
data.m3_bimon3 <- data.m3[bimon == 3, ]
data.m3_bimon4 <- data.m3[bimon == 4, ]
data.m3_bimon5 <- data.m3[bimon == 5, ]
data.m3_bimon6 <- data.m3[bimon == 6, ]
#one copy of the unique grid per bimon (each gets its own gpred column)
uniq_gid_bimon1 <- ugrid
uniq_gid_bimon2 <- ugrid
uniq_gid_bimon3 <- ugrid
uniq_gid_bimon4 <- ugrid
uniq_gid_bimon5 <- ugrid
uniq_gid_bimon6 <- ugrid
#predict the bimon residual surface (fit2_* from the MOD2 stage) on the grid
uniq_gid_bimon1$gpred <- predict.gam(fit2_1,uniq_gid_bimon1)
uniq_gid_bimon2$gpred <- predict.gam(fit2_2,uniq_gid_bimon2)
uniq_gid_bimon3$gpred <- predict.gam(fit2_3,uniq_gid_bimon3)
uniq_gid_bimon4$gpred <- predict.gam(fit2_4,uniq_gid_bimon4)
uniq_gid_bimon5$gpred <- predict.gam(fit2_5,uniq_gid_bimon5)
uniq_gid_bimon6$gpred <- predict.gam(fit2_6,uniq_gid_bimon6)
#merge things back together
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> merges (left join gpred per grid cell)
setkey(uniq_gid_bimon1,aodid)
setkey(data.m3_bimon1,aodid)
data.m3_bimon1 <- merge(data.m3_bimon1, uniq_gid_bimon1[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon2,aodid)
setkey(data.m3_bimon2,aodid)
data.m3_bimon2 <- merge(data.m3_bimon2, uniq_gid_bimon2[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon3,aodid)
setkey(data.m3_bimon3,aodid)
data.m3_bimon3 <- merge(data.m3_bimon3, uniq_gid_bimon3[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon4,aodid)
setkey(data.m3_bimon4,aodid)
data.m3_bimon4 <- merge(data.m3_bimon4, uniq_gid_bimon4[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon5,aodid)
setkey(data.m3_bimon5,aodid)
data.m3_bimon5 <- merge(data.m3_bimon5, uniq_gid_bimon5[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon6,aodid)
setkey(data.m3_bimon6,aodid)
data.m3_bimon6 <- merge(data.m3_bimon6, uniq_gid_bimon6[,list(aodid,gpred)], all.x = T)
#reattach all parts
mod3 <- rbind(data.m3_bimon1,data.m3_bimon2,data.m3_bimon3,data.m3_bimon4,data.m3_bimon5,data.m3_bimon6)
# create pred.m3 = mixed-effect prediction + bimon spatial-residual surface
mod3$pred.m3 <-mod3$pred.m3.mix+mod3$gpred
#hist(mod3$pred.m3)
#describe(mod3$pred.m3)
#drop negative predictions (NOTE: rows are removed, not recoded to zero as the
#original comment claimed)
mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2011.pred.rds")
#clean the workspace, keeping only what the next section needs
keep(mod3,res,rmse, sure=TRUE)
gc()
#########################
#prepare for m3.R2: evaluate final stage-3 predictions against monitors (2011)
#########################
#load mod1 (monitor-day records with stage-1 predictions)
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2011.pred.rds")
mod1[,aodid:= paste(mod1$long_aod,mod1$lat_aod,sep="-")]
mod1<-mod1[,c("aodid","day","PM10","pred.m1","stn"),with=FALSE]
#R2.m3: join stage-3 predictions onto the monitor data (overwrites m1.2011)
setkey(mod3,day,aodid)
setkey(mod1,day,aodid)
m1.2011 <- merge(mod1,mod3[, list(day,aodid,pred.m3)], all.x = T)
# m3.fit.2011 is a summary.lm object; residuals() works on it via $residuals.
m3.fit.2011<- summary(lm(PM10~pred.m3,data=m1.2011))
res[res$year=="2011", 'm3.R2'] <- print(m3.fit.2011$r.squared)
#RMSPE
res[res$year=="2011", 'm3.PE'] <- print(rmse(residuals(m3.fit.2011)))
#spatial: station-level means of observed vs stage-3 predicted PM10
###to check
spatial2011<-m1.2011 %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m3, na.rm=TRUE))
m1.fit.2011.spat<- lm(barpm ~ barpred, data=spatial2011)
res[res$year=="2011", 'm3.R2.s'] <- print(summary(m1.fit.2011.spat)$r.squared)
res[res$year=="2011", 'm3.PE.s'] <- print(rmse(residuals(m1.fit.2011.spat)))
#temporal: daily deviations from station means
tempo2011<-left_join(m1.2011,spatial2011)
tempo2011$delpm <-tempo2011$PM10-tempo2011$barpm
tempo2011$delpred <-tempo2011$pred.m3-tempo2011$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2011)
res[res$year=="2011", 'm3.R2.t'] <- print(summary(mod_temporal)$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2011.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
#########################
#import mod2 (grid-day stage-2 predictions)
mod2<- readRDS( "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2011.pred2.rds")
mod2<-mod2[,c("aodid","day","pred.m2"),with=FALSE]
#----------------> store the best available prediction per grid-day:
# stage-3 everywhere, overridden by stage-2 where available, then by stage-1.
mod3best <- mod3[, list(aodid, x_aod_ITM, y_aod_ITM, day, pred.m3)]
setkey(mod3best, day, aodid)
setkey(mod2, day, aodid)
mod3best <- merge(mod3best, mod2[,list(aodid, day, pred.m2)], all.x = T)
#reload mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2011.pred.rds")
mod1$aodid<-paste(mod1$long_aod,mod1$lat_aod,sep="-")
mod1<-mod1[,c("aodid","day","PM10","pred.m1"),with=FALSE]
setkey(mod1,day,aodid)
mod3best <- merge(mod3best, mod1, all.x = T)
mod3best[,bestpred := pred.m3]
mod3best[!is.na(pred.m2),bestpred := pred.m2]
mod3best[!is.na(pred.m1),bestpred := pred.m1]
#save
saveRDS(mod3best,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2011.FINAL.rds")
#save long-term per-cell aggregates for GIS
write.csv(mod3best[, list(LTPM = mean(bestpred, na.rm = T),
                          predvariance = var(bestpred, na.rm = T),
                          predmin = min(bestpred, na.rm = T),
                          predmax = max(bestpred, na.rm = T),
                          npred = sum(!is.na(bestpred)),
                          npred.m1 = sum(!is.na(pred.m1)),
                          npred.m2 = sum(!is.na(pred.m2)),
                          npred.m3 = sum(!is.na(pred.m3)),
                          x_aod_ITM = x_aod_ITM[1], y_aod_ITM = y_aod_ITM[1]),by=aodid], "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/LTPM3.AQ.2011.csv", row.names = F)
keep(res, sure=TRUE)
#BUG FIX: was a bare c() (a no-op, almost certainly a typo); garbage-collect
#after keep(), matching the gc() used after keep() elsewhere in this script.
gc()
###############
#LIBS
###############
# NOTE(review): lme()/lmeControl() used later come from nlme, which is not
# loaded here -- presumably attached earlier in the script; confirm.
library(lme4);library(reshape);library(foreign) ;library(ggplot2);library(plyr);library(data.table);library(reshape2);library(Hmisc);library(mgcv);library(gdata);library(car);library(dplyr);library(ggmap);library(broom);library(splines)
#sourcing helper functions: splitdf() (CV splits) and rmse() (RMSPE)
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
#if needed load res table
#res<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
### import data: 2012 monitor-day records for stage 1
m1.2012 <-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2012.rds")
#rescale (z-score) the covariates used in the model formula
m1.2012[,elev.s:= scale(elev)]
m1.2012[,tden.s:= scale(tden)]
m1.2012[,pden.s:= scale(pden)]
m1.2012[,dist2A1.s:= scale(dist2A1)]
m1.2012[,dist2water.s:= scale(dist2water)]
m1.2012[,dist2rail.s:= scale(dist2rail)]
m1.2012[,Dist2road.s:= scale(Dist2road)]
m1.2012[,ndvi.s:= scale(ndvi)]
m1.2012[,MeanPbl.s:= scale(MeanPbl)]
m1.2012[,p_ind.s:= scale(p_ind)]
m1.2012[,p_for.s:= scale(p_for)]
m1.2012[,p_farm.s:= scale(p_farm)]
m1.2012[,p_dos.s:= scale(p_dos)]
m1.2012[,p_dev.s:= scale(p_dev)]
m1.2012[,p_os.s:= scale(p_os)]
m1.2012[,tempa.s:= scale(tempa)]
m1.2012[,WDa.s:= scale(WDa)]
m1.2012[,WSa.s:= scale(WSa)]
m1.2012[,RHa.s:= scale(RHa)]
m1.2012[,Raina.s:= scale(Raina)]
m1.2012[,NO2a.s:= scale(NO2a)]
# Stage-1 mixed-model formula: PM10 on AOD plus temporal/spatial/land-use
# covariates, with random intercept and AOD slope nested by day/region.
m1.formula <- as.formula(PM10~ aod
+tempa.s+WDa.s+WSa.s+Dust+MeanPbl.s #temporal
+elev.s+tden.s+pden.s+Dist2road.s+ndvi.s #spatial
+p_os.s #+p_dev.s+p_dos.s+p_farm.s+p_for.s+p_ind.s #land use
#+aod*Dust #interactions
+(1+aod|day/reg_num)) #+(1|stn) !!! stn screws up mod3
#full fit on all 2012 monitor-day records
m1.fit.2012 <- lmer(m1.formula,data=m1.2012,weights=normwt)
m1.2012$pred.m1 <- predict(m1.fit.2012)  # in-sample stage-1 predictions
res[res$year=="2012", 'm1.R2'] <- print(summary(lm(PM10~pred.m1,data=m1.2012))$r.squared)
#RMSPE
res[res$year=="2012", 'm1.PE'] <- print(rmse(residuals(m1.fit.2012)))
#spatial R2: station-level means of observed vs predicted
###to check
spatial2012<-m1.2012 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2012.spat<- lm(barpm ~ barpred, data=spatial2012)
res[res$year=="2012", 'm1.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2012))$r.squared)
res[res$year=="2012", 'm1.PE.s'] <- print(rmse(residuals(m1.fit.2012.spat)))
#temporal R2: daily deviations from station means
tempo2012<-left_join(m1.2012,spatial2012)
tempo2012$delpm <-tempo2012$PM10-tempo2012$barpm
tempo2012$delpred <-tempo2012$pred.m1-tempo2012$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2012)
res[res$year=="2012", 'm1.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2012))$r.squared)
saveRDS(m1.2012,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2012.pred.rds")
#---------------->>>> 10-fold CV for the 2012 stage-1 model.
# The ten hand-unrolled fold blocks (s1..s10) are replaced by a loop that runs
# the identical sequence of operations in the same order (same splitdf() call
# order, hence the same random-number stream and the same splits as before).
cv_tests <- vector("list", 10)
for (i in seq_len(10)) {
  # random train/test split of the monitor-day data
  splits_i <- splitdf(m1.2012)
  test_i <- splits_i$testset
  train_i <- splits_i$trainset
  # refit the mixed model on the training split only
  out_train_i <- lmer(m1.formula, data = train_i, weights = normwt)
  # out-of-sample prediction; keep random effects, allow unseen grouping levels
  test_i$pred.m1.cv <- predict(object = out_train_i, newdata = test_i,
                               allow.new.levels = TRUE, re.form = NULL)
  test_i$iter <- paste0("s", i)  # fold label "s1".."s10", as before
  cv_tests[[i]] <- test_i
}
#BIND the ten held-out test sets into a single CV dataset
m1.2012.cv <- data.table(do.call(rbind, cv_tests))
# ---- Stage-1 CV diagnostics, 2012 ----
# Remove any per-fold train/test objects left in the workspace.
rm(list = ls(pattern = "train_|test_"))
# Overall CV calibration: regress observed PM10 on the out-of-sample predictions.
# Fit once and reuse the summary instead of refitting the same lm five times.
m1.fit.2012.cv <- lm(PM10 ~ pred.m1.cv, data = m1.2012.cv)
m1.sum.2012.cv <- summary(m1.fit.2012.cv)
res[res$year=="2012", 'm1cv.R2']   <- print(m1.sum.2012.cv$r.squared)   # CV R2
res[res$year=="2012", 'm1cv.I']    <- print(m1.sum.2012.cv$coef[1,1])   # intercept
res[res$year=="2012", 'm1cv.I.se'] <- print(m1.sum.2012.cv$coef[1,2])   # intercept SE
res[res$year=="2012", 'm1cv.S']    <- print(m1.sum.2012.cv$coef[2,1])   # slope
res[res$year=="2012", 'm1cv.S.se'] <- print(m1.sum.2012.cv$coef[2,2])   # slope SE
#RMSPE of the CV calibration fit
res[res$year=="2012", 'm1cv.PE'] <- print(rmse(residuals(m1.fit.2012.cv)))
#spatial CV R2: station-level means of observed vs CV-predicted PM10.
# BUG FIX: barpred previously averaged pred.m1 (in-sample full-model
# predictions); the CV spatial statistic must use the cross-validated
# predictions pred.m1.cv, as the temporal part below already does.
spatial2012.cv <- m1.2012.cv %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.cv, na.rm=TRUE))
m1.fit.2012.cv.s <- lm(barpm ~ barpred, data=spatial2012.cv)
res[res$year=="2012", 'm1cv.R2.s'] <- print(summary(m1.fit.2012.cv.s)$r.squared)
res[res$year=="2012", 'm1cv.PE.s'] <- print(rmse(residuals(m1.fit.2012.cv.s)))
#temporal CV R2: daily deviations from each station's mean
tempo2012.cv <- left_join(m1.2012.cv, spatial2012.cv)
tempo2012.cv$delpm   <- tempo2012.cv$PM10 - tempo2012.cv$barpm
tempo2012.cv$delpred <- tempo2012.cv$pred.m1.cv - tempo2012.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempo2012.cv)
res[res$year=="2012", 'm1cv.R2.t'] <- print(summary(mod_temporal.cv)$r.squared)
#-------->>> local (50 m) land-use stage: model the CV residuals with a GAM
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
setnames(luf,"tden","loc.tden")    # local (50 m) road density
setnames(luf,"elev50","loc.elev")  # local (50 m) elevation
#add 50m LU covariates to the CV data (left join on station id)
setkey(m1.2012.cv,stn)
setkey(luf,stn)
m1.2012.cv.loc <- merge(m1.2012.cv, luf, all.x = T)
#m1.2012.cv.loc<-na.omit(m1.2012.cv.loc)
#residual of the CV prediction, to be explained by local covariates
m1.2012.cv.loc$res.m1<-m1.2012.cv.loc$PM10-m1.2012.cv.loc$pred.m1.cv
#The GAM model on the local land-use terms
gam.out<-gam(res.m1~s(loc.tden)+s(tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m1.2012.cv.loc)
#plot(bp.model.ps)
#summary(bp.model.ps)
## combined prediction = CV prediction + local residual correction
m1.2012.cv.loc$pred.m1.loc <-predict(gam.out)
m1.2012.cv.loc$pred.m1.both <- m1.2012.cv.loc$pred.m1.cv + m1.2012.cv.loc$pred.m1.loc
# Calibration of the combined prediction; fit once, reuse the summary.
m1.fit.2012.cv.loc <- lm(PM10~pred.m1.both,data=m1.2012.cv.loc)
m1.sum.2012.cv.loc <- summary(m1.fit.2012.cv.loc)
res[res$year=="2012", 'm1cv.loc.R2']   <- print(m1.sum.2012.cv.loc$r.squared)
res[res$year=="2012", 'm1cv.loc.I']    <- print(m1.sum.2012.cv.loc$coef[1,1])
res[res$year=="2012", 'm1cv.loc.I.se'] <- print(m1.sum.2012.cv.loc$coef[1,2])
res[res$year=="2012", 'm1cv.loc.S']    <- print(m1.sum.2012.cv.loc$coef[2,1])
res[res$year=="2012", 'm1cv.loc.S.se'] <- print(m1.sum.2012.cv.loc$coef[2,2])
#RMSPE. BUG FIX: previously used rmse(residuals(m1.fit.2012.cv)) -- the plain
#CV fit -- so 'm1cv.loc.PE' could never differ from 'm1cv.PE'. Use the fit of
#the combined (CV + local GAM) prediction instead.
res[res$year=="2012", 'm1cv.loc.PE'] <- print(rmse(residuals(m1.fit.2012.cv.loc)))
#spatial R2 of the combined (CV + local) prediction, at station level.
# BUG FIX: barpred previously averaged pred.m1 (in-sample stage-1 predictions);
# this section evaluates pred.m1.both, as the temporal part below already does.
spatial2012.cv.loc<-m1.2012.cv.loc %>%
  group_by(stn) %>%
  summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m1.both, na.rm=TRUE))
m1.fit.2012.cv.loc.s <- lm(barpm ~ barpred, data=spatial2012.cv.loc)
res[res$year=="2012", 'm1cv.loc.R2.s'] <- print(summary(m1.fit.2012.cv.loc.s)$r.squared)
res[res$year=="2012", 'm1cv.loc.PE.s'] <- print(rmse(residuals(m1.fit.2012.cv.loc.s)))
#temporal R2: daily deviations from station means
tempo2012.loc.cv<-left_join(m1.2012.cv.loc,spatial2012.cv.loc)
tempo2012.loc.cv$delpm <-tempo2012.loc.cv$PM10-tempo2012.loc.cv$barpm
tempo2012.loc.cv$delpred <-tempo2012.loc.cv$pred.m1.both-tempo2012.loc.cv$barpred
mod_temporal.loc.cv <- lm(delpm ~ delpred, data=tempo2012.loc.cv)
res[res$year=="2012", 'm1cv.loc.R2.t'] <- print(summary(mod_temporal.loc.cv)$r.squared)
#############save midpoint: results table (per-year and running ALL) + CV data
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2012.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
saveRDS(m1.2012.cv.loc,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2012.predCV.rds")
###############
#MOD2: apply the 2012 stage-1 model to the full grid dataset (grid-days with
#AOD but no monitor), then prepare inputs for the stage-3 model.
###############
m2.2012<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2012.rds")
# Recreate the scaled (z-score) covariates used by m1.formula.
# NOTE(review): scale() standardizes with m2's own mean/sd, not the values used
# when fitting m1.fit.2012 -- confirm this is intended.
m2.2012[,elev.s:= scale(elev)]
m2.2012[,tden.s:= scale(tden)]
m2.2012[,pden.s:= scale(pden)]
m2.2012[,dist2A1.s:= scale(dist2A1)]
m2.2012[,dist2water.s:= scale(dist2water)]
m2.2012[,dist2rail.s:= scale(dist2rail)]
m2.2012[,Dist2road.s:= scale(Dist2road)]
m2.2012[,ndvi.s:= scale(ndvi)]
m2.2012[,MeanPbl.s:= scale(MeanPbl)]
m2.2012[,p_ind.s:= scale(p_ind)]
m2.2012[,p_for.s:= scale(p_for)]
m2.2012[,p_farm.s:= scale(p_farm)]
m2.2012[,p_dos.s:= scale(p_dos)]
m2.2012[,p_dev.s:= scale(p_dev)]
m2.2012[,p_os.s:= scale(p_os)]
m2.2012[,tempa.s:= scale(tempa)]
m2.2012[,WDa.s:= scale(WDa)]
m2.2012[,WSa.s:= scale(WSa)]
m2.2012[,RHa.s:= scale(RHa)]
m2.2012[,Raina.s:= scale(Raina)]
m2.2012[,NO2a.s:= scale(NO2a)]
#generate predictions (random effects kept; unseen grouping levels allowed)
m2.2012[, pred.m2 := predict(object=m1.fit.2012,newdata=m2.2012,allow.new.levels=TRUE,re.form=NULL)]
describe(m2.2012$pred.m2)
#delete impossible values: keep predictions strictly between ~0 and 1500
m2.2012 <- m2.2012[pred.m2 > 0.00000000000001 , ]
m2.2012 <- m2.2012[pred.m2 < 1500 , ]
saveRDS(m2.2012,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2012.pred2.rds")
#-------------->prepare for mod3
m2.2012[, bimon := (m + 1) %/% 2]  # bi-monthly period 1..6 derived from month m
setkey(m2.2012,day, aodid)
m2.2012<-m2.2012[!is.na(meanPM10)]
#run the lmer part regressing stage 2 pred Vs mean pm
#in israel check per month, also check 30km band and other methods for meanpm
# Random intercept+slope of meanPM10 per grid cell (aodid).
# NOTE(review): lme()/lmeControl() come from nlme, which is not library()'d in
# this chunk -- presumably attached earlier in the script; confirm.
m2.smooth = lme(pred.m2 ~ meanPM10,random = list(aodid= ~1 + meanPM10),control=lmeControl(opt = "optim"), data= m2.2012 )
#xm2.smooth = lmer(pred.m2 ~ meanPM10+(1+ meanPM10|aodid), data= m2.2012 )
#correlate to see everything from mod2 and the mpm works
m2.2012[, pred.t31 := predict(m2.smooth)]
m2.2012[, resid := residuals(m2.smooth)]
res[res$year=="2012", 'm3.t31'] <- print(summary(lm(pred.m2~pred.t31,data=m2.2012))$r.squared)
#split the file into the six separate bi-monthly datasets
T2012_bimon1 <- subset(m2.2012 ,m2.2012$bimon == "1")
T2012_bimon2 <- subset(m2.2012 ,m2.2012$bimon == "2")
T2012_bimon3 <- subset(m2.2012 ,m2.2012$bimon == "3")
T2012_bimon4 <- subset(m2.2012 ,m2.2012$bimon == "4")
T2012_bimon5 <- subset(m2.2012 ,m2.2012$bimon == "5")
T2012_bimon6 <- subset(m2.2012 ,m2.2012$bimon == "6")
#run the separate spatial splines (smooth of x,y) on the residuals per bimon
#whats the default band (distance) that the spline goes out and uses
fit2_1 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2012_bimon1 )
fit2_2 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2012_bimon2 )
fit2_3 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2012_bimon3 )
fit2_4 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2012_bimon4 )
fit2_5 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2012_bimon5 )
fit2_6 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2012_bimon6 )
#get the predicted-fitted (spatially detrended prediction per bimon)
Xpred_1 <- (T2012_bimon1$pred.t31 - fit2_1$fitted)
Xpred_2 <- (T2012_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (T2012_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (T2012_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (T2012_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (T2012_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file.
# NOTE(review): this assignment relies on m2.2012 being keyed by (day, aodid),
# so that ascending day implies ascending bimon and the concatenation of the
# six subsets lines up with the row order -- verify if the key ever changes.
m2.2012$pred.t32 <- c( Xpred_1,Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.2012,day, aodid)
#rerun the lme on the predictions including the spatial spline (smooth)
Final_pred_2012 <- lme(pred.t32 ~ meanPM10 ,random = list(aodid= ~1 + meanPM10 ),control=lmeControl(opt = "optim"),data= m2.2012 )
m2.2012[, pred.t33 := predict(Final_pred_2012)]
#check correlation of the final smoothed prediction with the stage-2 prediction
res[res$year=="2012", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.2012))$r.squared)
#------------------------>>>
#import mod3: the full prediction grid (all grid-days, monitors or not)
data.m3 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2012.rds")
#for PM10: keep only the columns needed here (positional selection).
# NOTE(review): column positions 1,2,5,29:32,52,53 are fragile -- confirm they
# still map to aodid/day/coords/meanPM10 if the upstream file changes.
data.m3 <- data.m3[,c(1,2,5,29:32,52,53),with=FALSE]
data.m3[, bimon := (m + 1) %/% 2]  # bi-monthly period 1..6 from month m
setkey(data.m3,day, aodid)
data.m3<-data.m3[!is.na(meanPM10)]
#generate the initial stage-3 prediction from the mixed-effect part
data.m3$pred.m3.mix <- predict(Final_pred_2012,data.m3)
#create unique grid: one row per grid cell with its mean coordinates
ugrid <-data.m3 %>%
group_by(aodid) %>%
summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE))
#### PREDICT Gam part
#split back into bimons to include the gam prediction in the final prediction
data.m3_bimon1 <- data.m3[bimon == 1, ]
data.m3_bimon2 <- data.m3[bimon == 2, ]
data.m3_bimon3 <- data.m3[bimon == 3, ]
data.m3_bimon4 <- data.m3[bimon == 4, ]
data.m3_bimon5 <- data.m3[bimon == 5, ]
data.m3_bimon6 <- data.m3[bimon == 6, ]
#one copy of the unique grid per bimon (each gets its own gpred column)
uniq_gid_bimon1 <- ugrid
uniq_gid_bimon2 <- ugrid
uniq_gid_bimon3 <- ugrid
uniq_gid_bimon4 <- ugrid
uniq_gid_bimon5 <- ugrid
uniq_gid_bimon6 <- ugrid
#predict the bimon residual surface (fit2_* from the MOD2 stage) on the grid
uniq_gid_bimon1$gpred <- predict.gam(fit2_1,uniq_gid_bimon1)
uniq_gid_bimon2$gpred <- predict.gam(fit2_2,uniq_gid_bimon2)
uniq_gid_bimon3$gpred <- predict.gam(fit2_3,uniq_gid_bimon3)
uniq_gid_bimon4$gpred <- predict.gam(fit2_4,uniq_gid_bimon4)
uniq_gid_bimon5$gpred <- predict.gam(fit2_5,uniq_gid_bimon5)
uniq_gid_bimon6$gpred <- predict.gam(fit2_6,uniq_gid_bimon6)
#merge things back together
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> merges (left join gpred per grid cell)
setkey(uniq_gid_bimon1,aodid)
setkey(data.m3_bimon1,aodid)
data.m3_bimon1 <- merge(data.m3_bimon1, uniq_gid_bimon1[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon2,aodid)
setkey(data.m3_bimon2,aodid)
data.m3_bimon2 <- merge(data.m3_bimon2, uniq_gid_bimon2[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon3,aodid)
setkey(data.m3_bimon3,aodid)
data.m3_bimon3 <- merge(data.m3_bimon3, uniq_gid_bimon3[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon4,aodid)
setkey(data.m3_bimon4,aodid)
data.m3_bimon4 <- merge(data.m3_bimon4, uniq_gid_bimon4[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon5,aodid)
setkey(data.m3_bimon5,aodid)
data.m3_bimon5 <- merge(data.m3_bimon5, uniq_gid_bimon5[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon6,aodid)
setkey(data.m3_bimon6,aodid)
data.m3_bimon6 <- merge(data.m3_bimon6, uniq_gid_bimon6[,list(aodid,gpred)], all.x = T)
#reattach all parts
mod3 <- rbind(data.m3_bimon1,data.m3_bimon2,data.m3_bimon3,data.m3_bimon4,data.m3_bimon5,data.m3_bimon6)
# create pred.m3 = mixed-effect prediction + bimon spatial-residual surface
mod3$pred.m3 <-mod3$pred.m3.mix+mod3$gpred
#hist(mod3$pred.m3)
#describe(mod3$pred.m3)
#drop negative predictions (NOTE: rows are removed, not recoded to zero as the
#original comment claimed)
mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2012.pred.rds")
#clean the workspace, keeping only what the next section needs
keep(mod3,res,rmse, sure=TRUE)
gc()
#########################
#prepare for m3.R2: evaluate final stage-3 predictions against monitors (2012)
#########################
#load mod1 (monitor-day records with stage-1 predictions)
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2012.pred.rds")
mod1[,aodid:= paste(mod1$long_aod,mod1$lat_aod,sep="-")]
mod1<-mod1[,c("aodid","day","PM10","pred.m1","stn"),with=FALSE]
#R2.m3: join stage-3 predictions onto the monitor data (overwrites m1.2012)
setkey(mod3,day,aodid)
setkey(mod1,day,aodid)
m1.2012 <- merge(mod1,mod3[, list(day,aodid,pred.m3)], all.x = T)
# m3.fit.2012 is a summary.lm object; residuals() works on it via $residuals.
m3.fit.2012<- summary(lm(PM10~pred.m3,data=m1.2012))
res[res$year=="2012", 'm3.R2'] <- print(summary(lm(PM10~pred.m3,data=m1.2012))$r.squared)
#RMSPE
res[res$year=="2012", 'm3.PE'] <- print(rmse(residuals(m3.fit.2012)))
#spatial: station-level means of observed vs stage-3 predicted PM10
###to check
spatial2012<-m1.2012 %>%
group_by(stn) %>%
summarise(barpm = mean(PM10, na.rm=TRUE), barpred = mean(pred.m3, na.rm=TRUE))
m1.fit.2012.spat<- lm(barpm ~ barpred, data=spatial2012)
res[res$year=="2012", 'm3.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2012))$r.squared)
res[res$year=="2012", 'm3.PE.s'] <- print(rmse(residuals(m1.fit.2012.spat)))
#temporal: daily deviations from station means
tempo2012<-left_join(m1.2012,spatial2012)
tempo2012$delpm <-tempo2012$PM10-tempo2012$barpm
tempo2012$delpred <-tempo2012$pred.m3-tempo2012$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2012)
res[res$year=="2012", 'm3.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2012))$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2012.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
#########################
#import mod2 (grid-day stage-2 predictions)
mod2<- readRDS( "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2012.pred2.rds")
mod2<-mod2[,c("aodid","day","pred.m2"),with=FALSE]
#----------------> store the best available prediction per grid-day:
# stage-3 everywhere, overridden by stage-2 where available, then by stage-1.
mod3best <- mod3[, list(aodid, x_aod_ITM, y_aod_ITM, day, pred.m3)]
setkey(mod3best, day, aodid)
setkey(mod2, day, aodid)
mod3best <- merge(mod3best, mod2[,list(aodid, day, pred.m2)], all.x = T)
#reload mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2012.pred.rds")
mod1$aodid<-paste(mod1$long_aod,mod1$lat_aod,sep="-")
mod1<-mod1[,c("aodid","day","PM10","pred.m1"),with=FALSE]
setkey(mod1,day,aodid)
mod3best <- merge(mod3best, mod1, all.x = T)
mod3best[,bestpred := pred.m3]
mod3best[!is.na(pred.m2),bestpred := pred.m2]
mod3best[!is.na(pred.m1),bestpred := pred.m1]
#save
saveRDS(mod3best,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2012.FINAL.rds")
#save long-term per-cell aggregates for GIS
write.csv(mod3best[, list(LTPM = mean(bestpred, na.rm = T),
predvariance = var(bestpred, na.rm = T),
predmin = min(bestpred, na.rm = T),
predmax = max(bestpred, na.rm = T),
npred = sum(!is.na(bestpred)),
npred.m1 = sum(!is.na(pred.m1)),
npred.m2 = sum(!is.na(pred.m2)),
npred.m3 = sum(!is.na(pred.m3)),
x_aod_ITM = x_aod_ITM[1], y_aod_ITM = y_aod_ITM[1]),by=aodid], "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/LTPM3.AQ.2012.csv", row.names = F)
keep(res, sure=TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/norm_PQN.R
\name{pqn}
\alias{pqn}
\alias{pqn,BAf-method}
\title{Probabilistic Quotient Normalization}
\usage{
pqn(X, preNorm = FALSE, ...)
\S4method{pqn}{BAf}(X, preNorm = FALSE, by_s = NULL, by_b = NULL, ...)
}
\arguments{
\item{X}{a \code{\link{matrix}} or a \code{\link{BAf-class}} object}
\item{preNorm}{if the integral normalization should be applied prior to the
PQN. Please refer to the Dieterle \emph{et al.}'s paper.}
\item{...}{not used}
\item{by_s, by_b}{same as \code{by_s} and \code{by_b} in
\code{\link{apply_per_group}}. If any of these is given, the \code{pqn} is
applied per group divided by these variables.}
}
\value{
\code{\link{matrix}} (or \code{\link{BAf-class}} object) after the
normalization
}
\description{
Normalize the data by applying the Probabilistic Quotient method introduced in
Dieterle et al. (2006).
}
\examples{
data(sba)
sba2 <- pqn(sba[sba@sinfo$cohort != "EMPTY", ])
plot_QC_sample_signal_boxplot(sba2)
}
\references{
Dieterle et al. (2006) Probabilistic quotient normalization. - Anal. Chem.
}
\seealso{
\code{\link{apply_per_group}}
\code{\link{normn_MA}}
}
\author{
Mun-Gwan Hong <\email{mun-gwan.hong@scilifelab.se}>
}
| /man/pqn.Rd | no_license | Schwenk-Lab/BAf-R_package | R | false | true | 1,239 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/norm_PQN.R
\name{pqn}
\alias{pqn}
\alias{pqn,BAf-method}
\title{Probabilistic Quotient Normalization}
\usage{
pqn(X, preNorm = FALSE, ...)
\S4method{pqn}{BAf}(X, preNorm = FALSE, by_s = NULL, by_b = NULL, ...)
}
\arguments{
\item{X}{a \code{\link{matrix}} or a \code{\link{BAf-class}} object}
\item{preNorm}{if the integral normalization should be applied prior to the
PQN. Please refer to the Dieterle \emph{et al.}'s paper.}
\item{...}{not used}
\item{by_s, by_b}{same as \code{by_s} and \code{by_b} in
\code{\link{apply_per_group}}. If any of these is given, the \code{pqn} is
applied per group divided by these variables.}
}
\value{
\code{\link{matrix}} (or \code{\link{BAf-class}} object) after the
normalization
}
\description{
Normalize the data by applying the Probabilistic Quotient method introduced in
Dieterle et al. (2006).
}
\examples{
data(sba)
sba2 <- pqn(sba[sba@sinfo$cohort != "EMPTY", ])
plot_QC_sample_signal_boxplot(sba2)
}
\references{
Dieterle et al. (2006) Probabilistic quotient normalization. - Anal. Chem.
}
\seealso{
\code{\link{apply_per_group}}
\code{\link{normn_MA}}
}
\author{
Mun-Gwan Hong <\email{mun-gwan.hong@scilifelab.se}>
}
|
#' Fetch data from Yahoo Finance
#'
#' @param identifiers a vector of Yahoo Finance tickers
#' @param fields can be any of "open", "high", "low", "close", "volume", or "adjclose"
#' @param from a Date object or string in YYYY-MM-DD format. If supplied, only data on or after this date will be returned
#' @param to a Date object or string in YYYY-MM-DD format. If supplied, only data on or before this date will be returned
#' @return a xts object
#' @export
#' @examples
#' tryCatch({
#' pdfetch_YAHOO(c("^gspc","^ixic"))
#' pdfetch_YAHOO(c("^gspc","^ixic"), "adjclose")
#' },
#' error = function(e) {},
#' warning = function(w) {}
#' )
pdfetch_YAHOO <- function(identifiers,
                          fields=c("open","high","low","close","volume","adjclose"),
                          from=as.Date("2007-01-01"),
                          to=Sys.Date()) {
  valid.fields <- c("open","high","low","close","volume","adjclose")

  if (!missing(from))
    from <- as.Date(from)
  if (!missing(to))
    to <- as.Date(to)
  if (missing(fields))
    fields <- valid.fields
  # Collapse the valid field names into one string: paste0() alone recycled the
  # prefix over the vector and produced a garbled multi-part error message.
  if (length(setdiff(fields, valid.fields)) > 0)
    stop(paste0("Invalid fields, must be one of ",
                paste(valid.fields, collapse=", ")))

  results <- list()
  # seq_along() is safe for a zero-length identifier vector (1:length() is not)
  for (i in seq_along(identifiers)) {
    # Yahoo's CSV endpoint encodes the range as a/b/c = start month (0-based),
    # start day, start year and d/e/f = end month (0-based), end day, end year.
    url <- paste0("http://chart.yahoo.com/table.csv?s=", identifiers[i],
                  "&c=", year(from),
                  "&a=", month(from)-1,
                  "&b=", day(from),
                  "&f=", year(to),
                  "&d=", month(to)-1,
                  "&e=", day(to))
    req <- GET(url)
    fr <- content(req)

    # Column 1 holds the date; the remaining columns follow valid.fields order
    x <- xts(fr[,match(fields, valid.fields)+1], as.Date(fr[, 1]))
    dim(x) <- c(nrow(x), ncol(x))
    if (length(fields)==1)
      colnames(x) <- identifiers[i]
    else
      colnames(x) <- paste(identifiers[i], fields, sep=".")
    results[[identifiers[i]]] <- x
  }

  # merge.xts() mangles the column names, so capture and restore them
  storenames <- sapply(results, names)
  results <- do.call(merge.xts, results)
  colnames(results) <- storenames
  results
}
#' Fetch data from St Louis Fed's FRED database
#'
#' @param identifiers a vector of FRED series IDs
#' @return a xts object
#' @export
#' @examples
#' tryCatch(pdfetch_FRED(c("GDPC1", "PCECC96")),
#' error = function(e) {},
#' warning = function(w) {}
#' )
pdfetch_FRED <- function(identifiers) {
  results <- list()
  for (i in seq_along(identifiers)) {
    url <- paste0("https://research.stlouisfed.org/fred2/series/",identifiers[i],"/downloaddata/",identifiers[i],".txt")
    req <- GET(url)

    # Close the text connections explicitly: leaving them open leaks
    # connections and triggers "closing unused connection" warnings later.
    con <- textConnection(content(req))
    fileLines <- readLines(con)
    close(con)

    # Header line 6 looks like "Frequency: Quarterly, ..."; the observations
    # start right after the line containing "DATE".
    freq <- sub(",", "", strsplit(fileLines[6], " +")[[1]][2])
    skip <- grep("DATE", fileLines)[1]
    con <- textConnection(content(req))
    fr <- utils::read.fwf(con, skip=skip, widths=c(10,20), na.strings=".", colClasses=c("character","numeric"))
    close(con)

    dates <- as.Date(fr[,1], origin="1970-01-01")
    # Shift period-start dates to period ends so frequencies align when merged
    if (freq == "Annual")
      dates <- year_end(dates)
    else if (freq == "Semiannual")
      dates <- halfyear_end(dates)
    else if (freq == "Quarterly")
      dates <- quarter_end(dates)
    else if (freq == "Monthly")
      dates <- month_end(dates)

    ix <- !is.na(dates)
    x <- xts(as.matrix(fr[ix,2]), dates[ix])
    dim(x) <- c(nrow(x), 1)
    colnames(x) <- identifiers[i]
    results[[identifiers[i]]] <- x
  }
  do.call(merge.xts, results)
}
#' Fetch data from European Central Bank's statistical data warehouse
#'
#' @param identifiers a vector of ECB series IDs
#' @return a xts object
#' @export
#' @examples
#' tryCatch(pdfetch_ECB("FM.B.U2.EUR.4F.KR.DFR.CHG"),
#' error = function(e) {},
#' warning = function(w) {}
#' )
pdfetch_ECB <- function(identifiers) {
  results <- list()
  for (i in seq_along(identifiers)) {
    req <- GET(paste0("http://sdw.ecb.europa.eu/quickviewexport.do?SERIES_KEY=",identifiers[i],"&type=csv"))
    tmp <- content(req, as="text")

    # Close the text connection explicitly to avoid leaking connections.
    # The first 5 rows are header/metadata; column 1 = period, column 2 = value.
    con <- textConnection(tmp)
    fr <- utils::read.csv(con, header=FALSE, stringsAsFactors=FALSE)[-c(1:5),]
    close(con)

    # A plain character result means the export had no data table at all
    if (inherits(fr, "character"))
      stop(paste0("Series ", identifiers[i], " not found"))

    # The second dot-separated component of an ECB series key is its frequency
    freq <- strsplit(identifiers[i], "\\.")[[1]][2]
    if (freq == "A") {
      dates <- as.Date(ISOdate(as.numeric(fr[,1]), 12, 31))
    } else if (freq == "H") {
      year <- as.numeric(substr(fr[,1], 1, 4))
      month <- as.numeric(substr(fr[,1], 6, 6))*6
      dates <- month_end(as.Date(ISOdate(year, month, 1)))
    } else if (freq == "Q") {
      dates <- quarter_end(as.Date(as.yearqtr(fr[,1])))
    } else if (freq == "M") {
      dates <- month_end(as.Date(as.yearmon(fr[,1], "%Y%b")))
    } else if (freq == "B" || freq == "D") {
      dates <- as.Date(fr[,1])
    } else {
      stop("Unsupported frequency")
    }

    # NOTE(review): origin is not an argument of xts(); it is absorbed by ...
    # and stored as an xts attribute. Kept as-is for backward compatibility.
    x <- xts(as.matrix(suppressWarnings(as.numeric(fr[,2]))), dates, origin="1970-01-01")
    dim(x) <- c(nrow(x), 1)
    colnames(x) <- identifiers[i]
    results[[identifiers[i]]] <- x
  }
  do.call(merge.xts, results)
}
# Download and parse the Eurostat data structure definition (DSD) for a dataset
pdfetch_EUROSTAT_GETDSD <- function(flowRef) {
  dsd_url <- paste0(
    "http://ec.europa.eu/eurostat/SDMX/diss-web/rest/datastructure/ESTAT/DSD_",
    flowRef
  )
  response <- GET(dsd_url, add_headers(useragent="RCurl"))
  xmlInternalTreeParse(content(response, as="text"))
}
#' Fetch description for a Eurostat dataset
#'
#' Prints, for each dimension of the dataset, the codes of its code list
#' together with their human-readable descriptions.
#'
#' @param flowRef Eurostat dataset code
#' @return nothing useful; the dimension/code listing is printed to the console
#' @export
#' @examples
#' tryCatch(pdfetch_EUROSTAT_DSD("namq_gdp_c"),
#' error = function(e) {},
#' warning = function(w) {}
#' )
pdfetch_EUROSTAT_DSD <- function(flowRef) {
doc <- pdfetch_EUROSTAT_GETDSD(flowRef)
# Dimensions of the cube, excluding observation-level attributes
concepts <- setdiff(unlist(getNodeSet(doc, "//str:Dimension/@id")), c("OBS_VALUE","OBS_STATUS","OBS_FLAG"))
for (concept in concepts) {
# Each dimension references a code list; pull its codes and display names
codelist_id <- unclass(getNodeSet(doc, paste0("//str:Dimension[@id='",concept,"']//str:Enumeration/Ref/@id"))[[1]])
codes <- unlist(getNodeSet(doc, paste0("//str:Codelist[@id='",codelist_id,"']/str:Code/@id")))
descriptions <- unlist(getNodeSet(doc, paste0("//str:Codelist[@id='",codelist_id,"']/str:Code/com:Name/text()")))
# Left-pad codes so descriptions line up in the printed table
max.code.length <- max(sapply(codes, nchar))
print("")
print(paste(rep("=", 50), collapse=""))
print(concept)
print(paste(rep("=", 50), collapse=""))
for (j in 1:length(codes)) {
print(sprintf(paste0("%-",max.code.length+5,"s %s"), codes[j], xmlValue(descriptions[[j]])))
}
}
}
#' Fetch data from Eurostat
#'
#' Eurostat stores its statistics in data cubes, which can be browsed at
#' \url{http://epp.eurostat.ec.europa.eu/portal/page/portal/statistics/search_database}. To access data, specify the name of a data cube and optionally filter it based on its dimensions.
#'
#' @param flowRef Eurostat dataset code
#' @param from a Date object or string in YYYY-MM-DD format. If supplied, only data on or after this date will be returned
#' @param to a Date object or string in YYYY-MM-DD format. If supplied, only data on or before this date will be returned
#' @param ... optional dimension filters for the dataset
#' @return a xts object
#' @export
#' @examples
#' tryCatch(pdfetch_EUROSTAT("namq_gdp_c", FREQ="Q", S_ADJ="SWDA", UNIT="MIO_EUR",
#' INDIC_NA="B1GM", GEO=c("DE","UK")),
#' error = function(e) {},
#' warning = function(w) {}
#' )
pdfetch_EUROSTAT <- function(flowRef, from, to, ...) {
arguments <- list(...)
# Fetch the data structure definition to learn the cube's dimensions
doc <- pdfetch_EUROSTAT_GETDSD(flowRef)
concepts <- setdiff(unlist(getNodeSet(doc, "//str:Dimension/@id")), c("OBS_VALUE","OBS_STATUS","OBS_FLAG"))
# Build the SDMX series key: one dot-separated slot per dimension; filtered
# dimensions get their values joined with "+", unfiltered slots stay empty
# (wildcard).
key <- paste(sapply(concepts, function(concept) {
if (concept %in% names(arguments)) {
paste(arguments[[concept]], collapse="+")
} else {
""
}
}), collapse=".")
if (!missing(from))
from <- as.Date(from)
if (!missing(to))
to <- as.Date(to)
# Append startPeriod/endPeriod query parameters only for supplied bounds
if (!missing(from) && !missing(to))
url <- paste0("http://ec.europa.eu/eurostat/SDMX/diss-web/rest/data/",flowRef,"/",key,"/?startPeriod=",from,"&endPeriod=",to)
else if (!missing(from))
url <- paste0("http://ec.europa.eu/eurostat/SDMX/diss-web/rest/data/",flowRef,"/",key,"/?startPeriod=",from)
else if (!missing(to))
url <- paste0("http://ec.europa.eu/eurostat/SDMX/diss-web/rest/data/",flowRef,"/",key,"/?endPeriod=",to)
else
url <- paste0("http://ec.europa.eu/eurostat/SDMX/diss-web/rest/data/",flowRef,"/",key)
req <- GET(url, add_headers(useragent="RCurl"))
doc <- xmlInternalTreeParse(content(req, as="text"))
results <- list()
seriesSet <- getNodeSet(doc, "//generic:Series")
if (length(seriesSet) == 0) {
warning("No series found")
return(NULL)
}
for (i in 1:length(seriesSet)) {
series <- seriesSet[[i]]
# Rebuild each series id from its key values, in DSD dimension order
idvalues <- list()
for (node in getNodeSet(series, "generic:SeriesKey/generic:Value", "generic"))
idvalues[[xmlGetAttr(node, "id")]] <- xmlGetAttr(node, "value")
id <- paste(sapply(concepts, function(concept) idvalues[[concept]]), collapse=".")
freq <- xmlGetAttr(getNodeSet(series, "generic:SeriesKey/generic:Value[@id='FREQ']", "generic")[[1]], "value")
# Convert the period labels into period-end Dates, by frequency
if (freq == "A") {
dates <- as.Date(ISOdate(as.numeric(unlist(getNodeSet(series, ".//generic:ObsDimension/@value", "generic"))),12,31))
} else if (freq == "Q") {
# Quarterly labels (e.g. "2012-Q3") are rebuilt as "2012-3" for as.yearqtr
dates <- as.Date(as.yearqtr(
sapply(
unlist(getNodeSet(series, ".//generic:ObsDimension/@value", "generic")),
function(x) paste(substr(x, 1, 4), substr(x, 7, 8), sep="-")
)
))
dates <- quarter_end(dates)
} else if (freq == "M") {
dates <- as.Date(as.yearmon(unlist(getNodeSet(series, ".//generic:ObsDimension/@value", "generic")), format="%Y-%m"))
dates <- month_end(dates)
} else if (freq == "D") {
dates <- as.Date(unlist(getNodeSet(series, ".//generic:ObsDimension/@value", "generic")))
} else {
print(unlist(getNodeSet(series, ".//generic:ObsDimension/@value", "generic")))
stop("Unsupported frequency")
}
values <- as.numeric(getNodeSet(series, ".//generic:ObsValue/@value", "generic"))
x <- xts(values, dates)
dim(x) <- c(nrow(x),1)
colnames(x) <- id
results[[i]] <- x
}
# Merge all series and trim leading/trailing all-NA rows
na.trim(do.call(merge.xts, results), is.na="all")
}
#' Fetch data from World Bank
#'
#' @param indicators a vector of World Bank indicators
#' @param countries a vector of countrie identifiers, which can be 2- or
#' 3-character ISO codes. The special option "all" retrieves all countries.
#' @return a xts object
#' @export
#' @examples
#' tryCatch(pdfetch_WB("NY.GDP.MKTP.CD", c("BR","MX")),
#' error = function(e) {},
#' warning = function(w) {}
#' )
pdfetch_WB <- function(indicators, countries="all") {
  # The API accepts semicolon-separated lists of countries and indicators
  country_part <- paste(countries, collapse=";")
  indicator_part <- paste(indicators, collapse=";")
  url <- paste0("http://api.worldbank.org/countries/", country_part,
                "/indicators/", indicator_part, "?format=json&per_page=1000")
  response <- GET(url)

  # Element 2 of the JSON payload is the observation table (when data exist)
  payload <- fromJSON(content(response, as="text"))[[2]]
  if (!inherits(payload, "data.frame")) {
    warning("No series found")
    return(NULL)
  }

  # One long row per (indicator.country, year) observation.
  # This dating won't always work, need to detect frequency.
  long <- data.frame(indicator=paste(payload$indicator$id, payload$country$id, sep="."),
                     value=as.numeric(payload$value),
                     date=as.Date(ISOdate(as.numeric(payload$date), 12, 31)))

  # Pivot to wide (one column per indicator.country) and trim all-NA edges
  wide <- dcast(long, date ~ indicator)
  na.trim(xts(subset(wide, select=-date), wide$date), is.na="all")
}
#' Fetch data from the Bank of England Interactive Statistical Database
#'
#' @param identifiers a vector of BoE series codes
#' @param from start date
#' @param to end date; if not given, today's date will be used
#' @return a xts object
#' @export
#' @examples
#' tryCatch(pdfetch_BOE(c("LPMVWYR", "LPMVWYR"), "2012-01-01"),
#' error = function(e) {},
#' warning = function(w) {}
#' )
pdfetch_BOE <- function(identifiers, from, to=Sys.Date()) {
  # The IADB export endpoint rejects requests for more than 300 series
  if (length(identifiers) > 300)
    stop("At most 300 series can be downloaded at once")

  from <- as.Date(from)
  to <- as.Date(to)

  url <- paste0("http://www.bankofengland.co.uk/boeapps/iadb/fromshowcolumns.asp?csv.x=yes",
                "&SeriesCodes=",paste(identifiers, collapse=","),
                "&CSVF=TN&VPD=Y&UsingCodes=Y",
                "&Datefrom=", format(from, "%d/%b/%Y"),
                "&Dateto=", format(to, "%d/%b/%Y"))

  tmp <- tempfile()
  # Clean the temp file up even if the download or the parse below fails
  # (previously it leaked on any error before the explicit unlink)
  on.exit(unlink(tmp), add=TRUE)
  utils::download.file(url, destfile=tmp, quiet=TRUE)
  fr <- utils::read.csv(tmp, header=TRUE)

  # The first column holds dates formatted like "01 Jan 2012"
  dates <- as.Date(fr[,1], "%d %b %Y")
  xts(fr[,-1], dates)
}
#' Fetch data from U.S. Bureau of Labor Statistics
#'
#' @param identifiers a vector of BLS time series IDs
#' @param from start year
#' @param to end year. Note that the request will fail if this is a future year
#' that is beyond the last available data point in the series.
#' @return a xts object
#' @export
#' @examples
#' tryCatch(pdfetch_BLS(c("EIUIR","EIUIR100"), 2005, 2010),
#' error = function(e) {},
#' warning = function(w) {}
#' )
pdfetch_BLS <- function(identifiers, from, to) {
if (!is.numeric(from) || !is.numeric(to))
stop("Both from and to must be integers")
if (to < from)
stop("to must be greater than or equal to from")
# Split the year range into chunks because the v1 API limits the span of a
# single request; breakpoints fall every 10 years, plus the end year.
# NOTE(review): the first chunk covers years[1]..years[2] inclusive, i.e. up
# to 11 calendar years -- confirm this stays within the API's span limit.
years <- seq(from, to, by=10)
if (years[length(years)] != to || length(years) == 1)
years <- c(years, to)
results <- list()
# Seed each series with NA; per-chunk data frames are rbind-ed onto it. The
# resulting leading NA row is later removed by subset() because its period
# evaluates to NA in the filter condition.
for (id in identifiers)
results[[id]] <- NA
for (i in 2:length(years)) {
# Chunk i covers (years[i-1]+1)..years[i]; the first chunk starts at years[1]
from <- years[i-1]+1
to <- years[i]
if (i == 2)
from <- years[i-1]
req <- list(seriesid=identifiers, startyear=unbox(from), endyear=unbox(to))
resp <- POST("http://api.bls.gov/publicAPI/v1/timeseries/data/", body=req, encode="json")
resp <- fromJSON(content(resp, as="text"))
if (resp$status != "REQUEST_SUCCEEDED")
stop("Request failed")
series <- resp$Results$series
for (j in 1:length(identifiers)) {
seriesID <- series$seriesID[j]
if (length(series$data[[j]]) > 0)
results[[seriesID]] <- rbind(results[[seriesID]], series$data[[j]])
}
}
# Series still holding the NA seed never received any data in the range
ix <- sapply(results, function(x) inherits(x, "data.frame"))
if (!all(ix))
warning(paste("No data found for series", identifiers[!ix], "in specified time range"))
if (all(!ix))
return(NULL)
results <- results[ix]
for (id in names(results)) {
# Drop the annual-average pseudo-period M13 (and the NA seed row)
dat <- subset(results[[id]], period != 'M13')
# Period codes look like "M01"/"Q01"; the first character is the frequency
freq <- substr(dat$period[1], 1, 1)
periods <- as.numeric(substr(dat$period, 2, 3))
years <- as.numeric(dat$year)
if (freq == "M")
dates <- as.Date(ISOdate(years, periods, 1))
else if (freq == "Q")
dates <- as.Date(ISOdate(years, periods*3, 1))
else if (freq == "A")
dates <- as.Date(ISOdate(years, 12, 31))
else
stop(paste("Unrecognized frequency", freq))
dates <- month_end(dates)
results[[id]] <- xts(as.numeric(dat$value), dates)
colnames(results[[id]]) <- id
}
# Keep only requested series that returned data, preserving request order
identifiers <- identifiers[identifiers %in% names(results)]
na.trim(do.call(merge.xts, results), is.na="all")[, identifiers]
}
#' Fetch data from the French National Institute of Statistics and Economic Studies (INSEE)
#'
#' @param identifiers a vector of INSEE series codes
#' @return a xts object
#' @export
#' @examples
#' tryCatch(pdfetch_INSEE(c("000810635")),
#' error = function(e) {},
#' warning = function(w) {}
#' )
pdfetch_INSEE <- function(identifiers) {
results <- list()
for (id in identifiers) {
url <- paste0("http://www.bdm.insee.fr/bdm2/affichageSeries.action?idbank=",id)
# Request the English page so the column headers tested below are stable;
# a warning (e.g. download failure) leaves page NULL and the series is skipped
page <- tryCatch({
req <- GET(url, add_headers("Accept-Language"="en-US,en;q=0.8"))
content(req, as="text")
}, warning = function(w) {
})
if (!is.null(page)) {
# The first HTML table on the page holds the observations
doc <- htmlParse(page)
dat <- readHTMLTable(doc)[[1]]
# Infer the frequency from the second column's header
if (names(dat)[2] == "Month") {
year <- as.numeric(as.character(dat[,1]))
month <- as.character(dat[,2])
dates <- as.Date(paste(year, month, 1), format="%Y %b %d")
} else if (names(dat[2]) == "Quarter") {
# (names(dat[2]) is equivalent to names(dat)[2] here)
year <- as.numeric(as.character(dat[,1]))
month <- as.numeric(as.character(dat[,2]))*3
dates <- as.Date(ISOdate(year, month, 1))
} else if (ncol(dat) == 2) {
# Two columns but no Month/Quarter header: treat as annual data
year <- as.numeric(as.character(dat[,1]))
dates <- as.Date(ISOdate(year, 12, 31))
} else {
stop("Unrecognized frequency")
}
# NOTE(review): stripping every non-digit also removes decimal separators
# and minus signs -- confirm the BDM values are non-negative integers
values <- as.numeric(gsub("[^0-9]", "", dat[,ncol(dat)]))
dates <- month_end(dates)
x <- xts(values, dates)
colnames(x) <- id
results[[id]] <- x
} else {
warning(paste("Series", id, "not found"))
}
}
if (length(results) == 0)
return(NULL)
na.trim(do.call(merge.xts, results), is.na="all")
}
#' Fetch data from the UK Office of National Statistics
#' @param identifiers a vector of ONS series codes
#' @param dataset ONS dataset name
#' @return a xts object
#' @export
#' @examples
#' tryCatch(pdfetch_ONS(c("LF24","LF2G"), "lms"),
#' error = function(e) {},
#' warning = function(w) {}
#' )
pdfetch_ONS <- function(identifiers, dataset) {
  # Series codes are upper-case and dataset names lower-case in the ONS URL
  identifiers <- toupper(identifiers)
  dataset <- tolower(dataset)

  results <- list()
  for (id in identifiers) {
    url <- paste0("http://www.ons.gov.uk/ons/datasets-and-tables/downloads/csv.csv?dataset=",
                  dataset,"&cdid=",id)
    tmp <- tempfile()
    # download.file() signals failure via warnings/errors; in either case we
    # clean up the temp file and skip this series
    retval <- tryCatch({
      utils::download.file(url, destfile=tmp, quiet=TRUE)
    }, warning = function(w) {
      warning(paste("Unable to download series",id,"from dataset",dataset))
      unlink(tmp)
      w
    }, error = function(e) {
      print(e)
      unlink(tmp)
      e
    })
    if (inherits(retval, "warning") || inherits(retval, "error"))
      next

    fr <- utils::read.csv(tmp, header=TRUE, stringsAsFactors=FALSE)
    # Data rows end just above the "(c) Crown Copyright" footer line
    fr <- fr[2:(which(fr[,1]=='\xa9 Crown Copyright')-1),]
    fr[,2] <- as.numeric(fr[,2])
    unlink(tmp)

    # Infer the frequency from the shape of the period labels
    datesA <- grep("^[0-9]{4}$", fr[,1])           # annual, e.g. "2001"
    datesQ <- grep("^[0-9]{4} Q[1-4]$", fr[,1])    # quarterly, e.g. "2001 Q3"
    datesM <- grep("^[0-9]{4} [A-Z]{3}$", fr[,1])  # monthly, e.g. "2001 SEP"

    dates <- NULL
    if (length(datesM) > 0) {
      dates <- as.Date(paste(fr[datesM,1],1), "%Y %b %d")
      dateix <- datesM
    } else if (length(datesQ) > 0) {
      y <- as.numeric(substr(fr[datesQ,1], 1, 4))
      m <- as.numeric(substr(fr[datesQ,1], 7, 7))*3
      dates <- as.Date(ISOdate(y, m, 1))
      dateix <- datesQ
    } else if (length(datesA) > 0) {
      dates <- as.Date(ISOdate(as.numeric(fr[datesA,1]), 12, 31))
      dateix <- datesA
    }

    if (!is.null(dates)) {
      dates <- month_end(dates)
      x <- xts(fr[dateix,2], dates)
      colnames(x) <- id
      results[[id]] <- x
    }
  }

  if (length(results) == 0)
    return(NULL)
  na.trim(do.call(merge.xts, results), is.na="all")
}
#' Fetch data from the US Energy Information Administration
#' @param identifiers a vector of EIA series codes
#' @param api_key EIA API key
#' @return a xts object
#' @export
#' @examples
#' tryCatch(pdfetch_EIA(c("ELEC.GEN.ALL-AK-99.A","ELEC.GEN.ALL-AK-99.Q"), EIA_KEY),
#' error = function(e) {},
#' warning = function(w) {}
#' )
pdfetch_EIA <- function(identifiers, api_key) {
  results <- list()
  for (i in seq_along(identifiers)) {
    id <- identifiers[i]
    url <- paste0("http://api.eia.gov/series/?series_id=",id,"&api_key=",api_key)
    req <- GET(url)
    res <- fromJSON(content(req, as="text"))
    # A response without a "request" element means the series id was rejected
    if (is.null(res$request)) {
      warning(paste("Invalid series code",id))
      next
    }

    freq <- res$series$f
    # Each data element is a (period, value) pair
    dates <- unlist(lapply(res$series$data[[1]], function(x) x[1]))
    data <- as.numeric(unlist(lapply(res$series$data[[1]], function(x) x[2])))

    if (freq == "A") {
      dates <- as.Date(ISOdate(as.numeric(dates), 12, 31))
    } else if (freq == "Q") {
      y <- as.numeric(substr(dates, 1, 4))
      m <- 3*as.numeric(substr(dates, 6, 6))
      dates <- month_end(as.Date(ISOdate(y,m,1)))
    } else if (freq == "M") {
      y <- as.numeric(substr(dates, 1, 4))
      m <- as.numeric(substr(dates, 5, 6))
      dates <- month_end(as.Date(ISOdate(y,m,1)))
    } else if (freq == "W" || freq == "D") {
      dates <- as.Date(dates, "%Y%m%d")
    } else {
      # Skip series whose frequency we cannot date; previously the unparsed
      # character periods fell through to xts() below and aborted the call
      warning(paste("Unrecognized frequency",freq,"for series",id))
      next
    }

    # The API returns observations newest-first; reverse into time order
    x <- xts(rev(data), rev(dates))
    colnames(x) <- id
    results[[i]] <- x
  }

  if (length(results) == 0)
    return(NULL)
  na.trim(do.call(merge.xts, results), is.na="all")
}
| /pdfetch/R/pdfetch.R | no_license | ingted/R-Examples | R | false | false | 20,641 | r | #' Fetch data from Yahoo Finance
#'
#' @param identifiers a vector of Yahoo Finance tickers
#' @param fields can be any of "open", "high", "low", "close", "volume", or "adjclose"
#' @param from a Date object or string in YYYY-MM-DD format. If supplied, only data on or after this date will be returned
#' @param to a Date object or string in YYYY-MM-DD format. If supplied, only data on or before this date will be returned
#' @return a xts object
#' @export
#' @examples
#' tryCatch({
#' pdfetch_YAHOO(c("^gspc","^ixic"))
#' pdfetch_YAHOO(c("^gspc","^ixic"), "adjclose")
#' },
#' error = function(e) {},
#' warning = function(w) {}
#' )
pdfetch_YAHOO <- function(identifiers,
fields=c("open","high","low","close","volume","adjclose"),
from=as.Date("2007-01-01"),
to=Sys.Date()) {
valid.fields <- c("open","high","low","close","volume","adjclose")
if (!missing(from))
from <- as.Date(from)
if (!missing(to))
to <- as.Date(to)
if (missing(fields))
fields <- valid.fields
if (length(setdiff(fields,valid.fields)) > 0)
stop(paste0("Invalid fields, must be one of ", valid.fields))
results <- list()
for (i in 1:length(identifiers)) {
url <- paste0("http://chart.yahoo.com/table.csv?s=",identifiers[i],
"&c=", year(from),
"&a=", month(from)-1,
"&b=", day(from),
"&f=", year(to),
"&d=", month(to)-1,
"&e=", day(to))
req <- GET(url)
fr <- content(req)
x <- xts(fr[,match(fields, valid.fields)+1], as.Date(fr[, 1]))
dim(x) <- c(nrow(x),ncol(x))
if (length(fields)==1)
colnames(x) <- identifiers[i]
else
colnames(x) <- paste(identifiers[i], fields, sep=".")
results[[identifiers[i]]] <- x
}
storenames <- sapply(results, names)
results <- do.call(merge.xts, results)
colnames(results) <- storenames
results
}
#' Fetch data from St Louis Fed's FRED database
#'
#' @param identifiers a vector of FRED series IDs
#' @return a xts object
#' @export
#' @examples
#' tryCatch(pdfetch_FRED(c("GDPC1", "PCECC96")),
#' error = function(e) {},
#' warning = function(w) {}
#' )
pdfetch_FRED <- function(identifiers) {
results <- list()
for (i in 1:length(identifiers)) {
url <- paste0("https://research.stlouisfed.org/fred2/series/",identifiers[i],"/downloaddata/",identifiers[i],".txt")
req <- GET(url)
fileLines <- readLines(textConnection(content(req)))
freq <- sub(",", "", strsplit(fileLines[6], " +")[[1]][2])
skip <- grep("DATE", fileLines)[1]
fr <- utils::read.fwf(textConnection(content(req)), skip=skip, widths=c(10,20), na.strings=".", colClasses=c("character","numeric"))
dates <- as.Date(fr[,1], origin="1970-01-01")
if (freq == "Annual")
dates <- year_end(dates)
else if (freq == "Semiannual")
dates <- halfyear_end(dates)
else if (freq == "Quarterly")
dates <- quarter_end(dates)
else if (freq == "Monthly")
dates <- month_end(dates)
ix <- !is.na(dates)
x <- xts(as.matrix(fr[ix,2]), dates[ix])
dim(x) <- c(nrow(x),1)
colnames(x) <- identifiers[i]
results[[identifiers[i]]] <- x
}
do.call(merge.xts, results)
}
#' Fetch data from European Central Bank's statistical data warehouse
#'
#' @param identifiers a vector of ECB series IDs
#' @return a xts object
#' @export
#' @examples
#' tryCatch(pdfetch_ECB("FM.B.U2.EUR.4F.KR.DFR.CHG"),
#' error = function(e) {},
#' warning = function(w) {}
#' )
pdfetch_ECB <- function(identifiers) {
results <- list()
for (i in 1:length(identifiers)) {
req <- GET(paste0("http://sdw.ecb.europa.eu/quickviewexport.do?SERIES_KEY=",identifiers[i],"&type=csv"))
tmp <- content(req, as="text")
fr <- utils::read.csv(textConnection(tmp), header=F, stringsAsFactors=F)[-c(1:5),]
if (inherits(fr, "character"))
stop(paste0("Series ", identifiers[i], " not found"))
freq <- strsplit(identifiers[i], "\\.")[[1]][2]
if (freq == "A") {
dates <- as.Date(ISOdate(as.numeric(fr[,1]), 12, 31))
} else if (freq == "H") {
year <- as.numeric(substr(fr[,1], 1, 4))
month <- as.numeric(substr(fr[,1], 6, 6))*6
dates <- month_end(as.Date(ISOdate(year, month, 1)))
} else if (freq == "Q") {
dates <- quarter_end(as.Date(as.yearqtr(fr[,1])))
} else if (freq == "M") {
dates <- month_end(as.Date(as.yearmon(fr[,1], "%Y%b")))
} else if (freq == "B" || freq == "D") {
dates <- as.Date(fr[,1])
} else {
stop("Unsupported frequency")
}
x <- xts(as.matrix(suppressWarnings(as.numeric(fr[,2]))), dates, origin="1970-01-01")
dim(x) <- c(nrow(x),1)
colnames(x) <- identifiers[i]
results[[identifiers[i]]] <- x
}
do.call(merge.xts, results)
}
# Download Eurostat DSD file
pdfetch_EUROSTAT_GETDSD <- function(flowRef) {
url <- paste0("http://ec.europa.eu/eurostat/SDMX/diss-web/rest/datastructure/ESTAT/DSD_", flowRef)
req <- GET(url, add_headers(useragent="RCurl"))
doc <- xmlInternalTreeParse(content(req, as="text"))
doc
}
#' Fetch description for a Eurostat dataset
#' @param flowRef Eurostat dataset code
#' @export
#' @examples
#' tryCatch(pdfetch_EUROSTAT_DSD("namq_gdp_c"),
#' error = function(e) {},
#' warning = function(w) {}
#' )
pdfetch_EUROSTAT_DSD <- function(flowRef) {
doc <- pdfetch_EUROSTAT_GETDSD(flowRef)
concepts <- setdiff(unlist(getNodeSet(doc, "//str:Dimension/@id")), c("OBS_VALUE","OBS_STATUS","OBS_FLAG"))
for (concept in concepts) {
codelist_id <- unclass(getNodeSet(doc, paste0("//str:Dimension[@id='",concept,"']//str:Enumeration/Ref/@id"))[[1]])
codes <- unlist(getNodeSet(doc, paste0("//str:Codelist[@id='",codelist_id,"']/str:Code/@id")))
descriptions <- unlist(getNodeSet(doc, paste0("//str:Codelist[@id='",codelist_id,"']/str:Code/com:Name/text()")))
max.code.length <- max(sapply(codes, nchar))
print("")
print(paste(rep("=", 50), collapse=""))
print(concept)
print(paste(rep("=", 50), collapse=""))
for (j in 1:length(codes)) {
print(sprintf(paste0("%-",max.code.length+5,"s %s"), codes[j], xmlValue(descriptions[[j]])))
}
}
}
#' Fetch data from Eurostat
#'
#' Eurostat stores its statistics in data cubes, which can be browsed at
#' \url{http://epp.eurostat.ec.europa.eu/portal/page/portal/statistics/search_database}. To access data, specify the name of a data cube and optionally filter it based on its dimensions.
#'
#' @param flowRef Eurostat dataset code
#' @param from a Date object or string in YYYY-MM-DD format. If supplied, only data on or after this date will be returned
#' @param to a Date object or string in YYYY-MM-DD format. If supplied, only data on or before this date will be returned
#' @param ... optional dimension filters for the dataset
#' @return a xts object
#' @export
#' @examples
#' tryCatch(pdfetch_EUROSTAT("namq_gdp_c", FREQ="Q", S_ADJ="SWDA", UNIT="MIO_EUR",
#' INDIC_NA="B1GM", GEO=c("DE","UK")),
#' error = function(e) {},
#' warning = function(w) {}
#' )
pdfetch_EUROSTAT <- function(flowRef, from, to, ...) {
arguments <- list(...)
doc <- pdfetch_EUROSTAT_GETDSD(flowRef)
concepts <- setdiff(unlist(getNodeSet(doc, "//str:Dimension/@id")), c("OBS_VALUE","OBS_STATUS","OBS_FLAG"))
key <- paste(sapply(concepts, function(concept) {
if (concept %in% names(arguments)) {
paste(arguments[[concept]], collapse="+")
} else {
""
}
}), collapse=".")
if (!missing(from))
from <- as.Date(from)
if (!missing(to))
to <- as.Date(to)
if (!missing(from) && !missing(to))
url <- paste0("http://ec.europa.eu/eurostat/SDMX/diss-web/rest/data/",flowRef,"/",key,"/?startPeriod=",from,"&endPeriod=",to)
else if (!missing(from))
url <- paste0("http://ec.europa.eu/eurostat/SDMX/diss-web/rest/data/",flowRef,"/",key,"/?startPeriod=",from)
else if (!missing(to))
url <- paste0("http://ec.europa.eu/eurostat/SDMX/diss-web/rest/data/",flowRef,"/",key,"/?endPeriod=",to)
else
url <- paste0("http://ec.europa.eu/eurostat/SDMX/diss-web/rest/data/",flowRef,"/",key)
req <- GET(url, add_headers(useragent="RCurl"))
doc <- xmlInternalTreeParse(content(req, as="text"))
results <- list()
seriesSet <- getNodeSet(doc, "//generic:Series")
if (length(seriesSet) == 0) {
warning("No series found")
return(NULL)
}
for (i in 1:length(seriesSet)) {
series <- seriesSet[[i]]
idvalues <- list()
for (node in getNodeSet(series, "generic:SeriesKey/generic:Value", "generic"))
idvalues[[xmlGetAttr(node, "id")]] <- xmlGetAttr(node, "value")
id <- paste(sapply(concepts, function(concept) idvalues[[concept]]), collapse=".")
freq <- xmlGetAttr(getNodeSet(series, "generic:SeriesKey/generic:Value[@id='FREQ']", "generic")[[1]], "value")
if (freq == "A") {
dates <- as.Date(ISOdate(as.numeric(unlist(getNodeSet(series, ".//generic:ObsDimension/@value", "generic"))),12,31))
} else if (freq == "Q") {
dates <- as.Date(as.yearqtr(
sapply(
unlist(getNodeSet(series, ".//generic:ObsDimension/@value", "generic")),
function(x) paste(substr(x, 1, 4), substr(x, 7, 8), sep="-")
)
))
dates <- quarter_end(dates)
} else if (freq == "M") {
dates <- as.Date(as.yearmon(unlist(getNodeSet(series, ".//generic:ObsDimension/@value", "generic")), format="%Y-%m"))
dates <- month_end(dates)
} else if (freq == "D") {
dates <- as.Date(unlist(getNodeSet(series, ".//generic:ObsDimension/@value", "generic")))
} else {
print(unlist(getNodeSet(series, ".//generic:ObsDimension/@value", "generic")))
stop("Unsupported frequency")
}
values <- as.numeric(getNodeSet(series, ".//generic:ObsValue/@value", "generic"))
x <- xts(values, dates)
dim(x) <- c(nrow(x),1)
colnames(x) <- id
results[[i]] <- x
}
na.trim(do.call(merge.xts, results), is.na="all")
}
#' Fetch data from World Bank
#'
#' @param indicators a vector of World Bank indicators
#' @param countries a vector of countrie identifiers, which can be 2- or
#' 3-character ISO codes. The special option "all" retrieves all countries.
#' @return a xts object
#' @export
#' @examples
#' tryCatch(pdfetch_WB("NY.GDP.MKTP.CD", c("BR","MX")),
#' error = function(e) {},
#' warning = function(w) {}
#' )
pdfetch_WB <- function(indicators, countries="all") {
countries <- paste(countries, collapse=";")
indicators <- paste(indicators, collapse=";")
query <- paste0("http://api.worldbank.org/countries/",countries,"/indicators/",indicators,"?format=json&per_page=1000")
req <- GET(query)
x <- fromJSON(content(req, as="text"))[[2]]
if (!inherits(x, "data.frame")) {
warning("No series found")
return(NULL)
}
results <- data.frame(indicator=paste(x$indicator$id, x$country$id, sep="."),
value=as.numeric(x$value),
date=as.Date(ISOdate(as.numeric(x$date), 12, 31))) # This dating won't always work, need to detect frequency
results <- dcast(results, date ~ indicator)
results <- na.trim(xts(subset(results, select=-date), results$date), is.na="all")
results
}
#' Fetch data from the Bank of England Interactive Statistical Database
#'
#' Downloads up to 300 BoE series in a single CSV request and returns them
#' merged into one xts object, one column per series.
#'
#' @param identifiers a vector of BoE series codes
#' @param from start date
#' @param to end date; if not given, today's date will be used
#' @return a xts object
#' @export
#' @examples
#' tryCatch(pdfetch_BOE(c("LPMVWYR", "LPMVWYR"), "2012-01-01"),
#'   error = function(e) {},
#'   warning = function(w) {}
#' )
pdfetch_BOE <- function(identifiers, from, to=Sys.Date()) {
  # The IADB CSV endpoint rejects requests for more than 300 series at once.
  if (length(identifiers) > 300)
    stop("At most 300 series can be downloaded at once")
  from <- as.Date(from)
  to <- as.Date(to)
  # Dates must be formatted dd/Mon/yyyy for the query string.
  # NOTE(review): "%b" is locale-dependent; assumes an English locale.
  url <- paste0("http://www.bankofengland.co.uk/boeapps/iadb/fromshowcolumns.asp?csv.x=yes",
    "&SeriesCodes=",paste(identifiers, collapse=","),
    "&CSVF=TN&VPD=Y&UsingCodes=Y",
    "&Datefrom=", format(from, "%d/%b/%Y"),
    "&Dateto=", format(to, "%d/%b/%Y"))
  # Download to a temp file, parse, then clean up.
  tmp <- tempfile()
  utils::download.file(url, destfile=tmp, quiet=TRUE)  # TRUE, not T: T is reassignable
  fr <- utils::read.csv(tmp, header=TRUE)
  unlink(tmp)
  # First column holds dates as "dd Mon yyyy"; remaining columns are series.
  dates <- as.Date(fr[,1], "%d %b %Y")
  xts(fr[,-1], dates)
}
#' Fetch data from U.S. Bureau of Labor Statistics
#'
#' Requests each series from the BLS v1 public API in chunks of at most ten
#' years (the API's per-request cap) and stitches the chunks together into
#' one xts object, one column per series, with dates normalized to month-end.
#'
#' @param identifiers a vector of BLS time series IDs
#' @param from start year
#' @param to end year. Note that the request will fail if this is a future year
#' that is beyond the last available data point in the series.
#' @return a xts object
#' @export
#' @examples
#' tryCatch(pdfetch_BLS(c("EIUIR","EIUIR100"), 2005, 2010),
#'   error = function(e) {},
#'   warning = function(w) {}
#' )
pdfetch_BLS <- function(identifiers, from, to) {
  if (!is.numeric(from) || !is.numeric(to))
    stop("Both from and to must be integers")
  if (to < from)
    stop("to must be greater than or equal to from")
  # Break the year range into chunks of at most 10 years; make sure the final
  # year is always the last breakpoint.
  years <- seq(from, to, by=10)
  if (years[length(years)] != to || length(years) == 1)
    years <- c(years, to)
  # Seed every entry with NA. Chunk data frames are rbind-ed onto the seed,
  # which leaves a leading all-NA row; that row is dropped later because
  # subset(.., period != 'M13') also removes rows whose period is NA. Series
  # that never receive data stay plain NA and are detected via inherits()
  # below.
  results <- list()
  for (id in identifiers)
    results[[id]] <- NA
  for (i in 2:length(years)) {
    from <- years[i-1]+1
    to <- years[i]
    if (i == 2)
      from <- years[i-1]  # first chunk includes its own start year
    req <- list(seriesid=identifiers, startyear=unbox(from), endyear=unbox(to))
    resp <- POST("http://api.bls.gov/publicAPI/v1/timeseries/data/", body=req, encode="json")
    resp <- fromJSON(content(resp, as="text"))
    if (resp$status != "REQUEST_SUCCEEDED")
      stop("Request failed")
    series <- resp$Results$series
    # seq_along is safe even if identifiers were empty (1:length would not be)
    for (j in seq_along(identifiers)) {
      seriesID <- series$seriesID[j]
      if (length(series$data[[j]]) > 0)
        results[[seriesID]] <- rbind(results[[seriesID]], series$data[[j]])
    }
  }
  # Entries that never received data are still plain NA (not data frames).
  ix <- vapply(results, function(x) inherits(x, "data.frame"), logical(1))
  if (!all(ix))
    # Collapse the missing IDs into one message; a vector passed to warning()
    # would be concatenated without separators.
    warning(paste("No data found for series", paste(identifiers[!ix], collapse=", "), "in specified time range"))
  if (all(!ix))
    return(NULL)
  results <- results[ix]
  for (id in names(results)) {
    # M13 is the BLS "annual average" pseudo-period; dropping it also drops
    # the NA seed row (see above).
    dat <- subset(results[[id]], period != 'M13')
    freq <- substr(dat$period[1], 1, 1)  # M/Q/A prefix encodes the frequency
    periods <- as.numeric(substr(dat$period, 2, 3))
    years <- as.numeric(dat$year)
    if (freq == "M")
      dates <- as.Date(ISOdate(years, periods, 1))
    else if (freq == "Q")
      dates <- as.Date(ISOdate(years, periods*3, 1))
    else if (freq == "A")
      dates <- as.Date(ISOdate(years, 12, 31))
    else
      stop(paste("Unrecognized frequency", freq))
    dates <- month_end(dates)
    results[[id]] <- xts(as.numeric(dat$value), dates)
    colnames(results[[id]]) <- id
  }
  # Restrict to series that were actually found, preserving request order.
  identifiers <- identifiers[identifiers %in% names(results)]
  na.trim(do.call(merge.xts, results), is.na="all")[, identifiers]
}
#' Fetch data from the French National Institute of Statistics and Economic Studies (INSEE)
#'
#' Scrapes each series' HTML display page and parses the first table.
#' Monthly, quarterly and annual series are recognized from the table's
#' column headings; dates are normalized to month-end.
#'
#' @param identifiers a vector of INSEE series codes
#' @return a xts object
#' @export
#' @examples
#' tryCatch(pdfetch_INSEE(c("000810635")),
#'   error = function(e) {},
#'   warning = function(w) {}
#' )
pdfetch_INSEE <- function(identifiers) {
  results <- list()
  for (id in identifiers) {
    url <- paste0("http://www.bdm.insee.fr/bdm2/affichageSeries.action?idbank=",id)
    # Request English headings so the "Month"/"Quarter" checks below match.
    # Any download warning (e.g. unknown idbank) makes the handler return
    # NULL, which is treated as "series not found".
    page <- tryCatch({
      req <- GET(url, add_headers("Accept-Language"="en-US,en;q=0.8"))
      content(req, as="text")
    }, warning = function(w) {
    })
    if (!is.null(page)) {
      doc <- htmlParse(page)
      dat <- readHTMLTable(doc)[[1]]
      if (names(dat)[2] == "Month") {
        year <- as.numeric(as.character(dat[,1]))
        month <- as.character(dat[,2])
        dates <- as.Date(paste(year, month, 1), format="%Y %b %d")
      } else if (names(dat)[2] == "Quarter") {
        year <- as.numeric(as.character(dat[,1]))
        month <- as.numeric(as.character(dat[,2]))*3
        dates <- as.Date(ISOdate(year, month, 1))
      } else if (ncol(dat) == 2) {
        # Two columns with no Month/Quarter heading: annual data.
        year <- as.numeric(as.character(dat[,1]))
        dates <- as.Date(ISOdate(year, 12, 31))
      } else {
        stop("Unrecognized frequency")
      }
      # Strip thousands separators etc. but KEEP the decimal point and minus
      # sign: the previous pattern "[^0-9]" silently turned e.g. "102.5" into
      # 1025 and dropped the sign of negative values.
      # NOTE(review): assumes "." decimals (English page requested) -- confirm.
      values <- as.numeric(gsub("[^0-9.-]", "", dat[,ncol(dat)]))
      dates <- month_end(dates)
      x <- xts(values, dates)
      colnames(x) <- id
      results[[id]] <- x
    } else {
      warning(paste("Series", id, "not found"))
    }
  }
  if (length(results) == 0)
    return(NULL)
  na.trim(do.call(merge.xts, results), is.na="all")
}
#' Fetch data from the UK Office of National Statistics
#'
#' Downloads each series from an ONS dataset as CSV and returns them merged
#' into one xts object. Monthly, quarterly and annual rows are recognized by
#' the shape of their date label; dates are normalized to month-end.
#'
#' @param identifiers a vector of ONS series codes
#' @param dataset ONS dataset name
#' @return a xts object
#' @export
#' @examples
#' tryCatch(pdfetch_ONS(c("LF24","LF2G"), "lms"),
#'   error = function(e) {},
#'   warning = function(w) {}
#' )
pdfetch_ONS <- function(identifiers, dataset) {
  # The ONS endpoint expects upper-case series codes and a lower-case dataset.
  identifiers <- toupper(identifiers)
  dataset <- tolower(dataset)
  results <- list()
  for (id in identifiers) {
    url <- paste0("http://www.ons.gov.uk/ons/datasets-and-tables/downloads/csv.csv?dataset=",
      dataset,"&cdid=",id)
    tmp <- tempfile()
    # Capture download failures so one bad series doesn't abort the rest;
    # the handlers delete the temp file and return the condition object.
    retval <- tryCatch({
      utils::download.file(url, destfile=tmp, quiet=TRUE)  # TRUE, not T
    }, warning = function(w) {
      warning(paste("Unable to download series",id,"from dataset",dataset))
      unlink(tmp)
      w
    }, error = function(e) {
      print(e)
      unlink(tmp)
      e
    })
    if (inherits(retval, "warning") || inherits(retval, "error"))
      next
    fr <- utils::read.csv(tmp, header=TRUE, stringsAsFactors=FALSE)
    # Keep only the data rows: everything before the Crown Copyright footer
    # (\xa9 is the Latin-1 copyright sign).
    fr <- fr[2:(which(fr[,1]=='\xa9 Crown Copyright')-1),]
    fr[,2] <- as.numeric(fr[,2])
    unlink(tmp)
    # Classify rows by the format of their date label.
    datesA <- grep("^[0-9]{4}$", fr[,1])           # e.g. "2012"
    datesQ <- grep("^[0-9]{4} Q[1-4]$", fr[,1])    # e.g. "2012 Q3"
    datesM <- grep("^[0-9]{4} [A-Z]{3}$", fr[,1])  # e.g. "2012 JAN"
    dates <- NULL
    # Prefer the highest frequency present in the file.
    if (length(datesM) > 0) {
      dates <- as.Date(paste(fr[datesM,1],1), "%Y %b %d")
      dateix <- datesM
    } else if (length(datesQ) > 0) {
      y <- as.numeric(substr(fr[datesQ,1], 1, 4))
      m <- as.numeric(substr(fr[datesQ,1], 7, 7))*3
      dates <- as.Date(ISOdate(y, m, 1))
      dateix <- datesQ
    } else if (length(datesA) > 0) {
      dates <- as.Date(ISOdate(as.numeric(fr[datesA,1]), 12, 31))
      dateix <- datesA
    }
    if (!is.null(dates)) {
      dates <- month_end(dates)
      x <- xts(fr[dateix,2], dates)
      colnames(x) <- id
      results[[id]] <- x
    }
  }
  if (length(results) == 0)
    return(NULL)
  na.trim(do.call(merge.xts, results), is.na="all")
}
#' Fetch data from the US Energy Information Administration
#'
#' Retrieves each series from the EIA API. Observations arrive newest-first
#' and are reversed into chronological order; monthly and quarterly dates are
#' normalized to month-end.
#'
#' @param identifiers a vector of EIA series codes
#' @param api_key EIA API key
#' @return a xts object
#' @export
#' @examples
#' tryCatch(pdfetch_EIA(c("ELEC.GEN.ALL-AK-99.A","ELEC.GEN.ALL-AK-99.Q"), EIA_KEY),
#'   error = function(e) {},
#'   warning = function(w) {}
#' )
pdfetch_EIA <- function(identifiers, api_key) {
  results <- list()
  for (i in seq_along(identifiers)) {  # seq_along: safe for empty input
    id <- identifiers[i]
    url <- paste0("http://api.eia.gov/series/?series_id=",id,"&api_key=",api_key)
    req <- GET(url)
    res <- fromJSON(content(req, as="text"))
    # A missing "request" element signals an API error for this code.
    if (is.null(res$request)) {
      warning(paste("Invalid series code",id))
      next
    }
    freq <- res$series$f
    # Each observation is a (date, value) pair.
    dates <- unlist(lapply(res$series$data[[1]], function(x) x[1]))
    data <- as.numeric(unlist(lapply(res$series$data[[1]], function(x) x[2])))
    if (freq == "A") {
      dates <- as.Date(ISOdate(as.numeric(dates), 12, 31))
    } else if (freq == "Q") {
      y <- as.numeric(substr(dates, 1, 4))
      m <- 3*as.numeric(substr(dates, 6, 6))  # "YYYYQn": n -> last month of quarter
      dates <- month_end(as.Date(ISOdate(y,m,1)))
    } else if (freq == "M") {
      y <- as.numeric(substr(dates, 1, 4))
      m <- as.numeric(substr(dates, 5, 6))
      dates <- month_end(as.Date(ISOdate(y,m,1)))
    } else if (freq == "W" || freq == "D") {
      dates <- as.Date(dates, "%Y%m%d")
    } else {
      warning(paste("Unrecognized frequency",freq,"for series",id))
      # BUG FIX: skip this series. Previously execution fell through with
      # `dates` still a character vector, so the xts() call below errored
      # out instead of merely warning.
      next
    }
    x <- xts(rev(data), rev(dates))  # API returns newest observations first
    colnames(x) <- id
    results[[i]] <- x
  }
  if (length(results) == 0)
    return(NULL)
  na.trim(do.call(merge.xts, results), is.na="all")
}
|
# Auto-generated fuzz-test driver: calls esreg::G1_fun with a fixed,
# deliberately extreme argument list (a large negative integer `type` and a
# subnormal double `z` near zero) to probe for crashes/memory errors.
testlist <- list(type = -488447262L, z = 2.28752394024497e-321)
result <- do.call(esreg::G1_fun,testlist)
# Print the structure rather than the value so the run log stays compact.
str(result)
result <- do.call(esreg::G1_fun,testlist)
str(result) |
# The following functions are based on Dr. Peng cousera course ( R Programming ) Assignment 2.
#---------------------------------------------------------------------------------------------
# 1. makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
# Create a matrix wrapper that can cache its inverse.
#
# Returns a list of four closures (set/get/setinverse/getinverse) that share
# the matrix `x` and the cached inverse through their enclosing environment.
# Calling `set` replaces the matrix and invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL means "not computed yet"
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # new matrix: drop the stale cache
    },
    get = function() x,
    setinverse = function(inverse) inv <<- inverse,
    getinverse = function() inv
  )
}
# 2. cacheSolve: This function computes the inverse of the special "matrix" returned by
# makeCacheMatrix above. If the inverse has already been calculated
# (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
# Compute (or retrieve from cache) the inverse of the special "matrix"
# produced by makeCacheMatrix. On a cache hit a message is emitted and the
# stored inverse returned; otherwise solve() computes the inverse, the result
# is stored back into the cache, and then returned. Extra arguments are
# forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
| /cachematrix.R | no_license | mugishajean/ProgrammingAssignment2 | R | false | false | 1,107 | r | # The following functions are based on Dr. Peng cousera course ( R Programming ) Assignment 2.
#---------------------------------------------------------------------------------------------
# 1. makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(inverse) m <<- inverse
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
# 2. cacheSolve: This function computes the inverse of the special "matrix" returned by
# makeCacheMatrix above. If the inverse has already been calculated
# (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
|
#### 6.7 Lab 3: PCR and PLS Regression ####
# ISLR Chapter 6 lab: fit Principal Components Regression (PCR) and Partial
# Least Squares (PLS) to the Hitters data and compare test-set MSEs.
# Importing libraries
library(ISLR)
library(pls)
# Setting seed (so the cross-validation folds are reproducible)
set.seed(2)
#### 6.7.1 Principal Components Regression ####
# Removing all rows with missing values (Salary has NAs in Hitters)
Hitters = na.omit(Hitters)
# Fitting a PCR on data
pcr.fit = pcr(Salary ~ ., data=Hitters, scale=TRUE, validation="CV")
'
Comments:
scale=TRUE, standardizes each predictor
validation="CV", computes 10-Fold CV for each possible value of M
'
# Viewing the summary of the fit
summary(pcr.fit)
'
Both CV errors and variance are provided using the summary function
'
# Plotting the MSE (MSEP = mean squared error of prediction vs. # components)
validationplot(pcr.fit, val.type="MSEP")
# Setting the seed (again, so the train/test split is reproducible)
set.seed(1)
# Organizing features (X) and the target (y) variables;
# model.matrix expands factors to dummies and [,-1] drops the intercept column
x = model.matrix(Salary ~ ., Hitters)[,-1]
y = Hitters$Salary
# Creating a vector of row indices for the 50% training sample
train = sample(1:nrow(x), nrow(x)/2)
# Created a test vector by choosing indices that are not part of train
# (negative indexing selects the complement)
test = (-train)
# Getting the response variables for the test data
y.test = y[test]
# Fitting the scaled data using PCR and implementing CV on the training set
pcr.fit = pcr(Salary ~ ., data=Hitters, subset=train, scale=TRUE, validation="CV")
# Visualizing the errors
validationplot(pcr.fit, val.type="MSEP") # M = 7 gives the lowest CV error
# Predicting test data using M = 7
pcr.pred = predict(pcr.fit, x[test,], ncomp=7)
# Calculating the test MSE
mean((pcr.pred - y.test)^2)
# Applying PCR using M = 7 on entire dataset
pcr.fit = pcr(y ~ x, scale=TRUE, ncomp=7)
summary(pcr.fit)
#### 6.7.2 Partial Least Squares ####
# Fitting PLS on the training data
set.seed(1)
pls.fit = plsr(Salary ~ ., data=Hitters, subset=train, scale=TRUE, validation="CV")
summary(pls.fit)
# Predicting on the test data using the fitted PLSR with M = 2 components
pls.pred = predict(pls.fit, x[test,], ncomp=2)
# Calculating the test MSE
mean((pls.pred - y.test)^2)
# Fitting data with 2 components on the full dataset
pls.fit = plsr(Salary ~ ., data=Hitters, scale=TRUE, ncomp=2)
summary(pls.fit)
| /chapter06/06_Lab_Part3_tr.R | no_license | saadlaouadi/ISLR | R | false | false | 1,969 | r | #### 6.7 Lab 3: PCR and PLS Regression ####
# Importing libraries
library(ISLR)
library(pls)
# Setting seed
set.seed(2)
#### 6.7.1 Principal Components Regression ####
# Removing all rows with missing values
Hitters = na.omit(Hitters)
# Fitting a PCR on data
pcr.fit = pcr(Salary ~ ., data=Hitters, scale=TRUE, validation="CV")
'
Comments:
scale=TRUE, standardizes each predictor
validation="CV", computes 10-Fold CV for each possible value of M
'
# Viewing the summary of the fit
summary(pcr.fit)
'
Both CV errors and variance are provided using the summary function
'
# Plotting the MSE
validationplot(pcr.fit, val.type="MSEP")
# Setting the seed
set.seed(1)
# Organizing features (X) and the target (y) variables
x = model.matrix(Salary ~ ., Hitters)[,-1]
y = Hitters$Salary
# Creating a boolean vector for train data
train = sample(1:nrow(x), nrow(x)/2)
# Created a test vector by choosing indicies that are not part of train
test = (-train)
# Getting the response variables for the test data
y.test = y[test]
# Fitting the scaled data using PCR and implementing CV
pcr.fit = pcr(Salary ~ ., data=Hitters, subset=train, scale=TRUE, validation="CV")
# Visualizing the errors
validationplot(pcr.fit, val.type="MSEP") # M = 7 gives the lowest CV error
# Predicting test data using M = 7
pcr.pred = predict(pcr.fit, x[test,], ncomp=7)
# Calculating the MSE
mean((pcr.pred - y.test)^2)
# Applying PCR using M = 7 on entire dataset
pcr.fit = pcr(y ~ x, scale=TRUE, ncomp=7)
summary(pcr.fit)
#### 6.7.2 Partial Least Squares ####
# Fitting PLS on data including the response
set.seed(1)
pls.fit = plsr(Salary ~ ., data=Hitters, subset=train, scale=TRUE, validation="CV")
summary(pls.fit)
# Predicting on the test data using the fitted PLSR
pls.pred = predict(pls.fit, x[test,], ncomp=2)
# Calculating the MSE
mean((pls.pred - y.test)^2)
# Fitting data with 2 components
pls.fit = plsr(Salary ~ ., data=Hitters, scale=TRUE, ncomp=2)
summary(pls.fit)
|
# Configuration script: tunable parameters and input paths for a
# random-forest model fit (sourced by the main pipeline, not run standalone).
# Set parameters
result_path = '~/Documents/workspace/phospho_network/example/script_files/output/rfL_fit'
algos = 'rf' # en for elastic net, rf for random forest
alphas = seq(0,1,0.05) #required for elastic net
i_penalty = T # required for elastic net: use different penalty based on heat diffusion? (NOTE(review): prefer TRUE over T, which is reassignable)
ncore = 1 # number of cores used
outerfold = 10 # outer cross-validation folds
innerfold = 5 # inner cross-validation folds (parameter tuning)
scale_method = "0-1" # 0-1 or "scale"
directional = T # Used except pred_choose is flat. Should only upstream nodes in the pathway be considered?
pred_choose = 'hf' # method of choosing predictors : hf: by heat diffusion, connect: all connected nodes, direct: direct nodes, flat: all nodes in network
k = 0.001 # used if pred_choose is hf, a parameter to define the extent of predictors by keeping nodes that receive more heat than k*(heat_response_CNV)
max_level = Inf # used if pred_choose is up or connect, max level considered for predictor selection
#LSF setting (cluster job submission)
cluster = T # submit via LSF?
queue = 'short' # LSF queue name
time = '2:00' # wall-clock limit (HH:MM)
mem = '38000' # memory request
# Set inputs:
rna_filename = '~/Documents/workspace/phospho_network/example/script_files/rna_processed.csv'
cnv_filename = '~/Documents/workspace/phospho_network/example/script_files/cnv_processed.csv'
mut_filename = '~/Documents/workspace/phospho_network/example/script_files/rna_processed.csv' # NOTE(review): points at rna_processed.csv, not a mutation file -- confirm this is intentional
mis_mut_filename = '~/Documents/workspace/phospho_network/example/script_files/mutation_missense.csv'
heat_influence_file = '~/Documents/workspace/temp_files/heat_influence.csv' #used if pred_choose is hf
network_file = '~/Documents/workspace/temp_files/network.csv'
# target value input:
mdata_filename = '~/Documents/workspace/phospho_network/example/script_files/rppa_processed.csv'
| /src/scratch/test/configs/rf_fit.R | no_license | chrischen1/phospho_network | R | false | false | 1,859 | r | # Set parameters
result_path = '~/Documents/workspace/phospho_network/example/script_files/output/rfL_fit'
algos = 'rf' # en for elastic net, rf for random forest
alphas = seq(0,1,0.05) #required for elastic net
i_penalty = T # required for elastic net use different penalty based on heat diffusion?
ncore = 1 # number of cores used
outerfold = 10
innerfold = 5
scale_method = "0-1" # 0-1 or "scale"
directional = T # Used except pred_choose is flat. Should only upstream nodes in the pathway be considered?
pred_choose = 'hf' # method of choose different predictor : hf: by heat diffusion,connect:all connected nodes, direct: direct nodes, flat: all nodes in network
k = 0.001 # used if pred_choose is hf, a parameter to define the extend of predictors by keep nodes receive more heat than k*(heat_response_CNV)
max_level = Inf # used if pred_choose is up or connect, max level consdered for predictor selection
#LSF setting
cluster = T
queue = 'short'
time = '2:00'
mem = '38000'
# Set inputs:
rna_filename = '~/Documents/workspace/phospho_network/example/script_files/rna_processed.csv'
cnv_filename = '~/Documents/workspace/phospho_network/example/script_files/cnv_processed.csv'
mut_filename = '~/Documents/workspace/phospho_network/example/script_files/rna_processed.csv'
mis_mut_filename = '~/Documents/workspace/phospho_network/example/script_files/mutation_missense.csv'
heat_influence_file = '~/Documents/workspace/temp_files/heat_influence.csv' #used if pred_choose is hf
network_file = '~/Documents/workspace/temp_files/network.csv'
# target value input:
mdata_filename = '~/Documents/workspace/phospho_network/example/script_files/rppa_processed.csv'
|
#============================#
# ==== Do paper searches ====
#============================#
# - Keywords
# - #set will be used for things that need to be set for your specific
#   file structure to get the code to run. Things like data directories
# - #note I will use the tags #note for things I think are important
# - purpose of code: Take the top newspapers from each state and do a keyword search on bing to generate a weight matrix
#=================================#
# ==== load packages and data ====
#=================================#
# clear objects and workstation
# NOTE(review): rm(list = ls()) in a script wipes the caller's workspace and
# is generally discouraged; kept as-is to preserve current behavior.
rm(list = ls(pos = ".GlobalEnv"), pos = ".GlobalEnv")
options(scipen = 999)
cat("\f")
# set which computer you are using (TRUE = Nate's paths below)
opt_nate_com <- TRUE
# decide how long you want to wait on average between searches. and range for random draws
wait_total <- .5
wait_range <- .25
# set what rank papers to search
search_rank <- c(1:5)
# set year or years
# year_search <- c(2004:2016)
year_search <- c(2004:2017)
# reload previously completed searches instead of starting from scratch?
opt_load_xwalk <- TRUE
library(data.table)
library(easimple)
library(rvest)
library(httr)
library(RCurl)
if(opt_nate_com){
  #set
  # load top paper xwalk
  paper_xwalk <- fread("c:/Users/Nmath_000/Documents/Research/FIscal Policy Interdependence/data/int/top_state_papers_checked/most_recent/top_state_papers_checked.csv")
  # load state xwalk
  state_xwalk <- fread("C:/Users/Nmath_000/Documents/Research/FIscal Policy Interdependence/xwalks/state_names_google.csv")
  # path to existing search xwalk
  search_xwalk_path <- "C:/Users/Nmath_000/Documents/Research/FIscal Policy Interdependence/data/weight_matrices/bing_searches/most_recent/bing_paper_searches.csv"
  # out path for xwalk of results
  out_path <- "c:/Users/Nmath_000/Documents/Research/FIscal Policy Interdependence/data/weight_matrices/bing_searches/"
  # load special search xwalk (states needing hand-built query terms)
  special_search <- fread("c:/Users/Nmath_000/Documents/Research/FIscal Policy Interdependence/xwalks/special_search_bing.csv")
# if using faiths comp
}else{
  # #set
  # # load top paper xwalk
  # paper_xwalk <- fread("C:/Users/Faith/Documents/Nate/Fiscal Policy Interdependence/top_state_papers_12_29.csv")
  #
  # # load state xwalk
  # state_xwalk <- fread("C:/Users/Faith/Documents/Nate/Fiscal Policy Interdependence/state_names_google.csv")
  #
  # # path to existing search xwalk
  # search_xwalk_path <- "C:/Users/Faith/Documents/Nate/Fiscal Policy Interdependence/google_searches/most_recent/google_paper_searches.csv"
  #
  # # path out for results
  # out_path <- "C:/Users/Faith/Documents/Nate/Fiscal Policy Interdependence/google_searches/"
}
#set example url
# im not gonna try to recreate this from scratch. Go do a search of the terms and www.montgomeryadvertiser.com as an example site. We can edit that portion out to get the searches we need
example_url_l <- list()
example_url_sub_l <- list()
example_url_l[["2016"]] <- "https://www.bing.com/search?q=site%3awww.montgomeryadvertiser.com+(government+%7c+%22government+spending%22+%7c+%22public+policy%22+%7c+legislature+%7c+%22fiscal+policy%22+%7c+spending)+&filters=ex1%3a%22ez5_16801_17166%22&qs=n&sp=-1&pq=site%3awww.montgomeryadvertiser.com+(government+%7c+%22government+spending%22+%7c+%22public+policy%22+%7c+legislature+%7c+%22fiscal+policy%22+%7c+spending+%7c+test+%7c+other)+&sc=0-147&cvid=EA26EFA0EC56440E8B6BDA349F9D9291&qpvt=site%3awww.montgomeryadvertiser.com+(government+%7c+%22government+spending%22+%7c+%22public+policy%22+%7c+legislature+%7c+%22fiscal+policy%22+%7c+spending)+"
example_url_sub_l[["2016"]] <- "https://www.bing.com/search?q=site%3awww.montgomeryadvertiser.com+(government+%7c+%22government+spending%22+%7c+%22public+policy%22+%7c+legislature+%7c+%22fiscal+policy%22+%7c+spending)+%26+wisconsin+&filters=ex1%3a%22ez5_16801_17166%22&qs=n&sp=-1&pq=site%3awww.montgomeryadvertiser.com+(government+%7c+%22government+spending%22+%7c+%22public+policy%22+%7c+legislature+%7c+%22fiscal+policy%22+%7c+spending)+&sc=0-132&cvid=3A3E1337AD4B45C48E8E3A9DB2C28320&qpvt=site%3awww.montgomeryadvertiser.com+(government+%7c+%22government+spending%22+%7c+%22public+policy%22+%7c+legislature+%7c+%22fiscal+policy%22+%7c+spending)+%26+wisconsin+"
# format news links for how we want to do it (strip the protocol prefix)
paper_xwalk[, new_website2 := gsub("https\\:\\/\\/", "", new_website)]
paper_xwalk[, new_website2 := gsub("http\\:\\/\\/", "", new_website2)]
# time stamp (used to version the saved output)
time_stamp <- ea_timestamp()
#==========================#
# ==== make/load xwalk ====
#==========================#
# Build the full search crosswalk: one row per (newspaper, year) x search
# state, via a cartesian merge on a dummy key.
# make search xwalk
search_xwalk <- paper_xwalk[, c("newspaper", "state", "new_website2", "n_twit_fol", "twit_rank2", "trend_scaled", "trend_rank", "year"), with = FALSE]
search_xwalk[, merge := 1]
state_list <- state_xwalk[, "state", with = FALSE]
state_list[, merge := 1]
setnames(state_list, c("state"), c("search_state"))
search_xwalk <- merge(search_xwalk, state_list, by = "merge", allow.cartesian = TRUE)
search_xwalk[, merge := NULL]
# if we are loading prior results, do that
#note #dix need to do this part still once I have one started
if(opt_load_xwalk){
  # load up all the results we already have
  search_xwalk_done <- fread(search_xwalk_path)
  # merge on results. this way if we want to add more papers in the previous
  # script to paper_xwalk it will link up seamlessly
  # also dont take rank or trend from the done results. This way we can update the google trends
  # stuff and not have to redo identical searches
  search_xwalk_done <- search_xwalk_done[, -c("n_twit_fol", "twit_rank2", "trend_scaled", "trend_rank")]
  merge_by <- intersect(colnames(search_xwalk), colnames(search_xwalk_done))
  search_xwalk <- merge(search_xwalk, search_xwalk_done, by = merge_by, all.x = TRUE)
  # dont forget to check out this merge
  print("dont forget to check out this merge")
}else{
  # starting fresh: initialize the result columns the searches will fill in
  search_xwalk[, results := as.character(NA)]
  search_xwalk[, url_used := as.character(NA)]
  # BUG FIX: this line previously read `c[, flag_zero := ...]`, indexing the
  # base function c() and erroring whenever opt_load_xwalk was FALSE.
  search_xwalk[, flag_zero := as.character(NA)]
}
#===========================#
# ==== perform searches ====
#===========================#
# subset paper xwalk to those in the rank we are looking for
#note #fix doing this with twitter rank for now
paper_xwalk <- paper_xwalk[twit_rank2 %in% search_rank, ]
# subset paper xwalk to the years we are going to search
paper_xwalk <- paper_xwalk[year %in% year_search, ]
# now sort the xwalk. The reason I am doing this is so that
# the searcher will complete searches in the most useful order
# That is we start with only one rank and complete entire years at a time.
# that way we first get a workable year, then a workable time series, then we beef it up with less popular
# papers
setorder(paper_xwalk, twit_rank2, -year, state)
# loop over xwalk of paper websites
# i <- 32
# Main scraping loop: for each (newspaper, year) row, run the base Bing query
# against the paper's site, then one sub-query per other state, recording the
# reported hit counts in search_xwalk (data.table updates by reference).
# NOTE(review): 1:nrow(paper_xwalk) misbehaves if the table is empty
# (1:0 iterates twice); seq_len(nrow(paper_xwalk)) would be safer -- here the
# table is assumed non-empty after the subsetting above.
for(i in 1:nrow(paper_xwalk)){
  # keep track of year
  year_i <- paper_xwalk[i, year]
  # keep track of newspaper
  newspaper_i <- paper_xwalk[i, newspaper]
  # keep track of state
  state_i <- paper_xwalk[i, state]
  # print out where we are at
  print(paste0("starting on paper ", newspaper_i, " from ", state_i, " in ", year_i))
  # get site we need
  site_i <- paper_xwalk[i, new_website2]
  print(site_i)
  # see which states still need filling out (results not yet recorded)
  to_do <- search_xwalk[newspaper == newspaper_i & year == year_i & is.na(results)]
  # if none, skip this paper
  if(nrow(to_do) == 0) next()
  # check if the state it is in needs to be done (base query, no state term)
  if(state_i %chin% to_do$search_state){
    # if so do the search
    # get relevant url template for this year
    emample_url_i <- example_url_l[[as.character(year_i)]]
    # first change site: swap the example paper's domain for this paper's
    url_i <- gsub("www\\.montgomeryadvertiser\\.com", site_i, emample_url_i)
    # check if that is in the link. If not error because there is an issue
    if(!grepl("www\\.montgomeryadvertiser\\.com", emample_url_i)) stop("URL does not use www.montgomeryadvertiser.com")
    # go to page
    page_i <- read_html(url_i)
    # wait a randomized interval to avoid hammering / being rate-limited
    wait_l <- (wait_total- wait_range)
    wait_u <- (wait_total + wait_range)
    wait_time <- runif(1, wait_l,wait_u)
    Sys.sleep(wait_time)
    # set results xpath (Bing's result-count element)
    results_xp <- '//*[@id="b_tween"]/span[1]'
    # get results node
    results_node <- html_nodes(page_i, xpath=results_xp)
    # get results text
    results_i <- html_text(results_node, trim = TRUE)
    # check if results are zero #Note #fix
    # flag when the count element is missing or doesn't mention "results"
    flag_zero <- FALSE
    if(length(results_i)==0){
      flag_zero <- TRUE
      results_i <- 0
    }else{
      if(!grepl("Results|Result|result|results", results_i)){
        flag_zero <- TRUE
      }
    }
    # put it in the xwalk; state == search_state picks out the paper's
    # own-state row for this newspaper/year
    search_xwalk[newspaper == newspaper_i & state == search_state & year == year_i, results := results_i]
    # put url used in xwalk
    search_xwalk[newspaper == newspaper_i & state == search_state & year == year_i, url_used := url_i]
    #if we are getting zero results here then mark that this one has an issue and move on.
    # also skip all the subsearches so we don't waste time searching there.
    if(flag_zero){
      # mark it as an issue
      search_xwalk[ newspaper == newspaper_i & state == state_i & year == year_i, flag_zero := TRUE]
      # Now skip to the next loop because we don't want to spend time searching any of these
      next()
    }# close if flag zero
  }# close search for overall state
  # Now loop over all states that still need to be done
  # j <- 1
  for(j in 1:nrow(to_do)){
    # grab state
    state_j <- to_do[j, search_state]
    print(state_j)
    # if it's the same state as above skip it (already covered by base query)
    if(state_i == state_j) next()
    # grab search term and make url
    ## start if there are no special requirements and just using state name
    if(nrow(special_search[state == state_j & year == year_i,]) ==0){
      # grab state name, URL-encoding spaces
      state_search_j <- gsub(" ", "%20", state_j)
      # make URL from the with-state template for this year
      emample_url_sub_i <- example_url_sub_l[[as.character(year_i)]]
      url_ij <- gsub("www\\.montgomeryadvertiser\\.com", site_i, emample_url_sub_i)
      # check if that is in the link. If not error because there is an issue
      if(!grepl("www\\.montgomeryadvertiser\\.com", emample_url_sub_i)) stop("URL does not use www.montgomeryadvertiser.com")
      # the template's placeholder state term is "wisconsin"
      url_ij <- gsub("wisconsin", state_search_j, url_ij )
    # if this is a case with special search terms #note #fix
    }else{
      # grab starter URL with terms needed
      url_base <- special_search[state == state_j & year == year_i, special_url]
      # now just replace the site
      url_ij <- gsub("www\\.montgomeryadvertiser\\.com", site_i, url_base)
      # check if that is in the link. If not error because there is an issue
      if(!grepl("www\\.montgomeryadvertiser\\.com", url_base)) stop("URL does not use www.montgomeryadvertiser.com")
    }
    # go to page
    page_i <- read_html(url_ij)
    # wait a randomized interval, as above
    wait_l <- (wait_total- wait_range)
    wait_u <- (wait_total + wait_range)
    wait_time <- runif(1, wait_l,wait_u)
    Sys.sleep(wait_time)
    # set results xpath
    results_xp <- '//*[@id="b_tween"]/span[1]'
    # get results node
    results_node <- html_nodes(page_i, xpath=results_xp)
    # get results text
    results_ij <- html_text(results_node, trim = TRUE)
    # check if results are zero #Note #fix
    flag_zero <- FALSE
    if(length(results_ij)==0){
      flag_zero <- TRUE
      results_ij <- 0
    }else{
      if(!grepl("Results|Result|result|results", results_ij)){
        flag_zero <- TRUE
      }
    }
    # put it in the xwalk
    search_xwalk[newspaper == newspaper_i & search_state == state_j & year == year_i, results := results_ij]
    # put url used in xwalk
    search_xwalk[newspaper == newspaper_i & search_state == state_j & year == year_i, url_used := url_ij]
    #if we are getting zero results here then mark that this one has an issue and move on.
    if(flag_zero){
      # mark it as an issue
      search_xwalk[newspaper == newspaper_i & search_state == state_j & year == year_i, flag_zero := TRUE]
    }
  # close loop over states
  }
  # save the xwalk in case the system crashes or something. Don't want to lose the progress we have.
  ea_save(search_xwalk,
    in_val_path = out_path,
    in_val_file = "bing_paper_searches.csv",
    in_val_timestamp = time_stamp)
# close for loop over paper websites
}
| /automated_bing_search.R | no_license | Nathan-Mather/Mather-Code-Examples | R | false | false | 13,225 | r | #============================#
# ==== Do paper searches ====
#============================#
# - Keywords
# - #set will be used for things that need to be set for your specific
# file structure to get the code to run. Things like data directories
# - #note I will use the tags #note for things I think are important
# - purpose of code: Take the top newspapers from each state and do a keyword serach on bing to generate a weight matrix
#=================================#
# ==== laod packages and data ====
#=================================#
# clear objecrts and worksatation
rm(list = ls(pos = ".GlobalEnv"), pos = ".GlobalEnv")
options(scipen = 999)
cat("\f")
# set which computer you are using
opt_nate_com <- TRUE
# decide how long you want to wait on average between searches. and range for random draws
wait_total <- .5
wait_range <- .25
# set what rank papers to serach
search_rank <- c(1:5)
# set year or years
# year_search <- c(2004:2016)
year_search <- c(2004:2017)
opt_load_xwalk <- TRUE
library(data.table)
library(easimple)
library(rvest)
library(httr)
library(RCurl)
if(opt_nate_com){
#set
# load top paper xwalk
paper_xwalk <- fread("c:/Users/Nmath_000/Documents/Research/FIscal Policy Interdependence/data/int/top_state_papers_checked/most_recent/top_state_papers_checked.csv")
# load state xwalk
state_xwalk <- fread("C:/Users/Nmath_000/Documents/Research/FIscal Policy Interdependence/xwalks/state_names_google.csv")
# path to existing search xwwalk
search_xwalk_path <- "C:/Users/Nmath_000/Documents/Research/FIscal Policy Interdependence/data/weight_matrices/bing_searches/most_recent/bing_paper_searches.csv"
# out path for xwalk of results
out_path <- "c:/Users/Nmath_000/Documents/Research/FIscal Policy Interdependence/data/weight_matrices/bing_searches/"
# load special search xwlk
special_search <- fread("c:/Users/Nmath_000/Documents/Research/FIscal Policy Interdependence/xwalks/special_search_bing.csv")
# if using faiths comp
}else{
# #set
# # load top paper xwalk
# paper_xwalk <- fread("C:/Users/Faith/Documents/Nate/Fiscal Policy Interdependence/top_state_papers_12_29.csv")
#
# # load state xwalk
# state_xwalk <- fread("C:/Users/Faith/Documents/Nate/Fiscal Policy Interdependence/state_names_google.csv")
#
# # path to existing search xwalk
# search_xwalk_path <- "C:/Users/Faith/Documents/Nate/Fiscal Policy Interdependence/google_searches/most_recent/google_paper_searches.csv"
#
# # path out for results
# out_path <- "C:/Users/Faith/Documents/Nate/Fiscal Policy Interdependence/google_searches/"
}
#set example url
# im not gonna try to recreate this from scratch. Go do a search of the terms and www.montgomeryadvertiser.com as an example site. We can edit that portion out to get the searches we need
example_url_l <- list()
example_url_sub_l <- list()
example_url_l[["2016"]] <- "https://www.bing.com/search?q=site%3awww.montgomeryadvertiser.com+(government+%7c+%22government+spending%22+%7c+%22public+policy%22+%7c+legislature+%7c+%22fiscal+policy%22+%7c+spending)+&filters=ex1%3a%22ez5_16801_17166%22&qs=n&sp=-1&pq=site%3awww.montgomeryadvertiser.com+(government+%7c+%22government+spending%22+%7c+%22public+policy%22+%7c+legislature+%7c+%22fiscal+policy%22+%7c+spending+%7c+test+%7c+other)+&sc=0-147&cvid=EA26EFA0EC56440E8B6BDA349F9D9291&qpvt=site%3awww.montgomeryadvertiser.com+(government+%7c+%22government+spending%22+%7c+%22public+policy%22+%7c+legislature+%7c+%22fiscal+policy%22+%7c+spending)+"
example_url_sub_l[["2016"]] <- "https://www.bing.com/search?q=site%3awww.montgomeryadvertiser.com+(government+%7c+%22government+spending%22+%7c+%22public+policy%22+%7c+legislature+%7c+%22fiscal+policy%22+%7c+spending)+%26+wisconsin+&filters=ex1%3a%22ez5_16801_17166%22&qs=n&sp=-1&pq=site%3awww.montgomeryadvertiser.com+(government+%7c+%22government+spending%22+%7c+%22public+policy%22+%7c+legislature+%7c+%22fiscal+policy%22+%7c+spending)+&sc=0-132&cvid=3A3E1337AD4B45C48E8E3A9DB2C28320&qpvt=site%3awww.montgomeryadvertiser.com+(government+%7c+%22government+spending%22+%7c+%22public+policy%22+%7c+legislature+%7c+%22fiscal+policy%22+%7c+spending)+%26+wisconsin+"
# format news links for how we want to do it: strip the protocol so the bare
# domain can be spliced into the Bing "site:" query later on
paper_xwalk[, new_website2 := gsub("https\\:\\/\\/", "", new_website)]
paper_xwalk[, new_website2 := gsub("http\\:\\/\\/", "", new_website2)]
# time stamp used for versioned output files
time_stamp <- ea_timestamp()
#==========================#
# ==== make/load xwalk ====
#==========================#
# make search xwalk: one row per (newspaper x search_state) combination,
# built as a cartesian join on a constant dummy key "merge"
search_xwalk <- paper_xwalk[, c("newspaper", "state", "new_website2", "n_twit_fol", "twit_rank2", "trend_scaled", "trend_rank", "year"), with = FALSE]
search_xwalk[, merge := 1]
state_list <- state_xwalk[, "state", with = FALSE]
state_list[, merge := 1]
setnames(state_list, c("state"), c("search_state"))
search_xwalk <- merge(search_xwalk, state_list, by = "merge", allow.cartesian = TRUE)
search_xwalk[, merge := NULL]
# if we are loading it, do that
#note #fix need to do this part still once I have one started
# Either merge in previously-completed searches from disk (so the scraper
# can resume where it left off) or initialize the result columns as empty.
if(opt_load_xwalk){
# load up all the results we already have
search_xwalk_done <- fread(search_xwalk_path)
# merge on results. this way if we want to add more papers in the previous script to paper_xwalk
# it will link up seamlessly
# also dont take rank or trend from the done results. This way we can update the google trends
# stuff and not have to redo identical searches
search_xwalk_done <- search_xwalk_done[, -c("n_twit_fol", "twit_rank2", "trend_scaled", "trend_rank")]
merge_by <- intersect(colnames(search_xwalk), colnames(search_xwalk_done))
search_xwalk <- merge(search_xwalk, search_xwalk_done, by = merge_by, all.x = TRUE)
# dont forget to check out this merge
print("dont forget to check out this merge")
}else{
# starting fresh: create empty result columns with character type
search_xwalk[, results := NA_character_]
search_xwalk[, url_used := NA_character_]
# BUG FIX: this line previously read `c[, flag_zero := ...]`, indexing the
# base function `c` instead of the search_xwalk data.table (runtime error)
search_xwalk[, flag_zero := NA_character_]
}
#===========================#
# ==== perform searches ====
#===========================#
# subset paper xwalk to those in the rank we are looking for
#note #fix doing this with twitter rank for now
paper_xwalk <- paper_xwalk[twit_rank2 %in% search_rank, ]
# subset paper xwalk to the years we are going to search
paper_xwalk <- paper_xwalk[year %in% year_search, ]
# now sort the xwalk. The reason I am doing this is so that
# the searcher will complete searches in the most useful order
# That is we start with only one rank and complete entire years at a time.
# that way we first get a workable year, then a workable time series, then we beef it up with less popular
# papers
setorder(paper_xwalk, twit_rank2, -year, state)
# Main loop: for each newspaper/year, run one Bing search restricted to the
# paper's website for its own state, then one sub-search per remaining state.
# Results (the "N Results" text scraped from the page) are written into
# search_xwalk and the whole table is re-saved after every newspaper.
# i <- 32
for(i in 1:nrow(paper_xwalk)){
# keep track of year
year_i <- paper_xwalk[i, year]
# keep track of newspaper
newspaper_i <- paper_xwalk[i, newspaper]
# keep track of state
state_i <- paper_xwalk[i, state]
# print out where we are at
print(paste0("starting on paper ", newspaper_i, " from ", state_i, " in ", year_i))
# get site we need
site_i <- paper_xwalk[i, new_website2]
print(site_i)
# see which states still need filling out (rows with no scraped result yet)
to_do <- search_xwalk[newspaper == newspaper_i & year == year_i & is.na(results)]
# if none, skip this paper
if(nrow(to_do) == 0) next()
# check if the paper's own home state still needs to be done
if(state_i %chin% to_do$search_state){
# if so do the search
# get relevant template url for this year
emample_url_i <- example_url_l[[as.character(year_i)]]
# first change site: splice this paper's domain over the template's domain
url_i <- gsub("www\\.montgomeryadvertiser\\.com", site_i, emample_url_i)
# sanity check that the TEMPLATE contains the placeholder domain;
# if not the gsub above did nothing and the search would be wrong
if(!grepl("www\\.montgomeryadvertiser\\.com", emample_url_i)) stop("URL does not use www.montgomeryadvertiser.com")
# go to page
page_i <- read_html(url_i)
# wait a random time (wait_total +/- wait_range) between requests
wait_l <- (wait_total- wait_range)
wait_u <- (wait_total + wait_range)
wait_time <- runif(1, wait_l,wait_u)
Sys.sleep(wait_time)
# xpath of Bing's "N Results" counter element
results_xp <- '//*[@id="b_tween"]/span[1]'
# get results node
results_node <- html_nodes(page_i, xpath=results_xp)
# get results text
results_i <- html_text(results_node, trim = TRUE)
# flag_zero = TRUE when the counter element is missing or its text does
# not contain a "Results" marker (i.e. the scrape likely failed)
flag_zero <- FALSE
if(length(results_i)==0){
flag_zero <- TRUE
results_i <- 0
}else{
if(!grepl("Results|Result|result|results", results_i)){
flag_zero <- TRUE
}
}
# write the result into the self-state row: `state == search_state`
# selects the single row where the search state equals the paper's home state
search_xwalk[newspaper == newspaper_i & state == search_state & year == year_i, results := results_i]
# put url used in xwalk
search_xwalk[newspaper == newspaper_i & state == search_state & year == year_i, url_used := url_i]
#if we are getting zero results here then mark that this one has an issue and move on.
# also fill in all the subsearches as having an issue as well so we don't waste time searching there.
if(flag_zero){
# mark it as an issue -- `state == state_i` is true for every row of this
# newspaper, so the flag is set for ALL of its search states at once
search_xwalk[ newspaper == newspaper_i & state == state_i & year == year_i, flag_zero := TRUE]
# Now skip to the next loop because we don't want to spend time searching any of these
next()
}# close if flag zero
}# close search for overall state
# Now loop over all states that still need to be done
# j <- 1
for(j in 1:nrow(to_do)){
# grab state
state_j <- to_do[j, search_state]
print(state_j)
# if it's the same state as above skip it (already handled)
if(state_i == state_j) next()
# grab search term and make url
## start if there are no special requirements and just using state name
if(nrow(special_search[state == state_j & year == year_i,]) ==0){
# url-encode spaces in the state name
state_search_j <- gsub(" ", "%20", state_j)
# make URL from the per-year sub-search template
emample_url_sub_i <- example_url_sub_l[[as.character(year_i)]]
url_ij <- gsub("www\\.montgomeryadvertiser\\.com", site_i, emample_url_sub_i)
# sanity check that the template contains the placeholder domain
if(!grepl("www\\.montgomeryadvertiser\\.com", emample_url_sub_i)) stop("URL does not use www.montgomeryadvertiser.com")
url_ij <- gsub("wisconsin", state_search_j, url_ij )
# if this is a case with special search terms #note #fix
}else{
# grab starter URL with terms needed
url_base <- special_search[state == state_j & year == year_i, special_url]
# now just replace the site
url_ij <- gsub("www\\.montgomeryadvertiser\\.com", site_i, url_base)
# sanity check that the special URL contains the placeholder domain
if(!grepl("www\\.montgomeryadvertiser\\.com", url_base)) stop("URL does not use www.montgomeryadvertiser.com")
}
# go to page
page_i <- read_html(url_ij)
# wait a random time between requests
wait_l <- (wait_total- wait_range)
wait_u <- (wait_total + wait_range)
wait_time <- runif(1, wait_l,wait_u)
Sys.sleep(wait_time)
# xpath of Bing's "N Results" counter element
results_xp <- '//*[@id="b_tween"]/span[1]'
# get results node
results_node <- html_nodes(page_i, xpath=results_xp)
# get results text
results_ij <- html_text(results_node, trim = TRUE)
# same zero/failed-scrape detection as for the self-state search above
flag_zero <- FALSE
if(length(results_ij)==0){
flag_zero <- TRUE
results_ij <- 0
}else{
if(!grepl("Results|Result|result|results", results_ij)){
flag_zero <- TRUE
}
}
# put it in the xwalk (row for this newspaper/search-state/year)
search_xwalk[newspaper == newspaper_i & search_state == state_j & year == year_i, results := results_ij]
# put url used in xwalk
search_xwalk[newspaper == newspaper_i & search_state == state_j & year == year_i, url_used := url_ij]
#if we are getting zero results here then mark that this one has an issue and move on.
# also fill in all the subsearches as having an issue as well so we don't waste time searching there.
if(flag_zero){
# mark it as an issue
search_xwalk[newspaper == newspaper_i & search_state == state_j & year == year_i, flag_zero := TRUE]
}
# close loop over states
}
# save the xwalk in case the system crashes or something. Don't want to lose the progress we have.
ea_save(search_xwalk,
in_val_path = out_path,
in_val_file = "bing_paper_searches.csv",
in_val_timestamp = time_stamp)
# close for loop over paper websites
}
|
#' Render / Convert PDF
#'
#' High quality conversion of pdf page(s) to png, jpeg or tiff format, or render into a
#' raw bitmap array for further processing in R. This functionality is only available if
#' libpoppler was compiled with cairo support.
#'
#' @export
#' @rdname pdf_render_page
#' @param pdf file path or raw vector with pdf data
#' @param page which page to render
#' @param numeric convert raw output to (0-1) real values
#' @param dpi resolution (dots per inch) to render
#' @param opw owner password
#' @param upw user password
#' @family pdftools
#' @aliases render
#' @examples # Rendering should be supported on all platforms now
#' if(poppler_config()$can_render){
#'
#' # convert few pages to png
#' file.copy(file.path(Sys.getenv("R_DOC_DIR"), "NEWS.pdf"), "news.pdf")
#' pdf_convert("news.pdf", pages = 1:3)
#'
#' # render into raw bitmap
#' bitmap <- pdf_render_page("news.pdf")
#'
#' # save to bitmap formats
#' png::writePNG(bitmap, "page.png")
#' jpeg::writeJPEG(bitmap, "page.jpeg")
#' webp::write_webp(bitmap, "page.webp")
#'
#' # Higher quality
#' bitmap <- pdf_render_page("news.pdf", page = 1, dpi = 300)
#' png::writePNG(bitmap, "page.png")
#'
#' # slightly more efficient
#' bitmap_raw <- pdf_render_page("news.pdf", numeric = FALSE)
#' webp::write_webp(bitmap_raw, "page.webp")
#' }
pdf_render_page <- function(pdf, page = 1, dpi = 72, numeric = FALSE, opw = "", upw = "") {
  # Render a single page via libpoppler into a channel-first bitmap array
  bitmap <- poppler_render_page(loadfile(pdf), page, dpi, opw, upw)
  # A 4-channel result arrives as ARGB planes; reorder them to RGBA
  if (identical(dim(bitmap)[1], 4L)) {
    bitmap <- bitmap[c(3, 2, 1, 4), , , drop = FALSE]
  }
  if (isTRUE(numeric)) {
    # Rescale raw bytes to doubles in [0, 1] (keeping dimensions),
    # then transpose the array before returning
    scaled <- structure(as.numeric(bitmap) / 255, dim = dim(bitmap))
    return(aperm(scaled))
  }
  class(bitmap) <- c("bitmap", "rgba")
  bitmap
}
#' @export
#' @rdname pdf_render_page
#' @param format string with output format such as `"png"` or `"jpeg"`. Must be equal
#' to one of `poppler_config()$supported_image_formats`.
#' @param pages vector with one-based page numbers to render. `NULL` means all pages.
#' @param filenames vector of equal length to `pages` with output filenames. May also be
#' a format string which is expanded using `pages` and `format` respectively.
pdf_convert <- function(pdf, format = "png", pages = NULL, filenames = NULL, dpi = 72, opw = "", upw = ""){
  config <- poppler_config()
  if(!config$can_render || !length(config$supported_image_formats))
    stop("Your version of libpoppler does not support rendering")
  # Validate the requested output format against what libpoppler supports
  format <- match.arg(format, config$supported_image_formats)
  # Default to converting every page of the document
  if(is.null(pages))
    pages <- seq_len(pdf_info(pdf, opw = opw, upw = upw)$pages)
  if(!is.numeric(pages) || !length(pages))
    stop("Argument 'pages' must be a one-indexed vector of page numbers")
  # BUG FIX: 'filenames' used to be unconditionally overwritten, silently
  # ignoring any user-supplied value despite the documented behavior.
  # Only generate default names when none were given; expand a single
  # filename as a sprintf template (page number, then format) otherwise.
  if(is.null(filenames)){
    input <- sub(".pdf", "", basename(pdf), fixed = TRUE)
    filenames <- sprintf("%s_%d.%s", input, pages, format)
  } else if(length(filenames) == 1 && length(pages) > 1){
    filenames <- sprintf(filenames, pages, format)
  }
  if(length(filenames) != length(pages))
    stop("Length of 'filenames' must be one or equal to 'pages'")
  poppler_convert(loadfile(pdf), format, pages, filenames, dpi, opw, upw)
}
#' @export
#' @rdname pdf_render_page
poppler_config <- function(){
	# Capabilities of the linked libpoppler build (e.g. can_render and
	# supported_image_formats) as reported by the compiled layer.
	get_poppler_config()
}
| /R/render.R | no_license | xtmgah/pdftools | R | false | false | 3,125 | r | #' Render / Convert PDF
#'
#' High quality conversion of pdf page(s) to png, jpeg or tiff format, or render into a
#' raw bitmap array for further processing in R. This functionality is only available if
#' libpoppler was compiled with cairo support.
#'
#' @export
#' @rdname pdf_render_page
#' @param pdf file path or raw vector with pdf data
#' @param page which page to render
#' @param numeric convert raw output to (0-1) real values
#' @param dpi resolution (dots per inch) to render
#' @param opw owner password
#' @param upw user password
#' @family pdftools
#' @aliases render
#' @examples # Rendering should be supported on all platforms now
#' if(poppler_config()$can_render){
#'
#' # convert few pages to png
#' file.copy(file.path(Sys.getenv("R_DOC_DIR"), "NEWS.pdf"), "news.pdf")
#' pdf_convert("news.pdf", pages = 1:3)
#'
#' # render into raw bitmap
#' bitmap <- pdf_render_page("news.pdf")
#'
#' # save to bitmap formats
#' png::writePNG(bitmap, "page.png")
#' jpeg::writeJPEG(bitmap, "page.jpeg")
#' webp::write_webp(bitmap, "page.webp")
#'
#' # Higher quality
#' bitmap <- pdf_render_page("news.pdf", page = 1, dpi = 300)
#' png::writePNG(bitmap, "page.png")
#'
#' # slightly more efficient
#' bitmap_raw <- pdf_render_page("news.pdf", numeric = FALSE)
#' webp::write_webp(bitmap_raw, "page.webp")
#' }
pdf_render_page<- function(pdf, page = 1, dpi = 72, numeric = FALSE, opw = "", upw = "") {
# render one page via libpoppler into a channel-first bitmap array
out <- poppler_render_page(loadfile(pdf), page, dpi, opw, upw)
# a 4-channel result arrives as ARGB planes; reorder to RGBA
if(identical(dim(out)[1], 4L)){
out <- out[c(3,2,1,4),,, drop = FALSE] ## convert ARGB to RGBA
}
if(isTRUE(numeric)){
# rescale raw bytes to [0,1] doubles (keeping dims), then transpose
out <- structure(as.numeric(out)/255, dim = dim(out))
out <- aperm(out)
} else {
class(out) <- c("bitmap", "rgba")
}
return(out)
}
#' @export
#' @rdname pdf_render_page
#' @param format string with output format such as `"png"` or `"jpeg"`. Must be equal
#' to one of `poppler_config()$supported_image_formats`.
#' @param pages vector with one-based page numbers to render. `NULL` means all pages.
#' @param filenames vector of equal length to `pages` with output filenames. May also be
#' a format string which is expanded using `pages` and `format` respectively.
pdf_convert <- function(pdf, format = "png", pages = NULL, filenames = NULL , dpi = 72, opw = "", upw = ""){
config <- poppler_config()
if(!config$can_render || !length(config$supported_image_formats))
stop("You version of libppoppler does not support rendering")
format <- match.arg(format, poppler_config()$supported_image_formats)
if(is.null(pages))
pages <- seq_len(pdf_info(pdf, opw = opw, upw = upw)$pages)
if(!is.numeric(pages) || !length(pages))
stop("Argument 'pages' must be a one-indexed vector of page numbers")
input <- sub(".pdf", "", basename(pdf), fixed = TRUE)
filenames <- sprintf("%s_%d.%s", input, pages, format)
if(length(filenames) != length(pages))
stop("Length of 'filenames' must be one or equal to 'pages'")
poppler_convert(loadfile(pdf), format, pages, filenames, dpi, opw, upw)
}
#' @export
#' @rdname pdf_render_page
poppler_config <- function(){
get_poppler_config()
}
|
## Part3
#dev.off()
# NOTE(review): rm(list=ls()) wipes the caller's workspace; acceptable for a
# standalone homework script, but avoid when sourcing from other code.
rm(list=ls())
# read data: one numeric column per file (controls vs cases)
dtcon <- read.table("Sham.data", header=TRUE)
dtcas <- read.table("PAB.data", header=TRUE)
# give both one-column frames the same column name so rbind() lines up
colnames(dtcon) <- 'x'
colnames(dtcas) <- 'x'
trlabel1 <- rep('control', nrow(dtcon))
trlabel2 <- rep('case', nrow(dtcas))
#stack up data for convenience
dtall <- rbind(dtcon, dtcas)
labelall <- c(trlabel1, trlabel2)
# numeric labels: 0 = control, 1 = case
labelallval <- rep(0, length(labelall))
labelallval[labelall == "control"] <- 0
labelallval[labelall == "case"] <- 1
# NOTE(review): id1/id0 and detcasek below are computed but never used later
id1 <- (labelallval == 1)
id0 <- (labelallval == 0)
#set the direction for testing
detcasek <- 1
if (mean(dtcon[,1]) > mean(dtcas[,1])) {
detcasek <- 2
}
#prepare threshold values: evenly spaced cut points over the observed range
minv <- min( dtall[,1] )
maxv <- max( dtall[,1] )
TotLen <- 10
threshall <- seq(minv,maxv,length.out=TotLen)
tprall <- rep(0,TotLen) # Sensitivity
fnrall <- rep(0,TotLen)
fprall <- rep(0,TotLen) # 1 - Specificity
# sweep the threshold and tabulate the 2x2 confusion counts at each cut;
# a point is predicted "case" (1) when x is at or above the threshold
for( i in 1:TotLen) {
# to do some implementations here
pred <- ifelse(dtall$x >= threshall[i], 1, 0)
TP <- sum(pred == 1 & labelallval == 1)
FP <- sum(pred == 1 & labelallval == 0)
FN <- sum(pred == 0 & labelallval == 1)
TN <- sum(pred == 0 & labelallval == 0)
tprall[i] <- TP / (TP + FN) # Sensitivity
fprall[i] <- FP / (FP + TN) # 1 - Specificity
fnrall[i] <- FN / (TP + FN)
}
# saving the plot
#show ROC curve
plot(fprall, tprall, type="o", xlim=c(0,1), ylim=c(0,1), xlab="1-specificity", ylab="sensitivity", cex.lab=1.5)
#dev.copy(png,"myROC.png")
#dev.off()
| /R/DataMining/HW2/HW2_Prob_3_Rscript.R | no_license | HwangBoSungHun/Undergraduate_homework | R | false | false | 1,469 | r | ## Part3
#dev.off()
rm(list=ls())
# read data
dtcon <- read.table("Sham.data", header=TRUE)
dtcas <- read.table("PAB.data", header=TRUE)
colnames(dtcon) <- 'x'
colnames(dtcas) <- 'x'
trlabel1 <- rep('control', nrow(dtcon))
trlabel2 <- rep('case', nrow(dtcas))
#stack up data for convenience
dtall <- rbind(dtcon, dtcas)
labelall <- c(trlabel1, trlabel2)
labelallval <- rep(0, length(labelall))
labelallval[labelall == "control"] <- 0
labelallval[labelall == "case"] <- 1
id1 <- (labelallval == 1)
id0 <- (labelallval == 0)
#set the direction for testing
detcasek <- 1
if (mean(dtcon[,1]) > mean(dtcas[,1])) {
detcasek <- 2
}
#prepare threshold values
minv <- min( dtall[,1] )
maxv <- max( dtall[,1] )
TotLen <- 10
threshall <- seq(minv,maxv,length.out=TotLen)
tprall <- rep(0,TotLen) # Sensitivity
fnrall <- rep(0,TotLen)
fprall <- rep(0,TotLen) # 1 - Specificity
for( i in 1:TotLen) {
# to do some implementations here
pred <- ifelse(dtall$x >= threshall[i], 1, 0)
TP <- sum(pred == 1 & labelallval == 1)
FP <- sum(pred == 1 & labelallval == 0)
FN <- sum(pred == 0 & labelallval == 1)
TN <- sum(pred == 0 & labelallval == 0)
tprall[i] <- TP / (TP + FN) # Sensitivity
fprall[i] <- FP / (FP + TN) # 1 - Specificity
fnrall[i] <- FN / (TP + FN)
}
# saving the plot
#show ROC curve
plot(fprall, tprall, type="o", xlim=c(0,1), ylim=c(0,1), xlab="1-specificity", ylab="sensitivity", cex.lab=1.5)
#dev.copy(png,"myROC.png")
#dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MFO_df.R
\docType{data}
\name{MFO_df}
\alias{MFO_df}
\title{MFO test dataframe}
\format{
An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 45 rows and 8 columns.
}
\usage{
data(MFO_df)
}
\description{
A dataframe with the results of a test to assess MFO metabolism
}
\section{Variables}{
\describe{
\item{Time}{test time, in minutes}
\item{HR}{heart rate, in beats/min}
\item{VO2}{volume of oxygen consumption, in ml/min}
\item{VCO2}{volume of exhaled carbon dioxide, in ml/min}
\item{RER}{respiratory exchange ratio}
\item{VE}{ventilation, in l/min}
\item{PETCO2}{end-tidal carbon dioxide pressure, in mmHg}
}
}
\keyword{datasets}
| /man/MFO_df.Rd | permissive | JorgeDelro/MFO | R | false | true | 746 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MFO_df.R
\docType{data}
\name{MFO_df}
\alias{MFO_df}
\title{MFO test dataframe}
\format{
An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 45 rows and 8 columns.
}
\usage{
data(MFO_df)
}
\description{
A dataframe with the results of a test to assess MFO metabolism
}
\section{Variables}{
\describe{
\item{Time}{test time, in minutes}
\item{HR}{heart rate, in beats/min}
\item{VO2}{volume of oxygen consumption, in ml/min}
\item{VCO2}{volume of exhaled carbon dioxide, in ml/min}
\item{RER}{respiratory exchange ratio}
\item{VE}{ventilation, in l/min}
\item{PETCO2}{end-tidal carbon dioxide pressure, in mmHg}
}
}
\keyword{datasets}
|
#!/usr/bin/env Rscript
# boxiang liu
# durga
# find the top eqtls
# NOTE(review): this script uses fread/setcolorder (data.table),
# str_replace/str_split_fixed (stringr) and the %>% pipe (magrittr), but
# loads none of those packages; the list-style commandArgs(T,T)/args$input
# usage looks like R.utils::commandArgs(asValues = TRUE). Presumably the
# packages are attached by a wrapper or profile -- confirm before running
# this standalone.
# command args:
args=commandArgs(T,T)
gtex_file=args$input
output_dir=args$outdir
num_sig=as.numeric(args$num)
# gtex_file='/mnt/lab_data/montgomery/shared/datasets/gtex/GTEx_Analysis_2015-01-12/eqtl_updated_annotation/v6_fastQTL_FOR_QC_ONLY/Uterus_Analysis.v6.FOR_QC_ONLY.egenes.txt.gz'
# output_dir='/srv/persistent/bliu2/HCASMC_eQTL/processed_data/160615/compare_hcasmc_and_gtex/'
# num_sig=290
# read input (suppress warnings while reading through the zcat pipe, then restore):
message(gtex_file)
options(warn=-1)
gtex=fread(sprintf('zcat %s',gtex_file),header=T)
options(warn=0)
# get tissue name by stripping the fixed GTEx filename suffix:
tissue=basename(gtex_file)%>%str_replace('_Analysis.v6.FOR_QC_ONLY.egenes.txt.gz','')
# remove sex chromosome:
gtex=gtex[gtex$gene_chr!='X',]
# get top eQTLs: keep the num_sig rows with the smallest q-values
# (ties at the num_sig-th q-value can yield slightly more rows)
top_eqtl=gtex[gtex$qval%in%sort(gtex$qval)[1:num_sig],.(variant_id,gene_id)]
# variant_id is underscore-delimited (chr_pos_ref_alt_build); take chr and pos
chr_pos=str_split_fixed(top_eqtl$variant_id,'_',5)[,c(1,2)]
top_eqtl$chr=chr_pos[,1]
top_eqtl$pos=chr_pos[,2]
setcolorder(top_eqtl,c('chr','pos','gene_id','variant_id'))
top_eqtl$tissue=tissue
# write output: one tab-separated file per tissue, no header or row names
output_file=paste0(output_dir,'/',tissue,'.txt')
write.table(top_eqtl,file=output_file,quote=F,col.names=F,row.names=F,sep='\t')
| /160615/get_top_eqtls.gtex.R | no_license | chanibravo/hcasmc_eqtl | R | false | false | 1,190 | r | #!/usr/bin/env Rscript
# boxiang liu
# durga
# find the top eqtls
# command args:
args=commandArgs(T,T)
gtex_file=args$input
output_dir=args$outdir
num_sig=as.numeric(args$num)
# gtex_file='/mnt/lab_data/montgomery/shared/datasets/gtex/GTEx_Analysis_2015-01-12/eqtl_updated_annotation/v6_fastQTL_FOR_QC_ONLY/Uterus_Analysis.v6.FOR_QC_ONLY.egenes.txt.gz'
# output_dir='/srv/persistent/bliu2/HCASMC_eQTL/processed_data/160615/compare_hcasmc_and_gtex/'
# num_sig=290
# read input:
message(gtex_file)
options(warn=-1)
gtex=fread(sprintf('zcat %s',gtex_file),header=T)
options(warn=0)
# get tissue name:
tissue=basename(gtex_file)%>%str_replace('_Analysis.v6.FOR_QC_ONLY.egenes.txt.gz','')
# remove sex chromosome:
gtex=gtex[gtex$gene_chr!='X',]
# get top eQTLs:
top_eqtl=gtex[gtex$qval%in%sort(gtex$qval)[1:num_sig],.(variant_id,gene_id)]
chr_pos=str_split_fixed(top_eqtl$variant_id,'_',5)[,c(1,2)]
top_eqtl$chr=chr_pos[,1]
top_eqtl$pos=chr_pos[,2]
setcolorder(top_eqtl,c('chr','pos','gene_id','variant_id'))
top_eqtl$tissue=tissue
# write output:
output_file=paste0(output_dir,'/',tissue,'.txt')
write.table(top_eqtl,file=output_file,quote=F,col.names=F,row.names=F,sep='\t')
|
# Triple product Q = A * B * C for one SDPT3 block, optionally symmetrized
# and optionally restricted to a nonzero pattern (nzlistQ).
# NOTE(review): the closing brace of this function sits on the next line.
Prod3 <- function(blk, p, A, B, C, sym=0, nzlistQ=NULL){
# NOTE(review): the third element repeats is.list(B); presumably this was
# meant to be is.list(C) -- verify against the upstream SDPT3 source.
checkcell <- c(is.list(A), is.list(B), is.list(B))
if(!is.null(nzlistQ)){
checkcell <- c(checkcell, is.list(nzlistQ))
}else{
nzlistQ <- Inf
}
##
# Proceed only when none of the inputs are lists (all checkcell entries
# FALSE). NOTE(review): if every input IS a list, or the block type is not
# "s"/"q"/"l"/"u", Q is never assigned and return(Q) errors.
if(any((checkcell - 1) != 0)){
if(blk[[p,1]] == "s"){
# semidefinite block: dense matrix products via Prod2, or a sparse
# product restricted to the nzlistQ pattern
len <- nrow(as.matrix(nzlistQ))
len2 <- ncol(as.matrix(nzlistQ))
if(is.null(len)){
len <- 0
}
if(is.null(len2)){
len2 <- 0
}
if(len == 0){
nzlistQ <- Inf
len2 <- 1
}
if(len2 == 1 & any(is.infinite(nzlistQ))){
# no nonzero pattern supplied: plain (A*B)*C, symmetrizing in Prod2
tmp <- Prod2(blk,p,A,B,0)
Q <- Prod2(blk,p,tmp,C,sym)
}else{
# nonzero pattern supplied: A*(B*C) restricted to nzlistQ
tmp <- Prod2(blk,p,B,C,0)
Q <- mexProd2nz(blk,p,A,tmp,nzlistQ)
if(sym){
Q <- 0.5*(Q + t(Q))
}
}
}else if(blk[[p,1]] == "q" | blk[[p,1]] == "l" | blk[[p,1]] == "u"){
# quadratic/linear/unrestricted blocks are elementwise
Q <- A * B * C
}
}
return(Q)
} | /R/Prod3.R | no_license | AdamRahman/sdpt3r | R | false | false | 926 | r | Prod3 <- function(blk, p, A, B, C, sym=0, nzlistQ=NULL){
checkcell <- c(is.list(A), is.list(B), is.list(B))
if(!is.null(nzlistQ)){
checkcell <- c(checkcell, is.list(nzlistQ))
}else{
nzlistQ <- Inf
}
##
if(any((checkcell - 1) != 0)){
if(blk[[p,1]] == "s"){
len <- nrow(as.matrix(nzlistQ))
len2 <- ncol(as.matrix(nzlistQ))
if(is.null(len)){
len <- 0
}
if(is.null(len2)){
len2 <- 0
}
if(len == 0){
nzlistQ <- Inf
len2 <- 1
}
if(len2 == 1 & any(is.infinite(nzlistQ))){
tmp <- Prod2(blk,p,A,B,0)
Q <- Prod2(blk,p,tmp,C,sym)
}else{
tmp <- Prod2(blk,p,B,C,0)
Q <- mexProd2nz(blk,p,A,tmp,nzlistQ)
if(sym){
Q <- 0.5*(Q + t(Q))
}
}
}else if(blk[[p,1]] == "q" | blk[[p,1]] == "l" | blk[[p,1]] == "u"){
Q <- A * B * C
}
}
return(Q)
} |
library(Rnightlights)
### Name: getNlTileZipLclNameVIIRS
### Title: Constructs the filename used to save/access the downloaded VIIRS
### tile .tgz file
### Aliases: getNlTileZipLclNameVIIRS
### ** Examples
## Not run:
##D Rnightlights:::getNlTileZipLclNameVIIRS("201401", 1)
##D #returns "./tiles/VIIRS_2014_01_75N180W.tgz"
##D
## End(Not run)
| /data/genthat_extracted_code/Rnightlights/examples/getNlTileZipLclNameVIIRS.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 358 | r | library(Rnightlights)
### Name: getNlTileZipLclNameVIIRS
### Title: Constructs the filename used to save/access the downloaded VIIRS
### tile .tgz file
### Aliases: getNlTileZipLclNameVIIRS
### ** Examples
## Not run:
##D Rnightlights:::getNlTileZipLclNameVIIRS("201401", 1)
##D #returns "./tiles/VIIRS_2014_01_75N180W.tgz"
##D
## End(Not run)
|
# Plot limits shared by every figure in this script
xl = c(0,40); yl = c(-50, 0) ##plot limits
load("digit3.dat.rda")
D = digit3.dat
# flatten the 13 x 2 x n landmark array into a 26 x n matrix:
# rows 1-13 hold x coordinates, rows 14-26 hold y coordinates
X = rbind(D[,1,],D[,2,]) # we treat X as p by n (not n by p)
# white on black: animate the first 30 digitized threes
par(bg = 'black',col.main="white")
for (i in 1:30) {
# FIX: 'as' relied on partial argument matching; spell out 'asp' (aspect ratio)
plot(matrix(X[,i],13,2),xlim=xl,ylim=yl,type="l",lwd=5,col="white",asp=1,main=paste0("data point: ",i))
Sys.sleep(1/2)
}
# Show landmarks clearly (points + lines on a white background)
par(bg = 'white',col.main="black")
for (i in 1:3) {
plot(matrix(X[,i],13,2),xlim=xl,ylim=yl,type="o",asp = 1, main="the threes are encoded using landmarks")
abline(h = 0,lwd=5,lty=2)
abline(v = 0,lwd=5,lty=2)
Sys.sleep(3)
}
# Plot the average 3, plus variation around the average.
x.bar = rowMeans(X) # E(3); the "average" 3.
par(bg = 'black', col.main="Blue")
# FIX: removed a stray empty argument (',,') and replaced the partially
# matched 'as = 1' with the explicit 'asp = 1'
plot(matrix(x.bar,13,2),xlim=xl,ylim=yl,type="l",asp = 1,lwd=5,col="blue", main = "the average 3")
for (i in 1:30) {
# overlay each observed three on top of the mean shape
lines(matrix(X[,i],13,2),lwd=0.5,col="white")
Sys.sleep(1/2)
}
S = var(t(X)) # if x ~ Normal, then (S,x.bar) contains all information about Number 3s.
## PCA ##
# FIX: compute the eigendecomposition once instead of twice
eig = eigen(S)
A = eig$vectors # A: a matrix of principle component loading vectors
# I also called the columns of A "prototypes of variation", "error templates" and a bunch of other things.
# each column of A is a "way" in which a three can deviate from the typical three.
# the first column a1, is the most salient way
# a2 is the second most salient, subject to the restriction that it is orthogonal to a1
# likewise a3, but orthogonal to both a1 and a2 etc...
lambda = eig$values # corresponding eigenvalues
# scree plot: percentage of total variance explained by each component
par(bg = 'white')
plot(100*lambda/sum(lambda),type="o",col="blue", main="looks like we only have 9 effective dimensions")
abline(h=1)
Sys.sleep(5)
## Looks like there are only really 9 ways for 3's to vary.
## Since there could have been 23, this is where the phrase "dimension reduction" comes from.
## If 3's required all 23 dimensions, then they would have no structure, because in this case each
## landmark would be independent of any other (in fact the x and y components of each landmark would also be independent).
# Animate one principal component: sweep the score z over roughly +/- 4 sd
# and draw the shape x.bar + z * a at each step.
#   lambda: eigenvalue (variance) of the component
#   a:      loading vector (one column of A), length 26
#   x.bar:  mean shape, length 26
#   xl, yl: plot limits
#   p:      component number (used only in the title)
# NOTE(review): the dotted name makes this look like an S3 `plot` method for
# class "pca"; kept unchanged for backward compatibility with call sites.
plot.pca = function(lambda,a,x.bar,xl,yl,p) {
par(bg = 'black',col.main="green")
hp = round(4*sqrt(lambda))
# FIX: 'length' / 'as' relied on partial matching; use 'length.out' / 'asp'.
# Also renamed the local 'range' so it no longer shadows base::range().
zs = seq(-hp,hp,length.out=100)
for (z in zs) {
three = matrix(x.bar+ z*a,13,2)
plot(three,xlim=xl,ylim=yl,asp=1,main=paste0("principle component #",p),col="white",cex=2,lwd=3)
Sys.sleep(1/100)
}
}
# plot the first 9 principle components (the ones the scree plot deemed real)
for (a in 1:9) {
plot.pca(lambda[a],A[,a],x.bar,xl,yl,a)
Sys.sleep(1/2)
}
# plot them individually: change 'a' to inspect a single component
a = 1
plot.pca(lambda[a],A[,a],x.bar,xl,yl,a)
# Removing an aspect from the data.
# a1 is interpreted as location on the main (off) diagonal of the page.
# Let's remove this (non interesting) aspect from the threes by setting Z1 = 0.
j = 1 # change this to 2, to delete a2, 3 to delete a3 etc...
for (i in 1:30) {
my.three.x = X[,i] # get a three
my.three.z = t(A) %*% (my.three.x - x.bar) # X = X.bar + AZ entails Z = A'(X-X.bar)
my.three.z[j] = 0 # set Zj = 0
my.three.x.standardized = x.bar + A %*% my.three.z # now transform back
par(bg = 'black',col.main="white")
# FIX: explicit 'asp' instead of the partially matched 'as'
plot(matrix(my.three.x,13,2),xlim=xl,ylim=yl,type="l",lwd=1,col="white",asp=1)
# FIX: dropped xlim/ylim from lines() -- they are not graphical parameters
# and only produced "not a graphical parameter" warnings
lines(matrix(my.three.x.standardized,13,2),lwd=5,col="blue")
Sys.sleep(1/2)
}
| /Week 7/three.R | permissive | christiangilson/GLM-Multivariate | R | false | false | 3,351 | r | xl = c(0,40); yl = c(-50, 0) ##plot limits
load("digit3.dat.rda")
D = digit3.dat
# flatten
X = rbind(D[,1,],D[,2,]) # we treat X as p by n (not n by p)
# white on black
par(bg = 'black',col.main="white")
for (i in 1:30) {
plot(matrix(X[,i],13,2),xlim=xl,ylim=yl,type="l",lwd=5,col="white",as=1,main=paste0("data point: ",i))
Sys.sleep(1/2)
}
# Show landmarks clearly
par(bg = 'white',col.main="black")
for (i in 1:3) {
plot(matrix(X[,i],13,2),xlim=xl,ylim=yl,type="o",as = 1, main="the threes are encoded using landmarks")
abline(h = 0,lwd=5,lty=2)
abline(v = 0,lwd=5,lty=2)
Sys.sleep(3)
}
# Plot the average 3, plus variation around the average.
x.bar = rowMeans(X) # E(3); the "average" 3.
par(bg = 'black', col.main="Blue")
# FIX: removed a stray empty argument (',,') and replaced the partially
# matched 'as = 1' with the explicit 'asp = 1'
plot(matrix(x.bar,13,2),xlim=xl,ylim=yl,type="l",asp = 1,lwd=5,col="blue", main = "the average 3")
for (i in 1:30) {
# overlay each observed three on top of the mean shape
lines(matrix(X[,i],13,2),lwd=0.5,col="white")
Sys.sleep(1/2)
}
S = var(t(X)) # if x ~ Normal, then (S,x.bar) contains all information about Number 3s.
## PCA ##
A = eigen(S)$vectors # A: a matrix of principle component loading vectors
# I also called the columns of A "prototypes of variation", "error templates" and a bunch of other things.
# each column of A is a "way" in which a three can deviate from the typical three.
# the first column a1, is the most salient way
# a2 is the second most salient, subject to the restriction that it is orthogonal to a1
# likewise a3, but orthogonal to both a1 and a2 etc...
lambda = eigen(S)$values # corresponding eigenvalues
par(bg = 'white')
plot(100*lambda/sum(lambda),type="o",col="blue", main="looks like we only have 9 effective dimensions")
abline(h=1)
Sys.sleep(5)
## Looks like there are only really 9 ways for 3's to vary.
## Since there could have been 23, this is where the phrase "dimension reduction" comes from.
## If 3's required all 23 dimensions, then they would have no structure, because in this case each
## landmark would be independent of any other (in fact the x and y components of each landmark would also be independent).
# Animate one principal component: sweep the score z over roughly +/- 4 sd
# and draw the shape x.bar + z * a at each step.
#   lambda: eigenvalue (variance) of the component
#   a:      loading vector (one column of A), length 26
#   x.bar:  mean shape, length 26
#   xl, yl: plot limits
#   p:      component number (used only in the title)
# NOTE(review): the dotted name makes this look like an S3 `plot` method for
# class "pca"; kept unchanged for backward compatibility with call sites.
plot.pca = function(lambda,a,x.bar,xl,yl,p) {
par(bg = 'black',col.main="green")
hp = round(4*sqrt(lambda))
# FIX: 'length' / 'as' relied on partial matching; use 'length.out' / 'asp'.
# Also renamed the local 'range' so it no longer shadows base::range().
zs = seq(-hp,hp,length.out=100)
for (z in zs) {
three = matrix(x.bar+ z*a,13,2)
plot(three,xlim=xl,ylim=yl,asp=1,main=paste0("principle component #",p),col="white",cex=2,lwd=3)
Sys.sleep(1/100)
}
}
# plot the first 9 principle components
for (a in 1:9) {
plot.pca(lambda[a],A[,a],x.bar,xl,yl,a)
Sys.sleep(1/2)
}
# plot them individually
a = 1
plot.pca(lambda[a],A[,a],x.bar,xl,yl,a)
# Removing an aspect from the data.
# a1 is interpreted as location on the main (off) diagonal of the page.
# Let's remove this (non interesting) aspect from the threes by setting Z1 = 0.
j = 1 # change this to 2, to delete a2, 3 to delete a3 etc...
for (i in 1:30) {
my.three.x = X[,i] # get a three
my.three.z = t(A) %*% (my.three.x - x.bar) # X = X.bar + AZ entails Z = A'(X-X.bar)
my.three.z[j] = 0 # set Zj = 0
my.three.x.standardized = x.bar + A %*% my.three.z # now transfrom back
par(bg = 'black',col.main="white")
plot(matrix(my.three.x,13,2),xlim=xl,ylim=yl,type="l",lwd=1,col="white",as=1)
lines(matrix(my.three.x.standardized,13,2),xlim=xl,ylim=yl,lwd=5,col="blue")
Sys.sleep(1/2)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rate_conversion.R
\name{prob_to_prob}
\alias{prob_to_prob}
\title{Convert a probability to a probability with a different frequency}
\usage{
prob_to_prob(p, t = 1)
}
\arguments{
\item{p}{probability}
\item{t}{time/ frequency}
}
\value{
a scalar or vector of probabilities converted to a different frequency
}
\description{
\code{prob_to_prob} converts a probability to a probability with a different frequency.
}
\examples{
# Annual probability to monthly probability
p_year <- 0.3
p_month <- prob_to_prob(p = p_year, t = 1/12)
p_month
}
| /man/prob_to_prob.Rd | permissive | Dawei-Zhu85/darthtools | R | false | true | 617 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rate_conversion.R
\name{prob_to_prob}
\alias{prob_to_prob}
\title{Convert a probability to a probability with a different frequency}
\usage{
prob_to_prob(p, t = 1)
}
\arguments{
\item{p}{probability}
\item{t}{time/ frequency}
}
\value{
a scalar or vector of probabilities converted to a different frequency
}
\description{
\code{prob_to_prob} converts a probability to a probability with a different frequency.
}
\examples{
# Annual probability to monthly probability
p_year <- 0.3
p_month <- prob_to_prob(p = p_year, t = 1/12)
p_month
}
|
# Module UI for the "Data" tab: a full-width primary box titled "Data" that
# holds a help button and the explorer UI for browsing datasets.
#   id: module namespace id
tab_data_ui <- function(id) {
ns <- shiny::NS(id)
shiny::fluidRow(
shinydashboard::box(
width = 12,
title = htmltools::tagList(
"Data",
help_button(ns("help_data"))
),
status = "primary",
explorer_ui(
id = ns("id_explorer")
)
)
)
}
# Module server for the "Data" tab: opens the help page when the help button
# is pressed and wires up the explorer module rooted at the global tree.
# Only group nodes may be added; group and dataset nodes are visible.
tab_data <- function(
input, output, session, .values
) {
# NOTE(review): ns is assigned but never used in this server function
ns <- session$ns
shiny::observeEvent(input$help_data, {
.values$help$open("data")
})
shiny::callModule(
module = explorer,
id = "id_explorer",
.values = .values,
.root_node_r = shiny::reactive(.values$tree$get_root_node()),
.explorer_classes = .values$explorer_classes,
addable_r = shiny::reactive("__group__"),
visible_r = shiny::reactive(c("__group__", "dataset")),
.label_list = shinyExplorer::label_explorer(
add_group = "New folder"
)
)
}
ns <- shiny::NS(id)
shiny::fluidRow(
shinydashboard::box(
width = 12,
title = htmltools::tagList(
"Data",
help_button(ns("help_data"))
),
status = "primary",
explorer_ui(
id = ns("id_explorer")
)
)
)
}
#' Server part of the "Data" tab module.
#'
#' Wires the tab's help button to the app-wide help system and mounts the
#' explorer module on the shared object tree.
#'
#' @param input,output,session Standard shiny module server arguments.
#' @param .values App-wide state; this function reads \code{.values$help},
#'   \code{.values$tree} and \code{.values$explorer_classes}.
tab_data <- function(input, output, session, .values) {
  # NOTE: the original assigned `ns <- session$ns` but never used it;
  # the unused local has been removed.

  # Open this tab's help page when the help button is clicked
  shiny::observeEvent(input$help_data, {
    .values$help$open("data")
  })

  # Mount the explorer rooted at the global tree. Judging by the argument
  # names, addable_r/visible_r restrict which node classes can be added
  # ("__group__") and which are displayed ("__group__", "dataset").
  shiny::callModule(
    module = explorer,
    id = "id_explorer",
    .values = .values,
    .root_node_r = shiny::reactive(.values$tree$get_root_node()),
    .explorer_classes = .values$explorer_classes,
    addable_r = shiny::reactive("__group__"),
    visible_r = shiny::reactive(c("__group__", "dataset")),
    .label_list = shinyExplorer::label_explorer(
      add_group = "New folder"
    )
  )
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pairs.R
\name{pairs.copuladata}
\alias{pairs.copuladata}
\title{Pairs Plot of Copula Data}
\usage{
\method{pairs}{copuladata}(x, labels = names(x), ...,
lower.panel = lp.copuladata, upper.panel = up.copuladata,
diag.panel = dp.copuladata, label.pos = 0.85, cex.labels = 1, gap = 0,
method = "kendall", ccols = terrain.colors(30), margins = "norm",
margins.par = 0)
}
\arguments{
\item{x}{\code{copuladata} object.}
\item{labels}{variable names/labels.}
\item{lower.panel}{panel function to be used on the lower diagonal panels
(if not supplied, a default function is used)}
\item{upper.panel}{panel function to be used on the upper diagonal panels
(if not supplied, a default function is used)}
\item{diag.panel}{panel function to be used on the diagonal panels (if not
supplied, a default function is used)}
\item{label.pos}{y position of labels in the diagonal panel; default:
\code{label.pos = 0.85}.}
\item{cex.labels}{magnification to be used for the labels of the diagonal
panel; default: \code{cex.labels = 1}.}
\item{gap}{distance between subplots, in margin lines; default: \code{gap =
0}.}
\item{method}{a character string indicating which correlation coefficients
are computed. One of \code{"pearson"}, \code{"kendall"} (default), or
\code{"spearman"}}
\item{ccols}{colour to be used for the contour plots; default: \code{ccols =
terrain.colors(30)}.}
\item{margins}{character; margins for the contour plots. Possible margins
are:\cr \code{"norm"} = standard normal margins (default)\cr \code{"t"} =
Student t margins with degrees of freedom as specified by
\code{margins.par}\cr \code{"gamma"} = Gamma margins with shape and scale as
specified by \code{margins.par}\cr \code{"exp"} = Exponential margins with
rate as specified by \code{margins.par}\cr \code{"unif"} = uniform margins}
\item{margins.par}{parameter(s) of the distribution of the margins (of the
contour plots) if necessary (default: \code{margins.par = 0}), i.e.,
\itemize{ \item a positive real number for the degrees of freedom of Student
t margins (see \code{\link{dt}}), \item a 2-dimensional vector of positive
real numbers for the shape and scale parameters of Gamma margins (see
\code{\link{dgamma}}), \item a positive real number for the rate parameter
of exponential margins (see \code{\link{dexp}}). }}
\item{\dots}{other graphical parameters (see \code{\link[graphics]{par}}).}
}
\description{
This function provides pair plots for copula data. Using default setting it
plots bivariate contour plots on the lower panel, scatter plots and
correlations on the upper panel and histograms on the diagonal panel.
}
\note{
If the default panel functions are used \cr \itemize{ \item \code{col}
changes only the colour of the points in the scatter plot
(\code{upper.panel}) \cr \item \code{cex} changes only the magnification of
the points in the scatter plot (\code{upper.panel}) }
}
\examples{
data(daxreturns)
data <- as.copuladata(daxreturns)
sel <- c(4,5,14,15)
## pairs plot with default settings
pairs(data[sel])
## pairs plot with custom settings
nlevels <- 20
pairs(data[sel], cex = 2, pch = 1, col = "black",
diag.panel = NULL, label.pos = 0.5,
cex.labels = 2.5, gap = 1,
method = "pearson", ccols = heat.colors(nlevels),
margins = "gamma", margins.par = c(1,1))
## pairs plot with own panel functions
up <- function(x, y) {
# upper panel: empirical contour plot
op <- par(usr = c(-3, 3, -3, 3), new = TRUE)
BiCopMetaContour(x, y, bw = 2, levels = c(0.01, 0.05, 0.1, 0.15, 0.2),
# exponential margins
margins = "exp", margins.par = 1,
axes = FALSE)
on.exit(par(op))
}
lp <- function(x, y) {
# lower panel: scatter plot (copula data) and correlation
op <- par(usr = c(0, 1, 0, 1), new = TRUE)
points(x, y, pch = 1, col = "black")
r <- cor(x, y, method = "spearman") # Spearman's rho
txt <- format(x = r, digits = 3, nsmall = 3)[1]
text(x = 0.5, y = 0.5, labels = txt, cex = 1 + abs(r) * 2, col = "blue")
on.exit(par(op))
}
dp <- function(x) {
# diagonal panel: histograms (copula data)
op <- par(usr = c(0, 1, 0, 1.5), new = TRUE)
hist(x, freq = FALSE, add = TRUE, col = "brown", border = "black", main = "")
abline(h = 1, col = "black", lty = 2)
on.exit(par(op))
}
nlevels <- 20
pairs(data[sel],
lower.panel = lp, upper.panel = up, diag.panel = dp, gap = 0.5)
}
\author{
Tobias Erhardt
}
\seealso{
\code{\link[graphics]{pairs}}, \code{\link{as.copuladata}},
\code{\link{BiCopMetaContour}}
}
| /man/pairs.copuladata.Rd | no_license | ulf85/VineCopula | R | false | true | 4,619 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pairs.R
\name{pairs.copuladata}
\alias{pairs.copuladata}
\title{Pairs Plot of Copula Data}
\usage{
\method{pairs}{copuladata}(x, labels = names(x), ...,
lower.panel = lp.copuladata, upper.panel = up.copuladata,
diag.panel = dp.copuladata, label.pos = 0.85, cex.labels = 1, gap = 0,
method = "kendall", ccols = terrain.colors(30), margins = "norm",
margins.par = 0)
}
\arguments{
\item{x}{\code{copuladata} object.}
\item{labels}{variable names/labels.}
\item{lower.panel}{panel function to be used on the lower diagonal panels
(if not supplied, a default function is used)}
\item{upper.panel}{panel function to be used on the upper diagonal panels
(if not supplied, a default function is used)}
\item{diag.panel}{panel function to be used on the diagonal panels (if not
supplied, a default function is used)}
\item{label.pos}{y position of labels in the diagonal panel; default:
\code{label.pos = 0.85}.}
\item{cex.labels}{magnification to be used for the labels of the diagonal
panel; default: \code{cex.labels = 1}.}
\item{gap}{distance between subplots, in margin lines; default: \code{gap =
0}.}
\item{method}{a character string indicating which correlation coefficients
are computed. One of \code{"pearson"}, \code{"kendall"} (default), or
\code{"spearman"}}
\item{ccols}{colour to be used for the contour plots; default: \code{ccols =
terrain.colors(30)}.}
\item{margins}{character; margins for the contour plots. Possible margins
are:\cr \code{"norm"} = standard normal margins (default)\cr \code{"t"} =
Student t margins with degrees of freedom as specified by
\code{margins.par}\cr \code{"gamma"} = Gamma margins with shape and scale as
specified by \code{margins.par}\cr \code{"exp"} = Exponential margins with
rate as specified by \code{margins.par}\cr \code{"unif"} = uniform margins}
\item{margins.par}{parameter(s) of the distribution of the margins (of the
contour plots) if necessary (default: \code{margins.par = 0}), i.e.,
\itemize{ \item a positive real number for the degrees of freedom of Student
t margins (see \code{\link{dt}}), \item a 2-dimensional vector of positive
real numbers for the shape and scale parameters of Gamma margins (see
\code{\link{dgamma}}), \item a positive real number for the rate parameter
of exponential margins (see \code{\link{dexp}}). }}
\item{\dots}{other graphical parameters (see \code{\link[graphics]{par}}).}
}
\description{
This function provides pair plots for copula data. Using default setting it
plots bivariate contour plots on the lower panel, scatter plots and
correlations on the upper panel and histograms on the diagonal panel.
}
\note{
If the default panel functions are used \cr \itemize{ \item \code{col}
changes only the colour of the points in the scatter plot
(\code{upper.panel}) \cr \item \code{cex} changes only the magnification of
the points in the scatter plot (\code{upper.panel}) }
}
\examples{
data(daxreturns)
data <- as.copuladata(daxreturns)
sel <- c(4,5,14,15)
## pairs plot with default settings
pairs(data[sel])
## pairs plot with custom settings
nlevels <- 20
pairs(data[sel], cex = 2, pch = 1, col = "black",
diag.panel = NULL, label.pos = 0.5,
cex.labels = 2.5, gap = 1,
method = "pearson", ccols = heat.colors(nlevels),
margins = "gamma", margins.par = c(1,1))
## pairs plot with own panel functions
up <- function(x, y) {
# upper panel: empirical contour plot
op <- par(usr = c(-3, 3, -3, 3), new = TRUE)
BiCopMetaContour(x, y, bw = 2, levels = c(0.01, 0.05, 0.1, 0.15, 0.2),
# exponential margins
margins = "exp", margins.par = 1,
axes = FALSE)
on.exit(par(op))
}
lp <- function(x, y) {
# lower panel: scatter plot (copula data) and correlation
op <- par(usr = c(0, 1, 0, 1), new = TRUE)
points(x, y, pch = 1, col = "black")
r <- cor(x, y, method = "spearman") # Spearman's rho
txt <- format(x = r, digits = 3, nsmall = 3)[1]
text(x = 0.5, y = 0.5, labels = txt, cex = 1 + abs(r) * 2, col = "blue")
on.exit(par(op))
}
dp <- function(x) {
# diagonal panel: histograms (copula data)
op <- par(usr = c(0, 1, 0, 1.5), new = TRUE)
hist(x, freq = FALSE, add = TRUE, col = "brown", border = "black", main = "")
abline(h = 1, col = "black", lty = 2)
on.exit(par(op))
}
nlevels <- 20
pairs(data[sel],
lower.panel = lp, upper.panel = up, diag.panel = dp, gap = 0.5)
}
\author{
Tobias Erhardt
}
\seealso{
\code{\link[graphics]{pairs}}, \code{\link{as.copuladata}},
\code{\link{BiCopMetaContour}}
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/diffnet.R
\name{error.bars}
\alias{error.bars}
\title{Error bars for plotCV}
\usage{
error.bars(x, upper, lower, width = 0.02, ...)
}
\arguments{
\item{x}{no descr}
\item{upper}{no descr}
\item{lower}{no descr}
\item{width}{no descr}
\item{...}{no descr}
}
\value{
no descr
}
\description{
Error bars for plotCV
}
\author{
n.stadler
}
\keyword{internal}
| /man/error.bars.Rd | no_license | FrankD/NetHet_old | R | false | false | 445 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/diffnet.R
\name{error.bars}
\alias{error.bars}
\title{Error bars for plotCV}
\usage{
error.bars(x, upper, lower, width = 0.02, ...)
}
\arguments{
\item{x}{no descr}
\item{upper}{no descr}
\item{lower}{no descr}
\item{width}{no descr}
\item{...}{no descr}
}
\value{
no descr
}
\description{
Error bars for plotCV
}
\author{
n.stadler
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SimSmoother.R
\name{SimSmoother}
\alias{SimSmoother}
\title{Generating Random Samples using the Simulation Smoother}
\usage{
SimSmoother(object, nsim = 1, components = TRUE)
}
\arguments{
\item{object}{A statespacer object as returned by \code{\link{statespacer}}.}
\item{nsim}{Number of random samples to draw. Defaults to \code{1}.}
\item{components}{Boolean indicating whether the components of
the model should be extracted in each of the random samples.}
}
\value{
A list containing the simulated state parameters and disturbances.
In addition, it returns the components as specified by the State Space model
if \code{components = TRUE}. Each of the objects are arrays, where the first
dimension equals the number of time points, the second dimension the number
of state parameters, disturbances, or dependent variables, and the third
dimension equals the number of random samples \code{nsim}.
}
\description{
Draws random samples of the specified model conditional
on the observed data.
}
\examples{
# Fits a local level model for the Nile data
library(datasets)
y <- matrix(Nile)
fit <- statespacer(initial = 10, y = y, local_level_ind = TRUE)
# Obtain random sample using the fitted model
sim <- SimSmoother(fit, nsim = 1, components = TRUE)
# Plot the simulated level against the smoothed level of the original data
plot(sim$level[, 1, 1], type = 'p')
lines(fit$smoothed$level, type = 'l')
}
\author{
Dylan Beijers, \email{dylanbeijers@gmail.com}
}
| /man/SimSmoother.Rd | permissive | DylanB95/statespacer | R | false | true | 1,540 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SimSmoother.R
\name{SimSmoother}
\alias{SimSmoother}
\title{Generating Random Samples using the Simulation Smoother}
\usage{
SimSmoother(object, nsim = 1, components = TRUE)
}
\arguments{
\item{object}{A statespacer object as returned by \code{\link{statespacer}}.}
\item{nsim}{Number of random samples to draw. Defaults to \code{1}.}
\item{components}{Boolean indicating whether the components of
the model should be extracted in each of the random samples.}
}
\value{
A list containing the simulated state parameters and disturbances.
In addition, it returns the components as specified by the State Space model
if \code{components = TRUE}. Each of the objects are arrays, where the first
dimension equals the number of time points, the second dimension the number
of state parameters, disturbances, or dependent variables, and the third
dimension equals the number of random samples \code{nsim}.
}
\description{
Draws random samples of the specified model conditional
on the observed data.
}
\examples{
# Fits a local level model for the Nile data
library(datasets)
y <- matrix(Nile)
fit <- statespacer(initial = 10, y = y, local_level_ind = TRUE)
# Obtain random sample using the fitted model
sim <- SimSmoother(fit, nsim = 1, components = TRUE)
# Plot the simulated level against the smoothed level of the original data
plot(sim$level[, 1, 1], type = 'p')
lines(fit$smoothed$level, type = 'l')
}
\author{
Dylan Beijers, \email{dylanbeijers@gmail.com}
}
|
library(MasterBayes)
### Name: tunePed
### Title: tunePed Object
### Aliases: tunePed is.tunePed
### Keywords: classes
### ** Examples
## Not run:
##D data(WarblerG)
##D A<-extractA(WarblerG)
##D
##D ped<-matrix(NA, 100,3)
##D ped[,1]<-1:100
##D
##D G<-simgenotypes(A, ped=ped, E1=0.1, E2=0.001, no_dup=2)
##D GdP<-GdataPed(G=G$Gobs, id=G$id)
##D
##D model1<-MCMCped(GdP=GdP, nitt=1500, thin=1, burnin=500)
##D
##D # The proposal distribution is to conservative for E1
##D # and the update is accepted about 70% of the time
##D
##D plot(model1$E1)
##D autocorr(model1$E1)
##D
##D # Succesive samples from the posterior distribution are
##D # strongly autocorrelated. Should of course run the chain
##D # for longer with a larger thinning interval, but a greater
##D # tuning parameter helps (now 3e-4, rather than 3e-5):
##D
##D model2<-MCMCped(GdP=GdP, tP=tunePed(E1=10), nitt=1500,
##D thin=1, burnin=500)
##D
##D plot(model2$E1)
##D autocorr(model2$E1)
## End(Not run)
| /data/genthat_extracted_code/MasterBayes/examples/tunePed.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 993 | r | library(MasterBayes)
### Name: tunePed
### Title: tunePed Object
### Aliases: tunePed is.tunePed
### Keywords: classes
### ** Examples
## Not run:
##D data(WarblerG)
##D A<-extractA(WarblerG)
##D
##D ped<-matrix(NA, 100,3)
##D ped[,1]<-1:100
##D
##D G<-simgenotypes(A, ped=ped, E1=0.1, E2=0.001, no_dup=2)
##D GdP<-GdataPed(G=G$Gobs, id=G$id)
##D
##D model1<-MCMCped(GdP=GdP, nitt=1500, thin=1, burnin=500)
##D
##D # The proposal distribution is to conservative for E1
##D # and the update is accepted about 70% of the time
##D
##D plot(model1$E1)
##D autocorr(model1$E1)
##D
##D # Succesive samples from the posterior distribution are
##D # strongly autocorrelated. Should of course run the chain
##D # for longer with a larger thinning interval, but a greater
##D # tuning parameter helps (now 3e-4, rather than 3e-5):
##D
##D model2<-MCMCped(GdP=GdP, tP=tunePed(E1=10), nitt=1500,
##D thin=1, burnin=500)
##D
##D plot(model2$E1)
##D autocorr(model2$E1)
## End(Not run)
|
setwd("C:/Users/dark_/Documents/NDRN/CAP_Appropriations")
source('workload_calc.R')
#exploration of analysis for d_1, d_2, d_7
#Very initial look at favorable closes per year. Appears to be some outliers.
#2016 has a pretty big spread but appears to not matter
ggplot(data = combined_data)+
aes(x = factor(Year),
y = f_close_per_work
) +
geom_boxplot()
#national trend is trending downwards, but at a rate that seems insignificant
summary(lm(d_7_per_work ~ Year, combined_data))
# Text scatter of favorable closes per unit of workload by year, with each
# point labelled by its State value so outliers can be identified by eye.
# Returns the ggplot object without printing it.
plot_favorable_time <- function(df) {
  ggplot(df) +
    aes(x = factor(Year), y = f_close_per_work, label = State) +
    geom_text()
}
# f_close_per_work vs d_7_per_work, each point labelled with its State and
# coloured by year using the Dark2 brewer palette.
# Returns the ggplot object without printing it.
plot_vs <- function(df) {
  mapping <- aes(
    x = f_close_per_work,
    y = d_7_per_work,
    label = State,
    color = factor(Year)
  )
  ggplot(df) +
    mapping +
    geom_text() +
    scale_color_brewer(palette = "Dark2")
}
combined_data %>% filter(pop_cat == 7) %>% plot_vs()
combined_data %>% plot_favorable_time()
#the data is disproportionaly skewed towards having 0 unfavorable closes
ggplot(data = combined_data #%>% filter(d_7_per_work != 0)
)+
aes(x = d_7_per_work,
fill = factor(pop_cat)) +
geom_histogram(position = 'stack', binwidth = .01) +
scale_fill_hue(h = c(0,270))
#filtering out zero ratios
ggplot(data = combined_data %>% filter(d_7_per_work != 0)
)+
aes(x = d_7_per_work,
fill = factor(fund_cat)) +
geom_histogram(position = 'stack', binwidth = .01) +
scale_fill_hue(h = c(0,270))
ggplot(data = combined_data %>% filter(d_7_per_work != 0)
,
aes(x = d_7_per_work)) +
geom_dotplot(dotsize = .4,
binwidth = max(combined_data$d_7_per_work)/30
)
combined_data %>% filter(d_7_per_work != 0)
# Text scatter of d_7_per_pop over Year, labelled by State.
# Local renamed from `plot` to `p` to avoid shadowing base::plot.
# Returns the ggplot object without printing it.
plot2 <- function(df) {
  p <- ggplot(df) +
    aes(x = Year, y = d_7_per_pop, label = State) +
    geom_text()
  p
}
plot2(combined_data %>% filter(pop_cat == 7))
range(combined_data$d_7_per_pop)
hist(combined_data$d_7_per_pop)
density(combined_data$d_7_per_pop)
ggplot(data = combined_data %>% filter(pop_cat == 7)) +
aes(x = d_7_per_pop) +
geom_density()
#shows that the d_7 outliers appear to correlate strongly by population category
ggplot(data = combined_data ) +
aes(x = factor(pop_cat),
y = d_7_per_pop) +
geom_boxplot()
median(combined_data$d_7_per_pop)
median()
combined_data %>% filter(State == 'District of Columbia') %>% View()
sd()
test <-
combined_data %>%
group_by(State) %>%
summarise(p_value_d_7 = regression_calc(Year, d_7_not_favor, 8),
trend = regression_calc(Year, d_7_not_favor, 2),
p_value_d_7_per_pop = regression_calc(Year, d_7_per_pop, 8),
trend_inc_pop = regression_calc(Year, d_7_per_pop, 2)
) %>% filter(p_value_d_7 <= .05) %>%
select(State, trend, trend_inc_pop)
d_7_stats %>% arrange(mean_rank, desc(pop_cat)) %>% View()
summary(lm(combined_data$d_7_not_favor ~ combined_data$pop))
regression_calc(combined_data$pop, combined_data$d_7_not_favor, 2)
ggplot(data = combined_data %>% filter(pop_cat < 4) ) +
aes(x = pop,
y = d_7_not_favor,
label = State) +
geom_text() +
geom_smooth()
regression_calc(as.numeric(d_7_stats$pop_cat), d_7_stats$mean, 2)
summary(lm(d_7_stats$mean ~ as.numeric(d_7_stats$pop_cat)))
cor(as.numeric(d_7_stats$pop_cat), d_7_stats$mean)
d <- density(combined_data$d_7_per_pop)
library(MESS)
auc(d[["x"]][1:23], d[["y"]][1:23], type = 'spline')
combined_data %>%
filter(State == 'Montana') %>%
select(-disab_16, -disab_17, -network_status, -agency_name, -compare_cat_18,
-c_5_informal_review, -c_6_formal_review, -c_7_legal, c_5_per_work,
c_6_per_work, c_7_per_work) %>%
select(State, Year, pop_cat, d_7_per_pop) %>%
View()
P <- .10
# Wrap regression_calc() behind a significance screen.
# `value` selects the quantity to extract (per the notes in this script:
# 8 = the regression's p-value, 2 = its slope). The quantity is reported
# only when the regression's p-value is below the cutoff `p`; otherwise
# NaN is returned.
helper_regression <- function(x, y, value, p) {
  p_value <- regression_calc(x, y, 8)
  if_else(p_value < p, regression_calc(x, y, value), NaN)
}
# Summarise each (possibly grouped) data frame into one row per group with
# two columns per tracked metric:
#   *_p  - the regression p-value (NaN unless it beats the global cutoff P,
#          per helper_regression)
#   *_lm - the fitted slope under the same significance screen
# Each metric is regressed on Year; metrics cover the per-population (pop)
# and per-workload (work) versions of workload, f_close, d_7, c_5, c_6
# and c_7 rates.
# Relies on the global P (significance cutoff) and helper_regression().
regress_df <- function(df){
  df <- df %>%
    summarise(work_pop_p = helper_regression(Year, work_per_pop, 8, P),
              work_pop_lm = helper_regression(Year, work_per_pop, 2, P),
              f_pop_p = helper_regression(Year, f_close_per_pop, 8, P),
              f_pop_lm = helper_regression(Year, f_close_per_pop, 2, P),
              f_work_p = helper_regression(Year, f_close_per_work, 8, P),
              f_work_lm = helper_regression(Year, f_close_per_work, 2, P),
              d_7_pop_p = helper_regression(Year, d_7_per_pop, 8, P),
              d_7_pop_lm = helper_regression(Year, d_7_per_pop, 2, P),
              d_7_work_p = helper_regression(Year, d_7_per_work, 8, P),
              d_7_work_lm = helper_regression(Year, d_7_per_work, 2, P),
              c_5_pop_p = helper_regression(Year, c_5_per_pop, 8, P),
              c_5_pop_lm = helper_regression(Year, c_5_per_pop, 2, P),
              c_5_work_p = helper_regression(Year, c_5_per_work, 8, P),
              c_5_work_lm = helper_regression(Year, c_5_per_work, 2, P),
              c_6_pop_p = helper_regression(Year, c_6_per_pop, 8, P),
              c_6_pop_lm = helper_regression(Year, c_6_per_pop, 2, P),
              c_6_work_p = helper_regression(Year, c_6_per_work, 8, P),
              c_6_work_lm = helper_regression(Year, c_6_per_work, 2, P),
              c_7_pop_p = helper_regression(Year, c_7_per_pop, 8, P),
              c_7_pop_lm = helper_regression(Year, c_7_per_pop, 2, P),
              c_7_work_p = helper_regression(Year, c_7_per_work, 8, P),
              c_7_work_lm = helper_regression(Year, c_7_per_work, 2, P)
    )
  return(df)
}
group_PA <-
combined_data %>%
group_by(is_PA) %>%
regress_df() %>%
mutate(agg_grp = 2)
national <-
combined_data %>%
regress_df() %>%
mutate(agg_grp = 1)
group_pop_cat <-
combined_data %>%
group_by(pop_cat) %>%
regress_df() %>%
mutate(agg_grp = 3)
group_PA_pop_cat <-
combined_data %>%
group_by(pop_cat, is_PA) %>%
regress_df() %>%
mutate(agg_grp = 4)
group_state <-
combined_data %>%
group_by(State, is_PA) %>%
regress_df() %>%
mutate(agg_grp = 5)
test <- full_join(group_PA, national)
test <- full_join(group_pop_cat, test)
test <- full_join(group_PA_pop_cat, test)
test <- full_join(group_state, test)
write.csv(test, 'comparative linear regression10.csv')
?cor
cor(combined_data$pop, combined_data$d_7_not_favor)
lm(combined_data$d_7_not_favor ~ combined_data$pop, method = 'pearson')
?lm
summary(combined_data)
dc <- combined_data$
ggcorr(combined_data)
| /section_d_exploration.R | no_license | bmaxwell99/CAP_Appropriations | R | false | false | 6,742 | r | setwd("C:/Users/dark_/Documents/NDRN/CAP_Appropriations")
source('workload_calc.R')
#exploration of analysis for d_1, d_2, d_7
#Very initial look at favorable closes per year. Appears to be some outliers.
#2016 has a pretty big spread but appears to not matter
ggplot(data = combined_data)+
aes(x = factor(Year),
y = f_close_per_work
) +
geom_boxplot()
#national trend is trending downwards, but at a rate that seems insignificant
summary(lm(d_7_per_work ~ Year, combined_data))
plot_favorable_time <- function(df){
plot <- ggplot(data = df)+
aes(x = factor(Year),
y = f_close_per_work,
label = State
) +
geom_text()
return(plot)
}
plot_vs <- function(df){
plot <- ggplot(data = df) +
aes(x = f_close_per_work,
y = d_7_per_work,
label = State,
color = factor(Year)) +
geom_text() +
scale_color_brewer(palette = 'Dark2')
return(plot)
}
combined_data %>% filter(pop_cat == 7) %>% plot_vs()
combined_data %>% plot_favorable_time()
#the data is disproportionaly skewed towards having 0 unfavorable closes
ggplot(data = combined_data #%>% filter(d_7_per_work != 0)
)+
aes(x = d_7_per_work,
fill = factor(pop_cat)) +
geom_histogram(position = 'stack', binwidth = .01) +
scale_fill_hue(h = c(0,270))
#filtering out zero ratios
ggplot(data = combined_data %>% filter(d_7_per_work != 0)
)+
aes(x = d_7_per_work,
fill = factor(fund_cat)) +
geom_histogram(position = 'stack', binwidth = .01) +
scale_fill_hue(h = c(0,270))
ggplot(data = combined_data %>% filter(d_7_per_work != 0)
,
aes(x = d_7_per_work)) +
geom_dotplot(dotsize = .4,
binwidth = max(combined_data$d_7_per_work)/30
)
combined_data %>% filter(d_7_per_work != 0)
plot2 <- function(df){
plot <- ggplot(data = df) +
aes(x = Year,
y = d_7_per_pop,
label = State) +
geom_text()
return(plot)
}
plot2(combined_data %>% filter(pop_cat == 7))
range(combined_data$d_7_per_pop)
hist(combined_data$d_7_per_pop)
density(combined_data$d_7_per_pop)
ggplot(data = combined_data %>% filter(pop_cat == 7)) +
aes(x = d_7_per_pop) +
geom_density()
#shows that the d_7 outliers appear to correlate strongly by population category
ggplot(data = combined_data ) +
aes(x = factor(pop_cat),
y = d_7_per_pop) +
geom_boxplot()
median(combined_data$d_7_per_pop)
median()
combined_data %>% filter(State == 'District of Columbia') %>% View()
sd()
test <-
combined_data %>%
group_by(State) %>%
summarise(p_value_d_7 = regression_calc(Year, d_7_not_favor, 8),
trend = regression_calc(Year, d_7_not_favor, 2),
p_value_d_7_per_pop = regression_calc(Year, d_7_per_pop, 8),
trend_inc_pop = regression_calc(Year, d_7_per_pop, 2)
) %>% filter(p_value_d_7 <= .05) %>%
select(State, trend, trend_inc_pop)
d_7_stats %>% arrange(mean_rank, desc(pop_cat)) %>% View()
summary(lm(combined_data$d_7_not_favor ~ combined_data$pop))
regression_calc(combined_data$pop, combined_data$d_7_not_favor, 2)
ggplot(data = combined_data %>% filter(pop_cat < 4) ) +
aes(x = pop,
y = d_7_not_favor,
label = State) +
geom_text() +
geom_smooth()
regression_calc(as.numeric(d_7_stats$pop_cat), d_7_stats$mean, 2)
summary(lm(d_7_stats$mean ~ as.numeric(d_7_stats$pop_cat)))
cor(as.numeric(d_7_stats$pop_cat), d_7_stats$mean)
d <- density(combined_data$d_7_per_pop)
library(MESS)
auc(d[["x"]][1:23], d[["y"]][1:23], type = 'spline')
combined_data %>%
filter(State == 'Montana') %>%
select(-disab_16, -disab_17, -network_status, -agency_name, -compare_cat_18,
-c_5_informal_review, -c_6_formal_review, -c_7_legal, c_5_per_work,
c_6_per_work, c_7_per_work) %>%
select(State, Year, pop_cat, d_7_per_pop) %>%
View()
P <- .10
helper_regression <- function(x, y, value, p){
#passing in a value of 8 will return the P value if the result is less than p
#passing in a value of 2 will return the slope if the p value is less than p
r <- if_else(regression_calc(x, y, 8) < p,
regression_calc(x, y, value),
NaN)
return(r)
}
regress_df <- function(df){
df <- df %>%
summarise(work_pop_p = helper_regression(Year, work_per_pop, 8, P),
work_pop_lm = helper_regression(Year, work_per_pop, 2, P),
f_pop_p = helper_regression(Year, f_close_per_pop, 8, P),
f_pop_lm = helper_regression(Year, f_close_per_pop, 2, P),
f_work_p = helper_regression(Year, f_close_per_work, 8, P),
f_work_lm = helper_regression(Year, f_close_per_work, 2, P),
d_7_pop_p = helper_regression(Year, d_7_per_pop, 8, P),
d_7_pop_lm = helper_regression(Year, d_7_per_pop, 2, P),
d_7_work_p = helper_regression(Year, d_7_per_work, 8, P),
d_7_work_lm = helper_regression(Year, d_7_per_work, 2, P),
c_5_pop_p = helper_regression(Year, c_5_per_pop, 8, P),
c_5_pop_lm = helper_regression(Year, c_5_per_pop, 2, P),
c_5_work_p = helper_regression(Year, c_5_per_work, 8, P),
c_5_work_lm = helper_regression(Year, c_5_per_work, 2, P),
c_6_pop_p = helper_regression(Year, c_6_per_pop, 8, P),
c_6_pop_lm = helper_regression(Year, c_6_per_pop, 2, P),
c_6_work_p = helper_regression(Year, c_6_per_work, 8, P),
c_6_work_lm = helper_regression(Year, c_6_per_work, 2, P),
c_7_pop_p = helper_regression(Year, c_7_per_pop, 8, P),
c_7_pop_lm = helper_regression(Year, c_7_per_pop, 2, P),
c_7_work_p = helper_regression(Year, c_7_per_work, 8, P),
c_7_work_lm = helper_regression(Year, c_7_per_work, 2, P)
)
return(df)
}
group_PA <-
combined_data %>%
group_by(is_PA) %>%
regress_df() %>%
mutate(agg_grp = 2)
national <-
combined_data %>%
regress_df() %>%
mutate(agg_grp = 1)
group_pop_cat <-
combined_data %>%
group_by(pop_cat) %>%
regress_df() %>%
mutate(agg_grp = 3)
group_PA_pop_cat <-
combined_data %>%
group_by(pop_cat, is_PA) %>%
regress_df() %>%
mutate(agg_grp = 4)
group_state <-
combined_data %>%
group_by(State, is_PA) %>%
regress_df() %>%
mutate(agg_grp = 5)
test <- full_join(group_PA, national)
test <- full_join(group_pop_cat, test)
test <- full_join(group_PA_pop_cat, test)
test <- full_join(group_state, test)
write.csv(test, 'comparative linear regression10.csv')
?cor
cor(combined_data$pop, combined_data$d_7_not_favor)
lm(combined_data$d_7_not_favor ~ combined_data$pop, method = 'pearson')
?lm
summary(combined_data)
dc <- combined_data$
ggcorr(combined_data)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s2dverification.R
\docType{package}
\name{s2dverification}
\alias{s2dverification}
\alias{s2dverification-package}
\title{Set of Common Tools for Forecast Verification}
\description{
Set of tools to verify forecasts through the computation of typical
prediction scores against one or more observational datasets or reanalyses
(a reanalysis being a physical extrapolation of observations that relies on
the equations from a model, not a pure observational dataset). Intended for
seasonal to decadal climate forecasts although can be useful to verify other
kinds of forecasts. The package can be helpful in climate sciences for other
purposes than forecasting.
}
\details{
\tabular{ll}{
Package: \tab s2dverification\cr
Type: \tab Package\cr
Version: \tab 2.9.0\cr
Date: \tab 2020-10-30\cr
License: \tab LGPLv3\cr
}
Check an overview of the package functionalities and its modules at
\url{https://earth.bsc.es/gitlab/es/s2dverification/-/wikis/home}.
For more information load the package and check the help for each function
or the documentation attached to the package.
}
\seealso{
Useful links:
\itemize{
\item \url{https://earth.bsc.es/gitlab/es/s2dverification/-/wikis/home}
\item Report bugs at \url{https://earth.bsc.es/gitlab/es/s2dverification/-/issues}
}
}
\author{
Nicolau Manubens \email{nicolau.manubens@bsc.es}
}
\keyword{datagen}
\keyword{dynamic}
\keyword{package}
| /man/s2dverification.Rd | no_license | rpkgs/s2dverification | R | false | true | 1,487 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s2dverification.R
\docType{package}
\name{s2dverification}
\alias{s2dverification}
\alias{s2dverification-package}
\title{Set of Common Tools for Forecast Verification}
\description{
Set of tools to verify forecasts through the computation of typical
prediction scores against one or more observational datasets or reanalyses
(a reanalysis being a physical extrapolation of observations that relies on
the equations from a model, not a pure observational dataset). Intended for
seasonal to decadal climate forecasts although can be useful to verify other
kinds of forecasts. The package can be helpful in climate sciences for other
purposes than forecasting.
}
\details{
\tabular{ll}{
Package: \tab s2dverification\cr
Type: \tab Package\cr
Version: \tab 2.9.0\cr
Date: \tab 2020-10-30\cr
License: \tab LGPLv3\cr
}
Check an overview of the package functionalities and its modules at
\url{https://earth.bsc.es/gitlab/es/s2dverification/-/wikis/home}.
For more information load the package and check the help for each function
or the documentation attached to the package.
}
\seealso{
Useful links:
\itemize{
\item \url{https://earth.bsc.es/gitlab/es/s2dverification/-/wikis/home}
\item Report bugs at \url{https://earth.bsc.es/gitlab/es/s2dverification/-/issues}
}
}
\author{
Nicolau Manubens \email{nicolau.manubens@bsc.es}
}
\keyword{datagen}
\keyword{dynamic}
\keyword{package}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/city_list.R
\name{search_city_list}
\alias{search_city_list}
\title{Look up coordinates and city id in owm's city list.}
\usage{
search_city_list(city, country_code = "")
}
\arguments{
\item{city}{city name (regex)}
\item{country_code}{two letter country code (AU, DE, ...),
use \code{country_code = ""} as wildcard}
}
\value{
data frame with matches
}
\description{
search \code{\link{owm_cities}} dataset by city name
and country code
}
\examples{
search_city_list("London", "GB")
search_city_list("London")
search_city_list("Lond")
}
\seealso{
\code{\link{owm_cities}} dataset
}
| /man/search_city_list.Rd | no_license | rubedawg/owmr | R | false | true | 662 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/city_list.R
\name{search_city_list}
\alias{search_city_list}
\title{Look up coordinates and city id in owm's city list.}
\usage{
search_city_list(city, country_code = "")
}
\arguments{
\item{city}{city name (regex)}
\item{country_code}{two letter country code (AU, DE, ...),
use \code{country_code = ""} as wildcard}
}
\value{
data frame with matches
}
\description{
search \code{\link{owm_cities}} dataset by city name
and country code
}
\examples{
search_city_list("London", "GB")
search_city_list("London")
search_city_list("Lond")
}
\seealso{
\code{\link{owm_cities}} dataset
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main_splash_grid.R
\name{splash.grid}
\alias{splash.grid}
\title{splash.grid}
\usage{
splash.grid(sw_in, tc, pn, elev, soil, outdir = getwd(),
sim.control = list(par = TRUE, ncores = 7, output.mode = "monthly",
inmem = FALSE), ...)
}
\arguments{
\item{sw_in}{lon}
\item{tc}{lon}
\item{pn}{lon}
\item{elev}{lon}
}
\value{
A matrix of class \code{xts}.
}
\description{
Apply splash algorithm
}
\examples{
splash.grid()
}
\keyword{splash}
| /man/splash.grid.Rd | no_license | prenticelab/SPLASH_R_DSandoval | R | false | true | 517 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main_splash_grid.R
\name{splash.grid}
\alias{splash.grid}
\title{splash.grid}
\usage{
splash.grid(sw_in, tc, pn, elev, soil, outdir = getwd(),
sim.control = list(par = TRUE, ncores = 7, output.mode = "monthly",
inmem = FALSE), ...)
}
\arguments{
\item{sw_in, }{lon}
\item{tc, }{lon}
\item{pn, }{lon}
\item{elev, }{lon}
}
\value{
A matrix of class \code{xts}.
}
\description{
Apply splash algorithm
}
\examples{
splash.grid()
}
\keyword{splash}
|
# Verify that the *_safe variants treat brace contents as plain variable
# names to look up, never as R code to evaluate.
test_that("glue and glue_data safe do not execute code", {
  # "1+1" is not a bound name, so the safe variants must error rather than
  # evaluate the arithmetic.
  expect_error(glue_safe("{1+1}"), "object '1\\+1' not found")
  expect_error(glue_data_safe(mtcars, "{1+1}"), "object '1\\+1' not found")
  # With a variable literally named "1 + 1" in scope, glue() still evaluates
  # the expression (2), while glue_safe() looks the name up verbatim (5).
  "1 + 1" <- 5
  expect_equal(glue("{1 + 1}"), "2")
  expect_equal(glue_safe("{1 + 1}"), "5")
})
| /tests/testthat/test-safe.R | permissive | tidyverse/glue | R | false | false | 297 | r | test_that("glue and glue_data safe do not execute code", {
expect_error(glue_safe("{1+1}"), "object '1\\+1' not found")
expect_error(glue_data_safe(mtcars, "{1+1}"), "object '1\\+1' not found")
"1 + 1" <- 5
expect_equal(glue("{1 + 1}"), "2")
expect_equal(glue_safe("{1 + 1}"), "5")
})
|
# Auto-extracted example script for polysat::catalanAlleles, which assigns
# the alleles of an allopolyploid microsatellite dataset to isoloci.
# Requires the 'polysat' package to be installed.
library(polysat)
### Name: catalanAlleles
### Title: Sort Alleles into Isoloci
### Aliases: catalanAlleles
### Keywords: misc
### ** Examples
# make the default simulated allotetraploid dataset
mydata <- simAllopoly()
# resolve the alleles
myassign <- catalanAlleles(mydata)
| /data/genthat_extracted_code/polysat/examples/catalanAlleles.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 283 | r | library(polysat)
### Name: catalanAlleles
### Title: Sort Alleles into Isoloci
### Aliases: catalanAlleles
### Keywords: misc
### ** Examples
# make the default simulated allotetraploid dataset
mydata <- simAllopoly()
# resolve the alleles
myassign <- catalanAlleles(mydata)
|
# Fit a cross-validated elastic-net regression (alpha = 0.75, i.e. closer to
# lasso than ridge) on the NSCLC training matrix and append the fitted
# coefficient path to a log file.
library(glmnet)
# First CSV column is the response; columns 4..end are predictors.
# NOTE(review): columns 2-3 are skipped -- presumably metadata; confirm
# against the CSV layout.
mydata = read.table("../../../../TrainingSet/FullSet/Lasso/NSCLC.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fixed seed makes the CV fold assignment reproducible.
set.seed(123)
# 10-fold CV, mean-absolute-error criterion, predictors standardized.
# NOTE(review): the name `glm` shadows stats::glm for the rest of the session.
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.75,family="gaussian",standardize=TRUE)
# Redirect console output to the log file, print the path, then restore.
sink('./NSCLC_079.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Lasso/NSCLC/NSCLC_079.R | no_license | esbgkannan/QSMART | R | false | false | 342 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Lasso/NSCLC.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.75,family="gaussian",standardize=TRUE)
sink('./NSCLC_079.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# September 6, 2017
# Build a word cloud from workers'-compensation injury narratives
# (INJZ_OCCUR_RSN.txt) using the tm text-mining pipeline.
# I am adding some documentation in this header
# Then I will make a commit using R Studio
# Ultimately I hope this goes to GitHub
library(tm)
library(SnowballC)
library(wordcloud)
setwd('/Users/edwardtaylorUTK/Desktop/2015WCfiles')
# Tab-separated file read without a header; narratives kept as strings.
jeopQ <- read.csv("./INJZ_OCCUR_RSN.txt", stringsAsFactors = FALSE, sep = "\t", header=F)
# Drop columns 1-2 (IDs, presumably).
# NOTE(review): c(2:nrow(jeopQ)) also discards the FIRST ROW -- looks like a
# header-row skip, but confirm that row 1 really is a header.
jeopQ <- jeopQ[c(2:nrow(jeopQ)) , -c(1:2)]
# Remove non-UTF8 characters (latin1 -> ASCII, dropping unmappable bytes)
jeopQ <- sapply(jeopQ, function(row) iconv(row, "latin1", "ASCII", sub=""))
# Get words in lower case
jeopQ <- tolower(jeopQ)
# Build a tm corpus, one document per narrative.
jeopCorpus <- Corpus(VectorSource(jeopQ))
# Convert the corpus to plain text documents.
jeopCorpus <- tm_map(jeopCorpus, PlainTextDocument)
# Collapse runs of whitespace.
jeopCorpus <- tm_map(jeopCorpus, stripWhitespace)
# Remove punctuation, digits, and standard English stopwords
# (see stopwords('en') for the full list).
jeopCorpus <- tm_map(jeopCorpus, removePunctuation)
jeopCorpus <- tm_map(jeopCorpus, removeNumbers)
jeopCorpus <- tm_map(jeopCorpus, removeWords, stopwords('en'))
# Remove domain-specific filler words that would dominate the cloud;
# several are pre-stemmed forms (employe, injuri, caus, alleg).
NoGoodWords <- c('left', 'right', 'ee', 'employe', 'employee', 'injuri', 'went',
                 'caus', 'get', 'got','use', '.get', 'alleg', 'alleged', 'allege',
                 'cause','part')
jeopCorpus <- tm_map(jeopCorpus, removeWords, NoGoodWords)
# Stem every word (learning -> learn) so variants collapse to one token.
jeopCorpus <- tm_map(jeopCorpus, stemDocument)
x <- c('fell', 'slip', 'trip', 'right','left', 'fall', 'employee')
# NOTE(review): jeopCorpus[[1]][[1]] pulls the text of the FIRST document
# only; if the whole corpus was intended here (and in the wordcloud below),
# this needs revisiting -- confirm against the tm version in use.
stemCompletion(x, jeopCorpus[[1]][[1]], type="longest")
# Plot the word cloud with a 5-step red palette, top 49 terms.
pal = brewer.pal(5,"Reds")
wordcloud(jeopCorpus[[1]][[1]], max.words = 49, random.order = F,
          random.color = F,
          colors = pal)
| /WordNarrative.R | no_license | etaylo19/work-comp | R | false | false | 2,282 | r | # September 6, 2017
# I am adding some documentation in this header
# Then I will make a commit using R Studio
# Ultimately I hope this goes to GitHub
library(tm)
library(SnowballC)
library(wordcloud)
setwd('/Users/edwardtaylorUTK/Desktop/2015WCfiles')
jeopQ <- read.csv("./INJZ_OCCUR_RSN.txt", stringsAsFactors = FALSE, sep = "\t", header=F)
#The actual questions are available in the Question column.
#Delete columns 1 & 2
jeopQ <- jeopQ[c(2:nrow(jeopQ)) , -c(1:2)]
# Remove non-UTF8 characters
jeopQ <- sapply(jeopQ, function(row) iconv(row, "latin1", "ASCII", sub=""))
# Get words in lower case
jeopQ <- tolower(jeopQ)
#Now, we will perform a series of operations on the text data to simplify it.
#First, we need to create a corpus.
jeopCorpus <- Corpus(VectorSource(jeopQ))
#Next, we will convert the corpus to a plain text document.
jeopCorpus <- tm_map(jeopCorpus, PlainTextDocument)
# Strip whitespace
jeopCorpus <- tm_map(jeopCorpus, stripWhitespace)
#Then, we will remove all punctuation and stopwords. Stopwords are commonly used words in the English language such as I, me, my, etc. You can see the full list of stopwords using stopwords('english').
jeopCorpus <- tm_map(jeopCorpus, removePunctuation)
# Remove numbers
jeopCorpus <- tm_map(jeopCorpus, removeNumbers)
# Remove stopwords
jeopCorpus <- tm_map(jeopCorpus, removeWords, stopwords('en'))
# Remove other meaningless words
NoGoodWords <- c('left', 'right', 'ee', 'employe', 'employee', 'injuri', 'went',
'caus', 'get', 'got','use', '.get', 'alleg', 'alleged', 'allege',
'cause','part')
jeopCorpus <- tm_map(jeopCorpus, removeWords, NoGoodWords)
#Next, we will perform stemming. This means that all the words are converted to their stem (Ex: learning -> learn, walked -> walk, etc.). This will ensure that different forms of the word are converted to the same form and plotted only once in the wordcloud.
jeopCorpus <- tm_map(jeopCorpus, stemDocument)
x <- c('fell', 'slip', 'trip', 'right','left', 'fall', 'employee')
stemCompletion(x, jeopCorpus[[1]][[1]], type="longest")
#Now, we will plot the wordcloud.
pal = brewer.pal(5,"Reds")
wordcloud(jeopCorpus[[1]][[1]], max.words = 49, random.order = F,
random.color = F,
colors = pal)
|
########################################
# Assignment 4 - INF-615 - evaluate on the test data
# Author(s): Liselene Borges and Marcos Scarpim
########################################
set.seed(42)
# data_processing.R is expected to define `data` (training set), `data.pca1`
# (fitted PCA) and `split_data_train`, all used below -- confirm against
# that script.
source("data_processing.R")
# reading test data; column V1 is the digit label, the rest are pixels
print("Reading CSV...")
data_test = read.csv("mnist_test.csv", header=FALSE)
print("Applying PCA model...")
# apply PCA or normalization here
# Drop the all-zero pixel columns.  The mask is computed from the TRAINING
# data (`data`) so train and test keep identical column sets; the leading
# TRUE preserves the label column.
data_filtered <- data_test[,c(TRUE, colSums(data[,2:ncol(data)]) != 0)]
# Project the test pixels onto the PCA basis fitted on the training set.
data_test_pca <- predict(data.pca1, data_filtered)
set.seed(42)
# Keep the first 331 principal components (95% of variance per the authors).
dataTest <- data.frame(V1 = data_test[,1], data_test_pca[,1:331])
# Build 5 class-balanced one-vs-rest training sets for digit `index`.
#
# split_data: list of 10 data frames, one per digit class.
# index: which class plays the positive role (V1 = 1); all other classes
#   are pooled as negatives (V1 = 0).
# Returns a list of 5 data frames, each pairing one fifth of the positive
# class with an 11% subsample (per negative class) split the same way.
# NOTE(review): 0.11 presumably makes 9 classes * 11% roughly match the
# positive class size -- confirm the intended balance ratio.
getBalancedData <- function(split_data, index) {
  posData <- list()
  negData <- list(data.frame(),data.frame(),data.frame(),data.frame(),data.frame())
  for (i in 1:10) {
    if (i == index) {
      # Positive class: deterministic split into 5 consecutive 20% chunks.
      posData[[1]] <- split_data[[i]][1:as.integer(nrow(split_data[[i]])*0.2),]
      posData[[2]] <- split_data[[i]][as.integer(nrow(split_data[[i]])*0.2+1):as.integer(0.4*nrow(split_data[[i]])),]
      posData[[3]] <- split_data[[i]][as.integer(nrow(split_data[[i]])*0.4+1):as.integer(0.6*nrow(split_data[[i]])),]
      posData[[4]] <- split_data[[i]][as.integer(nrow(split_data[[i]])*0.6+1):as.integer(0.8*nrow(split_data[[i]])),]
      posData[[5]] <- split_data[[i]][as.integer(nrow(split_data[[i]])*0.8+1):nrow(split_data[[i]]),]
    }
    else {
      # Negative classes: random 11% subsample, then the same 5-way split,
      # appended class by class (rbind growth is acceptable at this size).
      idx <- sample(1:nrow(split_data[[i]]), 0.11*nrow(split_data[[i]]))
      sample_data <- split_data[[i]][idx,]
      negData[[1]] <- rbind(negData[[1]], sample_data[1:as.integer(nrow(sample_data)*0.2),])
      negData[[2]] <- rbind(negData[[2]], sample_data[as.integer(nrow(sample_data)*0.2+1):as.integer(nrow(sample_data)*0.4),])
      negData[[3]] <- rbind(negData[[3]], sample_data[as.integer(nrow(sample_data)*0.4+1):as.integer(nrow(sample_data)*0.6),])
      negData[[4]] <- rbind(negData[[4]], sample_data[as.integer(nrow(sample_data)*0.6+1):as.integer(nrow(sample_data)*0.8),])
      negData[[5]] <- rbind(negData[[5]], sample_data[as.integer(nrow(sample_data)*0.8+1):nrow(sample_data),])
    }
  }
  # Label each of the 5 folds (V1 = 1 positive / 0 negative) and merge.
  trainData <- list()
  for(i in 1:5) {
    posData[[i]]$V1 = 1
    negData[[i]]$V1 = 0
    trainData[[i]] <- rbind(posData[[i]], negData[[i]])
  }
  return(trainData)
}
# Collect the ensemble votes of every one-vs-rest classifier.
#
# NN: list of 10 classes, each holding 5 trained neuralnet models.
# predData: data frame whose first column is the label; the remaining
#   columns are the features fed to the networks.
# Returns a list of 10 (nrow x 1) matrices: for each class, the sum of the
# five members' {-1, +1} votes (a value of +5 means a unanimous "yes").
getPredictions <- function(NN, predData) {
  n_obs <- nrow(predData)
  features <- predData[, 2:ncol(predData)]
  # One zero-initialized vote accumulator per class.
  predictions <- lapply(1:10, function(k) matrix(0L, nrow = n_obs, ncol = 1))
  for (class_idx in 1:10) {
    set.seed(42)  # kept for parity with the original runs
    for (member_idx in 1:5) {
      raw <- compute(NN[[class_idx]][[member_idx]], features)$net.result
      # Threshold the network output at 0.5 into a -1 / +1 vote.
      vote <- ifelse(raw < 0.5, -1, 1)
      predictions[[class_idx]] <- predictions[[class_idx]] + vote
    }
  }
  predictions
}
# Fuse the 10 one-vs-rest vote tallies into one predicted class per
# observation and cross-tabulate against the true labels.
#
# predictions: list of 10 (n x 1) vote-sum matrices; elements 1..9 score
#   digits 1..9 and element 10 scores digit 0 (columns alg1..alg9, alg0).
# label: vector of true labels (digits 0..9), length n.
# Returns list(confusion_matrix, per_class_score).
#
# The original spelled out, for each class j, "votes_j minus the votes of
# every other class" as ten hand-written 10-term expressions -- a serious
# copy-paste hazard.  Algebraically that is 2 * votes_j - sum(all votes),
# computed here in one loop with identical results.
evaluatePredictions <- function(predictions, label) {
  total_votes <- Reduce(`+`, predictions)
  combinedPred <- as.data.frame(
    lapply(1:10, function(j) as.vector(2 * predictions[[j]] - total_votes))
  )
  # Keep the historical column order/names (alg1..alg9, then alg0) so that
  # which.max tie-breaking matches the original implementation.
  colnames(combinedPred) <- paste0("alg", c(1:9, 0))
  finalPred = colnames(combinedPred)[apply(combinedPred, 1, which.max)]
  cm = as.matrix(table(Actual = label, Predicted = finalPred))
  # Per-class score: diagonal count over its predicted-column total, i.e.
  # precision rather than accuracy, despite the name.  As in the original,
  # this requires all 10 classes to appear in finalPred, otherwise cm has
  # fewer than 10 columns and the indexing fails.
  ACCs <- vapply(1:10, function(k) cm[k, k] / sum(cm[1:10, k]), numeric(1))
  return(list(cm, ACCs))
}
# train the model
# Build the formula "V1 ~ PC1 + PC2 + ..." from the PCA column names.
feats <- names(dataTest)
f <- paste(feats[2:length(feats)],collapse=' + ')
f <- paste('V1 ~',f)
f <- as.formula(f)
set.seed(42)
# NN[[i]][[j]] = j-th ensemble member (of 5) for digit class i (of 10);
# each is a 10x10 hidden-layer network trained on a balanced subset.
NN <- list(list(), list(), list(), list(), list(), list(), list(), list(), list(), list())
for (i in 1:10) {
  set.seed(42)
  trainData <- getBalancedData(split_data_train, i)
  print(paste0("Aplying model ", i, "..."))
  for(j in 1:5) {
    print(paste0("Data size ", j, " = " , nrow(trainData[[j]])))
    NN[[i]][[j]] <- neuralnet(formula=f, data=trainData[[j]], hidden=c(10,10), linear.output=FALSE, stepmax = 1e6)
  }
}
# accuracy measure
# Vote over the ensembles, then report the mean of the per-class scores.
labelTest = dataTest[,"V1"]
predictions_test <- getPredictions(NN, dataTest)
eval_test <- evaluatePredictions(predictions_test, labelTest)
cm_test <- eval_test[[1]]
ACCs_test <- eval_test[[2]]
ACC_final_test <- sum(eval_test[[2]])/10
print(paste0("ACC_test = ", ACC_final_test))
| /trab4/trabalho_4_test.R | no_license | liselene/inf-615 | R | false | false | 7,485 | r | ########################################
# Trabalho 4 - INF-615 - validade test data
# Nome(s): Liselene Borges e Marcos Scarpim
########################################
set.seed(42)
source("data_processing.R")
#reading test data
print("Reading CSV...")
data_test = read.csv("mnist_test.csv", header=FALSE)
print("Applying PCA model...")
# apply PCA or normalization here
#remove "only zero columns"
data_filtered <- data_test[,c(TRUE, colSums(data[,2:ncol(data)]) != 0)]
# apply PCA
data_test_pca <- predict(data.pca1, data_filtered)
set.seed(42)
# get PCA with 95% of variance
dataTest <- data.frame(V1 = data_test[,1], data_test_pca[,1:331])
# return a list with 5 balanced datasets
getBalancedData <- function(split_data, index) {
posData <- list()
negData <- list(data.frame(),data.frame(),data.frame(),data.frame(),data.frame())
for (i in 1:10) {
if (i == index) {
# generate 5 datasets
posData[[1]] <- split_data[[i]][1:as.integer(nrow(split_data[[i]])*0.2),]
posData[[2]] <- split_data[[i]][as.integer(nrow(split_data[[i]])*0.2+1):as.integer(0.4*nrow(split_data[[i]])),]
posData[[3]] <- split_data[[i]][as.integer(nrow(split_data[[i]])*0.4+1):as.integer(0.6*nrow(split_data[[i]])),]
posData[[4]] <- split_data[[i]][as.integer(nrow(split_data[[i]])*0.6+1):as.integer(0.8*nrow(split_data[[i]])),]
posData[[5]] <- split_data[[i]][as.integer(nrow(split_data[[i]])*0.8+1):nrow(split_data[[i]]),]
}
else {
# get only 11% of data
idx <- sample(1:nrow(split_data[[i]]), 0.11*nrow(split_data[[i]]))
sample_data <- split_data[[i]][idx,]
negData[[1]] <- rbind(negData[[1]], sample_data[1:as.integer(nrow(sample_data)*0.2),])
negData[[2]] <- rbind(negData[[2]], sample_data[as.integer(nrow(sample_data)*0.2+1):as.integer(nrow(sample_data)*0.4),])
negData[[3]] <- rbind(negData[[3]], sample_data[as.integer(nrow(sample_data)*0.4+1):as.integer(nrow(sample_data)*0.6),])
negData[[4]] <- rbind(negData[[4]], sample_data[as.integer(nrow(sample_data)*0.6+1):as.integer(nrow(sample_data)*0.8),])
negData[[5]] <- rbind(negData[[5]], sample_data[as.integer(nrow(sample_data)*0.8+1):nrow(sample_data),])
}
}
trainData <- list()
for(i in 1:5) {
posData[[i]]$V1 = 1
negData[[i]]$V1 = 0
trainData[[i]] <- rbind(posData[[i]], negData[[i]])
}
return(trainData)
}
getPredictions <- function(NN, predData) {
predictions <- list(matrix(0L, nrow = nrow(predData), ncol = 1),
matrix(0L, nrow = nrow(predData), ncol = 1),
matrix(0L, nrow = nrow(predData), ncol = 1),
matrix(0L, nrow = nrow(predData), ncol = 1),
matrix(0L, nrow = nrow(predData), ncol = 1),
matrix(0L, nrow = nrow(predData), ncol = 1),
matrix(0L, nrow = nrow(predData), ncol = 1),
matrix(0L, nrow = nrow(predData), ncol = 1),
matrix(0L, nrow = nrow(predData), ncol = 1),
matrix(0L, nrow = nrow(predData), ncol = 1))
for (i in 1:10) {
set.seed(42)
for (j in 1:5) {
nnCompute = compute(NN[[i]][[j]], predData[,2:ncol(predData)])
prediction = nnCompute$net.result
prediction[prediction < 0.5] = -1
prediction[prediction >= 0.5] = 1
predictions[[i]] <- predictions[[i]] + prediction
}
}
return(predictions)
}
# Fuse the 10 one-vs-rest vote tallies into one predicted class per
# observation and cross-tabulate against the true labels.
#
# predictions: list of 10 (n x 1) vote-sum matrices; elements 1..9 score
#   digits 1..9 and element 10 scores digit 0 (columns alg1..alg9, alg0).
# label: vector of true labels (digits 0..9), length n.
# Returns list(confusion_matrix, per_class_score).
#
# The original spelled out, for each class j, "votes_j minus the votes of
# every other class" as ten hand-written 10-term expressions -- a serious
# copy-paste hazard.  Algebraically that is 2 * votes_j - sum(all votes),
# computed here in one loop with identical results.
evaluatePredictions <- function(predictions, label) {
  total_votes <- Reduce(`+`, predictions)
  combinedPred <- as.data.frame(
    lapply(1:10, function(j) as.vector(2 * predictions[[j]] - total_votes))
  )
  # Keep the historical column order/names (alg1..alg9, then alg0) so that
  # which.max tie-breaking matches the original implementation.
  colnames(combinedPred) <- paste0("alg", c(1:9, 0))
  finalPred = colnames(combinedPred)[apply(combinedPred, 1, which.max)]
  cm = as.matrix(table(Actual = label, Predicted = finalPred))
  # Per-class score: diagonal count over its predicted-column total, i.e.
  # precision rather than accuracy, despite the name.  As in the original,
  # this requires all 10 classes to appear in finalPred, otherwise cm has
  # fewer than 10 columns and the indexing fails.
  ACCs <- vapply(1:10, function(k) cm[k, k] / sum(cm[1:10, k]), numeric(1))
  return(list(cm, ACCs))
}
# train the model
# define the formula
feats <- names(dataTest)
f <- paste(feats[2:length(feats)],collapse=' + ')
f <- paste('V1 ~',f)
f <- as.formula(f)
set.seed(42)
NN <- list(list(), list(), list(), list(), list(), list(), list(), list(), list(), list())
for (i in 1:10) {
set.seed(42)
trainData <- getBalancedData(split_data_train, i)
print(paste0("Aplying model ", i, "..."))
for(j in 1:5) {
print(paste0("Data size ", j, " = " , nrow(trainData[[j]])))
NN[[i]][[j]] <- neuralnet(formula=f, data=trainData[[j]], hidden=c(10,10), linear.output=FALSE, stepmax = 1e6)
}
}
# accuracy measure
labelTest = dataTest[,"V1"]
predictions_test <- getPredictions(NN, dataTest)
eval_test <- evaluatePredictions(predictions_test, labelTest)
cm_test <- eval_test[[1]]
ACCs_test <- eval_test[[2]]
ACC_final_test <- sum(eval_test[[2]])/10
print(paste0("ACC_test = ", ACC_final_test))
|
#!/usr/bin/Rscript
# DIMS pipeline step: sum positive/negative adduct intensities, compute
# Z-scores against controls, and export results to an Excel workbook plus
# internal-standard QC plots.  Invoked as:
#   Rscript 13-excelExport.R <outdir> <project> <matrix> <hmdb.RData> <z_score 0|1>
.libPaths(new = "/hpc/local/CentOS7/dbg_mz/R_libs/3.6.2")
# load required packages
library("ggplot2")
library("reshape2")
library("openxlsx")
library("loder")
# define parameters (positional command-line arguments; example values in
# the trailing comments)
cmd_args <- commandArgs(trailingOnly = TRUE)
for (arg in cmd_args) cat(" ", arg, "\n", sep = "")
outdir <- cmd_args[1] #"/Users/nunen/Documents/Metab/test_set"
project <- cmd_args[2] #"test"
# NOTE(review): `matrix` shadows base::matrix for the rest of the script --
# harmless here but a rename would be safer.
matrix <- cmd_args[3] #"DBS"
hmdb <- cmd_args[4] #"/Users/nunen/Documents/Metab/DIMS/db/HMDB_with_info_relevance_IS_C5OH.RData"
z_score <- as.numeric(cmd_args[5])  # 1 enables the Z-score/boxplot section
plot <- TRUE
init <- paste0(outdir, "/logs/init.RData")
export <- TRUE
control_label <- "C"   # prefix of control sample columns
case_label <- "P"      # prefix of patient sample columns
imagesize_multiplier <- 2
rundate <- Sys.Date()
plotdir <- paste0(outdir, "/plots/adducts")
dir.create(paste0(outdir, "/plots"), showWarnings = F)
dir.create(plotdir, showWarnings = F)
options(digits=16)
# NOTE(review): setwd() changes global state for everything downstream;
# paths below are mostly absolute via outdir, so this may be removable.
setwd(outdir)
# sum positive and negative adductsums
# Load pos and neg adduct sums; each .RData defines `outlist.tot`
# (rows = HMDB codes, columns = samples plus a final HMDB_name column).
load(paste0(outdir,"/adductSums_negative.RData"))
outlist.neg.adducts.HMDB <- outlist.tot
load(paste0(outdir,"/adductSums_positive.RData"))
outlist.pos.adducts.HMDB <- outlist.tot
rm(outlist.tot)
# Only continue with patients (columns) that are in both pos and neg, so patients that are in both
tmp <- intersect(colnames(outlist.neg.adducts.HMDB), colnames(outlist.pos.adducts.HMDB))
outlist.neg.adducts.HMDB <- outlist.neg.adducts.HMDB[,tmp]
outlist.pos.adducts.HMDB <- outlist.pos.adducts.HMDB[,tmp]
# Find indexes of neg hmdb code that are also found in pos and vice versa
index.neg <- which(rownames(outlist.neg.adducts.HMDB) %in% rownames(outlist.pos.adducts.HMDB))
index.pos <- which(rownames(outlist.pos.adducts.HMDB) %in% rownames(outlist.neg.adducts.HMDB))
# For HMDB codes present in BOTH modes: take the intensity columns (all but
# the last, which holds hmdb_name) and keep the name separately.
tmp.pos <- outlist.pos.adducts.HMDB[rownames(outlist.pos.adducts.HMDB)[index.pos], 1:(dim(outlist.pos.adducts.HMDB)[2]-1)]
tmp.hmdb_name.pos <- outlist.pos.adducts.HMDB[rownames(outlist.pos.adducts.HMDB)[index.pos], dim(outlist.pos.adducts.HMDB)[2]]
tmp.pos.left <- outlist.pos.adducts.HMDB[-index.pos,]
# Index neg by the SAME pos row names so pos and neg rows line up 1:1.
tmp.neg <- outlist.neg.adducts.HMDB[rownames(outlist.pos.adducts.HMDB)[index.pos], 1:(dim(outlist.neg.adducts.HMDB)[2]-1)]
tmp.neg.left <- outlist.neg.adducts.HMDB[-index.neg,]
# Combine positive and negative numbers and paste back HMDB column;
# rows found in only one mode are appended unchanged.
tmp <- apply(tmp.pos, 2,as.numeric) + apply(tmp.neg, 2,as.numeric)
rownames(tmp) <- rownames(tmp.pos)
tmp <- cbind(tmp, "HMDB_name"=tmp.hmdb_name.pos)
outlist <- rbind(tmp, tmp.pos.left, tmp.neg.left)
# Filter: keep only HMDB codes with relevance annotation, then drop
# exogenous compounds and drugs.
load(hmdb) # rlvnc
peaksInList <- which(rownames(outlist) %in% rownames(rlvnc))
outlist <- cbind(outlist[peaksInList,],as.data.frame(rlvnc[rownames(outlist)[peaksInList],]))
# NOTE(review): if any of these grep() calls matches NOTHING, -integer(0)
# selects ZERO rows and outlist is silently emptied.  Confirm the relevance
# column always contains each pattern, or guard with length(hits) > 0.
outlist <- outlist[-grep("Exogenous", outlist[,"relevance"], fixed = TRUE),]
outlist <- outlist[-grep("exogenous", outlist[,"relevance"], fixed = TRUE),]
outlist <- outlist[-grep("Drug", outlist[,"relevance"], fixed = TRUE),]
# Add HMDB_code column with all the HMDB ID and sort on it
outlist <- cbind(outlist, "HMDB_code" = rownames(outlist))
outlist <- outlist[order(outlist[,"HMDB_code"]),]
# Create excel
filelist <- "AllPeakGroups"
wb <- createWorkbook("SinglePatient")
addWorksheet(wb, filelist)
#outlist.backup <- outlist
#outlist <- outlist.backup
# Add Z-scores and create plots
if (z_score == 1) {
########## Statistics: Z-score
outlist <- cbind(plots = NA, outlist)
#outlist <- as.data.frame(outlist)
startcol <- dim(outlist)[2] + 3
# Get columns with control intensities
control_col_ids <- grep(control_label, colnames(outlist), fixed = TRUE)
control_columns <- outlist[, control_col_ids]
# Get columns with patient intensities
patient_col_ids <- grep(case_label, colnames(outlist), fixed = TRUE)
patient_columns <- outlist[, patient_col_ids]
intensity_col_ids <- c(control_col_ids, patient_col_ids)
# set intensities of 0 to NA?
outlist[,intensity_col_ids][outlist[,intensity_col_ids] == 0] <- NA
# calculate mean and sd for Control group
outlist$avg.ctrls <- apply(control_columns, 1, function(x) mean(as.numeric(x),na.rm = TRUE))
outlist$sd.ctrls <- apply(control_columns, 1, function(x) sd(as.numeric(x),na.rm = TRUE))
# Make and add columns with zscores
cnames.z <- NULL
for (i in intensity_col_ids) {
cname <- colnames(outlist)[i]
cnames.z <- c(cnames.z, paste(cname, "Zscore", sep="_"))
zscores.1col <- (as.numeric(as.vector(unlist(outlist[ , i]))) - outlist$avg.ctrls) / outlist$sd.ctrls
outlist <- cbind(outlist, zscores.1col)
}
colnames(outlist)[startcol:ncol(outlist)] <- cnames.z
patient_ids <- unique(as.vector(unlist(lapply(strsplit(colnames(patient_columns), ".", fixed = TRUE), function(x) x[1]))))
patient_ids <- patient_ids[order(nchar(patient_ids), patient_ids)] # sorts
temp_png <- NULL
# Iterate through every row, make boxplot, insert into excel, and make Zscore for every patient
for (p in 1:nrow(outlist)) {
########################## box plot ###########################
hmdb_name <- rownames(outlist[p,])
intensities <- list(as.numeric(as.vector(unlist(control_columns[p,]))))
labels <- c("C", patient_ids)
for (i in 1:length(patient_ids)) {
id <- patient_ids[i]
# get all intensities that start with ex. P18. (so P18.1, P18.2, but not x_P18.1 and not P180.1)
p.int <- as.numeric(as.vector(unlist(outlist[p, names(patient_columns[1,])[startsWith(names(patient_columns[1,]), paste0(id, "."))]])))
intensities[[i+1]] <- p.int
}
intensities <- setNames(intensities, labels)
plot_width <- length(labels) * 16 + 90
plot.new()
if (export) {
png(filename = paste0(plotdir, "/", hmdb_name, "_box.png"),
width = plot_width,
height = 280)
}
par(oma=c(2,0,0,0))
boxplot(intensities,
col=c("green", rep("red", length(intensities)-1)),
names.arg = labels,
las=2,
main = hmdb_name)
dev.off()
file_png <- paste0(plotdir, "/", hmdb_name, "_box.png")
if (is.null(temp_png)) {
temp_png <- readPng(file_png)
img_dim <- dim(temp_png)[c(1,2)]
cell_dim <- img_dim * imagesize_multiplier
setColWidths(wb, filelist, cols = 1, widths = cell_dim[2]/20)
}
insertImage(wb,
filelist,
file_png,
startRow = p + 1,
startCol = 1,
height = cell_dim[1],
width = cell_dim[2],
units = "px")
if (p %% 100 == 0) {
cat("at row: ", p, "\n")
}
}
setRowHeights(wb, filelist, rows = c(1:nrow(outlist) + 1), heights = cell_dim[1]/4)
setColWidths(wb, filelist, cols = c(2:ncol(outlist)), widths = 20)
} else {
setRowHeights(wb, filelist, rows = c(1:nrow(outlist)), heights = 18)
setColWidths(wb, filelist, cols = c(1:ncol(outlist)), widths = 20)
}
writeData(wb, sheet = 1, outlist, startCol = 1)
xlsx_name <- paste0(outdir, "/", project, ".xlsx")
saveWorkbook(wb,
xlsx_name,
overwrite = TRUE)
cat(xlsx_name)
rm(wb)
write.table(outlist, file = paste(outdir, "allpgrps_stats.txt", sep = "/"))
# INTERNE STANDAARDEN
load(init)
IS <- outlist[grep("Internal standard", outlist[,"relevance"], fixed = TRUE),]
IS_codes <- rownames(IS)
cat(IS_codes,"\n")
# Retrieve IS summed adducts
IS_summed <- IS[c(names(repl.pattern), "HMDB_code")]
IS_summed$HMDB.name <- IS$name
IS_summed <- melt(IS_summed, id.vars=c('HMDB_code','HMDB.name'))
colnames(IS_summed) <- c('HMDB.code','HMDB.name','Sample','Intensity')
IS_summed$Intensity <- as.numeric(IS_summed$Intensity)
IS_summed$Matrix <- matrix
IS_summed$Rundate <- rundate
IS_summed$Project <- project
IS_summed$Intensity <- as.numeric(as.character(IS_summed$Intensity))
# Retrieve IS positive mode
IS_pos <- as.data.frame(subset(outlist.pos.adducts.HMDB,rownames(outlist.pos.adducts.HMDB) %in% IS_codes))
IS_pos$HMDB_name <- IS[match(row.names(IS_pos),IS$HMDB_code,nomatch=NA),'name']
IS_pos$HMDB.code <- row.names(IS_pos)
IS_pos <- melt(IS_pos, id.vars=c('HMDB.code','HMDB_name'))
colnames(IS_pos) <- c('HMDB.code','HMDB.name','Sample','Intensity')
IS_pos$Matrix <- matrix
IS_pos$Rundate <- rundate
IS_pos$Project <- project
IS_pos$Intensity <- as.numeric(as.character(IS_pos$Intensity))
# Retrieve IS negative mode
IS_neg <- as.data.frame(subset(outlist.neg.adducts.HMDB,rownames(outlist.neg.adducts.HMDB) %in% IS_codes))
IS_neg$HMDB_name <- IS[match(row.names(IS_neg),IS$HMDB_code,nomatch=NA),'name']
IS_neg$HMDB.code <- row.names(IS_neg)
IS_neg <- melt(IS_neg, id.vars=c('HMDB.code','HMDB_name'))
colnames(IS_neg) <- c('HMDB.code','HMDB.name','Sample','Intensity')
IS_neg$Matrix <- matrix
IS_neg$Rundate <- rundate
IS_neg$Project <- project
IS_neg$Intensity <- as.numeric(as.character(IS_neg$Intensity))
# Save results
save(IS_pos,IS_neg,IS_summed, file = paste(outdir, 'IS_results.RData', sep = "/"))
# Barplot voor alle IS
IS_neg_plot <- ggplot(IS_neg, aes(Sample,Intensity))+
ggtitle("Interne Standaard (Neg)") +
geom_bar(aes(fill=HMDB.name),stat='identity')+
labs(x='',y='Intensity')+
facet_wrap(~HMDB.name, scales='free_y')+
theme(axis.text.x=element_text(angle = 90, hjust = 1, vjust = 0.5, size=8),
legend.position='none')+
scale_y_continuous(breaks = scales::pretty_breaks(n = 10))
IS_pos_plot <- ggplot(IS_pos, aes(Sample,Intensity))+
ggtitle("Interne Standaard (Pos)") +
geom_bar(aes(fill=HMDB.name),stat='identity')+
labs(x='',y='Intensity')+
facet_wrap(~HMDB.name, scales='free_y')+
theme(axis.text.x=element_text(angle = 90, hjust = 1, vjust = 0.5, size=8),
legend.position='none')+
scale_y_continuous(breaks = scales::pretty_breaks(n = 10))
IS_sum_plot <- ggplot(IS_summed, aes(Sample,Intensity))+
ggtitle("Interne Standaard (Summed)") +
geom_bar(aes(fill=HMDB.name),stat='identity')+
labs(x='',y='Intensity')+
facet_wrap(~HMDB.name, scales='free_y')+
theme(axis.text.x=element_text(angle = 90, hjust = 1, vjust = 0.5, size=8),
legend.position='none')+
scale_y_continuous(breaks = scales::pretty_breaks(n = 10))
len <- length(repl.pattern)
w <- 9 + 0.35 * len
ggsave(paste0(outdir, "/plots/IS_bar_neg.png"), plot=IS_neg_plot, height=w/2.5, width=w, units="in")
ggsave(paste0(outdir, "/plots/IS_bar_pos.png"), plot=IS_pos_plot, height=w/2.5, width=w, units="in")
ggsave(paste0(outdir, "/plots/IS_bar_sum.png"), plot=IS_sum_plot, height=w/2.5, width=w, units="in")
# Lineplot voor alle IS
IS_neg_plot <- ggplot(IS_neg, aes(Sample,Intensity))+
ggtitle("Interne Standaard (Neg)") +
geom_point(aes(col=HMDB.name))+
geom_line(aes(col=HMDB.name, group=HMDB.name))+
labs(x='',y='Intensity')+
theme(axis.text.x=element_text(angle = 90, hjust = 1, vjust = 0.5, size=8))
IS_pos_plot <- ggplot(IS_pos, aes(Sample,Intensity)) +
ggtitle("Interne Standaard (Pos)") +
geom_point(aes(col = HMDB.name)) +
geom_line(aes(col = HMDB.name, group = HMDB.name)) +
labs(x = '', y = 'Intensity') +
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5, size = 8))
IS_sum_plot <- ggplot(IS_summed, aes(Sample, Intensity)) +
ggtitle("Interne Standaard (Sum)") +
geom_point(aes(col = HMDB.name)) +
geom_line(aes(col = HMDB.name, group = HMDB.name)) +
labs(x = '', y = 'Intensity') +
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5, size = 8))
w <- 8 + 0.2 * len
ggsave(paste0(outdir,"/plots/IS_line_neg.png"), plot = IS_neg_plot, height = w/2.5, width = w, units = "in")
ggsave(paste0(outdir,"/plots/IS_line_pos.png"), plot = IS_pos_plot, height = w/2.5, width = w, units = "in")
ggsave(paste0(outdir,"/plots/IS_line_sum.png"), plot = IS_sum_plot, height = w/2.5, width = w, units = "in")
# Barplot voor Leucine voor alle data
IS_now<-'2H3-Leucine (IS)'
p1<-ggplot(subset(IS_neg, HMDB.name %in% IS_now), aes(Sample,Intensity)) +
ggtitle(paste0(IS_now, " (Neg)")) +
geom_bar(aes(fill=HMDB.name),stat='identity')+
labs(title='Negative mode',x='',y='Intensity')+
theme(axis.text.x=element_text(angle = 90, hjust = 1, vjust = 0.5, size=10),
legend.position='none')
p2<-ggplot(subset(IS_pos, HMDB.name %in% IS_now), aes(Sample,Intensity)) +
ggtitle(paste0(IS_now, " (Pos)")) +
geom_bar(aes(fill=HMDB.name),stat='identity')+
labs(title='Positive mode',x='',y='Intensity')+
theme(axis.text.x=element_text(angle = 90, hjust = 1, vjust = 0.5, size=10),
legend.position='none')
p3<-ggplot(subset(IS_summed, HMDB.name %in% IS_now), aes(Sample,Intensity)) +
ggtitle(paste0(IS_now, " (Sum)")) +
geom_bar(aes(fill=HMDB.name),stat='identity')+
labs(title='Adduct sums',x='',y='Intensity')+
theme(axis.text.x=element_text(angle = 90, hjust = 1, vjust = 0.5, size=10),
legend.position='none')
w <- 3 + 0.2 * len
ggsave(paste0(outdir, "/plots/Leucine_neg.png"), plot = p1, height = w/2.5, width = w, units = "in")
ggsave(paste0(outdir, "/plots/Leucine_pos.png"), plot = p2, height = w/2.5, width = w, units = "in")
ggsave(paste0(outdir, "/plots/Leucine_sum.png"), plot = p3, height = w/2.5, width = w, units = "in")
# Positive-control section: extract Z-scores of known disease markers for
# the designated control patients (PA -> P1002.1, PKU -> P1003.1,
# LPI -> P1005.1) and save them for trend monitoring.
if (z_score == 1) {
  ### POSITIVE CONTROLS
  #HMDB codes
  PA_codes <- c('HMDB00824', 'HMDB00783', 'HMDB00123')
  PKU_codes <- c('HMDB00159')
  LPI_codes <- c('HMDB00904', 'HMDB00641', 'HMDB00182')
  PA_data <- outlist[PA_codes, c('HMDB_code','name','P1002.1_Zscore')]
  PA_data <- melt(PA_data, id.vars = c('HMDB_code','name'))
  colnames(PA_data) <- c('HMDB.code','HMDB.name','Sample','Zscore')
  PKU_data <- outlist[PKU_codes, c('HMDB_code','name','P1003.1_Zscore')]
  PKU_data <- melt(PKU_data, id.vars = c('HMDB_code','name'))
  colnames(PKU_data) <- c('HMDB.code','HMDB.name','Sample','Zscore')
  LPI_data <- outlist[LPI_codes, c('HMDB_code','name','P1005.1_Zscore')]
  LPI_data <- melt(LPI_data, id.vars = c('HMDB_code','name'))
  colnames(LPI_data) <- c('HMDB.code','HMDB.name','Sample','Zscore')
  # NOTE(review): the next line's combined result is immediately overwritten
  # by the one after it, so PKU_data and LPI_data never reach Pos_Contr.
  # This looks like leftover debug code -- confirm whether the combined
  # rbind(PA_data, PKU_data, LPI_data) was intended, and delete one line.
  Pos_Contr <- rbind(PA_data, PKU_data, LPI_data)
  Pos_Contr <- rbind(PA_data)
  Pos_Contr$Zscore <- as.numeric(Pos_Contr$Zscore)
  Pos_Contr$Matrix <- matrix
  Pos_Contr$Rundate <- rundate
  Pos_Contr$Project <- project
  #Save results
  save(Pos_Contr,file = paste(outdir, 'Pos_Contr.RData', sep = "/"))
}
cat("Ready excelExport.R")
| /pipeline/scripts/13-excelExport.R | permissive | metabdel/DIMS | R | false | false | 14,411 | r | #!/usr/bin/Rscript
.libPaths(new = "/hpc/local/CentOS7/dbg_mz/R_libs/3.6.2")
# load required packages
library("ggplot2")
library("reshape2")
library("openxlsx")
library("loder")
# define parameters
cmd_args <- commandArgs(trailingOnly = TRUE)
for (arg in cmd_args) cat(" ", arg, "\n", sep = "")
outdir <- cmd_args[1] #"/Users/nunen/Documents/Metab/test_set"
project <- cmd_args[2] #"test"
matrix <- cmd_args[3] #"DBS"
hmdb <- cmd_args[4] #"/Users/nunen/Documents/Metab/DIMS/db/HMDB_with_info_relevance_IS_C5OH.RData"
z_score <- as.numeric(cmd_args[5])
plot <- TRUE
init <- paste0(outdir, "/logs/init.RData")
export <- TRUE
control_label <- "C"
case_label <- "P"
imagesize_multiplier <- 2
rundate <- Sys.Date()
plotdir <- paste0(outdir, "/plots/adducts")
dir.create(paste0(outdir, "/plots"), showWarnings = F)
dir.create(plotdir, showWarnings = F)
options(digits=16)
setwd(outdir)
# sum positive and negative adductsums
# Load pos and neg adduct sums; each RData file provides `outlist.tot`.
load(paste0(outdir,"/adductSums_negative.RData"))
outlist.neg.adducts.HMDB <- outlist.tot
load(paste0(outdir,"/adductSums_positive.RData"))
outlist.pos.adducts.HMDB <- outlist.tot
rm(outlist.tot)
# Only continue with patients (columns) that are in both pos and neg, so patients that are in both
tmp <- intersect(colnames(outlist.neg.adducts.HMDB), colnames(outlist.pos.adducts.HMDB))
outlist.neg.adducts.HMDB <- outlist.neg.adducts.HMDB[,tmp]
outlist.pos.adducts.HMDB <- outlist.pos.adducts.HMDB[,tmp]
# Find indexes of neg hmdb code that are also found in pos and vice versa
index.neg <- which(rownames(outlist.neg.adducts.HMDB) %in% rownames(outlist.pos.adducts.HMDB))
index.pos <- which(rownames(outlist.pos.adducts.HMDB) %in% rownames(outlist.neg.adducts.HMDB))
# Get number of columns
# Only continue with HMDB codes (rows) that were found in both pos and neg mode and remove last column (hmdb_name)
tmp.pos <- outlist.pos.adducts.HMDB[rownames(outlist.pos.adducts.HMDB)[index.pos], 1:(dim(outlist.pos.adducts.HMDB)[2]-1)]
tmp.hmdb_name.pos <- outlist.pos.adducts.HMDB[rownames(outlist.pos.adducts.HMDB)[index.pos], dim(outlist.pos.adducts.HMDB)[2]]
tmp.pos.left <- outlist.pos.adducts.HMDB[-index.pos,]
# Index the neg table with the SAME (pos) row names so both matrices are
# aligned row-by-row before they are added together.
tmp.neg <- outlist.neg.adducts.HMDB[rownames(outlist.pos.adducts.HMDB)[index.pos], 1:(dim(outlist.neg.adducts.HMDB)[2]-1)]
tmp.neg.left <- outlist.neg.adducts.HMDB[-index.neg,]
# Combine positive and negative numbers and paste back HMDB column
tmp <- apply(tmp.pos, 2,as.numeric) + apply(tmp.neg, 2,as.numeric)
rownames(tmp) <- rownames(tmp.pos)
tmp <- cbind(tmp, "HMDB_name"=tmp.hmdb_name.pos)
# Final table: rows shared by both modes (summed) plus rows unique to either mode.
outlist <- rbind(tmp, tmp.pos.left, tmp.neg.left)
# Filter annotations: keep only peaks present in the relevance table, then
# drop exogenous compounds and drugs.
load(hmdb) # provides `rlvnc` (HMDB relevance table)
peaksInList <- which(rownames(outlist) %in% rownames(rlvnc))
outlist <- cbind(outlist[peaksInList, ],
                 as.data.frame(rlvnc[rownames(outlist)[peaksInList], ]))
# Drop rows whose relevance contains `pattern` (fixed-string match).
# BUG FIX: guard against the `x[-integer(0), ]` footgun -- when grep() finds
# no match, negative indexing with an empty vector selects ZERO rows, which
# used to silently empty the whole table instead of dropping nothing.
drop_relevance <- function(data, pattern) {
  hits <- grep(pattern, data[, "relevance"], fixed = TRUE)
  if (length(hits) > 0) data <- data[-hits, ]
  data
}
outlist <- drop_relevance(outlist, "Exogenous")
outlist <- drop_relevance(outlist, "exogenous")
outlist <- drop_relevance(outlist, "Drug")
# Add HMDB_code column with all the HMDB IDs and sort on it
outlist <- cbind(outlist, "HMDB_code" = rownames(outlist))
outlist <- outlist[order(outlist[, "HMDB_code"]), ]
# Create the Excel workbook with a single worksheet that will hold all peak groups.
filelist <- "AllPeakGroups"
wb <- createWorkbook("SinglePatient")
addWorksheet(wb, filelist)
#outlist.backup <- outlist
#outlist <- outlist.backup
# Add Z-scores and create plots
if (z_score == 1) {
########## Statistics: Z-score
# Reserve a first column that will hold the embedded boxplot images.
outlist <- cbind(plots = NA, outlist)
#outlist <- as.data.frame(outlist)
# Index of the first Z-score column: avg.ctrls and sd.ctrls are appended
# before the Z-scores, so the Z-scores start at current ncol + 3.
startcol <- dim(outlist)[2] + 3
# Get columns with control intensities
control_col_ids <- grep(control_label, colnames(outlist), fixed = TRUE)
control_columns <- outlist[, control_col_ids]
# Get columns with patient intensities
patient_col_ids <- grep(case_label, colnames(outlist), fixed = TRUE)
patient_columns <- outlist[, patient_col_ids]
intensity_col_ids <- c(control_col_ids, patient_col_ids)
# set intensities of 0 to NA?
outlist[,intensity_col_ids][outlist[,intensity_col_ids] == 0] <- NA
# calculate mean and sd for Control group
outlist$avg.ctrls <- apply(control_columns, 1, function(x) mean(as.numeric(x),na.rm = TRUE))
outlist$sd.ctrls <- apply(control_columns, 1, function(x) sd(as.numeric(x),na.rm = TRUE))
# Make and add columns with zscores: (intensity - control mean) / control sd.
# NOTE(review): growing `outlist` with cbind() inside the loop copies the
# whole frame each iteration; consider collecting columns and binding once.
cnames.z <- NULL
for (i in intensity_col_ids) {
cname <- colnames(outlist)[i]
cnames.z <- c(cnames.z, paste(cname, "Zscore", sep="_"))
zscores.1col <- (as.numeric(as.vector(unlist(outlist[ , i]))) - outlist$avg.ctrls) / outlist$sd.ctrls
outlist <- cbind(outlist, zscores.1col)
}
colnames(outlist)[startcol:ncol(outlist)] <- cnames.z
# Patient IDs are the column-name prefixes before the first "." (e.g. "P18").
patient_ids <- unique(as.vector(unlist(lapply(strsplit(colnames(patient_columns), ".", fixed = TRUE), function(x) x[1]))))
patient_ids <- patient_ids[order(nchar(patient_ids), patient_ids)] # sorts
temp_png <- NULL
# Iterate through every row, make boxplot, insert into excel, and make Zscore for every patient
for (p in 1:nrow(outlist)) {
########################## box plot ###########################
hmdb_name <- rownames(outlist[p,])
# First list element holds all control intensities for this metabolite.
intensities <- list(as.numeric(as.vector(unlist(control_columns[p,]))))
labels <- c("C", patient_ids)
for (i in 1:length(patient_ids)) {
id <- patient_ids[i]
# get all intensities that start with ex. P18. (so P18.1, P18.2, but not x_P18.1 and not P180.1)
p.int <- as.numeric(as.vector(unlist(outlist[p, names(patient_columns[1,])[startsWith(names(patient_columns[1,]), paste0(id, "."))]])))
intensities[[i+1]] <- p.int
}
intensities <- setNames(intensities, labels)
plot_width <- length(labels) * 16 + 90
plot.new()
if (export) {
png(filename = paste0(plotdir, "/", hmdb_name, "_box.png"),
width = plot_width,
height = 280)
}
par(oma=c(2,0,0,0))
# NOTE(review): `names.arg` is not a boxplot() argument (boxplot() uses
# `names=`); box labels here come from the names set on `intensities` -- confirm.
boxplot(intensities,
col=c("green", rep("red", length(intensities)-1)),
names.arg = labels,
las=2,
main = hmdb_name)
dev.off()
file_png <- paste0(plotdir, "/", hmdb_name, "_box.png")
# Measure the first PNG once; all Excel cells reuse the same image dimensions.
if (is.null(temp_png)) {
temp_png <- readPng(file_png)
img_dim <- dim(temp_png)[c(1,2)]
cell_dim <- img_dim * imagesize_multiplier
setColWidths(wb, filelist, cols = 1, widths = cell_dim[2]/20)
}
insertImage(wb,
filelist,
file_png,
startRow = p + 1,
startCol = 1,
height = cell_dim[1],
width = cell_dim[2],
units = "px")
# Progress indicator for long runs.
if (p %% 100 == 0) {
cat("at row: ", p, "\n")
}
}
setRowHeights(wb, filelist, rows = c(1:nrow(outlist) + 1), heights = cell_dim[1]/4)
setColWidths(wb, filelist, cols = c(2:ncol(outlist)), widths = 20)
} else {
# No Z-scores requested: plain layout without embedded boxplot images.
setRowHeights(wb, filelist, rows = c(1:nrow(outlist)), heights = 18)
setColWidths(wb, filelist, cols = c(1:ncol(outlist)), widths = 20)
}
# Persist results: the annotated peak-group table goes into the workbook,
# which is saved as <project>.xlsx; the xlsx path is echoed for the caller,
# and a plain-text copy of the statistics table is written alongside.
xlsx_name <- paste0(outdir, "/", project, ".xlsx")
writeData(wb, sheet = 1, outlist, startCol = 1)
saveWorkbook(wb, xlsx_name, overwrite = TRUE)
cat(xlsx_name)
rm(wb)
write.table(outlist, file = paste(outdir, "allpgrps_stats.txt", sep = "/"))
# INTERNAL STANDARDS
# load(init) provides `repl.pattern` (sample replication pattern), used below.
load(init)
# Internal standards are the rows flagged as such in the relevance column.
IS <- outlist[grep("Internal standard", outlist[,"relevance"], fixed = TRUE),]
IS_codes <- rownames(IS)
cat(IS_codes,"\n")
# Retrieve IS summed adducts
IS_summed <- IS[c(names(repl.pattern), "HMDB_code")]
IS_summed$HMDB.name <- IS$name
IS_summed <- melt(IS_summed, id.vars=c('HMDB_code','HMDB.name'))
colnames(IS_summed) <- c('HMDB.code','HMDB.name','Sample','Intensity')
# NOTE(review): Intensity is converted to numeric here and again below -- the
# first conversion is redundant but harmless.
IS_summed$Intensity <- as.numeric(IS_summed$Intensity)
IS_summed$Matrix <- matrix
IS_summed$Rundate <- rundate
IS_summed$Project <- project
IS_summed$Intensity <- as.numeric(as.character(IS_summed$Intensity))
# Retrieve IS positive mode
IS_pos <- as.data.frame(subset(outlist.pos.adducts.HMDB,rownames(outlist.pos.adducts.HMDB) %in% IS_codes))
IS_pos$HMDB_name <- IS[match(row.names(IS_pos),IS$HMDB_code,nomatch=NA),'name']
IS_pos$HMDB.code <- row.names(IS_pos)
IS_pos <- melt(IS_pos, id.vars=c('HMDB.code','HMDB_name'))
colnames(IS_pos) <- c('HMDB.code','HMDB.name','Sample','Intensity')
IS_pos$Matrix <- matrix
IS_pos$Rundate <- rundate
IS_pos$Project <- project
IS_pos$Intensity <- as.numeric(as.character(IS_pos$Intensity))
# Retrieve IS negative mode
IS_neg <- as.data.frame(subset(outlist.neg.adducts.HMDB,rownames(outlist.neg.adducts.HMDB) %in% IS_codes))
IS_neg$HMDB_name <- IS[match(row.names(IS_neg),IS$HMDB_code,nomatch=NA),'name']
IS_neg$HMDB.code <- row.names(IS_neg)
IS_neg <- melt(IS_neg, id.vars=c('HMDB.code','HMDB_name'))
colnames(IS_neg) <- c('HMDB.code','HMDB.name','Sample','Intensity')
IS_neg$Matrix <- matrix
IS_neg$Rundate <- rundate
IS_neg$Project <- project
IS_neg$Intensity <- as.numeric(as.character(IS_neg$Intensity))
# Save results
save(IS_pos,IS_neg,IS_summed, file = paste(outdir, 'IS_results.RData', sep = "/"))
# Bar plot of internal-standard intensities, faceted per compound with a free
# y-scale. One plot per mode (negative, positive, summed adducts).
make_is_barplot <- function(is_data, plot_title) {
  ggplot(is_data, aes(Sample, Intensity)) +
    ggtitle(plot_title) +
    geom_bar(aes(fill = HMDB.name), stat = "identity") +
    labs(x = "", y = "Intensity") +
    facet_wrap(~HMDB.name, scales = "free_y") +
    theme(
      axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5, size = 8),
      legend.position = "none"
    ) +
    scale_y_continuous(breaks = scales::pretty_breaks(n = 10))
}
IS_neg_plot <- make_is_barplot(IS_neg, "Interne Standaard (Neg)")
IS_pos_plot <- make_is_barplot(IS_pos, "Interne Standaard (Pos)")
IS_sum_plot <- make_is_barplot(IS_summed, "Interne Standaard (Summed)")
# Plot width scales with the number of samples in the replication pattern.
len <- length(repl.pattern)
w <- 9 + 0.35 * len
ggsave(paste0(outdir, "/plots/IS_bar_neg.png"), plot = IS_neg_plot, height = w / 2.5, width = w, units = "in")
ggsave(paste0(outdir, "/plots/IS_bar_pos.png"), plot = IS_pos_plot, height = w / 2.5, width = w, units = "in")
ggsave(paste0(outdir, "/plots/IS_bar_sum.png"), plot = IS_sum_plot, height = w / 2.5, width = w, units = "in")
# Line plot of internal-standard intensities across samples, one coloured
# point/line series per compound. One plot per mode.
make_is_lineplot <- function(is_data, plot_title) {
  ggplot(is_data, aes(Sample, Intensity)) +
    ggtitle(plot_title) +
    geom_point(aes(col = HMDB.name)) +
    geom_line(aes(col = HMDB.name, group = HMDB.name)) +
    labs(x = "", y = "Intensity") +
    theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5, size = 8))
}
IS_neg_plot <- make_is_lineplot(IS_neg, "Interne Standaard (Neg)")
IS_pos_plot <- make_is_lineplot(IS_pos, "Interne Standaard (Pos)")
IS_sum_plot <- make_is_lineplot(IS_summed, "Interne Standaard (Sum)")
w <- 8 + 0.2 * len
ggsave(paste0(outdir, "/plots/IS_line_neg.png"), plot = IS_neg_plot, height = w / 2.5, width = w, units = "in")
ggsave(paste0(outdir, "/plots/IS_line_pos.png"), plot = IS_pos_plot, height = w / 2.5, width = w, units = "in")
ggsave(paste0(outdir, "/plots/IS_line_sum.png"), plot = IS_sum_plot, height = w / 2.5, width = w, units = "in")
# Bar plot of the labelled leucine internal standard across all samples, one
# plot per mode. labs(title = ...) comes after ggtitle() in the chain and thus
# supplies the displayed title, matching the original layer order.
IS_now <- '2H3-Leucine (IS)'
make_leucine_barplot <- function(is_data, gg_title, mode_title) {
  ggplot(subset(is_data, HMDB.name %in% IS_now), aes(Sample, Intensity)) +
    ggtitle(gg_title) +
    geom_bar(aes(fill = HMDB.name), stat = "identity") +
    labs(title = mode_title, x = "", y = "Intensity") +
    theme(
      axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5, size = 10),
      legend.position = "none"
    )
}
p1 <- make_leucine_barplot(IS_neg, paste0(IS_now, " (Neg)"), "Negative mode")
p2 <- make_leucine_barplot(IS_pos, paste0(IS_now, " (Pos)"), "Positive mode")
p3 <- make_leucine_barplot(IS_summed, paste0(IS_now, " (Sum)"), "Adduct sums")
w <- 3 + 0.2 * len
ggsave(paste0(outdir, "/plots/Leucine_neg.png"), plot = p1, height = w / 2.5, width = w, units = "in")
ggsave(paste0(outdir, "/plots/Leucine_pos.png"), plot = p2, height = w / 2.5, width = w, units = "in")
ggsave(paste0(outdir, "/plots/Leucine_sum.png"), plot = p3, height = w / 2.5, width = w, units = "in")
# Positive-control Z-scores: collect Z-scores of known marker metabolites in
# the positive-control samples so the run can be validated against expected
# deviations. Only runs when Z-scores were computed (z_score == 1).
if (z_score == 1) {
  ### POSITIVE CONTROLS
  # HMDB codes of marker metabolites for the PA, PKU and LPI controls
  # (presumably propionic acidemia, phenylketonuria, lysinuric protein
  # intolerance -- confirm with pipeline documentation).
  PA_codes <- c('HMDB00824', 'HMDB00783', 'HMDB00123')
  PKU_codes <- c('HMDB00159')
  LPI_codes <- c('HMDB00904', 'HMDB00641', 'HMDB00182')
  # NOTE(review): positive-control sample columns (P1002.1/P1003.1/P1005.1)
  # are hard-coded -- confirm these samples are present in every run.
  PA_data <- outlist[PA_codes, c('HMDB_code', 'name', 'P1002.1_Zscore')]
  PA_data <- melt(PA_data, id.vars = c('HMDB_code', 'name'))
  colnames(PA_data) <- c('HMDB.code', 'HMDB.name', 'Sample', 'Zscore')
  PKU_data <- outlist[PKU_codes, c('HMDB_code', 'name', 'P1003.1_Zscore')]
  PKU_data <- melt(PKU_data, id.vars = c('HMDB_code', 'name'))
  colnames(PKU_data) <- c('HMDB.code', 'HMDB.name', 'Sample', 'Zscore')
  LPI_data <- outlist[LPI_codes, c('HMDB_code', 'name', 'P1005.1_Zscore')]
  LPI_data <- melt(LPI_data, id.vars = c('HMDB_code', 'name'))
  colnames(LPI_data) <- c('HMDB.code', 'HMDB.name', 'Sample', 'Zscore')
  # Keep all three positive controls. BUG FIX: a leftover debug line used to
  # overwrite this with rbind(PA_data) only, silently dropping PKU and LPI.
  Pos_Contr <- rbind(PA_data, PKU_data, LPI_data)
  Pos_Contr$Zscore <- as.numeric(Pos_Contr$Zscore)
  Pos_Contr$Matrix <- matrix
  Pos_Contr$Rundate <- rundate
  Pos_Contr$Project <- project
  # Save results
  save(Pos_Contr, file = paste(outdir, 'Pos_Contr.RData', sep = "/"))
}
cat("Ready excelExport.R")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/polyhedra-lib.R
\docType{class}
\name{PolyhedronStateDmccooeyScraper}
\alias{PolyhedronStateDmccooeyScraper}
\title{PolyhedronStateDmccooeyScraper}
\description{
Scrapes polyhedra from a dmccooey file format
}
\author{
ken4rab
}
\section{Super class}{
\code{\link[Rpolyhedra:PolyhedronState]{Rpolyhedra::PolyhedronState}} -> \code{PolyhedronStateDmccooeyScraper}
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{regexp.values.names}}{regexp for scraping values names}
\item{\code{regexp.rn}}{regexp for scraping real numbers}
\item{\code{regexp.values}}{regexp for scraping values}
\item{\code{regexp.vertex}}{regexp for scraping vertices}
\item{\code{regexp.faces}}{regexp for scraping faces}
\item{\code{polyhedra.dmccooey.lines}}{dmccooey polyhedra definition lines}
\item{\code{labels.map}}{labels map indicating where values are defined}
\item{\code{values}}{scraped values for each label}
\item{\code{vertices}}{vertices specification}
\item{\code{vertices.replaced}}{vertices with labels replaced by 3D values}
\item{\code{faces}}{faces definition}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-PolyhedronStateDmccooeyScraper-new}{\code{PolyhedronStateDmccooeyScraper$new()}}
\item \href{#method-PolyhedronStateDmccooeyScraper-setupRegexp}{\code{PolyhedronStateDmccooeyScraper$setupRegexp()}}
\item \href{#method-PolyhedronStateDmccooeyScraper-scrapeValues}{\code{PolyhedronStateDmccooeyScraper$scrapeValues()}}
\item \href{#method-PolyhedronStateDmccooeyScraper-scrapeVertices}{\code{PolyhedronStateDmccooeyScraper$scrapeVertices()}}
\item \href{#method-PolyhedronStateDmccooeyScraper-scrapeFaces}{\code{PolyhedronStateDmccooeyScraper$scrapeFaces()}}
\item \href{#method-PolyhedronStateDmccooeyScraper-scrape}{\code{PolyhedronStateDmccooeyScraper$scrape()}}
\item \href{#method-PolyhedronStateDmccooeyScraper-getName}{\code{PolyhedronStateDmccooeyScraper$getName()}}
\item \href{#method-PolyhedronStateDmccooeyScraper-applyTransformationMatrix}{\code{PolyhedronStateDmccooeyScraper$applyTransformationMatrix()}}
\item \href{#method-PolyhedronStateDmccooeyScraper-buildRGL}{\code{PolyhedronStateDmccooeyScraper$buildRGL()}}
\item \href{#method-PolyhedronStateDmccooeyScraper-exportToXML}{\code{PolyhedronStateDmccooeyScraper$exportToXML()}}
\item \href{#method-PolyhedronStateDmccooeyScraper-clone}{\code{PolyhedronStateDmccooeyScraper$clone()}}
}
}
\if{html}{\out{
<details open><summary>Inherited methods</summary>
<ul>
<li><span class="pkg-link" data-pkg="Rpolyhedra" data-topic="PolyhedronState" data-id="addError"><a href='../../Rpolyhedra/html/PolyhedronState.html#method-PolyhedronState-addError'><code>Rpolyhedra::PolyhedronState$addError()</code></a></span></li>
<li><span class="pkg-link" data-pkg="Rpolyhedra" data-topic="PolyhedronState" data-id="checkEdgesConsistency"><a href='../../Rpolyhedra/html/PolyhedronState.html#method-PolyhedronState-checkEdgesConsistency'><code>Rpolyhedra::PolyhedronState$checkEdgesConsistency()</code></a></span></li>
<li><span class="pkg-link" data-pkg="Rpolyhedra" data-topic="PolyhedronState" data-id="getSolid"><a href='../../Rpolyhedra/html/PolyhedronState.html#method-PolyhedronState-getSolid'><code>Rpolyhedra::PolyhedronState$getSolid()</code></a></span></li>
</ul>
</details>
}}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PolyhedronStateDmccooeyScraper-new"></a>}}
\if{latex}{\out{\hypertarget{method-PolyhedronStateDmccooeyScraper-new}{}}}
\subsection{Method \code{new()}}{
Initialize Dmccooey scraper
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PolyhedronStateDmccooeyScraper$new(file.id, polyhedra.dmccooey.lines)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{file.id}}{identifier of the definition file.}
\item{\code{polyhedra.dmccooey.lines}}{raw Dmccooey definition file lines}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A new PolyhedronStateDmccooeyScraper object.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PolyhedronStateDmccooeyScraper-setupRegexp"></a>}}
\if{latex}{\out{\hypertarget{method-PolyhedronStateDmccooeyScraper-setupRegexp}{}}}
\subsection{Method \code{setupRegexp()}}{
setupRegexp for Dmccooey definition
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PolyhedronStateDmccooeyScraper$setupRegexp()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
This PolyhedronStateDmccooeyScraper object with regexp defined.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PolyhedronStateDmccooeyScraper-scrapeValues"></a>}}
\if{latex}{\out{\hypertarget{method-PolyhedronStateDmccooeyScraper-scrapeValues}{}}}
\subsection{Method \code{scrapeValues()}}{
scrape values from Dmccooey definition
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PolyhedronStateDmccooeyScraper$scrapeValues(values.lines)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{values.lines}}{values definitions in Dmccooey source}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
This PolyhedronStateDmccooeyScraper object with values defined.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PolyhedronStateDmccooeyScraper-scrapeVertices"></a>}}
\if{latex}{\out{\hypertarget{method-PolyhedronStateDmccooeyScraper-scrapeVertices}{}}}
\subsection{Method \code{scrapeVertices()}}{
scrape polyhedron vertices from definition
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PolyhedronStateDmccooeyScraper$scrapeVertices(vertices.lines)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{vertices.lines}}{vertices definitions in Dmccooey source}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
This PolyhedronStateDmccooeyScraper object with vertices defined.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PolyhedronStateDmccooeyScraper-scrapeFaces"></a>}}
\if{latex}{\out{\hypertarget{method-PolyhedronStateDmccooeyScraper-scrapeFaces}{}}}
\subsection{Method \code{scrapeFaces()}}{
scrape polyhedron faces from definition
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PolyhedronStateDmccooeyScraper$scrapeFaces(faces.lines)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{faces.lines}}{face definitions in Dmccooey source}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
This PolyhedronStateDmccooeyScraper object with faces defined.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PolyhedronStateDmccooeyScraper-scrape"></a>}}
\if{latex}{\out{\hypertarget{method-PolyhedronStateDmccooeyScraper-scrape}{}}}
\subsection{Method \code{scrape()}}{
scrape Dmccooey polyhedron definition
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PolyhedronStateDmccooeyScraper$scrape()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
A new PolyhedronStateDefined object.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PolyhedronStateDmccooeyScraper-getName"></a>}}
\if{latex}{\out{\hypertarget{method-PolyhedronStateDmccooeyScraper-getName}{}}}
\subsection{Method \code{getName()}}{
get Polyhedron name
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PolyhedronStateDmccooeyScraper$getName()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
string with polyhedron name
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PolyhedronStateDmccooeyScraper-applyTransformationMatrix"></a>}}
\if{latex}{\out{\hypertarget{method-PolyhedronStateDmccooeyScraper-applyTransformationMatrix}{}}}
\subsection{Method \code{applyTransformationMatrix()}}{
Apply transformation matrix to polyhedron
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PolyhedronStateDmccooeyScraper$applyTransformationMatrix(transformation.matrix)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{transformation.matrix}}{the transformation matrix to apply to the polyhedron}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PolyhedronStateDmccooeyScraper-buildRGL"></a>}}
\if{latex}{\out{\hypertarget{method-PolyhedronStateDmccooeyScraper-buildRGL}{}}}
\subsection{Method \code{buildRGL()}}{
Creates a 'rgl' representation of the object
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PolyhedronStateDmccooeyScraper$buildRGL(transformation.matrix)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{transformation.matrix}}{the transformation matrix to apply to the polyhedron}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PolyhedronStateDmccooeyScraper-exportToXML"></a>}}
\if{latex}{\out{\hypertarget{method-PolyhedronStateDmccooeyScraper-exportToXML}{}}}
\subsection{Method \code{exportToXML()}}{
serializes object in XML
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PolyhedronStateDmccooeyScraper$exportToXML()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PolyhedronStateDmccooeyScraper-clone"></a>}}
\if{latex}{\out{\hypertarget{method-PolyhedronStateDmccooeyScraper-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PolyhedronStateDmccooeyScraper$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
| /man/PolyhedronStateDmccooeyScraper.Rd | no_license | ropensci/Rpolyhedra | R | false | true | 9,708 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/polyhedra-lib.R
\docType{class}
\name{PolyhedronStateDmccooeyScraper}
\alias{PolyhedronStateDmccooeyScraper}
\title{PolyhedronStateDmccooeyScraper}
\description{
Scrapes polyhedra from a dmccooey file format
}
\author{
ken4rab
}
\section{Super class}{
\code{\link[Rpolyhedra:PolyhedronState]{Rpolyhedra::PolyhedronState}} -> \code{PolyhedronStateDmccooeyScraper}
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{regexp.values.names}}{regexp for scraping values names}
\item{\code{regexp.rn}}{regexp for scraping real numbers}
\item{\code{regexp.values}}{regexp for scraping values}
\item{\code{regexp.vertex}}{regexp for scraping vertices}
\item{\code{regexp.faces}}{regexp for scraping faces}
\item{\code{polyhedra.dmccooey.lines}}{dmccooey polyhedra definition lines}
\item{\code{labels.map}}{labels map indicating where values are defined}
\item{\code{values}}{scraped values for each label}
\item{\code{vertices}}{vertices specification}
\item{\code{vertices.replaced}}{vertices with labels replaced by 3D values}
\item{\code{faces}}{faces definition}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-PolyhedronStateDmccooeyScraper-new}{\code{PolyhedronStateDmccooeyScraper$new()}}
\item \href{#method-PolyhedronStateDmccooeyScraper-setupRegexp}{\code{PolyhedronStateDmccooeyScraper$setupRegexp()}}
\item \href{#method-PolyhedronStateDmccooeyScraper-scrapeValues}{\code{PolyhedronStateDmccooeyScraper$scrapeValues()}}
\item \href{#method-PolyhedronStateDmccooeyScraper-scrapeVertices}{\code{PolyhedronStateDmccooeyScraper$scrapeVertices()}}
\item \href{#method-PolyhedronStateDmccooeyScraper-scrapeFaces}{\code{PolyhedronStateDmccooeyScraper$scrapeFaces()}}
\item \href{#method-PolyhedronStateDmccooeyScraper-scrape}{\code{PolyhedronStateDmccooeyScraper$scrape()}}
\item \href{#method-PolyhedronStateDmccooeyScraper-getName}{\code{PolyhedronStateDmccooeyScraper$getName()}}
\item \href{#method-PolyhedronStateDmccooeyScraper-applyTransformationMatrix}{\code{PolyhedronStateDmccooeyScraper$applyTransformationMatrix()}}
\item \href{#method-PolyhedronStateDmccooeyScraper-buildRGL}{\code{PolyhedronStateDmccooeyScraper$buildRGL()}}
\item \href{#method-PolyhedronStateDmccooeyScraper-exportToXML}{\code{PolyhedronStateDmccooeyScraper$exportToXML()}}
\item \href{#method-PolyhedronStateDmccooeyScraper-clone}{\code{PolyhedronStateDmccooeyScraper$clone()}}
}
}
\if{html}{\out{
<details open><summary>Inherited methods</summary>
<ul>
<li><span class="pkg-link" data-pkg="Rpolyhedra" data-topic="PolyhedronState" data-id="addError"><a href='../../Rpolyhedra/html/PolyhedronState.html#method-PolyhedronState-addError'><code>Rpolyhedra::PolyhedronState$addError()</code></a></span></li>
<li><span class="pkg-link" data-pkg="Rpolyhedra" data-topic="PolyhedronState" data-id="checkEdgesConsistency"><a href='../../Rpolyhedra/html/PolyhedronState.html#method-PolyhedronState-checkEdgesConsistency'><code>Rpolyhedra::PolyhedronState$checkEdgesConsistency()</code></a></span></li>
<li><span class="pkg-link" data-pkg="Rpolyhedra" data-topic="PolyhedronState" data-id="getSolid"><a href='../../Rpolyhedra/html/PolyhedronState.html#method-PolyhedronState-getSolid'><code>Rpolyhedra::PolyhedronState$getSolid()</code></a></span></li>
</ul>
</details>
}}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PolyhedronStateDmccooeyScraper-new"></a>}}
\if{latex}{\out{\hypertarget{method-PolyhedronStateDmccooeyScraper-new}{}}}
\subsection{Method \code{new()}}{
Initialize Dmccooey scraper
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PolyhedronStateDmccooeyScraper$new(file.id, polyhedra.dmccooey.lines)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{file.id}}{identifier of the definition file.}
\item{\code{polyhedra.dmccooey.lines}}{raw Dmccooey definition file lines}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A new PolyhedronStateDmccooeyScraper object.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PolyhedronStateDmccooeyScraper-setupRegexp"></a>}}
\if{latex}{\out{\hypertarget{method-PolyhedronStateDmccooeyScraper-setupRegexp}{}}}
\subsection{Method \code{setupRegexp()}}{
setupRegexp for Dmccooey definition
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PolyhedronStateDmccooeyScraper$setupRegexp()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
This PolyhedronStateDmccooeyScraper object with regexp defined.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PolyhedronStateDmccooeyScraper-scrapeValues"></a>}}
\if{latex}{\out{\hypertarget{method-PolyhedronStateDmccooeyScraper-scrapeValues}{}}}
\subsection{Method \code{scrapeValues()}}{
scrape values from Dmccooey definition
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PolyhedronStateDmccooeyScraper$scrapeValues(values.lines)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{values.lines}}{values definitions in Dmccooey source}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
This PolyhedronStateDmccooeyScraper object with values defined.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PolyhedronStateDmccooeyScraper-scrapeVertices"></a>}}
\if{latex}{\out{\hypertarget{method-PolyhedronStateDmccooeyScraper-scrapeVertices}{}}}
\subsection{Method \code{scrapeVertices()}}{
scrape polyhedron vertices from definition
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PolyhedronStateDmccooeyScraper$scrapeVertices(vertices.lines)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{vertices.lines}}{vertices definitions in Dmccooey source}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
This PolyhedronStateDmccooeyScraper object with vertices defined.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PolyhedronStateDmccooeyScraper-scrapeFaces"></a>}}
\if{latex}{\out{\hypertarget{method-PolyhedronStateDmccooeyScraper-scrapeFaces}{}}}
\subsection{Method \code{scrapeFaces()}}{
scrape polyhedron faces from definition
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PolyhedronStateDmccooeyScraper$scrapeFaces(faces.lines)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{faces.lines}}{face definitions in Dmccooey source}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
This PolyhedronStateDmccooeyScraper object with faces defined.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PolyhedronStateDmccooeyScraper-scrape"></a>}}
\if{latex}{\out{\hypertarget{method-PolyhedronStateDmccooeyScraper-scrape}{}}}
\subsection{Method \code{scrape()}}{
scrape Dmccooey polyhedron definition
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PolyhedronStateDmccooeyScraper$scrape()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
A new PolyhedronStateDefined object.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PolyhedronStateDmccooeyScraper-getName"></a>}}
\if{latex}{\out{\hypertarget{method-PolyhedronStateDmccooeyScraper-getName}{}}}
\subsection{Method \code{getName()}}{
get Polyhedron name
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PolyhedronStateDmccooeyScraper$getName()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
string with polyhedron name
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PolyhedronStateDmccooeyScraper-applyTransformationMatrix"></a>}}
\if{latex}{\out{\hypertarget{method-PolyhedronStateDmccooeyScraper-applyTransformationMatrix}{}}}
\subsection{Method \code{applyTransformationMatrix()}}{
Apply transformation matrix to polyhedron
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PolyhedronStateDmccooeyScraper$applyTransformationMatrix(transformation.matrix)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{transformation.matrix}}{the transformation matrix to apply to the polyhedron}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PolyhedronStateDmccooeyScraper-buildRGL"></a>}}
\if{latex}{\out{\hypertarget{method-PolyhedronStateDmccooeyScraper-buildRGL}{}}}
\subsection{Method \code{buildRGL()}}{
Creates a 'rgl' representation of the object
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PolyhedronStateDmccooeyScraper$buildRGL(transformation.matrix)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{transformation.matrix}}{the transformation matrix to apply to the polyhedron}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PolyhedronStateDmccooeyScraper-exportToXML"></a>}}
\if{latex}{\out{\hypertarget{method-PolyhedronStateDmccooeyScraper-exportToXML}{}}}
\subsection{Method \code{exportToXML()}}{
serializes object in XML
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PolyhedronStateDmccooeyScraper$exportToXML()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-PolyhedronStateDmccooeyScraper-clone"></a>}}
\if{latex}{\out{\hypertarget{method-PolyhedronStateDmccooeyScraper-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PolyhedronStateDmccooeyScraper$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
# prog1: draw a histogram of Global Active Power for 1-2 Feb 2007 and
# save it as trial.png (480 x 480).
prog1 <- function(){
# Semicolon-separated household power data, read with character columns
# left as character (stringsAsFactors = FALSE).
dataFile <- "household_power_consumption.txt"
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# Keep only the two target dates (dd/mm/yyyy strings matched literally).
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
# NOTE(review): the raw file reportedly encodes missing values as "?";
# as.numeric() would then coerce those entries to NA with a warning --
# consider read.table(..., na.strings = "?"). TODO confirm against data.
globalActivePower <- as.numeric(subSetData$Global_active_power)
# Open the PNG device, draw, and close it so the file is flushed.
png("trial.png", width=480, height=480)
hist(globalActivePower, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
} | /plot1.R | no_license | vishwajeet993511/ExData_Plotting1 | R | false | false | 457 | r | prog1 <- function(){
dataFile <- "household_power_consumption.txt"
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
globalActivePower <- as.numeric(subSetData$Global_active_power)
png("trial.png", width=480, height=480)
hist(globalActivePower, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
} |
set.seed( 239 )
library(mvtnorm)
library(fields)
library(Rcpp)
library(mclust)
library(kernlab)
library(ConsensusClusterPlus)
# simu: generate a 100 x 136 matrix of simulated GLCM (gray-level
# co-occurrence) counts for 5 texture classes, 20 images per class.
# `s` scales the covariance of the bivariate normal used for each class.
simu=function(s){
# prob_glcm: Monte-Carlo estimate of a probability vector over the 136
# triangular cells of a 16 x 16 GLCM, for class-location parameter `c`.
prob_glcm<-function(c,s=s,mc=30000){
# Bivariate normal centred near the anti-diagonal of the grid; `c`
# shifts the centre along it, `s` scales the negatively correlated
# covariance (requires mvtnorm::rmvnorm).
mu<-c(2+c,14-c)
sigma<-matrix(s*c(1,-0.7,-0.7,1),nrow=2)
elip<-rmvnorm(mc,mu,sigma)
# Debug plotting (left disabled): draws the sample cloud and grid lines.
# par(xaxs='i',yaxs='i')
# plot(elip,xlim =c(0,16) ,ylim=c(0,16))
# abline(16,-1,col='red')
# abline(h=16);abline(h=15);abline(h=14);abline(h=13);abline(h=12);abline(h=11);abline(h=10);abline(h=9);
# abline(h=8);abline(h=7);abline(h=6);abline(h=5);abline(h=4);abline(h=3);abline(h=2);abline(h=1);abline(h=0)
# abline(v=16);abline(v=15);abline(v=14);abline(v=13);abline(v=12);abline(v=11);abline(v=10);abline(v=9);
# abline(v=0);abline(v=1);abline(v=2);abline(v=3);abline(v=4);abline(v=5);abline(v=6);abline(v=7);abline(v=8)
# Bin the mc draws into the 16 x 16 grid over (0,16) x (0,16); draws
# outside the grid (or exactly on a cell edge) are silently dropped.
cell_count<-rep(0,16*16)
for (i in 1:mc)
{
for (m in 1:16) {
for (k in 16:1) {
if (( (m-1) <elip[i,1])&(elip[i,1]< m)&( (k-1) <elip[i,2])&(elip[i,2]< k)) {
cell_count[16-k+1+16*(m-1)]=cell_count[16-k+1+16*(m-1)]+1}
}
}
}
## -c(2:16,19:32,36:48,53:64,70:80,87:96,104:112,121:128,138:144,155:160,172:176,189:192,206:208,223:224,240)
# Normalise to probabilities, then mirror the triangular cells into a
# full 256-element (16 x 16) symmetric layout via explicit indexing.
z<-cell_count/sum(cell_count)
z_whole<-z[c(1,17,33,49,65,81,97,113,129,145,161,177,193,209,225,241,
17,18,34,50,66,82,98,114,130,146,162,178,194,210,226,242,
33,34,35,51,67,83,99,115,131,147,163,179,195,211,227,243,
49,50,51,52,68,84,100,116,132,148,164,180,196,212,228,244,
65,66,67,68,69,85,101,117,133,149,165,181,197,213,229,245,
81,82,83,84,85,86,102,118,134,150,166,182,198,214,230,246,
97,98,99,100,101,102,103,119,135,151,167,183,199,215,231,247,
113,114,115,116,117,118,119,120,136,152,168,184,200,216,232,248,
129,130,131,132,133,134,135,136,137,153,169,185,201,217,233,249,
145,146,147,148,149,150,151,152,153,154,170,186,202,218,234,250,
161,162,163,164,165,166,167,168,169,170,171,187,203,219,235,251,
177,178,179,180,181,182,183,184,185,186,187,188,204,220,236,252,
193,194,195,196,197,198,199,200,201,202,203,204,205,221,237,253,
209,210,211,212,213,214,215,216,217,218,219,220,221,222,238,254,
225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,255,
241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256)]
# Smooth the full 16 x 16 image (fields::as.image / image.smooth).
arg. <- expand.grid(c(0.5:15.5),c(15.5:0.5))
I = as.image( Z=z_whole, x=arg., grid=list(x=seq(0.5,15.5,1), y=seq(0.5,15.5,1)))
image(I)
smooth.I <- image.smooth(I, theta=1);
#################################################
### notice the order of this smoothed image  ###
#################################################
# Collect the 136 triangular cells back from the smoothed image --
# row r contributes columns 16, 15, ..., 16-r+1 -- then renormalise.
den=c()
for (r in 1:16) {
for (w in 1:r) {
den=c(den,smooth.I$z[r,16-(w-1)])
}
}
prob<-den/sum(den)
return(prob)
}
# One cell-probability profile per class; classes differ only in `c`.
prob1=prob_glcm(c=5,s=s)
prob2=prob_glcm(c=5.5,s=s)
prob3=prob_glcm(c=6,s=s)
prob4=prob_glcm(c=6.5,s=s)
prob5=prob_glcm(c=7,s=s)
# 20 simulated GLCMs per class: draw a random total count t, then
# allocate it across the 136 cells in proportion to the class profile.
glcm=matrix(0,nrow=20*5,ncol=136)
for (j in 1:20)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob1)
}
for (j in 21:40)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob2)
}
for (j in 41:60)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob3)
}
for (j in 61:80)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob4)
}
for (j in 81:100)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob5)
}
glcm
}
# --- Data and design setup --------------------------------------------
# Z: simulated GLCM counts (100 images x 136 cells), covariance scale 15.
Z=simu(s=15)
Z_met=Z
T_met=nrow(Z_met) # number of images
n=ncol(Z_met) # number of GLCM cells per image
# X: total count per image; a standardised copy is used as a covariate.
X=apply(Z_met,1,sum)
X_met=X
sX_met=(X-mean(X))/sd(X)
# R: per-image 2 x n design array (intercept + standardised total),
# the same column repeated across all n cells.
R=array(data = NA,dim = c(2,n,T_met))
for (t in 1: nrow(Z_met)) R[,,t]=matrix(rep(c(1,sX_met[t]),times=n),byrow = FALSE,nrow=2,ncol=n)
############################################################################
##########################      MCMC      ##################################
############################################################################
library(HI)
library(invgamma)
# Sampler implementations (the update_* functions below) and C++ kernels
# live in these external files; paths are cluster-specific.
source('/gstore/scratch/u/lix233/RGSDP/sdp_functions_selfwriting_V12_cpp.R')
sourceCpp('/gstore/scratch/u/lix233/RGSDP/rgsdp.cpp')
# D, W: 16-level grid matrices read from CSV (presumably distance and
# adjacency -- TODO confirm against the RGSDP sources).
D=read.csv('/gstore/scratch/u/lix233/RGSDP/D_16.csv',header=TRUE)
W=read.csv('/gstore/scratch/u/lix233/RGSDP/W_16.csv',header=TRUE)
# Total iterations and burn-in (first half discarded).
N=20000;Burnin=N/2
Y_iter_met=Theta_iter_met=array(data=NA,dim = c(T_met,n,N))
# Initial Theta: noise centred well below zero where the observed count
# is zero, otherwise noise around the observed count.
# NOTE(review): the name `try` shadows base::try within this script.
try=matrix(0,nrow =T_met ,ncol = n)
for (i in 1:T_met){
for (j in 1:n){
if (Z_met[i,j]==0) {
try[i,j]=rnorm(1,mean=-10,sd=1)
} else {
try[i,j]=rnorm(1,mean=Z_met[i,j],sd=1)
}
}
}
# Sanity check: the initial latent Y draw should contain no infinities
# (the printed sum should be 0).
g=update_Y(Z=Z_met,X=X_met,tau2=100,Theta = try,Beta =c(0.1,0.1),R)
sum(g==Inf)+sum(g==-Inf)
Theta_iter_met[,,1]=try
# Chain storage and starting values for the scalar parameters.
tau2_met=v_met=rho_met=sig2_met=rep(NA,N)
tau2_met[1]=50
v_met[1]=0.8
rho_met[1]=0.9
sig2_met[1]=10
# v_met=rep(1,N) # Fix v
# Hyperparameters: vague priors on v, tau2, sig2, and a flat bivariate
# normal prior on Beta.
av=bv=1
atau=0.0001 ;btau=0.0001
asig=0.0001 ;bsig=0.0001
Betam=c(0,0);Sigma_m=matrix(c(10^5,0,0,10^5),nrow=2,ncol=2)
Beta_iter_met=matrix(NA,nrow=N,ncol=nrow(R[,,1]))
Beta_iter_met[1,]=c(40,20)
# --- Gibbs sampler ----------------------------------------------------
# Each sweep updates, in order: latent Y, Theta, Beta, then the scalars
# tau2, sig2, rho and v, each conditional on the most recent values of
# the others. The update_* functions come from the sourced RGSDP files
# (not visible here), so their exact conditionals are assumed, not shown.
for (iter in 2:N) {
Y_iter_met[,,iter]=update_Y(Z_met,X_met,tau2_met[iter-1],Theta_iter_met[,,iter-1],Beta_iter_met[iter-1,],R)
Theta_iter_met[,,iter]=update_theta(as.vector(X_met),Y_iter_met[,,iter],as.matrix(D),as.matrix(W),rho_met[iter-1],Theta_iter_met[,,iter-1],sig2_met[iter-1],tau2_met[iter-1],v_met[iter-1],Beta_iter_met[iter-1,],R)
Beta_iter_met[iter,]=update_Beta(Betam,Sigma_m,tau2_met[iter-1],X_met,Y_iter_met[,,iter],Theta_iter_met[,,iter],R)
tau2_met[iter] = update_tau2(X_met,Y_iter_met[,,iter],Theta_iter_met[,,iter],atau,btau,Beta_iter_met[iter,],R)
sig2_met[iter]= update_sig2(asig,bsig,D,W,rho_met[iter-1],Theta_iter_met[,,iter])
rho_met[iter] = update_rho(D,W,Theta_iter_met[,,iter],sig2_met[iter])
# Tstar = current number of unique Theta rows (occupied clusters).
v_met[iter]=update_v(Z_met,v_met[iter-1],Tstar=nrow(unique.matrix(Theta_iter_met[,,iter])),av,bv)
}
# --- Convergence diagnostics ------------------------------------------
# Two-sided p-values from Geweke z-scores on the post-burn-in chains;
# small values flag a difference between early and late chain segments.
library(coda)
mcmc_beta=mcmc(Beta_iter_met[(1+Burnin):N,])
pnorm(abs(geweke.diag(mcmc_beta)$z),lower.tail=FALSE)*2
mcmc_rho=mcmc(rho_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_rho)$z),lower.tail=FALSE)*2
mcmc_sig2=mcmc(sig2_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_sig2)$z),lower.tail=FALSE)*2
mcmc_tau2=mcmc(tau2_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_tau2)$z),lower.tail=FALSE)*2
mcmc_v=mcmc(v_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_v)$z),lower.tail=FALSE)*2
# --- Posterior mean of Theta and HRGSDP partition ---------------------
# Average Theta over the post-burn-in draws.
Theta_ave=Theta_sum=matrix(0,nrow=nrow(Theta_iter_met[,,1]),ncol=ncol(Theta_iter_met[,,1]))
for (i in (Burnin+1):N) {
Theta_sum=Theta_sum+Theta_iter_met[,,i]
}
Theta_ave=Theta_sum/(N-Burnin)
library('NbClust')
# Run NbClust once and reuse the result: the original code ran the same
# expensive, deterministic call twice (once discarded, once for the
# partition). Printing preserves the console output of the first call.
nbclust.res <- NbClust(Theta_ave,distance='euclidean',method='ward.D2',index='kl')
print(nbclust.res)
HRGSDP=nbclust.res$Best.partition
# glcm_whole: expand each image's 136 triangular GLCM cells into the full
# symmetric 16 x 16 (256-column) layout by repeating mirrored entries,
# so that downstream texture features see complete matrices.
glcm_whole=Z[,c(1,2,4,7,11,16,22,29,37,46,56,67,79,92,106,121,
2,3,5,8,12,17,23,30,38,47,57,68,80,93,107,122,
4,5,6,9,13,18,24,31,39,48,58,69,81,94,108,123,
7,8,9,10,14,19,25,32,40,49,59,70,82,95,109,124,
11,12,13,14,15,20,26,33,41,50,60,71,83,96,110,125,
16,17,18,19,20,21,27,34,42,51,61,72,84,97,111,126,
22,23,24,25,26,27,28,35,43,52,62,73,85,98,112,127,
29,30,31,32,33,34,35,36,44,53,63,74,86,99,113,128,
37,38,39,40,41,42,43,44,45,54,64,75,87,100,114,129,
46,47,48,49,50,51,52,53,54,55,65,76,88,101,115,130,
56,57,58,59,60,61,62,63,64,65,66,77,89,102,116,131,
67,68,69,70,71,72,73,74,75,76,77,78,90,103,117,132,
79,80,81,82,83,84,85,86,87,88,89,90,91,104,118,133,
92,93,94,95,96,97,98,99,100,101,102,103,104,105,119,134,
106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,135,
121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136)]
# --- Method comparison -------------------------------------------------
# Compare feature-based clusterings and HRGSDP against the known 5-class
# simulation design (20 images per class).
# cal_stat() and error_rate() are defined in the sourced cal_stat.R.
source('/gstore/scratch/u/lix233/RGSDP/cal_stat.R')
features=cal_stat(glcm_whole)
# Competing feature-based clusterings, all with K = 5.
GMM=Mclust(features,5)
my.dist <- function(x) dist(x, method='euclidean')
my.hclust <- function(d) hclust(d, method='ward.D2')
HC<-cutree(my.hclust(my.dist(data.matrix(features))),k=5)
KM=kmeans(features,5)
SC=specc(features,5)
CO <- ConsensusClusterPlus(t(features),maxK=9,reps=100,pItem=0.90, pFeature=1,
clusterAlg='hc',distance='euclidean',plot=FALSE)
CO <- CO[[5]]$consensusClass
# Cross-tabulate each partition against the true class labels (1..5).
aa <- table(rep(1:5,each=20), CO)
bb <- table(rep(1:5,each=20), GMM$classification)
cc <- table(rep(1:5,each=20), HC)
dd <- table(rep(1:5,each=20), KM$cluster)
ee <- table(rep(1:5,each=20), SC)
ff <- table(rep(1:5,each=20), HRGSDP)
# Per method: chi-square association with the truth, number of recovered
# clusters, error rate, and a method label.
res_FeaCO=c(chisq.test(aa,correct = TRUE)$statistic,ncol(aa),error_rate(aa), 'FeaCO')
res_FeaGMM=c(chisq.test(bb,correct = TRUE)$statistic,ncol(bb),error_rate(bb), 'FeaGMM')
res_FeaHC=c(chisq.test(cc,correct = TRUE)$statistic,ncol(cc),error_rate(cc), 'FeaHC')
res_FeaKM=c(chisq.test(dd,correct = TRUE)$statistic,ncol(dd),error_rate(dd), 'FeaKM')
res_FeaSC=c(chisq.test(ee,correct = TRUE)$statistic,ncol(ee),error_rate(ee), 'FeaSC')
res_HRGSDP=c(chisq.test(ff,correct = TRUE)$statistic,ncol(ff),error_rate(ff), 'HRGSDP')
xx = rbind(res_FeaCO, res_FeaGMM, res_FeaHC, res_FeaKM, res_FeaSC, res_HRGSDP)
# Fixed typo in the column label ('nunber' -> 'number').
colnames(xx) = c('pearson.chi.sq', 'number of clusters', 'error.rate', 'method')
xx = as.data.frame(xx)
print(xx)
| /s=15/simu_239.R | no_license | mguindanigroup/Radiomics-Hierarchical-Rounded-Gaussian-Spatial-Dirichlet-Process | R | false | false | 9,294 | r | set.seed( 239 )
library(mvtnorm)
library(fields)
library(Rcpp)
library(mclust)
library(kernlab)
library(ConsensusClusterPlus)
simu=function(s){
prob_glcm<-function(c,s=s,mc=30000){
mu<-c(2+c,14-c)
sigma<-matrix(s*c(1,-0.7,-0.7,1),nrow=2)
elip<-rmvnorm(mc,mu,sigma)
# par(xaxs='i',yaxs='i')
# plot(elip,xlim =c(0,16) ,ylim=c(0,16))
# abline(16,-1,col='red')
# abline(h=16);abline(h=15);abline(h=14);abline(h=13);abline(h=12);abline(h=11);abline(h=10);abline(h=9);
# abline(h=8);abline(h=7);abline(h=6);abline(h=5);abline(h=4);abline(h=3);abline(h=2);abline(h=1);abline(h=0)
# abline(v=16);abline(v=15);abline(v=14);abline(v=13);abline(v=12);abline(v=11);abline(v=10);abline(v=9);
# abline(v=0);abline(v=1);abline(v=2);abline(v=3);abline(v=4);abline(v=5);abline(v=6);abline(v=7);abline(v=8)
cell_count<-rep(0,16*16)
for (i in 1:mc)
{
for (m in 1:16) {
for (k in 16:1) {
if (( (m-1) <elip[i,1])&(elip[i,1]< m)&( (k-1) <elip[i,2])&(elip[i,2]< k)) {
cell_count[16-k+1+16*(m-1)]=cell_count[16-k+1+16*(m-1)]+1}
}
}
}
## -c(2:16,19:32,36:48,53:64,70:80,87:96,104:112,121:128,138:144,155:160,172:176,189:192,206:208,223:224,240)
z<-cell_count/sum(cell_count)
z_whole<-z[c(1,17,33,49,65,81,97,113,129,145,161,177,193,209,225,241,
17,18,34,50,66,82,98,114,130,146,162,178,194,210,226,242,
33,34,35,51,67,83,99,115,131,147,163,179,195,211,227,243,
49,50,51,52,68,84,100,116,132,148,164,180,196,212,228,244,
65,66,67,68,69,85,101,117,133,149,165,181,197,213,229,245,
81,82,83,84,85,86,102,118,134,150,166,182,198,214,230,246,
97,98,99,100,101,102,103,119,135,151,167,183,199,215,231,247,
113,114,115,116,117,118,119,120,136,152,168,184,200,216,232,248,
129,130,131,132,133,134,135,136,137,153,169,185,201,217,233,249,
145,146,147,148,149,150,151,152,153,154,170,186,202,218,234,250,
161,162,163,164,165,166,167,168,169,170,171,187,203,219,235,251,
177,178,179,180,181,182,183,184,185,186,187,188,204,220,236,252,
193,194,195,196,197,198,199,200,201,202,203,204,205,221,237,253,
209,210,211,212,213,214,215,216,217,218,219,220,221,222,238,254,
225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,255,
241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256)]
arg. <- expand.grid(c(0.5:15.5),c(15.5:0.5))
I = as.image( Z=z_whole, x=arg., grid=list(x=seq(0.5,15.5,1), y=seq(0.5,15.5,1)))
image(I)
smooth.I <- image.smooth(I, theta=1);
#################################################
### notice the order of this sommthed image ###
#################################################
den=c()
for (r in 1:16) {
for (w in 1:r) {
den=c(den,smooth.I$z[r,16-(w-1)])
}
}
prob<-den/sum(den)
return(prob)
}
prob1=prob_glcm(c=5,s=s)
prob2=prob_glcm(c=5.5,s=s)
prob3=prob_glcm(c=6,s=s)
prob4=prob_glcm(c=6.5,s=s)
prob5=prob_glcm(c=7,s=s)
glcm=matrix(0,nrow=20*5,ncol=136)
for (j in 1:20)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob1)
}
for (j in 21:40)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob2)
}
for (j in 41:60)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob3)
}
for (j in 61:80)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob4)
}
for (j in 81:100)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob5)
}
glcm
}
Z=simu(s=15)
Z_met=Z
T_met=nrow(Z_met)
n=ncol(Z_met)
X=apply(Z_met,1,sum)
X_met=X
sX_met=(X-mean(X))/sd(X)
R=array(data = NA,dim = c(2,n,T_met))
for (t in 1: nrow(Z_met)) R[,,t]=matrix(rep(c(1,sX_met[t]),times=n),byrow = FALSE,nrow=2,ncol=n)
############################################################################
########################## MCMC ########################
############################################################################
library(HI)
library(invgamma)
source('/gstore/scratch/u/lix233/RGSDP/sdp_functions_selfwriting_V12_cpp.R')
sourceCpp('/gstore/scratch/u/lix233/RGSDP/rgsdp.cpp')
D=read.csv('/gstore/scratch/u/lix233/RGSDP/D_16.csv',header=TRUE)
W=read.csv('/gstore/scratch/u/lix233/RGSDP/W_16.csv',header=TRUE)
N=20000;Burnin=N/2
Y_iter_met=Theta_iter_met=array(data=NA,dim = c(T_met,n,N))
try=matrix(0,nrow =T_met ,ncol = n)
for (i in 1:T_met){
for (j in 1:n){
if (Z_met[i,j]==0) {
try[i,j]=rnorm(1,mean=-10,sd=1)
} else {
try[i,j]=rnorm(1,mean=Z_met[i,j],sd=1)
}
}
}
g=update_Y(Z=Z_met,X=X_met,tau2=100,Theta = try,Beta =c(0.1,0.1),R)
sum(g==Inf)+sum(g==-Inf)
Theta_iter_met[,,1]=try
tau2_met=v_met=rho_met=sig2_met=rep(NA,N)
tau2_met[1]=50
v_met[1]=0.8
rho_met[1]=0.9
sig2_met[1]=10
# v_met=rep(1,N) # Fix v
av=bv=1
atau=0.0001 ;btau=0.0001
asig=0.0001 ;bsig=0.0001
Betam=c(0,0);Sigma_m=matrix(c(10^5,0,0,10^5),nrow=2,ncol=2)
Beta_iter_met=matrix(NA,nrow=N,ncol=nrow(R[,,1]))
Beta_iter_met[1,]=c(40,20)
for (iter in 2:N) {
Y_iter_met[,,iter]=update_Y(Z_met,X_met,tau2_met[iter-1],Theta_iter_met[,,iter-1],Beta_iter_met[iter-1,],R)
Theta_iter_met[,,iter]=update_theta(as.vector(X_met),Y_iter_met[,,iter],as.matrix(D),as.matrix(W),rho_met[iter-1],Theta_iter_met[,,iter-1],sig2_met[iter-1],tau2_met[iter-1],v_met[iter-1],Beta_iter_met[iter-1,],R)
Beta_iter_met[iter,]=update_Beta(Betam,Sigma_m,tau2_met[iter-1],X_met,Y_iter_met[,,iter],Theta_iter_met[,,iter],R)
tau2_met[iter] = update_tau2(X_met,Y_iter_met[,,iter],Theta_iter_met[,,iter],atau,btau,Beta_iter_met[iter,],R)
sig2_met[iter]= update_sig2(asig,bsig,D,W,rho_met[iter-1],Theta_iter_met[,,iter])
rho_met[iter] = update_rho(D,W,Theta_iter_met[,,iter],sig2_met[iter])
v_met[iter]=update_v(Z_met,v_met[iter-1],Tstar=nrow(unique.matrix(Theta_iter_met[,,iter])),av,bv)
}
library(coda)
mcmc_beta=mcmc(Beta_iter_met[(1+Burnin):N,])
pnorm(abs(geweke.diag(mcmc_beta)$z),lower.tail=FALSE)*2
mcmc_rho=mcmc(rho_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_rho)$z),lower.tail=FALSE)*2
mcmc_sig2=mcmc(sig2_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_sig2)$z),lower.tail=FALSE)*2
mcmc_tau2=mcmc(tau2_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_tau2)$z),lower.tail=FALSE)*2
mcmc_v=mcmc(v_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_v)$z),lower.tail=FALSE)*2
Theta_ave=Theta_sum=matrix(0,nrow=nrow(Theta_iter_met[,,1]),ncol=ncol(Theta_iter_met[,,1]))
for (i in (Burnin+1):N) {
Theta_sum=Theta_sum+Theta_iter_met[,,i]
}
Theta_ave=Theta_sum/(N-Burnin)
library('NbClust')
NbClust(Theta_ave,distance='euclidean',method='ward.D2',index='kl')
HRGSDP=NbClust(Theta_ave,distance='euclidean',method='ward.D2',index='kl')$Best.partition
glcm_whole=Z[,c(1,2,4,7,11,16,22,29,37,46,56,67,79,92,106,121,
2,3,5,8,12,17,23,30,38,47,57,68,80,93,107,122,
4,5,6,9,13,18,24,31,39,48,58,69,81,94,108,123,
7,8,9,10,14,19,25,32,40,49,59,70,82,95,109,124,
11,12,13,14,15,20,26,33,41,50,60,71,83,96,110,125,
16,17,18,19,20,21,27,34,42,51,61,72,84,97,111,126,
22,23,24,25,26,27,28,35,43,52,62,73,85,98,112,127,
29,30,31,32,33,34,35,36,44,53,63,74,86,99,113,128,
37,38,39,40,41,42,43,44,45,54,64,75,87,100,114,129,
46,47,48,49,50,51,52,53,54,55,65,76,88,101,115,130,
56,57,58,59,60,61,62,63,64,65,66,77,89,102,116,131,
67,68,69,70,71,72,73,74,75,76,77,78,90,103,117,132,
79,80,81,82,83,84,85,86,87,88,89,90,91,104,118,133,
92,93,94,95,96,97,98,99,100,101,102,103,104,105,119,134,
106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,135,
121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136)]
source('/gstore/scratch/u/lix233/RGSDP/cal_stat.R')
features=cal_stat(glcm_whole)
GMM=Mclust(features,5)
my.dist <- function(x) dist(x, method='euclidean')
my.hclust <- function(d) hclust(d, method='ward.D2')
HC<-cutree(my.hclust(my.dist(data.matrix(features))),k=5)
KM=kmeans(features,5)
SC=specc(features,5)
CO <- ConsensusClusterPlus(t(features),maxK=9,reps=100,pItem=0.90, pFeature=1,
clusterAlg='hc',distance='euclidean',plot=FALSE)
CO <- CO[[5]]$consensusClass
aa <- table(rep(1:5,each=20), CO)
bb <- table(rep(1:5,each=20), GMM$classification)
cc <- table(rep(1:5,each=20), HC)
dd <- table(rep(1:5,each=20), KM$cluster)
ee <- table(rep(1:5,each=20), SC)
ff <- table(rep(1:5,each=20), HRGSDP)
res_FeaCO=c(chisq.test(aa,correct = TRUE)$statistic,ncol(aa),error_rate(aa), 'FeaCO')
res_FeaGMM=c(chisq.test(bb,correct = TRUE)$statistic,ncol(bb),error_rate(bb), 'FeaGMM')
res_FeaHC=c(chisq.test(cc,correct = TRUE)$statistic,ncol(cc),error_rate(cc), 'FeaHC')
res_FeaKM=c(chisq.test(dd,correct = TRUE)$statistic,ncol(dd),error_rate(dd), 'FeaKM')
res_FeaSC=c(chisq.test(ee,correct = TRUE)$statistic,ncol(ee),error_rate(ee), 'FeaSC')
res_HRGSDP=c(chisq.test(ff,correct = TRUE)$statistic,ncol(ff),error_rate(ff), 'HRGSDP')
xx = rbind(res_FeaCO, res_FeaGMM, res_FeaHC, res_FeaKM, res_FeaSC, res_HRGSDP)
colnames(xx) = c('pearson.chi.sq', 'nunber of clusters', 'error.rate', 'method')
xx = as.data.frame(xx)
print(xx)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{qualitativeColors}
\alias{qualitativeColors}
\title{qualitativeColors}
\usage{
qualitativeColors(names, ...)
}
\arguments{
\item{names}{The names to which the colors are to be assigned, or an integer
indicating the desired number of colors}
\item{...}{passed to \code{randomcoloR::distinctColorPalette}}
}
\value{
A vector (eventually named) of colors
}
\description{
qualitativeColors
}
| /man/qualitativeColors.Rd | no_license | shaoyoucheng/SEtools | R | false | true | 475 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{qualitativeColors}
\alias{qualitativeColors}
\title{qualitativeColors}
\usage{
qualitativeColors(names, ...)
}
\arguments{
\item{names}{The names to which the colors are to be assigned, or an integer
indicating the desired number of colors}
\item{...}{passed to `randomcoloR::distinctColorPalette`}
}
\value{
A vector (eventually named) of colors
}
\description{
qualitativeColors
}
|
#' Plotting the best NDVI calculated using \code{\link{ndvi}}
#'
#' @param x A matrix of spectral data, a row is a spectrum across all spectral bands.
#' @param y A vector.
#' @param w A vector of wavelength.
#' @param p A vector of two elements (the pair of bands used for NDVI calculation).
#' @details
#' This function plots the best NDVI against the y
#' @return A ggplot object: scatter plot of y against the NDVI built from
#'   the band pair \code{p}, with a fitted regression line and its
#'   equation/R-squared annotation.
#' @examples
#' y <- exampleData[-1,1]
#' x <- exampleData[-1,-1]
#' w <- exampleData[1,-1]
#' p <- c(440,444)
#' ndvi.pair(x,y,w,p)
#' @export
ndvi.pair <- function(x,y,w,p){
# Locate the two requested bands in the wavelength vector.
bandInd <- which(w %in% p)
# NDVI = (band2 - band1) / (band2 + band1) per spectrum.
bestNDVI <- (x[,bandInd[2]] - x[,bandInd[1]]) / (x[,bandInd[2]] + x[,bandInd[1]])
# NOTE(review): this correlation is computed but its value is discarded
# (not assigned, returned, or printed from inside the function).
cor(bestNDVI,y)
# NOTE(review): library() calls inside a packaged function are
# discouraged -- prefer Imports/@importFrom.
library(ggplot2)
library(ggpmisc)
x2 <- bestNDVI
df <- data.frame(x2,y)
my.formula <- y ~ x
p <- ggplot(data = df, aes(x = x2, y = y)) +
geom_smooth(method = "lm", se = FALSE, color = "blue",formula = my.formula) + geom_point()
# NOTE(review): ggplot_build()$panel$ranges accesses old ggplot2
# internals; in current ggplot2 this slot does not exist (see
# layout$panel_params), so xrange/yrange would be NULL -- verify the
# targeted ggplot2 version.
yrange <- ggplot_build(p)$panel$ranges[[1]]$y.range
xrange <- ggplot_build(p)$panel$ranges[[1]]$x.range
# Annotate with the fitted equation and R^2, positioned from the ranges.
p <- p + stat_poly_eq(formula = my.formula, eq.with.lhs = "italic(hat(y))~`=`~",
aes(label = paste(..eq.label.., ..rr.label.., sep = "~~~")),
parse = TRUE, col="blue", label.x = xrange[2]*0.5, label.y = yrange[2]*0.95, size = 5)
p
}
#' Say Hello!
#'
#' Prints the structure of the combined \code{x}, \code{y} and \code{w}
#' objects and a thank-you message.
#'
#' NOTE(review): \code{x}, \code{y} and \code{w} are free variables
#' resolved from the enclosing environment -- consider passing them as
#' arguments instead.
view.datastr <- function() {
  str(c(x, y, w))
  # Fixed typo in the user-facing message ("Thansk" -> "Thanks").
  print("Thanks for using this package!")
}
| /R/ndvi.pair.R | no_license | kang-yu/visar | R | false | false | 1,402 | r | #' Plotting the best NDVI calculated using \code{\link{ndvi}}
#'
#' @param x A matrix of spectral data, a row is a spectrum across all spectral bands.
#' @param y A vector.
#' @param w A vector of wavelength.
#' @param p A vector of two elements (the pair of bands used for NDVI calculation).
#' @details
#' This function plots the best NDVI against the y
#' @examples
#' y <- exampleData[-1,1]
#' x <- exampleData[-1,-1]
#' w <- exampleData[1,-1]
#' p <- c(440,444)
#' ndvi.pair(x,y,w,p)
#' @export
ndvi.pair <- function(x,y,w,p){
bandInd <- which(w %in% p)
bestNDVI <- (x[,bandInd[2]] - x[,bandInd[1]]) / (x[,bandInd[2]] + x[,bandInd[1]])
cor(bestNDVI,y)
library(ggplot2)
library(ggpmisc)
x2 <- bestNDVI
df <- data.frame(x2,y)
my.formula <- y ~ x
p <- ggplot(data = df, aes(x = x2, y = y)) +
geom_smooth(method = "lm", se = FALSE, color = "blue",formula = my.formula) + geom_point()
yrange <- ggplot_build(p)$panel$ranges[[1]]$y.range
xrange <- ggplot_build(p)$panel$ranges[[1]]$x.range
p <- p + stat_poly_eq(formula = my.formula, eq.with.lhs = "italic(hat(y))~`=`~",
aes(label = paste(..eq.label.., ..rr.label.., sep = "~~~")),
parse = TRUE, col="blue", label.x = xrange[2]*0.5, label.y = yrange[2]*0.95, size = 5)
p
}
#' Say Hello!
view.datastr <- function() {
str(c(x,y,w))
print("Thansk for using this package!")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{WeibullHeightParameters}
\alias{WeibullHeightParameters}
\title{Regional weibull height-diameter parameters}
\format{
A data frame with 9 rows and 4 variables:
\describe{
\item{AllometricRegionID}{ID for allometric regions from Feldpausch et al. 2012.}
\item{a_par}{a parameter for Weibull model}
\item{b_par}{b parameter for Weibull model}
\item{c_par}{c parameter for Weibull model}
}
}
\source{
Feldpausch TR, Banin L, Phillips OL, Baker TR, Lewis SL et al. 2011. Height-diameter allometry of tropical forest trees. Biogeosciences 8 (5):1081-1106. doi:10.5194/bg-8-1081-2011
}
\usage{
WeibullHeightParameters
}
\description{
Regional height-diameter parameters for Weibull models from Feldpausch et al. 2012.
}
\keyword{datasets}
| /man/WeibullHeightParameters.Rd | no_license | ForestPlots/BiomasaFP | R | false | true | 839 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{WeibullHeightParameters}
\alias{WeibullHeightParameters}
\title{Regional weibull height-diameter parameters}
\format{
A data frame with 9 rows and 4 variables:
\describe{
\item{AllometricRegionID}{ID for allometric regions from Feldpausch et al. 2012.}
\item{a_par}{a parameter for Weibull model}
\item{b_par}{b parameter for Weibull model}
\item{c_par}{c parameter for Weibull model}
}
}
\source{
Feldpausch TR, Banin L, Phillips OL, Baker TR, Lewis SL et al. 2011. Height-diameter allometry of tropical forest trees. Biogeosciences 8 (5):1081-1106. doi:10.5194/bg-8-1081-2011
}
\usage{
WeibullHeightParameters
}
\description{
Regional height-diameter parameters for Weibull models from Feldpausch et al. 2012.
}
\keyword{datasets}
|
# A simple R script to illustrate R input-output.
# Run line by line and check inputs/outputs to understand what is
# happening. Each step below deliberately overwrites or appends to the
# same output file to show the different write modes.
MyData <- read.csv("../Data/trees.csv", header = TRUE) # import with headers
write.csv(MyData, "../Results/MyData.csv") # write it out as a new file
write.table(MyData[1,], file = "../Results/MyData.csv",append=TRUE) # append first row (note: emits an "appending column names" warning)
write.csv(MyData, "../Results/MyData.csv", row.names=TRUE) # overwrite, keeping row names
write.table(MyData, "../Results/MyData.csv", col.names=FALSE) # overwrite without column names (space-separated despite .csv)
| /Week3/Code/basic_io.R | no_license | abiB1994/CMEECourseWork | R | false | false | 525 | r | #A simple R script to illustrate R input-output.
# Run line by line and check inputs outputs to understand what is
# happening
MyData <- read.csv("../Data/trees.csv", header = TRUE) # import with headers
write.csv(MyData, "../Results/MyData.csv") #write it out as a new file
write.table(MyData[1,], file = "../Results/MyData.csv",append=TRUE) # Append to it
write.csv(MyData, "../Results/MyData.csv", row.names=TRUE) # write row names
write.table(MyData, "../Results/MyData.csv", col.names=FALSE) # ignore column names
|
# spikecounts.rFunc: count spikes in a fixed window after each event.
#
# Arguments:
#   spikes - list (one element per experiment) of numeric spike-time
#            vectors, in seconds.
#   events - list (same length) of numeric event-time vectors, in seconds.
#
# Returns a list of lists: element [[x]][[y]] is the number of spikes in
# experiment x falling within [event_y + winmin, event_y + winmax].
# (These counts feed a downstream GLM; no histogram is built here.)
spikecounts.rFunc <- function(spikes, events) {
  # Window offsets relative to each event, in seconds.
  winmin = .05
  winmax = .5
  # Spike times relative to each event, restricted to the window.
  # seq_along() (rather than 1:length()) keeps empty inputs safe.
  masterlist = lapply(seq_along(spikes), function(x) {
    lapply(seq_along(events[[x]]), function(y) {
      stampsidx = which(spikes[[x]] >= events[[x]][y] + winmin & spikes[[x]] <= events[[x]][y] + winmax)
      spikes[[x]][stampsidx] - events[[x]][y]
    })
  })
  # Reduce each per-event spike-time vector to its length (the count).
  counts = lapply(seq_along(masterlist), function(x) {
    lapply(seq_along(masterlist[[x]]), function(y) {
      length(masterlist[[x]][[y]])
    })
  })
  return(counts)
}
# Persist the function for reuse by other analysis scripts.
# NOTE(review): absolute Windows path will fail elsewhere -- consider a
# relative path or a package.
save(spikecounts.rFunc, file = "C:/Users/Kevin Caref/Google Drive/RScripts/Functions/spikecounts.rFunc")
| /scripts/KC spike counts to feed GLM_CS+.R | permissive | kcaref/neural-analysis | R | false | false | 1,911 | r | spikecounts.rFunc = function(spikes, events) {
# Contract (the function header sits on the data row above):
#   spikes: list of numeric spike-timestamp vectors (seconds), one per experiment
#   events: list of numeric event-time vectors (seconds), parallel to `spikes`
#   value : list (per experiment) of per-trial spike COUNTS falling inside
#           the window [event + winmin, event + winmax]
#min here is additive; e.g. need to find spike counts between 50 and 200 ms after cue onset
winmin = .05 #in s
winmax = .5 #in s
#####################################################################
#This function returns a list of lists of lists:
#Level 1 is the experiment; Level 2 is each neuron in the experiment;
#Level 3 is the time relative to the specified event for each trial
#The object masterlist can then be used to construct PSTHs or rasters
########################################################################
masterlist = lapply(seq(1, length(spikes)), function(x) {
lapply(seq(1, length(events[[x]])), function(y) {
# indices of spikes inside the [winmin, winmax] window around event y
stampsidx = which(spikes[[x]] >= events[[x]][y] + winmin & spikes[[x]] <= events[[x]][y] + winmax)
# timestamps re-expressed relative to the event; the assignment's value is
# what this inner lapply element carries
relstamps = spikes[[x]][stampsidx] - events[[x]][y]
})
})
#######################################################################
#This function returns a list of lists:
#Level 1 is the experiment
#Level 2 contains the the frequency histogram (PSTH)
#for each neuron for the event specified above.
#
#The object neurohist can then be used to plot histograms individually or
#averaged together as in the next function.
#########################################################################
# Per trial, only the number of in-window spikes is kept; the histogram code
# is retained below as comments for reference (it references `binsize`, which
# is not defined in this function -- would need to be passed in if revived).
counts = lapply(seq(1, length(masterlist)), function(x) {
lapply(seq(1:length(masterlist[[x]])), function(y) {
length(masterlist[[x]][[y]])
#allvals = unlist(masterlist[[x]])
#hcounts = hist(allvals, breaks = seq(-winmin, winmax, binsize/1000), plot = F)$counts
#freq = (hcounts/(binsize/1000))/length(masterlist[[x]])
})
})
return(counts)
}
# Persist the function object so other scripts can load() it from this path.
save(spikecounts.rFunc, file = "C:/Users/Kevin Caref/Google Drive/RScripts/Functions/spikecounts.rFunc")
|
# Extracted example from the uniformly package help page for volume_ellipsoid().
library(uniformly)
### Name: volume_ellipsoid
### Title: Ellipsoid volume
### Aliases: volume_ellipsoid
### ** Examples
# dimension 2 (area), with diagonal matrix A
A <- diag(c(2,3))
r <- 2
# volume (here: area) of the ellipsoid {x : t(x) %*% A %*% x <= r^2}
volume_ellipsoid(A, r)
# closed-form check for the 2-D diagonal case: pi * r^2 / sqrt(det(A))
pi * r^2 / sqrt(A[1,1]*A[2,2])
| /data/genthat_extracted_code/uniformly/examples/volume_ellipsoid.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 251 | r | library(uniformly)
### Name: volume_ellipsoid
### Title: Ellipsoid volume
### Aliases: volume_ellipsoid
### ** Examples
# dimension 2 (area), with diagonal matrix A
A <- diag(c(2,3))
r <- 2
volume_ellipsoid(A, r)
pi * r^2 / sqrt(A[1,1]*A[2,2])
|
# ------------------------------------------------------------------------------
# Parse one scraped property JSON payload into three linked data frames.
#
# The payload is expected to carry Property, Building, Sales and Appraisals
# tables. Property and Building are combined column-wise into a single record,
# and a freshly generated UUID is stamped onto all three outputs so they can
# later be joined in the database.
#
# Returns a list with elements `prop`, `sales` and `appr`.
dfs_from_json <- function(json) {
  parsed <- jsonlite::fromJSON(json)
  # one shared surrogate key links the three tables
  record_id <- uuid::UUIDgenerate()
  property <- cbind(parsed$Property, parsed$Building)
  property$id <- record_id
  parsed$Sales$id <- record_id
  parsed$Appraisals$id <- record_id
  list(prop = property,
       sales = parsed$Sales,
       appr = parsed$Appraisals)
}
# Row-bind the `prop` element (first slot) of every parsed record into one
# data frame.
combine_props <- function(dfs_list) {
  jsonlite::rbind_pages(lapply(dfs_list, function(rec) rec[[1]]))
}
# Row-bind the `sales` element (second slot) of every parsed record into one
# data frame.
combine_sales <- function(dfs_list) {
  jsonlite::rbind_pages(lapply(dfs_list, function(rec) rec[[2]]))
}
# Row-bind the `appr` element (third slot) of every parsed record into one
# data frame.
combine_apprs <- function(dfs_list) {
  jsonlite::rbind_pages(lapply(dfs_list, function(rec) rec[[3]]))
}
# Normalise a data frame's column names: replace " & ", single spaces and "*"
# with underscores, then lower-case.
#
# Uses base R string functions (gsub/tolower) instead of stringr -- behaviour
# is identical for these ASCII patterns and removes a dependency from this
# helper.
#
# @param df A data frame.
# @return Character vector of cleaned names (the data frame is not modified).
simple_colnames <- function(df) {
  cleaned <- gsub(' & | |\\*', '_', names(df))
  tolower(cleaned)
}
# Clean and type-convert the combined property data frame.
# NOTE(review): dplyr::mutate_each()/funs() are defunct in dplyr >= 1.0 --
# this must be ported to mutate(across(...)) before upgrading dplyr.
# NOTE(review): columns are addressed by POSITION (10, 13:16, 20, 23, ...),
# so any upstream schema change silently corrupts the conversions -- confirm
# the positions against the scraper output.
mutate_props <- function(df) {
# snake_case the names first so later name-based references work
names(df) <- simple_colnames(df)
# strip "$" and "," from the currency columns (still character at this point)
df <- dplyr::mutate_each(df,
dplyr::funs(stringr::str_replace_all(.,
'\\$|,', '')),
c(10, 13:16, 23))
# drop the " Acres" suffix from the land-size column
df <- dplyr::mutate_each(df,
dplyr::funs(stringr::str_replace_all(.,
' Acres', '')),
c(20))
# convert the cleaned text columns to numeric
df <- dplyr::mutate_each(df,
dplyr::funs(as.numeric),
c(10, 13:16, 20, 23, 28:32, 35))
df$sale_date <- as.Date(df$sale_date, "%m/%d/%Y")
# last 5 characters of mailing_address are taken as the ZIP code
df <- dplyr::mutate(df, zip_code = stringr::str_sub(mailing_address, -5))
return(df)
}
# Tidy the sales table: snake_case the column names, parse sale_date
# (m/d/Y strings) and turn "$1,234"-style sale_price strings into numerics.
mutate_sales <- function(df) {
  names(df) <- simple_colnames(df)
  df$sale_date <- as.Date(df$sale_date, "%m/%d/%Y")
  # remove currency punctuation before the numeric conversion
  price_text <- stringr::str_replace_all(df$sale_price, '\\$|,', '')
  df$sale_price <- as.numeric(price_text)
  df
}
# Clean the appraisals table: strip "$"/"," from the currency columns at
# positions 3:5 and 7, then convert them to numeric.
# NOTE(review): mutate_each()/funs() are defunct in dplyr >= 1.0 -- port to
# mutate(across(...)) before upgrading dplyr.
# NOTE(review): unlike mutate_props()/mutate_sales(), the column names are
# NOT normalised here -- confirm that is intentional.
mutate_apprs <- function(df) {
df <- dplyr::mutate_each(df,
dplyr::funs(stringr::str_replace_all(.,
'\\$|,', '')),
c(3:5,7))
df <- dplyr::mutate_each(df, dplyr::funs(as.numeric), c(3:5,7))
return(df)
}
# Append a data frame to a database table, without writing row names.
#
# @param con An open RPostgreSQL/DBI connection.
# @param sql_tbl Name of the target table.
# @param df Data frame to append.
write_db <- function(con, sql_tbl, df){
  # TRUE/FALSE rather than T/F: T and F are ordinary variables that can be
  # reassigned, so the abbreviations are unsafe in library code.
  RPostgreSQL::dbWriteTable(con, sql_tbl, df, row.names = FALSE, append = TRUE)
}
| /scraper/json_to_db.R | no_license | davidcearl/nash-prop | R | false | false | 2,466 | r | # ------------------------------------------------------------------------------
dfs_from_json <- function(json) {
df_list <- jsonlite::fromJSON(json)
prop_df <- cbind(df_list$Property, df_list$Building)
uuid <- uuid::UUIDgenerate()
prop_df$id <- uuid
df_list$Sales$id <- uuid
df_list$Appraisals$id <- uuid
return(list(prop = prop_df,
sales = df_list$Sales,
appr = df_list$Appraisals))
}
combine_props <- function(dfs_list) {
props <- lapply(dfs_list, '[[', 1)
return(jsonlite::rbind_pages(props))
}
combine_sales <- function(dfs_list) {
props <- lapply(dfs_list, '[[', 2)
return(jsonlite::rbind_pages(props))
}
combine_apprs <- function(dfs_list) {
props <- lapply(dfs_list, '[[', 3)
return(jsonlite::rbind_pages(props))
}
simple_colnames <- function(df) {
df_names <- names(df)
df_names <- stringr::str_replace_all(df_names, ' & | |\\*', '_')
df_names <- stringr::str_to_lower(df_names)
return(df_names)
}
mutate_props <- function(df) {
names(df) <- simple_colnames(df)
df <- dplyr::mutate_each(df,
dplyr::funs(stringr::str_replace_all(.,
'\\$|,', '')),
c(10, 13:16, 23))
df <- dplyr::mutate_each(df,
dplyr::funs(stringr::str_replace_all(.,
' Acres', '')),
c(20))
df <- dplyr::mutate_each(df,
dplyr::funs(as.numeric),
c(10, 13:16, 20, 23, 28:32, 35))
df$sale_date <- as.Date(df$sale_date, "%m/%d/%Y")
df <- dplyr::mutate(df, zip_code = stringr::str_sub(mailing_address, -5))
return(df)
}
mutate_sales <- function(df) {
names(df) <- simple_colnames(df)
df$sale_date <- as.Date(df$sale_date, "%m/%d/%Y")
df$sale_price <- stringr::str_replace_all(df$sale_price, '\\$|,', '')
df$sale_price <- as.numeric(df$sale_price)
return(df)
}
mutate_apprs <- function(df) {
df <- dplyr::mutate_each(df,
dplyr::funs(stringr::str_replace_all(.,
'\\$|,', '')),
c(3:5,7))
df <- dplyr::mutate_each(df, dplyr::funs(as.numeric), c(3:5,7))
return(df)
}
write_db <- function(con, sql_tbl, df){
RPostgreSQL::dbWriteTable(con, sql_tbl, df, row.names=F, append=T)
}
|
########
# test for most efficient annotation
########
# List of all informative probesets in the microarray data: 5082
test.probe.list = PBL.toptable$ID
length(test.probe.list)
# Obtain the ensembl ids for the informative probesets
test.probe.ensembl.table = toTable(bovineENSEMBL[test.probe.list])
# Number of unique probesets mapped : 3360
length(unique(test.probe.ensembl.table$probe_id))
# Number of unique ensembl ids identified from the list of probesets: 3043
length(unique(test.probe.ensembl.table$ensembl_id))
# Number of final ensembl ids compared to number of initial probe sets: 2039
5082-3043
# Number of probe sets lost in the process: 1722 informative probesets do not map to any ensembl id in bovine.db
5082-3360
# Obtain the gene symbols for the informative probesets
test.probe.symbol.table = toTable([test.probe.list])
nrow(test.probe.symbol.table)
# Number of unique probesets mapped : 3607
length(unique(test.probe.symbol.table$probe_id))
# Number of unique ensembl ids identified from the list of probesets: 3223
length(unique(test.probe.symbol.table$symbol))
# Number of final gene symbol compared to number of initial probe sets: 1859
5082-3223
# Number of probe sets lost in the process: 1475 informative probesets do not map to any gene symbol in bovine.db
5082-3607 | /OtherScripts/Test.annotations.R | no_license | kmcloughlin1/PBL-Kirsten | R | false | false | 1,302 | r | ########
# test for most efficient annotation
########
# List of all informative probesets in the microarray data: 5082
test.probe.list = PBL.toptable$ID
length(test.probe.list)
# Obtain the ensembl ids for the informative probesets
test.probe.ensembl.table = toTable(bovineENSEMBL[test.probe.list])
# Number of unique probesets mapped : 3360
length(unique(test.probe.ensembl.table$probe_id))
# Number of unique ensembl ids identified from the list of probesets: 3043
length(unique(test.probe.ensembl.table$ensembl_id))
# Number of final ensembl ids compared to number of initial probe sets: 2039
5082-3043
# Number of probe sets lost in the process: 1722 informative probesets do not map to any ensembl id in bovine.db
5082-3360
# Obtain the gene symbols for the informative probesets
# FIX(review): the original line read `toTable([test.probe.list])`, which is a
# parse error (the annotation map object was missing). bovineSYMBOL is
# bovine.db's probe-id -> gene-symbol bimap, mirroring the bovineENSEMBL call
# above, and the counts in the comments below (3607 mapped / 3223 symbols)
# match a symbol lookup -- confirm against the loaded annotation package.
test.probe.symbol.table = toTable(bovineSYMBOL[test.probe.list])
nrow(test.probe.symbol.table)
# Number of unique probesets mapped : 3607
length(unique(test.probe.symbol.table$probe_id))
# Number of unique gene symbols identified from the list of probesets: 3223
length(unique(test.probe.symbol.table$symbol))
# Number of final gene symbol compared to number of initial probe sets: 1859
5082-3223
# Number of probe sets lost in the process: 1475 informative probesets do not map to any gene symbol in bovine.db
5082-3607
# Build and save a meme with the memer package.
library(memer)
# list the available meme templates (interactive lookup only; result unused)
meme_list()
m <-
meme_get('what-is-grief') %>%
meme_text_top('What is sliced') %>%
meme_text_bottom('if not contestants persevering')
# printing the magick image renders it in the viewer
m
# write the rendered meme to disk as a PNG
magick::image_write(m, 'whatisgrief.png')
| /20210316/meme.R | no_license | mufflyt/ds-sliced | R | false | false | 203 | r |
library(memer)
meme_list()
m <-
meme_get('what-is-grief') %>%
meme_text_top('What is sliced') %>%
meme_text_bottom('if not contestants persevering')
m
magick::image_write(m, 'whatisgrief.png')
|
# --- Part 1: toy 2-D Gaussian classes with a train/test split ----------------
n <- 150 # number of data points
p <- 2 # dimension
sigma <- 1 # standard deviation of the distributions (used as sd= below)
meanpos <- 0 # centre of the distribution of positive examples
meanneg <- 3 # centre of the distribution of negative examples
npos <- round(n/2) # number of positive examples
nneg <- n-npos # number of negative examples
# Generate the positive and negative examples
xpos <- matrix(rnorm(npos*p,mean=meanpos,sd=sigma),npos,p)
# FIX: the negative block was built with `npos` rows; it must be `nneg`.
# The two only coincide when n is even, so n = 150 happened to work.
xneg <- matrix(rnorm(nneg*p,mean=meanneg,sd=sigma),nneg,p)
x <- rbind(xpos,xneg)
# Generate the labels (+1 for the first npos rows, -1 for the rest)
y <- matrix(c(rep(1,npos),rep(-1,nneg)))
# Visualize the data
plot(x,col=ifelse(y>0,1,2))
legend("topleft",c('Positive','Negative'),col=seq(2),pch=1,text.col=seq(2))
# 80/20 train/test split
ntrain <- round(n*0.8) # number of training examples
tindex <- sample(n,ntrain) # indices of training samples
xtrain <- x[tindex,]
xtest <- x[-tindex,]
ytrain <- y[tindex]
ytest <- y[-tindex]
istrain <- rep(0,n) # indicator: 1 = training row, 0 = test row
istrain[tindex] <- 1
# Visualize (plot symbol distinguishes train from test points)
plot(x,col=ifelse(y>0,1,2),pch=ifelse(istrain==1,1,2))
legend("topleft",c('Positive Train','Positive Test','Negative Train','Negative Test'),col=c(1,1,2,2), pch=c(1,2,1,2), text.col=c(1,1,2,2))
# --- Part 2: SVM regression on the mlbench Ozone data (e1071 vignette) -------
library(e1071)
library(rpart)
# FIX: the closing quote was mojibake in the original ("mlbenchâ€), which is a
# parse error; restore the intended package name.
data(Ozone, package="mlbench")
## split data into a train and test set
index <- seq_len(nrow(Ozone)) # seq_len() is safe even for an empty frame
testindex <- sample(index, trunc(length(index)/3))
testset <- na.omit(Ozone[testindex,-3])
trainset <- na.omit(Ozone[-testindex,-3])
# regress column V4 of the Ozone data on the remaining columns
svm.model <- svm(V4 ~ ., data = trainset, cost = 1000, gamma = 0.0001)
svm.pred <- predict(svm.model, testset[,-3])
# approximate test MSE -- note the divisor is the pre-na.omit test size,
# so rows dropped by na.omit() slightly deflate the estimate
crossprod(svm.pred - testset[,3]) / length(testindex)
| /2021-04-01.R | no_license | hangyenli/DataAnalytics_2021_Honghao_Li | R | false | false | 1,559 | r | n <- 150 # number of data points
p <- 2 # dimension
sigma <- 1 # variance of the distribution
meanpos <- 0 # centre of the distribution of positive examples
meanneg <- 3 # centre of the distribution of negative examples
npos <- round(n/2) # number of positive examples
nneg <- n-npos # number of negative examples
# Generate the positive and negative examples
xpos <- matrix(rnorm(npos*p,mean=meanpos,sd=sigma),npos,p)
xneg <- matrix(rnorm(nneg*p,mean=meanneg,sd=sigma),npos,p)
x <- rbind(xpos,xneg)
# Generate the labels
y <- matrix(c(rep(1,npos),rep(-1,nneg)))
# Visualize the data
plot(x,col=ifelse(y>0,1,2))
legend("topleft",c('Positive','Negative'),col=seq(2),pch=1,text.col=seq(2))
#
ntrain <- round(n*0.8) # number of training examples
tindex <- sample(n,ntrain) # indices of training samples
xtrain <- x[tindex,]
xtest <- x[-tindex,]
ytrain <- y[tindex]
ytest <- y[-tindex]
istrain=rep(0,n)
istrain[tindex]=1
# Visualize
plot(x,col=ifelse(y>0,1,2),pch=ifelse(istrain==1,1,2))
legend("topleft",c('Positive Train','Positive Test','Negative Train','Negative Test'),col=c(1,1,2,2), pch=c(1,2,1,2), text.col=c(1,1,2,2))
library(e1071)
library(rpart)
data(Ozone, package="mlbenchâ€)
## split data into a train and test set
index <- 1:nrow(Ozone)
testindex <- sample(index, trunc(length(index)/3))
testset <- na.omit(Ozone[testindex,-3])
trainset <- na.omit(Ozone[-testindex,-3])
svm.model <- svm(V4 ~ ., data = trainset, cost = 1000, gamma = 0.0001)
svm.pred <- predict(svm.model, testset[,-3])
crossprod(svm.pred - testset[,3]) / length(testindex)
|
# Unroot a Newick tree with ape (the matching write.tree() call is on the
# data-dump row that follows). The directory layout suggests this prepares
# input for codeml -- confirm against the pipeline.
library(ape)
# parse the Newick-format tree for this gene family
testtree <- read.tree("10371_1.txt")
# collapse the root so the topology is unrooted
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="10371_1_unrooted.txt") | /codeml_files/newick_trees_processed_and_cleaned/10371_1/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 137 | r | library(ape)
testtree <- read.tree("10371_1.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="10371_1_unrooted.txt") |
library(tidyverse)
library(tidytext)
library(gutenbergr)
library(janeaustenr)
library(ggraph)
library(widyr)
library(igraph)
#construct a bigram: tokenize Jane Austen's novels into 2-word sequences
austen_bigram <- austen_books() %>% unnest_tokens(bigram, text, token = "ngrams",n=2)
#count the bigram (printed for inspection, not stored)
austen_bigram %>% count(bigram,sort = T)
#separate bigram to filter stop-words (one column per word)
bigram_separate <- austen_bigram %>% separate(bigram,c("word1","word2"),sep = " ")
#keep only bigrams in which neither word is a stop word
bigram_filter <- bigram_separate %>%
filter(!word1 %in% stop_words$word) %>%
filter(!word2 %in% stop_words$word)
bigram_count <- bigram_filter %>% count(word1,word2,sort = T)
#unite the bigram; NOTE: this overwrites austen_bigram with the
#stop-word-filtered version that the tf-idf section below uses
austen_bigram <- bigram_filter %>% unite(bigram,word1,word2,sep = " ")
#using 3-grams
trigram <- austen_books() %>% unnest_tokens(trigram, text, token = "ngrams",n=3) %>%
separate(trigram,c("word1","word2","word3"),sep = " ") %>%
filter(!word1 %in% stop_words$word) %>%
filter(!word2 %in% stop_words$word) %>%
filter(!word3 %in% stop_words$word)
trigram %>% count(word1,word2,word3,sort = T)
#check the most common street in the books
bigram_filter %>%
filter(word2 == "street") %>%
count(book, word1, sort = TRUE)
#bind_tf_idf
bigram_tf <- austen_bigram %>% count(book,bigram) %>%
bind_tf_idf(bigram,book,n) %>% arrange(desc(tf_idf))
bigram_tf %>% group_by(book) %>% top_n(15,tf_idf) %>% ungroup() %>%
mutate(bigram = factor(bigram,levels = rev(unique(bigram)))) %>%
ggplot()+
geom_col(aes(bigram,tf_idf,fill=book),show.legend = F)+
facet_wrap(~book,scales = "free")+
coord_flip()
#see phrase with a negation (no!)
bigram_separate %>% filter(word1 == "not") %>% count(word1,word2,sort = T)
not_words <- bigram_separate %>% filter(word1=="not") %>%
inner_join(get_sentiments("afinn"),by = c(word2 = "word")) %>%
count(word2,value,sort = T)
#check the contribution of each words
not_words %>% mutate(contribution = n*value) %>%
arrange(desc(abs(contribution))) %>% head(20) %>%
mutate(word2 = reorder(word2,contribution)) %>%
ggplot()+
geom_col(aes(word2,contribution,fill=contribution>0))+
xlab("Words preceded by \"not\"") +
ylab("Sentiment score * number of occurrences") +
coord_flip()
#negation words
negation_words <- c("not", "no", "never", "without")
negated_word <- bigram_separate %>% filter(word1 %in% negation_words) %>%
inner_join(get_sentiments("afinn"),by = c(word2 = "word")) %>%
count(word1,word2,value,sort = T) %>% mutate(contribution = n*value)
negated_word %>% group_by(word1) %>%
top_n(15,abs(contribution)) %>% ungroup() %>%
mutate(word2 = reorder(word2,abs(contribution))) %>%
ggplot()+
geom_bar(aes(word2,contribution,fill=contribution>0),stat = "identity")+
facet_wrap(~word1,scales = "free")+
coord_flip()
#Network of bigrams
bigram_graph <- bigram_count %>%
filter(n > 20) %>%
graph_from_data_frame()
set.seed(2017)
ggraph(bigram_graph, layout = "fr") +
geom_edge_link() +
geom_node_point() +
geom_node_text(aes(label = name), vjust = 1, hjust = 1)
set.seed(2016)
a <- grid::arrow(type = "closed", length = unit(.15, "inches"))
ggraph(bigram_graph, layout = "fr") +
geom_edge_link(aes(edge_alpha = n), show.legend = T,
arrow = a, end_cap = circle(.07, 'inches')) +
geom_node_point(color = "lightblue", size = 5) +
geom_node_text(aes(label = name), vjust = 1, hjust = 1) +
theme_void()
#counting and correlating among sections
austen_section_words <- austen_books() %>%
filter(book == "Pride & Prejudice") %>%
mutate(section = row_number() %/% 10) %>%
filter(section > 0) %>%
unnest_tokens(word, text) %>%
filter(!word %in% stop_words$word)
word_pair <- austen_section_words %>% pairwise_count(word,section,sort=T)
#find the words that most often occur with Darcy
word_pair %>% filter(item1=="darcy")
#find pairwise phi-correlation between words that co-occur in the same
#10-line section (words kept only if they appear at least 20 times)
word_cor <- austen_section_words %>% group_by(word) %>% filter(n() >= 20) %>%
pairwise_cor(word,section,sort=T)
word_cor %>%
filter(item1 %in% c("elizabeth", "pounds", "married", "pride")) %>%
group_by(item1) %>%
top_n(6) %>%
ungroup() %>%
mutate(item2 = reorder(item2, correlation)) %>%
ggplot(aes(item2, correlation)) +
geom_bar(stat = "identity") +
facet_wrap(~ item1, scales = "free") +
coord_flip()
set.seed(2016)
word_cor %>%
filter(correlation > .15) %>%
graph_from_data_frame() %>%
ggraph(layout = "fr") +
geom_edge_link(aes(edge_alpha = correlation), show.legend = FALSE) +
geom_node_point(color = "lightblue", size = 5) +
geom_node_text(aes(label = name), repel = TRUE) +
theme_void()
#EXCERCISE ON DARWIN'S BOOKS --------------------------------------------------
darwin <- gutenberg_download(c(1228),meta_fields = "title")
darwin_bigram <- darwin %>% unnest_tokens(bigram,text,token = "ngrams",n=2) %>%
separate(bigram,c("word1","word2"))
#see negative word contribution
negation_words <- c("not", "no", "never", "without")
darwin_negate <- darwin_bigram %>% filter(word1 %in% negation_words) %>%
inner_join(get_sentiments("afinn"),by = c(word2="word")) %>%
count(word1,word2,value,sort = T) %>% mutate(contribution = n*value)
png("D:/R/tutorial/text_mining/chap4_negate1.png",
width = 3960, height = 2160, units = 'px', res = 300)
darwin_negate %>% group_by(word1) %>% top_n(10,abs(contribution)) %>% ungroup() %>%
mutate(word2 = reorder(word2,abs(contribution))) %>%
ggplot(aes(word2,contribution,fill=contribution>0))+
geom_bar(stat = "identity")+
facet_wrap(~word1,scales = "free")+
xlab("Words preceded by \"not\"") +
ylab("Sentiment score * number of occurrences")+
labs(title = "Words preceded by negation words in Darwin's On The Origin Of Species",
subtitle = "Negation words (like never doubt) can misrepresent the sentiment score in text")+
theme(panel.grid = element_blank(),
panel.background = element_rect(fill = "#1D2024"),
plot.background = element_rect(fill = "#1D2024"),
text = element_text(colour = "white"),
axis.text = element_text(colour = "lightyellow"),
strip.background = element_blank(),
strip.text = element_text(colour = "lightyellow",size=12),
legend.background = element_rect(fill = "#1D2024"),
legend.text = element_text(colour = "white"))+
coord_flip()
dev.off()
#bind_tf_idf
darwin <- gutenberg_download(c(1228,2300,944,1227,3620),meta_fields = "title")
darwin_bigram <- darwin %>% unnest_tokens(bigram,text,token = "ngrams",n=2) %>%
separate(bigram,c("word1","word2"))
darwin_tf <- darwin_bigram %>% unite(bigram,word1,word2,sep = " ") %>%
count(title,bigram,sort = T) %>%
bind_tf_idf(bigram,title,n) %>% arrange(desc(tf_idf))
custom_stop <- data.frame(bigram = c("vol i", "vol ii", "g b", "m d"))
darwin_tf <- darwin_tf %>% anti_join(custom_stop)
png("D:/R/tutorial/text_mining/chap4_bigramtf1.png",
width = 3960, height = 2160, units = 'px', res = 300)
darwin_tf %>% group_by(title) %>% top_n(10,tf_idf) %>% ungroup() %>%
mutate(bigram = factor(bigram,levels = rev(unique(bigram))),
title = factor(title,levels = rev(unique(title)))) %>%
ggplot()+
geom_col(aes(bigram,tf_idf,fill=tf_idf),show.legend = F)+
facet_wrap(~title,scales = "free_y",ncol = 2)+
coord_flip()+
scale_fill_viridis_c(option = "B")+
theme(panel.grid = element_blank(),
panel.background = element_rect(fill = "#1D2024"),
plot.background = element_rect(fill = "#1D2024"),
text = element_text(colour = "white"),
axis.text = element_text(colour = "lightyellow"),
strip.background = element_blank(),
strip.text = element_text(colour = "lightyellow",size=12),
legend.background = element_rect(fill = "#1D2024"),
legend.text = element_text(colour = "white"))+
labs(x=NULL,y=NULL,fill="tf-idf",
title = "Most Important Bigrams (2 consecutive words) in Each of Darwin's Book",
subtitle = "Using the score of term frequency (tf)- inverse document frequency (idf)")
dev.off()
#bigrams network for origin of species
darwin <- gutenberg_download(c(1228),meta_fields = "title")
darwin_bigram <- darwin %>% unnest_tokens(bigram,text,token = "ngrams",n=2) %>%
separate(bigram,c("word1","word2"))
darwin_network <- darwin_bigram %>% count(word1,word2,sort = T) %>%
filter(n > 75) %>%
graph_from_data_frame()
set.seed(2017)
a <- grid::arrow(type = "closed", length = unit(.15, "inches"))
png("D:/R/tutorial/text_mining/chap4_bigram2.png",
width = 3960, height = 2160, units = 'px', res = 300)
ggraph(darwin_network, layout = "fr") +
geom_edge_link(aes(edge_alpha = n), show.legend = T,
arrow = a, end_cap = circle(.07, 'inches')) +
geom_node_point(color = "lightblue", size = 5) +
geom_node_text(aes(label = name), vjust = 1, hjust = 1)+
theme_void()
dev.off()
#bigram correlation in the same section
darwin_section_words <- darwin %>%
mutate(section = row_number() %/% 10) %>%
filter(section > 0) %>%
unnest_tokens(word, text) %>%
filter(!word %in% stop_words$word)
word_cor <- darwin_section_words %>% group_by(word) %>% filter(n() >= 20) %>%
pairwise_cor(word,section,sort=T)
set.seed(2016)
png("D:/R/tutorial/text_mining/chap4_bigram3.png",
width = 3960, height = 2160, units = 'px', res = 300)
word_cor %>%
filter(correlation > .3) %>%
graph_from_data_frame() %>%
ggraph(layout = "fr") +
geom_edge_link(aes(edge_alpha = correlation), show.legend = FALSE) +
geom_node_point(color = "lightblue", size = 5) +
geom_node_text(aes(label = name), repel = TRUE) +
theme_void()+
labs(title = "Correlation > 0.3 between words in the same section")
dev.off()
| /chapter4/chapter4.R | no_license | Argaadya/Text_Mining_Tutorial | R | false | false | 9,889 | r | library(tidyverse)
library(tidytext)
library(gutenbergr)
library(janeaustenr)
library(ggraph)
library(widyr)
library(igraph)
#construct a bigram
austen_bigram <- austen_books() %>% unnest_tokens(bigram, text, token = "ngrams",n=2)
#count the bigram
austen_bigram %>% count(bigram,sort = T)
#separate bigram to filter stop-words
bigram_separate <- austen_bigram %>% separate(bigram,c("word1","word2"),sep = " ")
bigram_filter <- bigram_separate %>%
filter(!word1 %in% stop_words$word) %>%
filter(!word2 %in% stop_words$word)
bigram_count <- bigram_filter %>% count(word1,word2,sort = T)
#unite the bigram
austen_bigram <- bigram_filter %>% unite(bigram,word1,word2,sep = " ")
#using 3-grams
trigram <- austen_books() %>% unnest_tokens(trigram, text, token = "ngrams",n=3) %>%
separate(trigram,c("word1","word2","word3"),sep = " ") %>%
filter(!word1 %in% stop_words$word) %>%
filter(!word2 %in% stop_words$word) %>%
filter(!word3 %in% stop_words$word)
trigram %>% count(word1,word2,word3,sort = T)
#check the most common street in the books
bigram_filter %>%
filter(word2 == "street") %>%
count(book, word1, sort = TRUE)
#bind_tf_idf
bigram_tf <- austen_bigram %>% count(book,bigram) %>%
bind_tf_idf(bigram,book,n) %>% arrange(desc(tf_idf))
bigram_tf %>% group_by(book) %>% top_n(15,tf_idf) %>% ungroup() %>%
mutate(bigram = factor(bigram,levels = rev(unique(bigram)))) %>%
ggplot()+
geom_col(aes(bigram,tf_idf,fill=book),show.legend = F)+
facet_wrap(~book,scales = "free")+
coord_flip()
#see phrase with a negation (no!)
bigram_separate %>% filter(word1 == "not") %>% count(word1,word2,sort = T)
not_words <- bigram_separate %>% filter(word1=="not") %>%
inner_join(get_sentiments("afinn"),by = c(word2 = "word")) %>%
count(word2,value,sort = T)
#check the contribution of each words
not_words %>% mutate(contribution = n*value) %>%
arrange(desc(abs(contribution))) %>% head(20) %>%
mutate(word2 = reorder(word2,contribution)) %>%
ggplot()+
geom_col(aes(word2,contribution,fill=contribution>0))+
xlab("Words preceded by \"not\"") +
ylab("Sentiment score * number of occurrences") +
coord_flip()
#negation words
negation_words <- c("not", "no", "never", "without")
negated_word <- bigram_separate %>% filter(word1 %in% negation_words) %>%
inner_join(get_sentiments("afinn"),by = c(word2 = "word")) %>%
count(word1,word2,value,sort = T) %>% mutate(contribution = n*value)
negated_word %>% group_by(word1) %>%
top_n(15,abs(contribution)) %>% ungroup() %>%
mutate(word2 = reorder(word2,abs(contribution))) %>%
ggplot()+
geom_bar(aes(word2,contribution,fill=contribution>0),stat = "identity")+
facet_wrap(~word1,scales = "free")+
coord_flip()
#Network of bigrams
bigram_graph <- bigram_count %>%
filter(n > 20) %>%
graph_from_data_frame()
set.seed(2017)
ggraph(bigram_graph, layout = "fr") +
geom_edge_link() +
geom_node_point() +
geom_node_text(aes(label = name), vjust = 1, hjust = 1)
set.seed(2016)
a <- grid::arrow(type = "closed", length = unit(.15, "inches"))
ggraph(bigram_graph, layout = "fr") +
geom_edge_link(aes(edge_alpha = n), show.legend = T,
arrow = a, end_cap = circle(.07, 'inches')) +
geom_node_point(color = "lightblue", size = 5) +
geom_node_text(aes(label = name), vjust = 1, hjust = 1) +
theme_void()
#counting and correlating among sections
austen_section_words <- austen_books() %>%
filter(book == "Pride & Prejudice") %>%
mutate(section = row_number() %/% 10) %>%
filter(section > 0) %>%
unnest_tokens(word, text) %>%
filter(!word %in% stop_words$word)
word_pair <- austen_section_words %>% pairwise_count(word,section,sort=T)
#find the words that most often occur with Darcy
word_pair %>% filter(item1=="darcy")
#pind phi-correlation
word_cor <- austen_section_words %>% group_by(word) %>% filter(n() >= 20) %>%
pairwise_cor(word,section,sort=T)
word_cor %>%
filter(item1 %in% c("elizabeth", "pounds", "married", "pride")) %>%
group_by(item1) %>%
top_n(6) %>%
ungroup() %>%
mutate(item2 = reorder(item2, correlation)) %>%
ggplot(aes(item2, correlation)) +
geom_bar(stat = "identity") +
facet_wrap(~ item1, scales = "free") +
coord_flip()
set.seed(2016)
word_cor %>%
filter(correlation > .15) %>%
graph_from_data_frame() %>%
ggraph(layout = "fr") +
geom_edge_link(aes(edge_alpha = correlation), show.legend = FALSE) +
geom_node_point(color = "lightblue", size = 5) +
geom_node_text(aes(label = name), repel = TRUE) +
theme_void()
#EXCERCISE ON DARWIN'S BOOKS --------------------------------------------------
darwin <- gutenberg_download(c(1228),meta_fields = "title")
darwin_bigram <- darwin %>% unnest_tokens(bigram,text,token = "ngrams",n=2) %>%
separate(bigram,c("word1","word2"))
#see negative word contribution
negation_words <- c("not", "no", "never", "without")
darwin_negate <- darwin_bigram %>% filter(word1 %in% negation_words) %>%
inner_join(get_sentiments("afinn"),by = c(word2="word")) %>%
count(word1,word2,value,sort = T) %>% mutate(contribution = n*value)
png("D:/R/tutorial/text_mining/chap4_negate1.png",
width = 3960, height = 2160, units = 'px', res = 300)
darwin_negate %>% group_by(word1) %>% top_n(10,abs(contribution)) %>% ungroup() %>%
mutate(word2 = reorder(word2,abs(contribution))) %>%
ggplot(aes(word2,contribution,fill=contribution>0))+
geom_bar(stat = "identity")+
facet_wrap(~word1,scales = "free")+
xlab("Words preceded by \"not\"") +
ylab("Sentiment score * number of occurrences")+
labs(title = "Words preceded by negation words in Darwin's On The Origin Of Species",
subtitle = "Negation words (like never doubt) can misrepresent the sentiment score in text")+
theme(panel.grid = element_blank(),
panel.background = element_rect(fill = "#1D2024"),
plot.background = element_rect(fill = "#1D2024"),
text = element_text(colour = "white"),
axis.text = element_text(colour = "lightyellow"),
strip.background = element_blank(),
strip.text = element_text(colour = "lightyellow",size=12),
legend.background = element_rect(fill = "#1D2024"),
legend.text = element_text(colour = "white"))+
coord_flip()
dev.off()
#bind_tf_idf
darwin <- gutenberg_download(c(1228,2300,944,1227,3620),meta_fields = "title")
darwin_bigram <- darwin %>% unnest_tokens(bigram,text,token = "ngrams",n=2) %>%
separate(bigram,c("word1","word2"))
darwin_tf <- darwin_bigram %>% unite(bigram,word1,word2,sep = " ") %>%
count(title,bigram,sort = T) %>%
bind_tf_idf(bigram,title,n) %>% arrange(desc(tf_idf))
custom_stop <- data.frame(bigram = c("vol i", "vol ii", "g b", "m d"))
darwin_tf <- darwin_tf %>% anti_join(custom_stop)
png("D:/R/tutorial/text_mining/chap4_bigramtf1.png",
width = 3960, height = 2160, units = 'px', res = 300)
darwin_tf %>% group_by(title) %>% top_n(10,tf_idf) %>% ungroup() %>%
mutate(bigram = factor(bigram,levels = rev(unique(bigram))),
title = factor(title,levels = rev(unique(title)))) %>%
ggplot()+
geom_col(aes(bigram,tf_idf,fill=tf_idf),show.legend = F)+
facet_wrap(~title,scales = "free_y",ncol = 2)+
coord_flip()+
scale_fill_viridis_c(option = "B")+
theme(panel.grid = element_blank(),
panel.background = element_rect(fill = "#1D2024"),
plot.background = element_rect(fill = "#1D2024"),
text = element_text(colour = "white"),
axis.text = element_text(colour = "lightyellow"),
strip.background = element_blank(),
strip.text = element_text(colour = "lightyellow",size=12),
legend.background = element_rect(fill = "#1D2024"),
legend.text = element_text(colour = "white"))+
labs(x=NULL,y=NULL,fill="tf-idf",
title = "Most Important Bigrams (2 consecutive words) in Each of Darwin's Book",
subtitle = "Using the score of term frequency (tf)- inverse document frequency (idf)")
dev.off()
#bigrams network for origin of species
darwin <- gutenberg_download(c(1228),meta_fields = "title")
darwin_bigram <- darwin %>% unnest_tokens(bigram,text,token = "ngrams",n=2) %>%
separate(bigram,c("word1","word2"))
darwin_network <- darwin_bigram %>% count(word1,word2,sort = T) %>%
filter(n > 75) %>%
graph_from_data_frame()
set.seed(2017)
a <- grid::arrow(type = "closed", length = unit(.15, "inches"))
png("D:/R/tutorial/text_mining/chap4_bigram2.png",
width = 3960, height = 2160, units = 'px', res = 300)
ggraph(darwin_network, layout = "fr") +
geom_edge_link(aes(edge_alpha = n), show.legend = T,
arrow = a, end_cap = circle(.07, 'inches')) +
geom_node_point(color = "lightblue", size = 5) +
geom_node_text(aes(label = name), vjust = 1, hjust = 1)+
theme_void()
dev.off()
# Word co-occurrence correlations within 10-line "sections" of the text.
darwin_section_words <- darwin %>%
  mutate(section = row_number() %/% 10) %>%
  filter(section > 0) %>%
  unnest_tokens(word, text) %>%
  filter(!word %in% stop_words$word)
# Pairwise phi correlation between words, restricted to words appearing
# at least 20 times (rare words give unstable correlations).
word_cor <- darwin_section_words %>% group_by(word) %>% filter(n() >= 20) %>%
  pairwise_cor(word,section,sort=T)
# Fixed seed so the force-directed layout is reproducible.
set.seed(2016)
# NOTE(review): hard-coded absolute Windows path -- not portable.
png("D:/R/tutorial/text_mining/chap4_bigram3.png",
    width = 3960, height = 2160, units = 'px', res = 300)
# Graph of strongly correlated word pairs (correlation > 0.3).
word_cor %>%
  filter(correlation > .3) %>%
  graph_from_data_frame() %>%
  ggraph(layout = "fr") +
  geom_edge_link(aes(edge_alpha = correlation), show.legend = FALSE) +
  geom_node_point(color = "lightblue", size = 5) +
  geom_node_text(aes(label = name), repel = TRUE) +
  theme_void()+
  labs(title = "Correlation > 0.3 between words in the same section")
dev.off()
|
testlist <- list(iK = 15990784L)
result <- do.call(eDMA:::PowerSet,testlist)
str(result) | /eDMA/inst/testfiles/PowerSet/AFL_PowerSet/PowerSet_valgrind_files/1609869919-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 88 | r | testlist <- list(iK = 15990784L)
result <- do.call(eDMA:::PowerSet,testlist)
str(result) |
#' Function to count variables/columns/fields in a database table.
#'
#' @author Stuart K. Grange
#'
#' @export
db_count_variables <- function(con, table = NA) {
  # With no table given, count variables for every table in the database.
  if (is.na(table[1])) {
    table <- db_list_tables(con)
  }
  # One data-frame row per table: the table's name and its variable count.
  plyr::ldply(table, function(tbl) db_count_variables_worker(con, tbl))
}
# Helper: build a one-row data frame with a table's name and the number of
# variables (columns) it contains.
db_count_variables_worker <- function(con, table) {
  n_vars <- length(db_list_variables(con, table))
  data.frame(table = table,
             variable_count = n_vars,
             stringsAsFactors = FALSE)
}
| /R/db_count_variables.R | no_license | MohoWu/databaser | R | false | false | 516 | r | #' Function to count variables/columns/fields in a database table.
#'
#' @author Stuart K. Grange
#'
#' @export
# Count variables (columns) in one or more database tables.
# con:   a database connection understood by db_list_tables/db_list_variables.
# table: character vector of table names; NA (default) means all tables.
# Returns a data frame with columns `table` and `variable_count`.
db_count_variables <- function(con, table = NA) {
  # NA default means: enumerate every table in the database.
  if (is.na(table[1])) table <- db_list_tables(con)
  # Build one row per table via the worker.
  df <- plyr::ldply(table, function(x) db_count_variables_worker(con, x))
  # Return
  df
}
# Worker: one-row data frame giving a single table's name and variable count.
db_count_variables_worker <- function(con, table) {
  data.frame(
    table = table, 
    variable_count = length(db_list_variables(con, table)),
    stringsAsFactors = FALSE
  )
}
|
\name{MVar}
\alias{MVar-package}
\docType{package}
\title{Multivariate Analysis.}
\description{Package for multivariate analysis, having functions that perform simple correspondence analysis (CA) and multiple correspondence analysis (MCA), principal components analysis (PCA), canonical correlation analysis (CCA), factorial analysis (FA), multidimensional scaling (MDS), linear (LDA) and quadratic discriminant analysis (QDA), hierarchical and non-hierarchical cluster analysis, simple and multiple linear regression, multiple factor analysis (MFA) for quantitative, qualitative, frequency (MFACT) and mixed data, biplot, scatter plot, projection pursuit (PP), grand tour method and other useful functions for the multivariate analysis.
}
\details{
\tabular{ll}{
Package: \tab MVar\cr
Type: \tab Package\cr
Version: \tab 2.2.1\cr
Date: \tab 2023-08-19\cr
License: \tab GPL(>= 2)\cr
LazyLoad: \tab yes\cr
}
}
\author{
Paulo Cesar Ossani and Marcelo Angelo Cirillo.
Maintainer: Paulo Cesar Ossani <ossanipc@hotmail.com>
}
\references{
ABDESSEMED, L. and ESCOFIER, B.; Analyse factorielle multiple de tableaux de frequencies: comparaison avec l'analyse canonique des correspondences. \emph{Journal de la Societe de Statistique de Paris}, Paris, v. 137, n. 2, p. 3-18, 1996.
ABDI, H. Singular Value Decomposition (SVD) and Generalized Singular Value Decomposition (GSVD). In: SALKIND, N. J. (Ed.). \emph{Encyclopedia of measurement and statistics.} Thousand Oaks: Sage, 2007. p. 907-912.
ABDI, H.; VALENTIN, D. Multiple factor analysis (MFA). In: SALKIND, N. J. (Ed.). \emph{Encyclopedia of measurement and statistics.} Thousand Oaks: Sage, 2007. p. 657-663.
ABDI, H.; WILLIAMS, L. Principal component analysis. \emph{WIREs Computational Statatistics}, New York, v. 2, n. 4, p. 433-459, July/Aug. 2010.
ABDI, H.; WILLIAMS, L.; VALENTIN, D. Multiple factor analysis: principal component analysis for multitable and multiblock data sets. \emph{WIREs Computational Statatistics}, New York, v. 5, n. 2, p. 149-179, Feb. 2013.
ASIMOV, D. The Grand Tour: A Tool for Viewing Multidimensional Data. \emph{SIAM Journal of Scientific and Statistical Computing}, 6(1), 128-143, 1985.
ASIMOV, D.; BUJA, A. The grand tour via geodesic interpolation of 2-frames. in Visual Data Exploration and Analysis. \emph{Symposium on Electronic Imaging Science and Technology}, IS&T/SPIE. 1994.
BECUE-BERTAUT, M.; PAGES, J. A principal axes method for comparing contingency tables: MFACT. \emph{Computational Statistics & Data Analysis}, New York, v. 45, n. 3, p. 481-503, Feb. 2004
BECUE-BERTAUT, M.; PAGES, J. Multiple factor analysis and clustering of a mixture of quantitative, categorical and frequency data. \emph{Computational Statistics & Data Analysis}, New York, v. 52, n. 6, p. 3255-3268, Feb. 2008.
BENZECRI, J. Analyse de l'inertie intraclasse par l'analyse d'un tableau de contingence: intra-classinertia analysis through the analysis of a contingency table. \emph{Les Cahiers de l'Analyse des Donnees}, Paris, v. 8, n. 3, p. 351-358, 1983.
BUJA, A.; ASIMOV, D. Grand tour methods: An outline. \emph{Computer Science and Statistics}, 17:63-67. 1986.
BUJA, A.; COOK, D.; ASIMOV, D.; HURLEY, C. Computational Methods for High-Dimensional Rotations in Data Visualization, in C. R. Rao, E. J. Wegman & J. L. Solka, eds, \emph{"Handbook of Statistics: Data Mining and Visualization"}, Elsevier/North Holland, http://www.elsevier.com, pp. 391-413. 2005.
CHARNET, R., et al. \emph{Analise de modelos de regressao linear,} 2a ed. Campinas: Editora da Unicamp, 2008. 357 p.
COOK, D., LEE, E. K., BUJA, A., WICKHAM, H.. Grand tours, projection pursuit guided tours and manual controls. In Chen, Chunhouh, Hardle, Wolfgang, Unwin, e Antony (Eds.), \emph{Handbook of Data Visualization}, Springer Handbooks of Computational Statistics, chapter III.2, p. 295-314. Springer, 2008.
COOK, D., BUJA, A., CABRERA, J.. Projection pursuit indexes based on orthonormal function expansions. \emph{Journal of Computational and Graphical Statistics}, 2(3):225-250, 1993.
COOK, D., BUJA, A., CABRERA, J., HURLEY, C.. Grand tour and projection pursuit, \emph{Journal of Computational and Graphical Statistics}, 4(3), 155-172, 1995.
COOK, D., SWAYNE, D. F.. Interactive and Dynamic Graphics for Data Analysis: With R and GGobi. Springer. 2007.
ESCOFIER, B. Analyse factorielle en reference a un modele: application a l'analyse d'un tableau d'echanges. \emph{Revue de Statistique Appliquee}, Paris, v. 32, n. 4, p. 25-36, 1984.
ESCOFIER, B.; DROUET, D. Analyse des differences entre plusieurs tableaux de frequence. \emph{Les Cahiers de l'Analyse des Donnees}, Paris, v. 8, n. 4, p. 491-499, 1983.
ESCOFIER, B.; PAGES, J. \emph{Analyse factorielles simples et multiples.} Paris: Dunod, 1990. 267 p.
ESCOFIER, B.; PAGES, J. \emph{Analyses factorielles simples et multiples:} objectifs, methodes et interpretation. 4th ed. Paris: Dunod, 2008. 318 p.
ESCOFIER, B.; PAGES, J. \emph{Comparaison de groupes de variables definies sur le meme ensemble d'individus:} un exemple d'applications. Le Chesnay: Institut National de Recherche en Informatique et en Automatique, 1982. 121 p.
ESCOFIER, B.; PAGES, J. Multiple factor analysis (AFUMULT package). \emph{Computational Statistics & Data Analysis}, New York, v. 18, n. 1, p. 121-140, Aug. 1994
ESPEZUA, S., VILLANUEVA, E., MACIEL, C.D., CARVALHO, A.. A projection pursuit framework for supervised dimension reduction of high dimensional small sample datasets. \emph{Neurocomputing}, 149, 767-776, 2015.
FERREIRA, D. F. \emph{Estatistica multivariada.} 2. ed. rev. e ampl. Lavras: UFLA, 2011. 675 p.
FRIEDMAN, J. H., TUKEY, J. W. A projection pursuit algorithm for exploratory data analysis. \emph{IEEE Transaction on Computers}, 23(9):881-890, 1974.
GREENACRE, M.; BLASIUS, J. \emph{Multiple correspondence analysis and related methods.} New York: Taylor and Francis, 2006. 607 p.
HASTIE, T., BUJA, A., TIBSHIRANI, R.: Penalized discriminant analysis. \emph{The Annals of Statistics}. 23(1), 73-102 . 1995.
HOTELLING, H. Analysis of a complex of statistical variables into principal components. \emph{Journal of Educational Psychology}, Arlington, v. 24, p. 417-441, Sept. 1933.
HUBER, P. J.. Projection pursuit. \emph{Annals of Statistics}, 13(2):435-475, 1985.
HURLEY, C.; BUJA, A. Analyzing high-dimensional data with motion graphics, \emph{SIAM Journal of Scientific and Statistical Computing}, 11 (6), 1193-1211. 1990.
JOHNSON, R. A.; WICHERN, D. W. \emph{Applied multivariate statistical analysis.} 6th ed. New Jersey: Prentice Hall, 2007. 794 p.
JONES, M. C., SIBSON, R.. What is projection pursuit, (with discussion), \emph{Journal of the Royal Statistical Society}, Series A 150, 1-36, 1987.
LEE, E., COOK, D., KLINKE, S., LUMLEY, T.. Projection pursuit for exploratory supervised classification. \emph{Journal of Computational and Graphical Statistics}, 14(4):831-846, 2005.
LEE, E. K., COOK, D.. A projection pursuit index for large p small n data. \emph{Statistics and Computing}, 20(3):381-392, 2010.
MARTINEZ, W. L., MARTINEZ, A. R.; \emph{Computational Statistics Handbook with MATLAB}, 2th. ed. New York: Chapman & Hall/CRC, 2007. 794 p.
MARTINEZ, W. L., MARTINEZ, A. R., SOLKA, J.; \emph{Exploratory Data Analysis with MATLAB}, 2th. ed. New York: Chapman & Hall/CRC, 2010. 499 p.
MINGOTI, S. A. \emph{Analise de dados atraves de metodos de estatistica multivariada:} uma abordagem aplicada. Belo Horizonte: UFMG, 2005. 297 p.
OSSANI, P. C.; CIRILLO, M. A.; BOREM, F. M.; RIBEIRO, D. E.; CORTEZ, R. M.. Quality of specialty coffees: a sensory evaluation by consumers using the MFACT technique. \emph{Revista Ciencia Agronomica (UFC. Online)}, v. 48, p. 92-100, 2017.
OSSANI, P. C. \emph{Qualidade de cafes especiais e nao especiais por meio da analise de multiplos fatores para tabelas de contingencias.} 2015. 107 p. Dissertacao (Mestrado em Estatistica e Experimentacao Agropecuaria) - Universidade Federal de Lavras, Lavras, 2015.
PAGES, J. Analyse factorielle multiple appliquee aux variables qualitatives et aux donnees mixtes. \emph{Revue de Statistique Appliquee}, Paris, v. 50, n. 4, p. 5-37, 2002.
PAGES, J. Multiple factor analysis: main features and application to sensory data. \emph{Revista Colombiana de Estadistica}, Bogota, v. 27, n. 1, p. 1-26, 2004.
PENA, D., PRIETO, F.. Cluster identification using projections. \emph{Journal of the American Statistical Association}, 96(456):1433-1445, 2001.
POSSE, C.. Projection pursuit exploratory data analysis, \emph{Computational Statistics and Data Analysis}, 29:669-687, 1995a.
POSSE, C.. Tools for two-dimensional exploratory projection pursuit, \emph{Journal of Computational and Graphical Statistics}, 4:83-100, 1995b
RENCHER, A.C.; \emph{Methods of Multivariate Analysis.} 2th. ed. New York: J.Wiley, 2002. 708 p.
YOUNG, F. W.; RHEINGANS P. Visualizing structure in high-dimensional multivariate data, \emph{IBM Journal of Research and Development}, 35:97-107, 1991.
YOUNG, F. W.; FALDOWSKI R. A.; McFARLANE M. M. \emph{Multivariate statistical visualization, in Handbook of Statistics}, Vol 9, C. R. Rao (ed.), The Netherlands: Elsevier Science Publishers, 959-998, 1993.
}
\keyword{Multivariate Analysis}
| /man/MVar-package.Rd | no_license | cran/MVar | R | false | false | 9,382 | rd | \name{MVar}
\alias{MVar-package}
\docType{package}
\title{Multivariate Analysis.}
\description{Package for multivariate analysis, having functions that perform simple correspondence analysis (CA) and multiple correspondence analysis (MCA), principal components analysis (PCA), canonical correlation analysis (CCA), factorial analysis (FA), multidimensional scaling (MDS), linear (LDA) and quadratic discriminant analysis (QDA), hierarchical and non-hierarchical cluster analysis, simple and multiple linear regression, multiple factor analysis (MFA) for quantitative, qualitative, frequency (MFACT) and mixed data, biplot, scatter plot, projection pursuit (PP), grand tour method and other useful functions for the multivariate analysis.
}
\details{
\tabular{ll}{
Package: \tab MVar\cr
Type: \tab Package\cr
Version: \tab 2.2.1\cr
Date: \tab 2023-08-19\cr
License: \tab GPL(>= 2)\cr
LazyLoad: \tab yes\cr
}
}
\author{
Paulo Cesar Ossani and Marcelo Angelo Cirillo.
Maintainer: Paulo Cesar Ossani <ossanipc@hotmail.com>
}
\references{
ABDESSEMED, L. and ESCOFIER, B.; Analyse factorielle multiple de tableaux de frequencies: comparaison avec l'analyse canonique des correspondences. \emph{Journal de la Societe de Statistique de Paris}, Paris, v. 137, n. 2, p. 3-18, 1996.
ABDI, H. Singular Value Decomposition (SVD) and Generalized Singular Value Decomposition (GSVD). In: SALKIND, N. J. (Ed.). \emph{Encyclopedia of measurement and statistics.} Thousand Oaks: Sage, 2007. p. 907-912.
ABDI, H.; VALENTIN, D. Multiple factor analysis (MFA). In: SALKIND, N. J. (Ed.). \emph{Encyclopedia of measurement and statistics.} Thousand Oaks: Sage, 2007. p. 657-663.
ABDI, H.; WILLIAMS, L. Principal component analysis. \emph{WIREs Computational Statatistics}, New York, v. 2, n. 4, p. 433-459, July/Aug. 2010.
ABDI, H.; WILLIAMS, L.; VALENTIN, D. Multiple factor analysis: principal component analysis for multitable and multiblock data sets. \emph{WIREs Computational Statatistics}, New York, v. 5, n. 2, p. 149-179, Feb. 2013.
ASIMOV, D. The Grand Tour: A Tool for Viewing Multidimensional Data. \emph{SIAM Journal of Scientific and Statistical Computing}, 6(1), 128-143, 1985.
ASIMOV, D.; BUJA, A. The grand tour via geodesic interpolation of 2-frames. in Visual Data Exploration and Analysis. \emph{Symposium on Electronic Imaging Science and Technology}, IS&T/SPIE. 1994.
BECUE-BERTAUT, M.; PAGES, J. A principal axes method for comparing contingency tables: MFACT. \emph{Computational Statistics & Data Analysis}, New York, v. 45, n. 3, p. 481-503, Feb. 2004
BECUE-BERTAUT, M.; PAGES, J. Multiple factor analysis and clustering of a mixture of quantitative, categorical and frequency data. \emph{Computational Statistics & Data Analysis}, New York, v. 52, n. 6, p. 3255-3268, Feb. 2008.
BENZECRI, J. Analyse de l'inertie intraclasse par l'analyse d'un tableau de contingence: intra-classinertia analysis through the analysis of a contingency table. \emph{Les Cahiers de l'Analyse des Donnees}, Paris, v. 8, n. 3, p. 351-358, 1983.
BUJA, A.; ASIMOV, D. Grand tour methods: An outline. \emph{Computer Science and Statistics}, 17:63-67. 1986.
BUJA, A.; COOK, D.; ASIMOV, D.; HURLEY, C. Computational Methods for High-Dimensional Rotations in Data Visualization, in C. R. Rao, E. J. Wegman & J. L. Solka, eds, \emph{"Handbook of Statistics: Data Mining and Visualization"}, Elsevier/North Holland, http://www.elsevier.com, pp. 391-413. 2005.
CHARNET, R., et al. \emph{Analise de modelos de regressao linear,} 2a ed. Campinas: Editora da Unicamp, 2008. 357 p.
COOK, D., LEE, E. K., BUJA, A., WICKHAM, H.. Grand tours, projection pursuit guided tours and manual controls. In Chen, Chunhouh, Hardle, Wolfgang, Unwin, e Antony (Eds.), \emph{Handbook of Data Visualization}, Springer Handbooks of Computational Statistics, chapter III.2, p. 295-314. Springer, 2008.
COOK, D., BUJA, A., CABRERA, J.. Projection pursuit indexes based on orthonormal function expansions. \emph{Journal of Computational and Graphical Statistics}, 2(3):225-250, 1993.
COOK, D., BUJA, A., CABRERA, J., HURLEY, C.. Grand tour and projection pursuit, \emph{Journal of Computational and Graphical Statistics}, 4(3), 155-172, 1995.
COOK, D., SWAYNE, D. F.. Interactive and Dynamic Graphics for Data Analysis: With R and GGobi. Springer. 2007.
ESCOFIER, B. Analyse factorielle en reference a un modele: application a l'analyse d'un tableau d'echanges. \emph{Revue de Statistique Appliquee}, Paris, v. 32, n. 4, p. 25-36, 1984.
ESCOFIER, B.; DROUET, D. Analyse des differences entre plusieurs tableaux de frequence. \emph{Les Cahiers de l'Analyse des Donnees}, Paris, v. 8, n. 4, p. 491-499, 1983.
ESCOFIER, B.; PAGES, J. \emph{Analyse factorielles simples et multiples.} Paris: Dunod, 1990. 267 p.
ESCOFIER, B.; PAGES, J. \emph{Analyses factorielles simples et multiples:} objectifs, methodes et interpretation. 4th ed. Paris: Dunod, 2008. 318 p.
ESCOFIER, B.; PAGES, J. \emph{Comparaison de groupes de variables definies sur le meme ensemble d'individus:} un exemple d'applications. Le Chesnay: Institut National de Recherche en Informatique et en Automatique, 1982. 121 p.
ESCOFIER, B.; PAGES, J. Multiple factor analysis (AFUMULT package). \emph{Computational Statistics & Data Analysis}, New York, v. 18, n. 1, p. 121-140, Aug. 1994
ESPEZUA, S., VILLANUEVA, E., MACIEL, C.D., CARVALHO, A.. A projection pursuit framework for supervised dimension reduction of high dimensional small sample datasets. \emph{Neurocomputing}, 149, 767-776, 2015.
FERREIRA, D. F. \emph{Estatistica multivariada.} 2. ed. rev. e ampl. Lavras: UFLA, 2011. 675 p.
FRIEDMAN, J. H., TUKEY, J. W. A projection pursuit algorithm for exploratory data analysis. \emph{IEEE Transaction on Computers}, 23(9):881-890, 1974.
GREENACRE, M.; BLASIUS, J. \emph{Multiple correspondence analysis and related methods.} New York: Taylor and Francis, 2006. 607 p.
HASTIE, T., BUJA, A., TIBSHIRANI, R.: Penalized discriminant analysis. \emph{The Annals of Statistics}. 23(1), 73-102 . 1995.
HOTELLING, H. Analysis of a complex of statistical variables into principal components. \emph{Journal of Educational Psychology}, Arlington, v. 24, p. 417-441, Sept. 1933.
HUBER, P. J.. Projection pursuit. \emph{Annals of Statistics}, 13(2):435-475, 1985.
HURLEY, C.; BUJA, A. Analyzing high-dimensional data with motion graphics, \emph{SIAM Journal of Scientific and Statistical Computing}, 11 (6), 1193-1211. 1990.
JOHNSON, R. A.; WICHERN, D. W. \emph{Applied multivariate statistical analysis.} 6th ed. New Jersey: Prentice Hall, 2007. 794 p.
JONES, M. C., SIBSON, R.. What is projection pursuit, (with discussion), \emph{Journal of the Royal Statistical Society}, Series A 150, 1-36, 1987.
LEE, E., COOK, D., KLINKE, S., LUMLEY, T.. Projection pursuit for exploratory supervised classification. \emph{Journal of Computational and Graphical Statistics}, 14(4):831-846, 2005.
LEE, E. K., COOK, D.. A projection pursuit index for large p small n data. \emph{Statistics and Computing}, 20(3):381-392, 2010.
MARTINEZ, W. L., MARTINEZ, A. R.; \emph{Computational Statistics Handbook with MATLAB}, 2th. ed. New York: Chapman & Hall/CRC, 2007. 794 p.
MARTINEZ, W. L., MARTINEZ, A. R., SOLKA, J.; \emph{Exploratory Data Analysis with MATLAB}, 2th. ed. New York: Chapman & Hall/CRC, 2010. 499 p.
MINGOTI, S. A. \emph{Analise de dados atraves de metodos de estatistica multivariada:} uma abordagem aplicada. Belo Horizonte: UFMG, 2005. 297 p.
OSSANI, P. C.; CIRILLO, M. A.; BOREM, F. M.; RIBEIRO, D. E.; CORTEZ, R. M.. Quality of specialty coffees: a sensory evaluation by consumers using the MFACT technique. \emph{Revista Ciencia Agronomica (UFC. Online)}, v. 48, p. 92-100, 2017.
OSSANI, P. C. \emph{Qualidade de cafes especiais e nao especiais por meio da analise de multiplos fatores para tabelas de contingencias.} 2015. 107 p. Dissertacao (Mestrado em Estatistica e Experimentacao Agropecuaria) - Universidade Federal de Lavras, Lavras, 2015.
PAGES, J. Analyse factorielle multiple appliquee aux variables qualitatives et aux donnees mixtes. \emph{Revue de Statistique Appliquee}, Paris, v. 50, n. 4, p. 5-37, 2002.
PAGES, J. Multiple factor analysis: main features and application to sensory data. \emph{Revista Colombiana de Estadistica}, Bogota, v. 27, n. 1, p. 1-26, 2004.
PENA, D., PRIETO, F.. Cluster identification using projections. \emph{Journal of the American Statistical Association}, 96(456):1433-1445, 2001.
POSSE, C.. Projection pursuit exploratory data analysis, \emph{Computational Statistics and Data Analysis}, 29:669-687, 1995a.
POSSE, C.. Tools for two-dimensional exploratory projection pursuit, \emph{Journal of Computational and Graphical Statistics}, 4:83-100, 1995b
RENCHER, A.C.; \emph{Methods of Multivariate Analysis.} 2th. ed. New York: J.Wiley, 2002. 708 p.
YOUNG, F. W.; RHEINGANS P. Visualizing structure in high-dimensional multivariate data, \emph{IBM Journal of Research and Development}, 35:97-107, 1991.
YOUNG, F. W.; FALDOWSKI R. A.; McFARLANE M. M. \emph{Multivariate statistical visualization, in Handbook of Statistics}, Vol 9, C. R. Rao (ed.), The Netherlands: Elsevier Science Publishers, 959-998, 1993.
}
\keyword{Multivariate Analysis}
|
# plot4.R -- Exploratory Data Analysis course: draws a 2x2 panel of
# power-consumption plots for 2007-02-01 and 2007-02-02 and writes plot4.png.
# NOTE(review): hard-coded absolute Windows paths and setwd() calls make this
# script non-portable; kept as-is.
setwd("C:/Users/TerranceZhou/Documents/Coursera/course4")
filename <- file.path(getwd(), "exdata%2Fdata%2Fhousehold_power_consumption")
## Download and unzip the dataset:
if (!file.exists(filename)){
  fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(fileURL, filename)
}
if (!file.exists("exdata%2Fdata%2Fhousehold_power_consumption")) {
  unzip(filename) }
setwd("C:/Users/TerranceZhou/Documents/Coursera/course4/exdata%2Fdata%2Fhousehold_power_consumption")
# Load Data (semicolon-separated; as.is = TRUE keeps strings as character)
powerConsumption <- read.table(file="household_power_consumption.txt",sep = ";",header=TRUE,as.is = TRUE)
# Keep only the two target days (dates are stored as d/m/yyyy strings).
dataSelected <-powerConsumption[powerConsumption$Date=="1/2/2007"|powerConsumption$Date=="2/2/2007",]
dataSelected<-dataSelected[complete.cases(dataSelected),]
# Combine Date and Time into a single timestamp for the x axis.
dataSelected$DateTime<-strptime(paste(dataSelected$Date,dataSelected$Time),
                                format="%d/%m/%Y %H:%M")
dataSelected$Global_active_power<-as.numeric(dataSelected$Global_active_power)
# Arrange the four plots in a 2x2 grid.
par(mfrow=c(2,2))
par(mar=c(4,4,4,4))
# Panel 1: global active power over time.
plot(dataSelected$DateTime,
     dataSelected$Global_active_power,
     type = "l",
     ylab = "Global Active Power",
     xlab = "")
# Panel 2: voltage over time.
plot(dataSelected$DateTime,
     dataSelected$Voltage,
     type = "l",
     ylab = "Voltage",
     xlab = "datetime")
# Panel 3: the three sub-metering series overlaid in black/red/blue.
plot(dataSelected$DateTime,
     dataSelected$Sub_metering_1,
     type = "l",
     ylab = "Energy sub metering",
     xlab = "")
lines(dataSelected$DateTime,
      dataSelected$Sub_metering_2,
      col="red")
lines(dataSelected$DateTime,
      dataSelected$Sub_metering_3,
      col="blue")
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
       lty=c(1,1,1), lwd=c(2,2,2),col=c("black","red","blue"),bty = "n",cex=0.7)
# Panel 4: global reactive power over time.
plot(dataSelected$DateTime,
     dataSelected$Global_reactive_power,
     type = "l",
     ylab = "Global_reactive_power",
     xlab = "datetime")
# Copy the on-screen device to a PNG file, then close the PNG copy.
dev.copy(png,file="plot4.png")
dev.off()
| /plot4.R | no_license | TerranceZhou/ExData_Plotting1 | R | false | false | 1,938 | r |
setwd("C:/Users/TerranceZhou/Documents/Coursera/course4")
filename <- file.path(getwd(), "exdata%2Fdata%2Fhousehold_power_consumption")
## Download and unzip the dataset:
if (!file.exists(filename)){
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileURL, filename)
}
if (!file.exists("exdata%2Fdata%2Fhousehold_power_consumption")) {
unzip(filename) }
setwd("C:/Users/TerranceZhou/Documents/Coursera/course4/exdata%2Fdata%2Fhousehold_power_consumption")
# Load Data
powerConsumption <- read.table(file="household_power_consumption.txt",sep = ";",header=TRUE,as.is = TRUE)
dataSelected <-powerConsumption[powerConsumption$Date=="1/2/2007"|powerConsumption$Date=="2/2/2007",]
dataSelected<-dataSelected[complete.cases(dataSelected),]
dataSelected$DateTime<-strptime(paste(dataSelected$Date,dataSelected$Time),
format="%d/%m/%Y %H:%M")
dataSelected$Global_active_power<-as.numeric(dataSelected$Global_active_power)
par(mfrow=c(2,2))
par(mar=c(4,4,4,4))
plot(dataSelected$DateTime,
dataSelected$Global_active_power,
type = "l",
ylab = "Global Active Power",
xlab = "")
plot(dataSelected$DateTime,
dataSelected$Voltage,
type = "l",
ylab = "Voltage",
xlab = "datetime")
plot(dataSelected$DateTime,
dataSelected$Sub_metering_1,
type = "l",
ylab = "Energy sub metering",
xlab = "")
lines(dataSelected$DateTime,
dataSelected$Sub_metering_2,
col="red")
lines(dataSelected$DateTime,
dataSelected$Sub_metering_3,
col="blue")
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
lty=c(1,1,1), lwd=c(2,2,2),col=c("black","red","blue"),bty = "n",cex=0.7)
plot(dataSelected$DateTime,
dataSelected$Global_reactive_power,
type = "l",
ylab = "Global_reactive_power",
xlab = "datetime")
dev.copy(png,file="plot4.png")
dev.off()
|
# Main strategy entry point, called once per period by the backtester.
#   store      - persistent state across periods (NULL on the first call)
#   newRowList - list of the latest OHLC rows, one per traded series
#   currentPos - numeric vector of current positions, one per series
#   params     - strategy parameters; must contain params$lookbacks
# Returns a list with the updated store, market orders, and (unused here)
# two sets of limit orders/prices.
getOrders <- function(store, newRowList, currentPos, params) {
    allzero <- rep(0,length(newRowList)) # used for initializing vectors
    ################################################
    # You do not need to edit this part of the code
    # that initializes and updates the store
    ################################################
    if (is.null(store))
        store <- initStore(newRowList)
    else
        store <- updateStore(store, newRowList)
    ################################################
    # Target positions default to flat (zero) for every series.
    pos <- allzero
    ################################################
    # This next code section is the only one you
    # need to edit for getOrders
    #
    # The if condition is already correct:
    # you should only start computing the moving
    # averages when you have enough close prices
    # for the long moving average
    ################################################
    if (store$iter > params$lookbacks$long) {
        # ENTER STRATEGY LOGIC HERE
        # You will need to get the current_close
        # either from newRowList or from store$close
        # You will also need to get close_prices
        # from store$cl
        # With these you can use getTMA, getPosSignFromTMA
        # and getPosSize to assign positions to the vector pos
    }
    ################################################
    # You do not need to edit this part of the code
    # that initializes and updates the store
    ################################################
    # Market orders are the difference between target and current positions.
    marketOrders <- -currentPos + pos
    return(list(store=store,marketOrders=marketOrders,
                limitOrders1=allzero,limitPrices1=allzero,
                limitOrders2=allzero,limitPrices2=allzero))
}
########################################################################
# The following function should be edited to complete steps 1 to 3
# of comp22 assignment 2
getTMA <- function(close_prices, lookbacks) {
    # Compute a triple of simple moving averages (short/medium/long) over the
    # most recent close prices.
    #
    # close_prices: an xts with a column called "Close" and at least
    #               lookbacks$long rows.
    # lookbacks:    a list with integer (is.integer) elements short, medium
    #               and long, satisfying short < medium < long.
    #
    # Returns a plain list (numeric values, not xts) with elements short,
    # medium and long: each is the mean of the last k closes, so all three
    # windows end at the same period -- the last row of close_prices.
    #
    # Argument checks; the error strings E01-E06 are part of the contract
    # and are preserved exactly.
    if (!all(c("short", "medium", "long") %in% names(lookbacks)))
        stop("E01: At least one of \"short\", \"medium\", \"long\" is missing from names(lookbacks)")
    if (!all(vapply(lookbacks, is.integer, logical(1))))
        stop("E02: At least one of the lookbacks is not an integer according to is.integer()")
    if (!(lookbacks$short < lookbacks$medium && lookbacks$medium < lookbacks$long))
        stop("E03: The lookbacks do not satisfy lookbacks$short < lookbacks$medium < lookbacks$long")
    # is.xts() comes from the xts package loaded by the backtester framework.
    if (!is.xts(close_prices))
        stop("E04: close_prices is not an xts according to is.xts()")
    if (nrow(close_prices) < lookbacks$long)
        stop("E05: close_prices does not enough rows")
    if (!("Close" %in% colnames(close_prices)))
        stop("E06: close_prices does not contain a column \"Close\"")
    # Strip the xts class so the returned values are plain numerics.
    closes <- as.numeric(close_prices[, "Close"])
    # SMA over the last k closes (window ends at the final row).
    sma_of_last <- function(k) mean(tail(closes, k))
    ret <- list(short  = sma_of_last(lookbacks$short),
                medium = sma_of_last(lookbacks$medium),
                long   = sma_of_last(lookbacks$long))
    return(ret)
}
getPosSignFromTMA <- function(tma_list) {
    # Map a triple of SMA values to a position sign.
    #
    # tma_list: list with numeric elements short, medium and long, as
    #           returned by getTMA(), e.g.
    #           getPosSignFromTMA(getTMA(close_prices, lookbacks))
    #
    # Returns a single number:
    #    1 if short SMA < medium SMA < long SMA
    #   -1 if short SMA > medium SMA > long SMA
    #    0 otherwise (including any ties)
    if (tma_list$short < tma_list$medium && tma_list$medium < tma_list$long)
        return(1)
    if (tma_list$short > tma_list$medium && tma_list$medium > tma_list$long)
        return(-1)
    return(0)
}
getPosSize <- function(current_close,constant=1000) {
    # Position size: the number of whole units purchasable with `constant`
    # currency units at the current close price, i.e. the quotient
    # constant / current_close rounded down to the nearest integer.
    #
    # current_close: numeric close price (assumed positive -- TODO confirm
    #                upstream guarantees this)
    # constant:      notional amount allocated per position (default 1000)
    return(floor(constant / current_close))
}
# Placeholder: must be replaced by hand with the PD ratio obtained by
# running the strategy with lookbacks short=10, medium=20, long=30 on the
# username-specific in-sample period.
getInSampleResult <- function() {
    # Here you should replace the return value 0 with
    # the PD ratio for the following lookbacks
    # short: 10
    # medium: 20
    # long: 30
    # when the strategy is run on your
    # username-specific in-sample period
    # DO NOT PUT THE ACTUAL CODE TO COMPUTE THIS RETURN VALUE HERE
    return(0)
}
# Placeholder: must be replaced by hand with the best PD ratio found over
# the pre-defined parameter ranges on the username-specific in-sample period.
getInSampleOptResult <- function() {
    # Here you should replace the return value 0 with
    # the best PD ratio that can be found for the pre-defined
    # parameter ranges
    # (see the Assignment 2 handout for details of those ranges)
    # and your username-specific in-sample period
    # DO NOT PUT THE ACTUAL CODE TO COMPUTE THIS RETURN VALUE HERE
    return(0)
}
########################################################################
# The functions below do NOT need to be edited for comp226 assignment 2
# Build the close-price store: one entry per series, seeded with that
# series' Close column from its first row.
initClStore <- function(newRowList) {
    pull_close <- function(row) row$Close
    lapply(newRowList, pull_close)
}
# Append each series' newest Close value to its accumulated history.
updateClStore <- function(clStore, newRowList) {
    Map(function(history, row) rbind(history, row$Close), clStore, newRowList)
}
# Create a fresh store on the first period. `series` is accepted for
# interface compatibility but is not used. iter counts processed periods;
# cl holds the per-series close-price histories.
initStore <- function(newRowList,series) {
    store <- list(iter = 1, cl = initClStore(newRowList))
    store
}
# Advance the period counter and grow each close-price history; any other
# store fields are left untouched.
updateStore <- function(store, newRowList) {
    modifyList(store, list(iter = store$iter + 1,
                           cl = updateClStore(store$cl, newRowList)))
}
| /Strategy/backtester_v4.2/strategies/a2_template.R | no_license | fentwer/homework | R | false | false | 6,296 | r | getOrders <- function(store, newRowList, currentPos, params) {
allzero <- rep(0,length(newRowList)) # used for initializing vectors
################################################
# You do not need to edit this part of the code
# that initializes and updates the store
################################################
if (is.null(store))
store <- initStore(newRowList)
else
store <- updateStore(store, newRowList)
################################################
pos <- allzero
################################################
# This next code section is the only one you
# need to edit for getOrders
#
# The if condition is already correct:
# you should only start computing the moving
# averages when you have enough close prices
# for the long moving average
################################################
if (store$iter > params$lookbacks$long) {
# ENTER STRATEGY LOGIC HERE
# You will need to get the current_close
# either from newRowList or from store$close
# You will also need to get close_prices
# from store$cl
# With these you can use getTMA, getPosSignFromTMA
# and getPosSize to assign positions to the vector pos
}
################################################
# You do not need to edit this part of the code
# that initializes and updates the store
################################################
marketOrders <- -currentPos + pos
return(list(store=store,marketOrders=marketOrders,
limitOrders1=allzero,limitPrices1=allzero,
limitOrders2=allzero,limitPrices2=allzero))
}
########################################################################
# The following function should be edited to complete steps 1 to 3
# of comp22 assignment 2
# Compute a triple moving average (TMA) from close prices.
#
# ASSIGNMENT TEMPLATE: every `if (TRUE)` below is a placeholder to be
# replaced by the argument check described directly above it, and
# `ret <- 0` must be replaced by the real computation.
#
# Args:
#   close_prices - an xts with a column called "Close"
#   lookbacks    - a list with exactly three integer elements:
#                  short, medium, long, with short < medium < long
#
# Returns (once implemented): a list named short/medium/long of plain
# numeric (non-xts) SMA values whose windows all end at the last row of
# close_prices.
getTMA <- function(close_prices, lookbacks) {
  # close_prices should be an xts with a column called "Close"
  # lookbacks should be list with exactly three elements:
  #   lookbacks$short is an integer
  #   lookbacks$medium is an integer
  #   lookbacks$long is an integer
  # It should be the case that:
  #   lookbacks$short < lookbacks$medium < lookbacks$long
  ####################################################################
  # First we implement checks on the arguments

  # Replace TRUE to
  # check that lookbacks contains named elements short, medium and long
  if (TRUE)
    stop("E01: At least one of \"short\", \"medium\", \"long\" is missing from names(lookbacks)")

  # Replace TRUE to
  # check that the elements of lookbacks are all integers
  if (TRUE)
    stop("E02: At least one of the lookbacks is not an integer according to is.integer()")

  # Replace TRUE to
  # check that lookbacks$short < lookbacks$medium < lookbacks$long
  if (TRUE)
    stop("E03: The lookbacks do not satisfy lookbacks$short < lookbacks$medium < lookbacks$long")

  # Replace TRUE to
  # check that close_prices is an xts
  if (TRUE)
    stop("E04: close_prices is not an xts according to is.xts()")

  # Replace TRUE to
  # check that close_prices has enough rows
  # NOTE(review): message text has a typo -- should read "does not have
  # enough rows" (kept verbatim in case the grader matches on it).
  if (TRUE)
    stop("E05: close_prices does not enough rows")

  # Replace TRUE to
  # check that close_prices contains a column called "Close"
  if (TRUE)
    stop("E06: close_prices does not contain a column \"Close\"")

  ret <- 0
  # You need to replace the assignment to ret so that the
  # returned object:
  # - is a list
  # - has the right names (short, medium, long), and
  # - contains numeric and not xts objects
  # - and contains the correct moving average values, which should
  #   have windows of the correct sizes which should all end in the
  #   same period which should be the last row of close_prices
  return(ret)
}
# Translate a triple-moving-average snapshot into a position sign.
#
# tma_list is a list of three numbers named short, medium and long (as
# produced by getTMA), so that
#   getPosSignFromTMA(getTMA(close_prices, lookbacks))
# composes once both functions are implemented.
#
# Intended contract (assignment template -- not yet implemented):
#    1 when short SMA < medium SMA < long SMA
#   -1 when short SMA > medium SMA > long SMA
#    0 otherwise
getPosSignFromTMA <- function(tma_list) {
  # Placeholder: always signal "flat" until the comparison logic is written.
  0
}
# Size a position given the current close price.
#
# Intended contract (assignment template -- not yet implemented):
#   floor(constant / current_close), i.e. how many whole units the
#   `constant` currency amount can buy at the current close.
getPosSize <- function(current_close,constant=1000) {
  # Placeholder: always size to zero until the formula is implemented.
  0
}
# Report the PD ratio obtained in-sample with lookbacks 10/20/30.
#
# Assignment template: replace the 0 below with the PD ratio measured on
# the username-specific in-sample period.  Per the assignment rules, only
# the hard-coded result goes here -- the code that computes it must NOT
# live in this function.
getInSampleResult <- function() {
  0
}
# Report the best in-sample PD ratio over the pre-defined parameter ranges.
#
# Assignment template: replace the 0 below with the best PD ratio found for
# the parameter ranges described in the Assignment 2 handout, evaluated on
# the username-specific in-sample period.  As with getInSampleResult, only
# the hard-coded value belongs here.
getInSampleOptResult <- function() {
  0
}
########################################################################
# The functions below do NOT need to be edited for comp226 assignment 2
# Seed the close-price store: one entry per series, initialised with that
# series' Close column from the first data row.
initClStore <- function(newRowList) {
  lapply(newRowList, function(row) row$Close)
}
# Append each series' latest Close onto its accumulated price history.
# Map(f, x, y) is the list-preserving equivalent of
# mapply(f, x, y, SIMPLIFY = FALSE).
updateClStore <- function(clStore, newRowList) {
  Map(function(prices, row) rbind(prices, row$Close), clStore, newRowList)
}
# Create the persistent store for the first period: an iteration counter
# and the per-series close-price history.
# NOTE(review): `series` is part of the original signature but unused;
# getOrders calls initStore(newRowList) with a single argument, which is
# fine under R's lazy evaluation.
initStore <- function(newRowList,series) {
  list(iter = 1, cl = initClStore(newRowList))
}
# Advance the store by one period: bump the iteration counter and append
# the new close prices to the history.
updateStore <- function(store, newRowList) {
  modifyList(store, list(iter = store$iter + 1,
                         cl   = updateClStore(store$cl, newRowList)))
}
|
\name{getLDS}
\alias{getLDS}
\title{Retrieves information from two linked datasets}
\description{This function is the main biomaRt query function that links 2 datasets and retrieves information from these linked BioMart datasets. In Ensembl this translates to homology mapping.}
\usage{getLDS(attributes, filters = "", values = "", mart, attributesL,
filtersL = "", valuesL = "", martL, verbose = FALSE, uniqueRows = TRUE,
bmHeader=TRUE)}
\arguments{
\item{attributes}{Attributes you want to retrieve of primary dataset. A possible list of attributes can be retrieved using the function listAttributes.}
\item{filters}{Filters that should be used in the query. These filters will be applied to primary dataset. A possible list of filters can be retrieved using the function listFilters.}
\item{values}{Values of the filter, e.g. list of affy IDs}
\item{mart}{object of class Mart created with the useMart function.}
\item{attributesL}{Attributes of linked dataset that needs to be retrieved}
\item{filtersL}{Filters to be applied to the linked dataset}
\item{valuesL}{Values for the linked dataset filters}
\item{martL}{Mart object representing linked dataset}
\item{verbose}{When using biomaRt in webservice mode and setting verbose to TRUE, the XML query to the webservice will be printed. Alternatively in MySQL mode the MySQL query will be printed.}
\item{uniqueRows}{Logical to indicate if the BioMart web service should return unique rows only or not. Has the value of either TRUE or FALSE}
\item{bmHeader}{Boolean to indicate whether the result retrieved from the
BioMart server should include the data headers; defaults to
TRUE. This should only be switched off if the default behavior
results in errors; setting it to FALSE might still allow
your data to be retrieved in that case}
}
\author{Steffen Durinck}
\examples{
if(interactive()){
human = useMart("ensembl", dataset = "hsapiens_gene_ensembl")
mouse = useMart("ensembl", dataset = "mmusculus_gene_ensembl")
getLDS(attributes = c("hgnc_symbol","chromosome_name", "start_position"),
filters = "hgnc_symbol", values = "TP53", mart = human,
attributesL = c("chromosome_name","start_position"), martL = mouse)
}
}
\keyword{methods}
| /man/getLDS.Rd | no_license | grimbough/biomaRt | R | false | false | 2,255 | rd | \name{getLDS}
\alias{getLDS}
\title{Retrieves information from two linked datasets}
\description{This function is the main biomaRt query function that links 2 datasets and retrieves information from these linked BioMart datasets. In Ensembl this translates to homology mapping.}
\usage{getLDS(attributes, filters = "", values = "", mart, attributesL,
filtersL = "", valuesL = "", martL, verbose = FALSE, uniqueRows = TRUE,
bmHeader=TRUE)}
\arguments{
\item{attributes}{Attributes you want to retrieve of primary dataset. A possible list of attributes can be retrieved using the function listAttributes.}
\item{filters}{Filters that should be used in the query. These filters will be applied to primary dataset. A possible list of filters can be retrieved using the function listFilters.}
\item{values}{Values of the filter, e.g. list of affy IDs}
\item{mart}{object of class Mart created with the useMart function.}
\item{attributesL}{Attributes of linked dataset that needs to be retrieved}
\item{filtersL}{Filters to be applied to the linked dataset}
\item{valuesL}{Values for the linked dataset filters}
\item{martL}{Mart object representing linked dataset}
\item{verbose}{When using biomaRt in webservice mode and setting verbose to TRUE, the XML query to the webservice will be printed. Alternatively in MySQL mode the MySQL query will be printed.}
\item{uniqueRows}{Logical to indicate if the BioMart web service should return unique rows only or not. Has the value of either TRUE or FALSE}
\item{bmHeader}{Boolean to indicate if the result retrieved from the
BioMart server should include the data headers or not, defaults to
TRUE. This should only be switched off if the default behavior
results in errors, setting to off might still be able to retrieve
your data in that case}
}
\author{Steffen Durinck}
\examples{
if(interactive()){
human = useMart("ensembl", dataset = "hsapiens_gene_ensembl")
mouse = useMart("ensembl", dataset = "mmusculus_gene_ensembl")
getLDS(attributes = c("hgnc_symbol","chromosome_name", "start_position"),
filters = "hgnc_symbol", values = "TP53", mart = human,
attributesL = c("chromosome_name","start_position"), martL = mouse)
}
}
\keyword{methods}
|
# Estimate daily evapotranspiration (ET) per soil layer from Acclima
# soil-moisture sensors using the derivative method:
#   - f1: centred first difference of layer soil moisture (drying slope)
#   - f2: centred difference of f1 (detects inflection of the drying curve)
#   - ET is credited as |f1| on days where the layer is drying (f1 < 0)
#     and the drying is decelerating (f2 > 0).
# The original script repeated the same ~30-line block once per sensor;
# that logic is factored into central_diff() and process_sensor() below.
library(readxl)
library(writexl)
library(tidyr)
library(tidyverse)

setwd("~/Modena data")  # working directory containing the data file
data <- read_excel("Allen/Soil Moisture/2021/Corn/Modena 4 Corn West July 6.xlsx",
                   sheet = "SM Data")

w <- 3  # window used to calculate the derivatives, in days

### sensor 1/101 ###############################################################
data.101 <- data.frame(Date = data$`Date`,
                       sm1 = data$`1 Acclima`, sm2 = data$`2 Acclima`,
                       sm3 = data$`3 Acclima`, sm4 = data$`4 Acclima`,
                       sm5 = data$`5 Acclima`, sm6 = data$`6 Acclima`,
                       sm7 = data$`7 Acclima`, sm8 = data$`8 Acclima`)
data.101 <- na.omit(data.101)
n <- length(data.101$Date)

# Sensor depths for the Corn 215/245/252 layout, converted from inches to mm.
# Other installations (e.g. the 7-sensor layouts of the original script's
# commented alternatives) use different depth vectors; adjust here for those.
sd_mm <- c(3, 3, 6, 6, 12, 24, 36, 48) * 25.4

# Thickness of the soil layer each sensor represents (mm).  Sensors 1/2 both
# cover the top 3 in and sensors 3/4 both cover the 3-6 in layer, matching
# the paired shallow sensors of this installation (same differences as the
# original per-sensor sm1..sm8 formulas).
layer_mm <- c(sd_mm[1],
              sd_mm[2],
              sd_mm[3] - sd_mm[2],
              sd_mm[4] - sd_mm[2],
              sd_mm[5] - sd_mm[4],
              sd_mm[6] - sd_mm[5],
              sd_mm[7] - sd_mm[6],
              sd_mm[8] - sd_mm[7])

# Soil moisture per layer in mm: VWC (%) x layer thickness / 100.
# Column k + 1 of data.101 holds sensor k's VWC.
sm <- lapply(1:8, function(k) data.101[[k + 1]] * layer_mm[k] / 100)

# Centred difference (v[i+1] - v[i-1]) / w.  NA at the first position and at
# the last (v[m+1] indexes past the end, which yields NA in R, as in the
# original per-sensor loops).
central_diff <- function(v, w) {
  m <- length(v)
  d <- rep(NA_real_, m)
  for (i in 2:m) {
    d[i] <- (v[i + 1] - v[i - 1]) / w
  }
  d
}

# Process one sensor: plot f1, f2 and the estimated ET series, and return a
# data frame with Date, SM, f1, f2 and ET (mm/day; NA on non-drying days).
process_sensor <- function(dates, sm_layer, label, et_ymax, w) {
  f1 <- central_diff(sm_layer, w)
  plot(dates, f1,
       type = 'o', pch = 19, cex = 0.8, lwd = 2, col = 'darkgoldenrod',
       main = paste('Corn 215 1st derivative -', label), xlab = '', ylab = 'f1')
  abline(h = 0, lwd = 2)

  f2 <- central_diff(f1, w)
  plot(dates, f2,
       type = 'o', pch = 19, cex = 0.8, lwd = 2, col = 'brown2',
       main = paste('Corn 215 2nd derivative -', label), xlab = '', ylab = 'f2')
  abline(h = 0, lwd = 2)

  res <- data.frame(Date = dates, SM = sm_layer, f1 = f1, f2 = f2,
                    ET = rep(NA_real_, length(dates)))
  # ET = |f1| when the layer is drying (f1 < 0) and drying decelerates
  # (f2 > 0); the first/last few days are skipped because the centred
  # derivatives are undefined there.  The is.na() guards make the condition
  # robust if the skipped margin ever shrinks.
  for (i in 3:(length(dates) - 3)) {
    if (!is.na(res$f1[i]) && !is.na(res$f2[i]) &&
        res$f1[i] < 0 && res$f2[i] > 0) {
      res$ET[i] <- abs(res$f1[i])
    }
  }
  plot(res$Date, res$ET,
       type = 'h', lwd = 3, col = 'deepskyblue',
       main = paste('Corn 215 ET -', label), xlab = '', ylab = 'ET (mm)',
       ylim = c(0, et_ymax))
  res
}

# Per-sensor y-axis limits for the ET plots (kept from the original script).
et_ymax <- c(4, 4, 4, 6, 4, 4, 4, 3)

sensors <- vector("list", 8)
for (k in 1:8) {
  sensors[[k]] <- process_sensor(data.101$Date, sm[[k]],
                                 paste('soil depth', k), et_ymax[k], w)
}

# Total ET from the entire soil profile ####
# One column per sensor; non-drying days (NA) count as zero ET.
et_mat <- sapply(sensors, function(s) replace(s$ET, is.na(s$ET), 0))
ET.tot <- rowSums(et_mat)
plot(data.101$Date, ET.tot,
     type = 'h', lwd = 2,
     main = 'Corn 215 ET from entire soil profile', xlab = '', ylab = 'ET (mm)',
     ylim = c(0, 20))

# Heatmap ####
library(ggplot2)
library(hrbrthemes)
library(viridis)
x <- data.101$Date
y <- paste('sensor', 8:1)
# Stack sensor 8 first down to sensor 1 to match the y-axis ordering above
# (same order as the original c(sensor8$ET, ..., sensor1$ET)).
ET <- as.vector(et_mat[, 8:1])
ET[ET == 0] <- NA  # blank out zero-ET days so they render white
heatmap.data <- expand.grid(x = x, y = y)
ggplot(heatmap.data, mapping = aes(x, y, fill = ET)) +
  geom_tile() +
  labs(x = '', y = '', title = 'Corn 215 Daily ET (mm)') +
  scale_fill_viridis(direction = -1, na.value = 'white') +
  theme_ipsum()
| /Modena DerivativesCorrect 2021.R | no_license | laurachristi/ET_derivatives_2021 | R | false | false | 13,001 | r | library(readxl)
library(writexl)
library(tidyr)
library(tidyverse)
setwd("~/Modena data") # sets the working directory in the folder containing the data file
data=read_excel("Allen/Soil Moisture/2021/Corn/Modena 4 Corn West July 6.xlsx", sheet="SM Data") # load the data from excel
w=3 # window used to calculate the derivatives in days
### sensor 1/101 ################################################################
data.101=data.frame(Date=data$`Date`, sm1=data$`1 Acclima`,sm2=data$`2 Acclima`, sm3=data$`3 Acclima`, sm4=data$`4 Acclima`, sm5=data$`5 Acclima`, sm6=data$`6 Acclima`, sm7=data$`7 Acclima`, sm8=data$`8 Acclima`)
#data.101=data.frame(Date=data$`Date`, sm1=data$`1 Acclima`,sm2=data$`2 Acclima`, sm3=data$`3 Acclima`, sm4=data$`4 Acclima`, sm5=data$`5 Acclima`, sm6=data$`6 Acclima`, sm7=data$`7 Acclima`)
data.101=na.omit(data.101)
n=length(data.101$Date)
# sensor depths
##Depths for Corn 215, 245, 252
sd1=3*25.4 # sensor 1: from surface to 3 in
sd2=3*25.4 # sensor 2: from surface to 3 in
sd3=6*25.4 # sensor 3: from 3 in to 6 in
sd4=6*25.4 # sensor 4: from 3 in to 6 in
sd5=12*25.4 # sensor 5: from 6 in to 1ft
sd6=24*25.4 # sensor 6: from 1ft to 2ft
sd7=36*25.4 # sensor 7: from 2ft to 3ft
sd8=48*25.4 # sensor 8: from 3ft to 4ft
##Depths for Corn 215, 263
#sd1=3*25.4 # sensor 1: from surface to 3 in
#sd2=6*25.4 # sensor 2: from 6 in to 12 in
#sd3=12*25.4 # sensor 3: from 12 in to 1ft
#sd4=24*25.4 # sensor 4: from 1ft to 2ft
#sd5=36*25.4 # sensor 5: from 2ft to 3ft
#sd6=48*25.4 # sensor 6: from 3ft to 4ft
#sd7=60*25.4 # sensor 7: from 4ft to 5ft
##Depths for Corn 215
#sd1=3*25.4 # sensor 1: from surface to 3 in
#sd2=6*25.4 # sensor 2: from 6 in to 12 in
#sd3=12*25.4 # sensor 3: from 12 in to 1ft
#sd4=24*25.4 # sensor 4: from 1ft to 2ft
#sd5=36*25.4 # sensor 5: from 2ft to 3ft
#sd6=48*25.4 # sensor 6: from 3ft to 4ft
#sd7=60*25.4 # sensor 7: from 4ft to 5ft
# CORN soil moisture (mm)
sm1=data.101$sm1*sd1/100
sm2=data.101$sm2*sd2/100
sm3=data.101$sm3*(sd3-sd2)/100
sm4=data.101$sm4*(sd4-sd2)/100
sm5=data.101$sm5*(sd5-sd4)/100
sm6=data.101$sm6*(sd6-sd5)/100
sm7=data.101$sm7*(sd7-sd6)/100
sm8=data.101$sm8*(sd8-sd7)/100
# ALFALFA soil moisture (mm)
#sm1=data.101$sm1*sd1/100
#sm2=data.101$sm2*(sd2-sd1)/100
#sm3=data.101$sm3*(sd3-sd2)/100
#sm4=data.101$sm4*(sd4-sd3)/100
#sm5=data.101$sm5*(sd5-sd4)/100
#sm6=data.101$sm6*(sd6-sd5)/100
#sm7=data.101$sm7*(sd7-sd6)/100
# sensor depth 1 ####
# 1st derivative - slope of the SM curve
f1=c() # 1st derivative
f1[1]=NA
for (i in 2:n) {
f1[i]=(sm1[i+1]-sm1[i-1])/w
}
plot(data.101$Date, f1,
type='o', pch=19, cex=0.8, lwd=2, col='darkgoldenrod',
main='Corn 215 1st derivative - soil depth 1', xlab='', ylab='f1')
abline(h=0, lwd=2)
# 2nd derivative - calculate the inflection point
f2=c() # 2nd derivative
f2[1]=NA
for (i in 2:n) {
f2[i]=(f1[i+1]-f1[i-1])/w
}
plot(data.101$Date, f2,
type='o', pch=19, cex=0.8, lwd=2, col='brown2',
main='Corn 215 2nd derivative - soil depth 1', xlab='', ylab='f2')
abline(h=0, lwd=2)
sensor1=data.frame(Date=data.101$Date, SM=sm1, f1, f2) # data frame with the results for sensor1
# Estimate ET rate - if f1<0 & f2>0: ET=|f1|
sensor1$ET=c(rep(NA, n)) # create ET column
for (i in 3:(n-3)) {
if (sensor1$f1[i]<0 & sensor1$f2[i]>0) {sensor1$ET[i]=abs(sensor1$f1[i])}
}
plot(sensor1$Date, sensor1$ET,
type='h', lwd=3, col='deepskyblue',
main='Corn 215 ET - soil depth 1', xlab='', ylab='ET (mm)',
ylim=c(0,4))
# sensor depth 2 ####
# 1st derivative - slope of the SM curve
w=3 # window width in days
f1=c() # 1st derivative
f1[1]=NA
for (i in 2:n) {
f1[i]=(sm2[i+1]-sm2[i-1])/w
}
plot(data.101$Date, f1,
type='o', pch=19, cex=0.8, lwd=2, col='darkgoldenrod',
main='Corn 215 1st derivative - soil depth 2', xlab='', ylab='f1')
abline(h=0, lwd=2)
# 2nd derivative - calculate the inflection point
f2=c() # 2nd derivative
f2[1]=NA
for (i in 2:n) {
f2[i]=(f1[i+1]-f1[i-1])/w
}
plot(data.101$Date, f2,
type='o', pch=19, cex=0.8, lwd=2, col='brown2',
main='Corn 215 2nd derivative - soil depth 2', xlab='', ylab='f2')
abline(h=0, lwd=2)
sensor2=data.frame(Date=data.101$Date, SM=sm2, f1, f2) # data frame with the results for sensor2
# Estimate ET rate - if f1<0 & f2>0: ET=|f1|
sensor2$ET=c(rep(NA, n)) # create ET column
for (i in 3:(n-3)) {
if (sensor2$f1[i]<0 & sensor2$f2[i]>0) {sensor2$ET[i]=abs(sensor2$f1[i])}
}
plot(sensor2$Date, sensor2$ET,
type='h', lwd=3, col='deepskyblue',
main='Corn 215 ET from - depth 2', xlab='', ylab='ET (mm)',
ylim=c(0,4))
# sensor depth 3 ####
# 1st derivative - slope of the SM curve
w=3 # window width in days
f1=c() # 1st derivative
f1[1]=NA
for (i in 2:n) {
f1[i]=(sm3[i+1]-sm3[i-1])/w
}
plot(data.101$Date, f1,
type='o', pch=19, cex=0.8, lwd=2, col='darkgoldenrod',
main='Corn 215 1st derivative - soil depth 3', xlab='', ylab='f1')
abline(h=0, lwd=2)
# 2nd derivative - calculate the inflection point
f2=c() # 2nd derivative
f2[1]=NA
for (i in 2:n) {
f2[i]=(f1[i+1]-f1[i-1])/w
}
plot(data.101$Date, f2,
type='o', pch=19, cex=0.8, lwd=2, col='brown2',
main='Corn 215 2nd derivative - soil depth 3', xlab='', ylab='f2')
abline(h=0, lwd=2)
sensor3=data.frame(Date=data.101$Date, SM=sm3, f1, f2) # data frame with the results for sensor3
# Estimate ET rate - if f1<0 & f2>0: ET=|f1|
sensor3$ET=c(rep(NA, n)) # create ET column
for (i in 3:(n-3)) {
if (sensor3$f1[i]<0 & sensor3$f2[i]>0) {sensor3$ET[i]=abs(sensor3$f1[i])}
}
plot(sensor3$Date, sensor3$ET,
type='h', lwd=3, col='deepskyblue',
main='Corn 215 ET - soil depth 3', xlab='', ylab='ET (mm)',
ylim=c(0, 4))
# sensor depth 4 ####
# 1st derivative - slope of the SM curve
w=3 # window width in days
f1=c() # 1st derivative
f1[1]=NA
for (i in 2:n) {
f1[i]=(sm4[i+1]-sm4[i-1])/w
}
plot(data.101$Date, f1,
type='o', pch=19, cex=0.8, lwd=2, col='darkgoldenrod',
main='Corn 215 sm4 slope - soil depth 4', xlab='', ylab='f1')
abline(h=0, lwd=2)
# 2nd derivative - calculate the inflection point
f2=c() # 2nd derivative
f2[1]=NA
for (i in 2:n) {
f2[i]=(f1[i+1]-f1[i-1])/w
}
plot(data.101$Date, f2,
type='o', pch=19, cex=0.8, lwd=2, col='brown2',
main='Corn 215 2nd derivative - soil depth 4', xlab='', ylab='f2')
abline(h=0, lwd=2)
sensor4=data.frame(Date=data.101$Date, SM=sm4, f1, f2) # data frame with the results for sensor4
# Estimate ET rate - if f1<0 & f2>0: ET=|f1|
sensor4$ET=c(rep(NA, n)) # create ET column
for (i in 3:(n-3)) {
if (sensor4$f1[i]<0 & sensor4$f2[i]>0) {sensor4$ET[i]=abs(sensor4$f1[i])}
}
plot(sensor4$Date, sensor4$ET,
type='h', lwd=3, col='deepskyblue',
main='Corn 215 ET - soil depth 4', xlab='', ylab='ET (mm)',
ylim=c(0, 6))
# sensor depth 5 ####
# 1st derivative - slope of the SM curve
w=3 # window width in days
f1=c() # 1st derivative
f1[1]=NA
for (i in 2:n) {
f1[i]=(sm5[i+1]-sm5[i-1])/w
}
plot(data.101$Date, f1,
type='o', pch=19, cex=0.8, lwd=2, col='darkgoldenrod',
main='Corn 215 sm5 slope - soil depth 5', xlab='', ylab='f1')
abline(h=0, lwd=2)
# 2nd derivative - calculate the inflection point
f2=c() # 2nd derivative
f2[1]=NA
for (i in 2:n) {
f2[i]=(f1[i+1]-f1[i-1])/w
}
plot(data.101$Date, f2,
type='o', pch=19, cex=0.8, lwd=2, col='brown2',
main='Corn 215 2nd derivative - soil depth 5', xlab='', ylab='f2')
abline(h=0, lwd=2)
sensor5=data.frame(Date=data.101$Date, SM=sm5, f1, f2) # data frame with the results for sensor4
# Estimate ET rate - if f1<0 & f2>0: ET=|f1|
sensor5$ET=c(rep(NA, n)) # create ET column
for (i in 3:(n-3)) {
if (sensor5$f1[i]<0 & sensor5$f2[i]>0) {sensor5$ET[i]=abs(sensor5$f1[i])}
}
plot(sensor5$Date, sensor5$ET,
type='h', lwd=3, col='deepskyblue',
main='Corn 215 ET - soil depth 5', xlab='', ylab='ET (mm)',
ylim=c(0, 4))
# sensor depth 6 ####
# 1st derivative - slope of the SM curve
w=3 # window width in days
f1=c() # 1st derivative
f1[1]=NA
for (i in 2:n) {
f1[i]=(sm6[i+1]-sm6[i-1])/w
}
plot(data.101$Date, f1,
type='o', pch=19, cex=0.8, lwd=2, col='darkgoldenrod',
main='Corn 215 sm6 slope - soil depth 6', xlab='', ylab='f1')
abline(h=0, lwd=2)
# 2nd derivative - calculate the inflection point
f2=c() # 2nd derivative
f2[1]=NA
for (i in 2:n) {
f2[i]=(f1[i+1]-f1[i-1])/w
}
plot(data.101$Date, f2,
type='o', pch=19, cex=0.8, lwd=2, col='brown2',
main='Corn 215 2nd derivative - soil depth 6', xlab='', ylab='f2')
abline(h=0, lwd=2)
sensor6=data.frame(Date=data.101$Date, SM=sm6, f1, f2) # data frame with the results for sensor4
# Estimate ET rate - if f1<0 & f2>0: ET=|f1|
sensor6$ET=c(rep(NA, n)) # create ET column
for (i in 3:(n-3)) {
if (sensor6$f1[i]<0 & sensor6$f2[i]>0) {sensor6$ET[i]=abs(sensor6$f1[i])}
}
plot(sensor6$Date, sensor6$ET,
type='h', lwd=3, col='deepskyblue',
main='Corn 215 ET - soil depth 6', xlab='', ylab='ET (mm)',
ylim=c(0, 4))
# sensor depth 7 ####
# 1st derivative - slope of the SM curve
w=3 # window width in days
f1=c() # 1st derivative
f1[1]=NA
for (i in 2:n) {
f1[i]=(sm7[i+1]-sm7[i-1])/w
}
plot(data.101$Date, f1,
type='o', pch=19, cex=0.8, lwd=2, col='darkgoldenrod',
main='Corn 215 sm7 slope - soil depth 7', xlab='', ylab='f1')
abline(h=0, lwd=2)
# 2nd derivative - calculate the inflection point
f2=c() # 2nd derivative
f2[1]=NA
for (i in 2:n) {
f2[i]=(f1[i+1]-f1[i-1])/w
}
plot(data.101$Date, f2,
type='o', pch=19, cex=0.8, lwd=2, col='brown2',
main='Corn 215 2nd derivative - soil depth 7', xlab='', ylab='f2')
abline(h=0, lwd=2)
sensor7=data.frame(Date=data.101$Date, SM=sm7, f1, f2) # data frame with the results for sensor4
# Estimate ET rate - if f1<0 & f2>0: ET=|f1|
sensor7$ET=c(rep(NA, n)) # create ET column
for (i in 3:(n-3)) {
if (sensor7$f1[i]<0 & sensor7$f2[i]>0) {sensor7$ET[i]=abs(sensor7$f1[i])}
}
plot(sensor7$Date, sensor7$ET,
type='h', lwd=3, col='deepskyblue',
main='Corn 215 ET - soil depth 7', xlab='', ylab='ET (mm)',
ylim=c(0, 4))
# sensor depth 8 ####
# 1st derivative - slope of the SM curve
# NOTE(review): sm8, n and data.101 are defined earlier in the file (not
# shown here); sm8 is presumably the soil-moisture series for sensor 8.
w=3 # window width in days
f1=c() # 1st derivative
f1[1]=NA
# Central difference. NOTE(review): at i == n this reads sm8[n+1], which is
# out of range and yields NA; also the difference spans 2 samples while the
# divisor is w = 3 -- confirm the intended window width.
for (i in 2:n) {
  f1[i]=(sm8[i+1]-sm8[i-1])/w
}
plot(data.101$Date, f1,
     type='o', pch=19, cex=0.8, lwd=2, col='darkgoldenrod',
     main='Corn 215 sm8 slope - soil depth 8', xlab='', ylab='f1')
abline(h=0, lwd=2)
# 2nd derivative - calculate the inflection point
f2=c() # 2nd derivative
f2[1]=NA
# Same out-of-range caveat as above: f1[n+1] is NA, so f2[n] is NA.
for (i in 2:n) {
  f2[i]=(f1[i+1]-f1[i-1])/w
}
plot(data.101$Date, f2,
     type='o', pch=19, cex=0.8, lwd=2, col='brown2',
     main='Corn 215 2nd derivative - soil depth 8', xlab='', ylab='f2')
abline(h=0, lwd=2)
sensor8=data.frame(Date=data.101$Date, SM=sm8, f1, f2) # data frame with the results for sensor8
# Estimate ET rate - if f1<0 & f2>0: ET=|f1|
# (soil moisture falling while the decline decelerates -> attribute the
# daily loss to evapotranspiration)
sensor8$ET=c(rep(NA, n)) # create ET column
for (i in 3:(n-3)) {
  if (sensor8$f1[i]<0 & sensor8$f2[i]>0) {sensor8$ET[i]=abs(sensor8$f1[i])}
}
plot(sensor8$Date, sensor8$ET,
     type='h', lwd=3, col='deepskyblue',
     main='Corn 215 ET - soil depth 8', xlab='', ylab='ET (mm)',
     ylim=c(0, 3))
# Total ET from the entire soil profile ####
# NOTE(review): sensor1..sensor7 are built earlier in the file by the same
# per-sensor procedure.
sensor1$ET[is.na(sensor1$ET)]=0 # replaces NA values with zero
sensor2$ET[is.na(sensor2$ET)]=0 # replaces NA values with zero
sensor3$ET[is.na(sensor3$ET)]=0 # replaces NA values with zero
sensor4$ET[is.na(sensor4$ET)]=0 # replaces NA values with zero
sensor5$ET[is.na(sensor5$ET)]=0 # replaces NA values with zero
sensor6$ET[is.na(sensor6$ET)]=0 # replaces NA values with zero
sensor7$ET[is.na(sensor7$ET)]=0 # replaces NA values with zero
sensor8$ET[is.na(sensor8$ET)]=0 # replaces NA values with zero
# Day-by-day sum of the per-depth ET estimates
ET.tot=sensor1$ET+
  sensor2$ET+
  sensor3$ET+
  sensor4$ET+
  sensor5$ET+
  sensor6$ET+
  sensor7$ET+
  sensor8$ET
plot(data.101$Date, ET.tot,
     type='h', lwd=2,
     main='Corn 215 ET from entire soil profile', xlab='', ylab='ET (mm)',
     ylim=c(0,20))
# Heatmap ####
library(ggplot2)
library(hrbrthemes)
library(viridis)
x=data.101$Date
y=paste('sensor', 8:1)
ET=c(sensor8$ET, sensor7$ET, sensor6$ET, sensor5$ET, sensor4$ET, sensor3$ET, sensor2$ET, sensor1$ET) #Corn 8 sensors
#ET=c(sensor7$ET, sensor6$ET, sensor5$ET, sensor4$ET, sensor3$ET, sensor2$ET, sensor1$ET) #Alfalfa 7 sensors
ET[ET==0]=NA # turns zeros into NAs so zero-ET days render white
heatmap.data=expand.grid(x=x, y=y)
# NOTE(review): ET is matched to heatmap.data rows positionally via
# aes(fill = ET); this relies on expand.grid() varying x fastest so the
# row order matches the concatenation order of ET above -- confirm.
ggplot(heatmap.data, mapping=aes(x, y, fill=ET),) +
  geom_tile() +
  labs(x='', y='', title='Corn 215 Daily ET (mm)') +
  scale_fill_viridis(direction=-1, na.value='white') +
  theme_ipsum()
|
a <- 2
b <- 2
# NOTE(review): this shadows base::c() as a data object for the session;
# calls like c(1, 2) still resolve to the function, but the name choice
# is confusing.
c <- 2
(mat <- matrix(sample(c(TRUE, FALSE), 12, replace = TRUE), 3))
# 1)
# (mat == TRUE is redundant -- mat is already logical)
(ifelse(mat == TRUE,1,0))
# 2) but not so good since we lose the structure
as.numeric(mat)
# 3)
mat + 0
###########################################
# 3.3.3 Exercises
# 1)
advr38pkg::sum_every(1:10, 2)
# Sum consecutive groups of n elements of x.
#
# Reshapes x into an n-row matrix (one column per group of n consecutive
# elements, filled column-wise) and sums each column.
#
# Fix vs. original: the column count was written as 1/n instead of
# length(x)/n, so dim(x) <- c(n, 1/n) failed for every input. length(x)
# must be a multiple of n; this is now checked explicitly.
#
# Args:
#   x: numeric (or integer) vector whose length is a multiple of n.
#   n: group size.
# Returns: numeric vector of length length(x)/n with the group sums.
sum_every <- function(x, n) {
  stopifnot(length(x) %% n == 0)
  dim(x) <- c(n, length(x) / n)
  colSums(x)
}
sum_every(1:10, 2)
# 2)
str(iris)
# Logical mask of the numeric columns of iris
index <- sapply(iris, is.numeric)
iris[index]
sapply(iris[index], mean)
#or do
colMeans(iris[index])
# 3)
mat <- matrix(0, 10, 2); mat[c(5, 8, 9, 12, 15, 16, 17, 19)] <- 1; mat
(decode <- matrix(c(0,NA, 1,2), 2))
x <- mat[1,]
# Row-wise lookup: index decode by the sum of the two indicator columns.
# NOTE(review): x[1] + x[2] + 1 cannot distinguish (1,0) from (0,1) --
# both map to decode[2]; confirm that is intended.
apply(mat, 1, function(x) decode[x[1] + x[2] + 1])
# Element-wise variant (one value per matrix cell, not per row)
decode[mat + 1]
| /test-script.R | no_license | boztdk/test-git | R | false | false | 726 | r | a <- 2
b <- 2
c <- 2
(mat <- matrix(sample(c(TRUE, FALSE), 12, replace = TRUE), 3))
# 1)
(ifelse(mat == TRUE,1,0))
# 2) but not so good since we lose the structure
as.numeric(mat)
# 3)
mat + 0
###########################################
# 3.3.3 Exercises
# 1)
advr38pkg::sum_every(1:10, 2)
# Sum consecutive groups of n elements of x.
#
# Reshapes x into an n-row matrix (one column per group of n consecutive
# elements, filled column-wise) and sums each column.
#
# Fix vs. original: the column count was written as 1/n instead of
# length(x)/n, so dim(x) <- c(n, 1/n) failed for every input. length(x)
# must be a multiple of n; this is now checked explicitly.
#
# Args:
#   x: numeric (or integer) vector whose length is a multiple of n.
#   n: group size.
# Returns: numeric vector of length length(x)/n with the group sums.
sum_every <- function(x, n) {
  stopifnot(length(x) %% n == 0)
  dim(x) <- c(n, length(x) / n)
  colSums(x)
}
sum_every(1:10, 2)
# 2)
str(iris)
index <- sapply(iris, is.numeric)
iris[index]
sapply(iris[index], mean)
#or do
colMeans(iris[index])
# 3)
mat <- matrix(0, 10, 2); mat[c(5, 8, 9, 12, 15, 16, 17, 19)] <- 1; mat
(decode <- matrix(c(0,NA, 1,2), 2))
x <- mat[1,]
apply(mat, 1, function(x) decode[x[1] + x[2] + 1])
decode[mat + 1]
|
library(agricolae)
### Name: haynes
### Title: Data of AUDPC for nonparametrical stability analysis
### Aliases: haynes
### Keywords: datasets
### ** Examples
# NOTE(review): agricolae is attached a second time here (harmless but
# redundant -- the call above already loaded it).
library(agricolae)
data(haynes)
str(haynes)
| /data/genthat_extracted_code/agricolae/examples/haynes.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 210 | r | library(agricolae)
### Name: haynes
### Title: Data of AUDPC for nonparametrical stability analysis
### Aliases: haynes
### Keywords: datasets
### ** Examples
library(agricolae)
data(haynes)
str(haynes)
|
# Import and explore US EPA air-quality data, then rank California cities
# by pollution level.
# Fix vs. original: library(dplyr) is now attached before the first use
# of select()/%>% -- the original called those verbs before loading
# dplyr, which errors in a fresh session.
library(readr)
library(dplyr)
#Importing the data into R
US_EPA_data <- read.csv("C:/Users/gmutya048/Downloads/EPA_data.csv")
View(US_EPA_data)
# Make column names syntactically valid (removes the spaces)
names(US_EPA_data) <- make.names(names(US_EPA_data))
names(US_EPA_data) # to view the column names
# Number of rows in the data
nrow(US_EPA_data)
# Number of columns in the dataset
ncol(US_EPA_data)
# Summarize the data
summary(US_EPA_data)
str(US_EPA_data)
# Top few rows of selected columns
head(US_EPA_data[, c(9:11, 17)])
# Last few rows of selected columns
tail(US_EPA_data[, c(7:11, 17)])
# Counts per pollutant standard
table(US_EPA_data$Pollutant.Standard)
# How many unique states are listed in the data
select(US_EPA_data, State.Name) %>% unique %>% nrow
# Distinct state names
unique(US_EPA_data$State.Name)
# Keep complete California records and the columns of interest
EPA_Data <- filter(US_EPA_data, Completeness.Indicator == "Y"
                   & State.Name == 'California') %>%
  select(Parameter.Code, Parameter.Name, Pollutant.Standard, Method.Name, Datum, Observation.Count, Observation.Percent, State.Name, Arithmetic.Mean, City.Name, County.Name, CBSA.Name)
View(EPA_Data)
# Summarize one column
summary(EPA_Data$Observation.Count)
# NOTE(review): summary() does not take a probability vector as its
# second argument; quantile(x, probs = seq(0, 1, 0.1)) was probably
# intended -- kept as-is to preserve the original output.
summary(EPA_Data$Observation.Percent, seq(0, 1, 0.1))
# Question 1: Which California city is having more pollution levels
ranking <- group_by(EPA_Data, City.Name, County.Name) %>%
  summarize(EPA_Data = mean(Observation.Percent)) %>%
  as.data.frame %>%
  arrange(desc(EPA_Data))
ranking
# Carlsbad has more level of pollutants in air.
head(ranking, 10) # top 10 California cities with more pollution levels
tail(ranking, 10) # last 10 California cities with less pollution levels
# Number of records from the SFO/Oakland area in California
filter(EPA_Data, CBSA.Name == "San Francisco-Oakland-Hayward, CA") %>% nrow
data <- filter(EPA_Data, CBSA.Name == "San Francisco-Oakland-Hayward, CA" & City.Name == "Livermore")
data
| /R-Code/week 3.R | no_license | Gmutyala/ANLY-506-Code_Portfolio | R | false | false | 2,038 | r | #Importing the data into R
library(readr)
US_EPA_data <- read.csv("C:/Users/gmutya048/Downloads/EPA_data.csv")
View(US_EPA_data)
#to remove the spaces in column names
names(US_EPA_data) <- make.names(names(US_EPA_data))
names(US_EPA_data) # to view the column names
# to view number of rows in the data
nrow(US_EPA_data)
#to view number of columns in the dataset
ncol(US_EPA_data)
#TO summarize the data
summary(US_EPA_data)
str(US_EPA_data)
#to view top few rows of data,
head(US_EPA_data[, c(9:11, 17)])
#to view last few rows of data
tail(US_EPA_data[, c(7:11, 17)])
# to view data from any particular column
table(US_EPA_data$Pollutant.Standard)
#to check how many unique States are listed in the daa
select(US_EPA_data, State.Name) %>% unique %>% nrow
#to list distinct state names
unique(US_EPA_data$State.Name)
#filter data based on presence of Completeness.Indicator
library(dplyr)
EPA_Data<-filter(US_EPA_data, Completeness.Indicator == "Y"
& State.Name =='California') %>%
select(Parameter.Code, Parameter.Name, Pollutant.Standard, Method.Name, Datum,Observation.Count,Observation.Percent,State.Name, Arithmetic.Mean, City.Name, County.Name, CBSA.Name)
View(EPA_Data)
#To summarize any one column
summary(EPA_Data$Observation.Count)
summary(EPA_Data$Observation.Percent, seq(0, 1, 0.1))
#Question 1: Which California city is having more pollution levels
ranking <- group_by(EPA_Data, City.Name, County.Name) %>%
summarize(EPA_Data = mean(Observation.Percent)) %>%
as.data.frame %>%
arrange(desc(EPA_Data))
ranking
# Carlsbad has more level of pollutants in air.
head(ranking, 10) # to view top 10 california cities with more pollution levels
tail(ranking, 10) # last 10 california cities with less pollution levels
# no of cities from SFO Oakland area in California
filter(EPA_Data, CBSA.Name == "San Francisco-Oakland-Hayward, CA") %>% nrow
data<-filter(EPA_Data, CBSA.Name == "San Francisco-Oakland-Hayward, CA" & City.Name == "Livermore")
data
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/bench.r
\name{benchplot}
\alias{benchplot}
\title{Benchmark plot creation time.
Broken down into construct, build, render and draw times.}
\usage{
benchplot(x)
}
\arguments{
\item{x}{code to create ggplot2 plot}
}
\description{
Benchmark plot creation time.
Broken down into construct, build, render and draw times.
}
\examples{
benchplot(ggplot(mtcars, aes(mpg, wt)) + geom_point())
benchplot(ggplot(mtcars, aes(mpg, wt)) + geom_point() + facet_grid(. ~ cyl))
}
\keyword{internal}
| /man/benchplot.Rd | no_license | pricky2903/ggplot2 | R | false | false | 569 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/bench.r
\name{benchplot}
\alias{benchplot}
\title{Benchmark plot creation time.
Broken down into construct, build, render and draw times.}
\usage{
benchplot(x)
}
\arguments{
\item{x}{code to create ggplot2 plot}
}
\description{
Benchmark plot creation time.
Broken down into construct, build, render and draw times.
}
\examples{
benchplot(ggplot(mtcars, aes(mpg, wt)) + geom_point())
benchplot(ggplot(mtcars, aes(mpg, wt)) + geom_point() + facet_grid(. ~ cyl))
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sequenceCheck.R
\name{sequenceCheck}
\alias{sequenceCheck}
\title{Sequence Check Function}
\usage{
sequenceCheck(
sequence,
method = "stop",
outputType = "string",
nonstandardResidues = NA,
suppressAAWarning = FALSE,
suppressOutputMessage = FALSE
)
}
\arguments{
\item{sequence}{amino acid sequence as a single character string,
a vector of single characters, or an AAString object.
It also supports a single character string that specifies
the path to a .fasta or .fa file.}
\item{method}{Required Setting.
\code{method = c("stop", "warn")}. "stop" by default.
"stop" Reports invalid residues as an error and
prevents the function from continuing.
"warn" Reports invalid residues through a warning
Any invalid sequences will be reported as intended.}
\item{outputType}{Required Setting. "string" By default.
\code{outputType = c("string", "vector", "none")}
"string" returns the sequence as a single string of amino acids.
"vector" returns the sequence as a vector of individual characters.
"none" prevents the function from returning a sequence.}
\item{nonstandardResidues}{Optional setting.
Expands the amino acid alphabet.
NA or Character vector required.
Default values are "ACDEFGHIKLMNPQRSTVWY". Additional letters added here.
\code{nonstandardResidues = c("O,U")}
to allow Pyrrolysine (O) and Selenocysteine (U).}
\item{suppressAAWarning}{If using nonstandardResidues,
a warning will be issued.
Set \code{suppressAAWarning = TRUE}
to confirm the addition of non-standard residues.}
\item{suppressOutputMessage}{Set \code{suppressOutputMessage = T}
to prevent sequence validity message}
}
\value{
A message and sequence are returned.
If \code{suppressOutputMessage = T}, the message is not returned.
If \code{outputType = "None")}, the sequence is not returned.
Otherwise, outputType will determine the format of the returned sequence.
If the sequence contains an error, it will be reported
based on the value of method.
The Sequence will be assigned to the value "Sequence" if sequenceName
is not specified. Otherwise the sequence is assigned to the value of
sequenceName. This allows the sequences to be called by the user.
}
\description{
This is used to validate a sequence of amino acids.
It can additionally be used to load an amino acid sequence.
It can also be used to coerce a sequence into a specific format.
}
\examples{
#Amino acid sequences can be character strings
aaString <- "ACDEFGHIKLMNPQRSTVWY"
#Amino acid sequences can also be character vectors
aaVector <- c("A", "C", "D", "E", "F",
"G", "H", "I", "K", "L",
"M", "N", "P", "Q", "R",
"S", "T", "V", "W", "Y")
#Alternatively, .fasta files can also be used by providing
##The path to the file as a character string
\dontrun{
sequenceCheck(aaString)
sequenceCheck(aaVector)
#To allow O and U
sequenceCheck(aaString,
nonstandardResidues = c("O", "U"),
suppressAAWarning = TRUE)
#To turn off output message
sequenceCheck(aaString,
suppressOutputMessage = TRUE)
#To change string to be a vector
sequenceCheck(aaString,
outputType = "vector")
#To not return a sequence but check the input
sequenceCheck(aaVector,
outputType = "none")
}
}
| /man/sequenceCheck.Rd | no_license | alptaciroglu/idpr | R | false | true | 3,312 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sequenceCheck.R
\name{sequenceCheck}
\alias{sequenceCheck}
\title{Sequence Check Function}
\usage{
sequenceCheck(
sequence,
method = "stop",
outputType = "string",
nonstandardResidues = NA,
suppressAAWarning = FALSE,
suppressOutputMessage = FALSE
)
}
\arguments{
\item{sequence}{amino acid sequence as a single character string,
a vector of single characters, or an AAString object.
It also supports a single character string that specifies
the path to a .fasta or .fa file.}
\item{method}{Required Setting.
\code{method = c("stop", "warn")}. "stop" by default.
"stop" Reports invalid residues as an error and
prevents the function from continuing.
"warn" Reports invalid residues through a warning
Any invalid sequences will be reported as intended.}
\item{outputType}{Required Setting. "string" By default.
\code{outputType = c("string", "vector", "none")}
"string" returns the sequence as a single string of amino acids.
"vector" returns the sequence as a vector of individual characters.
"none" prevents the function from returning a sequence.}
\item{nonstandardResidues}{Optional setting.
Expands the amino acid alphabet.
NA or Character vector required.
Default values are "ACDEFGHIKLMNPQRSTVWY". Additional letters added here.
\code{nonstandardResidues = c("O,U")}
to allow Pyrrolysine (O) and Selenocysteine (U).}
\item{suppressAAWarning}{If using nonstandardResidues,
a warning will be issued.
Set \code{suppressAAWarning = TRUE}
to confirm the addition of non-standard residues.}
\item{suppressOutputMessage}{Set \code{suppressOutputMessage = T}
to prevent sequence validity message}
}
\value{
A message and sequence are returned.
If \code{suppressOutputMessage = T}, the message is not returned.
If \code{outputType = "none"}, the sequence is not returned.
Otherwise, outputType will determine the format of the returned sequence.
If the sequence contains an error, it will be reported
based on the value of method.
The Sequence will be assigned to the value "Sequence" if sequenceName
is not specified. Otherwise the sequence is assigned to the value of
sequenceName. This allows the sequences to be called by the user.
}
\description{
This is used to validate a sequence of amino acids.
It can additionally be used to load an amino acid sequence.
It can also be used to coerce a sequence into a specific format.
}
\examples{
#Amino acid sequences can be character strings
aaString <- "ACDEFGHIKLMNPQRSTVWY"
#Amino acid sequences can also be character vectors
aaVector <- c("A", "C", "D", "E", "F",
"G", "H", "I", "K", "L",
"M", "N", "P", "Q", "R",
"S", "T", "V", "W", "Y")
#Alternatively, .fasta files can also be used by providing
##The path to the file as a character string
\dontrun{
sequenceCheck(aaString)
sequenceCheck(aaVector)
#To allow O and U
sequenceCheck(aaString,
nonstandardResidues = c("O", "U"),
suppressAAWarning = TRUE)
#To turn off output message
sequenceCheck(aaString,
suppressOutputMessage = TRUE)
#To change string to be a vector
sequenceCheck(aaString,
outputType = "vector")
#To not return a sequence but check the input
sequenceCheck(aaVector,
outputType = "none")
}
}
|
# Copyright (C) 2020 Hocine Bendou <hocine@sanbi.ac.za>
# Abdulazeez Giwa <3901476@myuwc.ac.za>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>
library(ABC.RAP)
# Annotation/probe objects bundled with the ABC.RAP package
data("nonspecific_probes")
data("annotation_file")
# Methylation data table; presumably probes in rows and samples in
# columns -- TODO confirm the CSV layout.
x <- read.csv("RMvsCNM-update.csv")
# Run the ABC.RAP workflow. NOTE(review): the positional arguments look
# like two sample-column ranges (1-18 and 19-43), a p-value cutoff
# (1e-7), methylation-difference thresholds (0.4 / -0.4) and mean-
# methylation cutoffs (0.8 / 0.2) -- confirm against ?process.ABC.RAP.
process.ABC.RAP(x, 1, 18, 19, 43, 1e-7, 0.4, -0.4, 0.8, 0.2)
| /R/RMAvCMN/methylation.R | no_license | SANBI-SA/NBMethyl | R | false | false | 917 | r | # Copyright (C) 2020 Hocine Bendou <hocine@sanbi.ac.za>
# Abdulazeez Giwa <3901476@myuwc.ac.za>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>
library(ABC.RAP)
data("nonspecific_probes")
data("annotation_file")
x <- read.csv("RMvsCNM-update.csv")
process.ABC.RAP(x, 1, 18, 19, 43, 1e-7, 0.4, -0.4, 0.8, 0.2)
|
library(metR)
### Name: WrapCircular
### Title: Wrap periodic data to any range
### Aliases: WrapCircular RepeatCircular
### ** Examples
library(ggplot2)
library(data.table)
data(geopotential)
g <- ggplot(geopotential[date == date[1]], aes(lon, lat)) +
    geom_contour(aes(z = gh)) +
    coord_polar() +
    ylim(c(-90, -10))
# This plot has problems in lon = 0
g
# But using WrapCircular solves it.
g %+% WrapCircular(geopotential[date == date[1]], "lon", c(0, 360))
# Additionally, the data can simply be repeated to the right and
# left
ggplot(WrapCircular(geopotential[date == date[1]], wrap = c(-180, 360 + 180)),
       aes(lon, lat)) +
    geom_contour(aes(z = gh))
# The same behaviour is now implemented directly in geom_contour2
# and geom_contour_fill
ggplot(geopotential[date == date[1]], aes(lon, lat)) +
    geom_contour2(aes(z = gh), xwrap = c(-180, 360 + 180))
| /data/genthat_extracted_code/metR/examples/WrapCircular.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 882 | r | library(metR)
### Name: WrapCircular
### Title: Wrap periodic data to any range
### Aliases: WrapCircular RepeatCircular
### ** Examples
library(ggplot2)
library(data.table)
data(geopotential)
g <- ggplot(geopotential[date == date[1]], aes(lon, lat)) +
geom_contour(aes(z = gh)) +
coord_polar() +
ylim(c(-90, -10))
# This plot has problems in lon = 0
g
# But using WrapCircular solves it.
g %+% WrapCircular(geopotential[date == date[1]], "lon", c(0, 360))
# Aditionally data can be just repeatet to the right and
# left
ggplot(WrapCircular(geopotential[date == date[1]], wrap = c(-180, 360 + 180)),
aes(lon, lat)) +
geom_contour(aes(z = gh))
# The same behaviour is now implemented directly in geom_contour2
# and geom_contour_fill
ggplot(geopotential[date == date[1]], aes(lon, lat)) +
geom_contour2(aes(z = gh), xwrap = c(-180, 360 + 180))
|
# Install required packages (original header comment: 安装包, "install
# packages"). Consolidated into a single vectorised install.packages()
# call -- same packages as the original one-call-per-package version,
# but with a single dependency-resolution pass.
pkgs <- c(
  "abind", "magrittr", "plyr", "Rcpp", "reshape2", "Rserve",
  "stringi", "stringr", "DBI", "splitstackshape", "data.table", "RMySQL"
)
install.packages(pkgs)
install.packages("tmcn") | /R/rscript/package_install.R | no_license | ming19871211/docker-tools | R | false | false | 371 | r | #安装包#
install.packages("abind")
install.packages("magrittr")
install.packages("plyr")
install.packages("Rcpp")
install.packages("reshape2")
install.packages("Rserve")
install.packages("stringi")
install.packages("stringr")
install.packages("DBI")
install.packages("splitstackshape")
install.packages("data.table")
install.packages("RMySQL")
install.packages("tmcn") |
\name{get_notifications}
\alias{get_notifications}
\title{Get notifications}
\description{Get a specific notification or all notifications}
\usage{
get_notifications(id = NULL, only_new = FALSE, ...)
}
\arguments{
\item{id}{A conversation ID. If \code{NULL}, all notifications (or all new notifications, depending on \code{only_new}) are returned.}
\item{only_new}{A logical indicating whether only new notifications should be returned. Default is \code{FALSE}.}
\item{...}{Other arguments passed to HTTP request functions, for example: \code{token} (an OAuth2.0 token), which is required.}
}
\details{Retrieves a named notification, possibly returned by \code{\link{get_notifications}}, or all (new) notifications.}
\value{An object of class \dQuote{imgur_notification}.}
%\references{}
\author{Thomas J. Leeper}
\seealso{
\code{\link{mark_notification}}
}
\examples{
\dontrun{
tkn <- imgur_login()
get_notifications(only_new = TRUE, token = tkn)
}
}
| /man/get_notifications.Rd | no_license | Zedseayou/imguR | R | false | false | 959 | rd | \name{get_notifications}
\alias{get_notifications}
\title{Get notifications}
\description{Get a specific notification or all notifications}
\usage{
get_notifications(id = NULL, only_new = FALSE, ...)
}
\arguments{
\item{id}{A conversation ID. If \code{NULL}, all notifications (or all new notifications, depending on \code{only_new}) are returned.}
\item{only_new}{A logical indicating whether only new notifications should be returned. Default is \code{FALSE}.}
\item{...}{Other arguments passed to HTTP request functions, for example: \code{token} (an OAuth2.0 token), which is required.}
}
\details{Retrieves a named notification, possibly returned by \code{\link{get_notifications}}, or all (new) notifications.}
\value{An object of class \dQuote{imgur_notification}.}
%\references{}
\author{Thomas J. Leeper}
\seealso{
\code{\link{mark_notification}}
}
\examples{
\dontrun{
tkn <- imgur_login()
get_notifications(only_new = TRUE, token = tkn)
}
}
|
# Read a PEST template (.tpl) file and return the parameter names.
#
# The first line of a .tpl file is the header (e.g. "ptf #"); in the body
# every parameter is delimited by a pair of "#" markers, e.g. "# name #".
# This extracts the token immediately following each opening marker.
#
# Fixes vs. original: the extraction is vectorised (no vector growth in a
# loop), and a template with zero parameters now returns character(0)
# instead of erroring inside seq(1, 0, 2).
#
# Args:
#   tpl_fil: path to the PEST .tpl file.
# Returns: character vector of parameter names (note: despite the original
#   header comment, a character vector is returned, not a data.frame).
read_pest_tpl <- function(tpl_fil) {
  tpl_str <- readLines(tpl_fil, n = -1)
  # Tokenise everything after the header line on single spaces
  par_str <- unlist(strsplit(tpl_str[-1], " "))
  loc_sep <- which(par_str == "#")
  if ((length(loc_sep) %% 2) != 0) stop("error in tpl file")
  # Opening markers are the odd-numbered "#" positions; the parameter
  # name is the token right after each opening marker.
  opening <- loc_sep[c(TRUE, FALSE)]
  par_str[opening + 1]
}
| /R/read_pest_tpl.R | permissive | NVE/hongR | R | false | false | 451 | r | # read pest tpl file and return as a data.frame for hbv paramters
# Read a PEST template (.tpl) file and return the parameter names.
#
# The first line of a .tpl file is the header (e.g. "ptf #"); in the body
# every parameter is delimited by a pair of "#" markers, e.g. "# name #".
# This extracts the token immediately following each opening marker.
#
# Fixes vs. original: the extraction is vectorised (no vector growth in a
# loop), and a template with zero parameters now returns character(0)
# instead of erroring inside seq(1, 0, 2).
#
# Args:
#   tpl_fil: path to the PEST .tpl file.
# Returns: character vector of parameter names.
read_pest_tpl <- function(tpl_fil) {
  tpl_str <- readLines(tpl_fil, n = -1)
  # Tokenise everything after the header line on single spaces
  par_str <- unlist(strsplit(tpl_str[-1], " "))
  loc_sep <- which(par_str == "#")
  if ((length(loc_sep) %% 2) != 0) stop("error in tpl file")
  # Opening markers are the odd-numbered "#" positions; the parameter
  # name is the token right after each opening marker.
  opening <- loc_sep[c(TRUE, FALSE)]
  par_str[opening + 1]
}
|
#-- Read the data for days 2007-02-01 and 2007-02-02
#-- Read data up to 2007-02-02 (assumes the first 70000 rows cover
#-- those dates -- TODO confirm against the source file)
data.df <- read.table(file="household_power_consumption.txt",header=T,sep=";",
                      nrows=70000,
                      na.strings="?",stringsAsFactors=F,
                      colClasses=c("character","character",rep("numeric",7)),)
#-- Select data for days 2007-02-01 and 2007-02-02
ind <- which(data.df$Date %in% c("1/2/2007","2/2/2007"))
#-- Add one extra entry for the day "3/2/2007" at time 00:00:00
#-- (gives the x axis a closing tick for the following day)
ind <- c(ind,ind[length(ind)]+1)
data.df <- data.df[ind,]
data.df$Date <- as.Date(x=data.df$Date,format="%d/%m/%Y")
head(data.df)
sapply(X=data.df,class)
# Construct Plot4 (no axis labels) --------------------------------------------------
# Construct xaxis labels ----#
ind_day2 <- which(data.df$Date=="2007-02-02")[1] #- first row with date =="2007-02-02"
xtick_pos.vec <- c(1,ind_day2,length(data.df$Date))
xlabel.vec <- weekdays(data.df$Date[xtick_pos.vec],abbreviate=T)
# 2 x 2 panel layout for the four sub-plots
par(mfrow=c(2,2))
#-- Plot upper left
plot(data.df$Global_active_power,type="l",xaxt="n",xlab="",col='black',cex.axis=0.9,cex.lab=0.9,
     ylab="Global Active Power")
axis(side=1,at=xtick_pos.vec,labels=xlabel.vec,cex.axis=0.9,cex.lab=0.9)
#-- Plot upper right
plot(data.df$Voltage,type="l",xaxt="n",xlab="datetime",cex.axis=0.9,cex.lab=0.9,ylab="Voltage")
axis(side=1,at=xtick_pos.vec,labels=xlabel.vec,cex.axis=0.9,cex.lab=0.9)
#-- Plot lower left
plot(data.df$Sub_metering_1,type="l",xaxt="n",xlab="",col='black',cex.axis=0.9,cex.lab=0.9, ylab="Energy sub metering")
lines(data.df$Sub_metering_2,type="l",xaxt="n",xlab="",col='red',cex.axis=0.9)
lines(data.df$Sub_metering_3,type="l",xaxt="n",xlab="",col='blue',cex.axis=0.9)
axis(side=1,at=xtick_pos.vec,labels=xlabel.vec,cex.axis=0.9,cex.lab=0.9)
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1,
       col=c("black","red","blue"),cex=0.7,bty="n")
#-- Plot lower right
plot(data.df$Global_reactive_power,type="l",xaxt="n",xlab="datetime",cex.axis=0.9,cex.lab=0.9, ylab="Global_reactive_power")
axis(side=1,at=xtick_pos.vec,labels=xlabel.vec,cex.axis=0.9,cex.lab=0.9)
# Restore single-panel layout
par(mfrow=c(1,1))
# Save plot to plot4.png
# NOTE(review): dev.copy() re-renders the screen device; opening png()
# first and re-plotting would give more predictable sizing.
dev.copy(device=png,file="plot4.png",width=480,height=480)
dev.off()
| /plot4.R | no_license | tvaidis/explore-anal-project01 | R | false | false | 2,282 | r | #-- Read the data for days 2007-02-01 and 2007-02-02
#-- Read data up to 2007-02-02
data.df <- read.table(file="household_power_consumption.txt",header=T,sep=";",
nrows=70000,
na.strings="?",stringsAsFactors=F,
colClasses=c("character","character",rep("numeric",7)),)
#-- Select data for days 2007-02-01 and 2007-02-02
ind <- which(data.df$Date %in% c("1/2/2007","2/2/2007"))
#-- Add one extra entry for the day "3/2/2007" at time 00:00:00
ind <- c(ind,ind[length(ind)]+1)
data.df <- data.df[ind,]
data.df$Date <- as.Date(x=data.df$Date,format="%d/%m/%Y")
head(data.df)
sapply(X=data.df,class)
# Construct Plot4 (no axis labels) --------------------------------------------------
# Construct xaxis labels ----#
ind_day2 <- which(data.df$Date=="2007-02-02")[1] #- first row with date =="2007-02-02"
xtick_pos.vec <- c(1,ind_day2,length(data.df$Date))
xlabel.vec <- weekdays(data.df$Date[xtick_pos.vec],abbreviate=T)
par(mfrow=c(2,2))
#-- Plot upper left
plot(data.df$Global_active_power,type="l",xaxt="n",xlab="",col='black',cex.axis=0.9,cex.lab=0.9,
ylab="Global Active Power")
axis(side=1,at=xtick_pos.vec,labels=xlabel.vec,cex.axis=0.9,cex.lab=0.9)
#-- Plot upper right
plot(data.df$Voltage,type="l",xaxt="n",xlab="datetime",cex.axis=0.9,cex.lab=0.9,ylab="Voltage")
axis(side=1,at=xtick_pos.vec,labels=xlabel.vec,cex.axis=0.9,cex.lab=0.9)
#-- Plot lower left
plot(data.df$Sub_metering_1,type="l",xaxt="n",xlab="",col='black',cex.axis=0.9,cex.lab=0.9, ylab="Energy sub metering")
lines(data.df$Sub_metering_2,type="l",xaxt="n",xlab="",col='red',cex.axis=0.9)
lines(data.df$Sub_metering_3,type="l",xaxt="n",xlab="",col='blue',cex.axis=0.9)
axis(side=1,at=xtick_pos.vec,labels=xlabel.vec,cex.axis=0.9,cex.lab=0.9)
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1,
col=c("black","red","blue"),cex=0.7,bty="n")
#-- Plot lower right
plot(data.df$Global_reactive_power,type="l",xaxt="n",xlab="datetime",cex.axis=0.9,cex.lab=0.9, ylab="Global_reactive_power")
axis(side=1,at=xtick_pos.vec,labels=xlabel.vec,cex.axis=0.9,cex.lab=0.9)
par(mfrow=c(1,1))
# Save plot to plot4.png
dev.copy(device=png,file="plot4.png",width=480,height=480)
dev.off()
|
library(GenomicAlignments)
# Read run parameters (tab-separated Parameter/Value table)
param.table = read.table("parameters.txt", header=T, sep="\t")
output.folder = as.character(param.table$Value[param.table$Parameter == "Raw_Code_PC"])
alignment.folder = as.character(param.table$Value[param.table$Parameter == "Alignment_Folder_PC"])
genome=as.character(param.table$Value[param.table$Parameter == "genome"])
htseq.anno.folder = as.character(param.table$Value[param.table$Parameter == "HTseq_input_folder"])
total.reads.file = as.character(param.table$Value[param.table$Parameter == "total_counts_file"])
aligned.stats.file = as.character(param.table$Value[param.table$Parameter == "aligned_stats_file"])
setwd(output.folder)
# Chromosome-length table and exon annotations for the chosen genome
# (Windows-style "\\" path separators)
length.file = paste(htseq.anno.folder,"\\",genome,"_chr_length.txt",sep="")
full.annotation.file = paste(htseq.anno.folder,"\\TxDb_",genome,"_exon_annotations.txt",sep="")
length.table = read.table(length.file, header=T, sep="\t")
chr_length = as.numeric(length.table$Length)
names(chr_length) = as.character(length.table$Chr)
total.reads.table = read.table(total.reads.file, header=T, sep="\t")
sampleIDs = as.character(total.reads.table$Sample)
exon.info = read.table(full.annotation.file, header=T, sep="\t")
#remove non-canonical chromosomes (names containing "_")
nonCanonical <- grep("_", exon.info$chr)
if (length(nonCanonical) > 0) {
	exon.info = exon.info[-nonCanonical, ]
}
chromosomes = as.character(levels(as.factor(as.character(exon.info$chr))))
aligned.reads = rep(0, times=length(sampleIDs))
# NOTE(review): exonic.reads is allocated but never filled below, and the
# exon_reads list inside the loop is never populated -- the script only
# reports aligned (not exonic) read counts despite its name.
exonic.reads = rep(0, times = length(sampleIDs))
bam.files = list.files(alignment.folder, pattern=".bam$")
if(length(grep("sort.bam",bam.files)) > 0){
	bam.files = bam.files[-grep("sort.bam",bam.files)]
}
# NOTE(review): sampleIDs is overwritten here; the IDs read from
# total.reads.table above are discarded.
sampleIDs = sub(".bam$","",bam.files)
for(i in 1:length(bam.files)){
	inputfile = paste(alignment.folder, bam.files[i], sep="/")
	print(inputfile)
	exon_reads <- list()
	total_reads <- list()
	# Collect read names per chromosome, then count unique names so a
	# read aligned to several chromosomes is counted only once.
	for(chr in chromosomes){
		print(chr)
		data <- readGAlignments(file = inputfile, use.names = TRUE,
					param = ScanBamParam(which = GRanges(chr, IRanges(1, chr_length[chr]))))
		total_reads[[chr]] <- names(data)
	}#end for(chr in chromosomes)
	aligned.reads[i] = length(unique(unlist(total_reads)))
	print(aligned.reads)
}#end for(i in 1:length(bam.files))
stat.table = data.frame(Sample = sampleIDs, aligned.reads = aligned.reads)
write.table(stat.table, aligned.stats.file, row.names=F, sep="\t", quote=F)
print(warnings())
| /TopHat_Workflow/exonic_read_counts.R | no_license | luyang-coh/RNAseq_templates | R | false | false | 2,431 | r | library(GenomicAlignments)
# Count uniquely aligned reads per BAM file, one chromosome at a time, and
# write a per-sample summary table.  All paths and file names are read from a
# tab-delimited parameters.txt in the current working directory.
param.table = read.table("parameters.txt", header=T, sep="\t")
output.folder = as.character(param.table$Value[param.table$Parameter == "Raw_Code_PC"])
alignment.folder = as.character(param.table$Value[param.table$Parameter == "Alignment_Folder_PC"])
genome=as.character(param.table$Value[param.table$Parameter == "genome"])
htseq.anno.folder = as.character(param.table$Value[param.table$Parameter == "HTseq_input_folder"])
total.reads.file = as.character(param.table$Value[param.table$Parameter == "total_counts_file"])
aligned.stats.file = as.character(param.table$Value[param.table$Parameter == "aligned_stats_file"])
setwd(output.folder)
# Annotation inputs (Windows-style "\\" separators, matching the *_PC paths).
length.file = paste(htseq.anno.folder,"\\",genome,"_chr_length.txt",sep="")
full.annotation.file = paste(htseq.anno.folder,"\\TxDb_",genome,"_exon_annotations.txt",sep="")
# Named numeric vector of chromosome lengths, used below to query whole
# chromosomes via GRanges(chr, IRanges(1, chr_length[chr])).
length.table = read.table(length.file, header=T, sep="\t")
chr_length = as.numeric(length.table$Length)
names(chr_length) = as.character(length.table$Chr)
total.reads.table = read.table(total.reads.file, header=T, sep="\t")
sampleIDs = as.character(total.reads.table$Sample)
exon.info = read.table(full.annotation.file, header=T, sep="\t")
# Remove non-canonical chromosomes (names containing "_", e.g. chr1_random).
nonCanonical <- grep("_", exon.info$chr)
if (length(nonCanonical) > 0) {
    exon.info = exon.info[-nonCanonical, ]
}
chromosomes = as.character(levels(as.factor(as.character(exon.info$chr))))
# NOTE(review): aligned.reads is sized from the total-counts table, while
# sampleIDs is re-derived from the BAM listing below -- confirm both agree.
aligned.reads = rep(0, times=length(sampleIDs))
# One BAM per sample; ignore coordinate-sorted copies ("*.sort.bam").
bam.files = list.files(alignment.folder, pattern=".bam$")
if(length(grep("sort.bam",bam.files)) > 0){
    bam.files = bam.files[-grep("sort.bam",bam.files)]
}
# Sample names are the BAM file names with the ".bam" extension removed.
sampleIDs = sub(".bam$","",bam.files)
# seq_along() (rather than 1:length()) is safe when no BAM files are found.
for(i in seq_along(bam.files)){
    inputfile = paste(alignment.folder, bam.files[i], sep="/")
    print(inputfile)
    total_reads <- list()
    for(chr in chromosomes){
        print(chr)
        data <- readGAlignments(file = inputfile, use.names = TRUE,
            param = ScanBamParam(which = GRanges(chr, IRanges(1, chr_length[chr]))))
        total_reads[[chr]] <- names(data)
    }#end for(chr in chromosomes)
    # unique() ensures a read aligned on several chromosomes is counted once.
    aligned.reads[i] = length(unique(unlist(total_reads)))
    print(aligned.reads)
}#end for(i in seq_along(bam.files))
# Write the per-sample aligned read counts as a tab-delimited table.
stat.table = data.frame(Sample = sampleIDs, aligned.reads = aligned.reads)
write.table(stat.table, aligned.stats.file, row.names=F, sep="\t", quote=F)
print(warnings())
|
#' Summary statistics of interactions for a given feature set
#'
#' This function will calculate summary statistics for each element in the
#' given feature set, including the number of interactions (the sum of all
#' interaction counts), number of unique interactions and number of trans-
#' (interchromosomal) interactions. It also returns some statistics for the
#' distances of interactions for all interactions of the feature, and for the
#' different interaction types e.g. promoter-distal.
#'
#' @param GIObject An annotated GInteractions object
#' @param features A GRanges object containing the feature set
#' @param feature.name The name of the feature set
#' @param distance.method Method for calculating distances between anchors, see
#' ?calculateDistances
#' @param annotate.self Logical. Indicates whether to annotate self interactions,
#' i.e. where a feature in `features` overlaps both anchors of an interaction.
#' Default: FALSE.
#'
#' @return A data frame with one line for each range in `features`
#' @rdname summariseByFeatures
#' @docType methods
#' @import GenomicRanges
#' @importFrom stats median
#' @export
#' @examples
#' data('hic_example_data')
#' data('mm9_refseq_promoters')
#' annotateInteractions(hic_example_data, list(promoter = mm9_refseq_promoters))
#' summariseByFeatures(hic_example_data, mm9_refseq_promoters[1:10], 'promoter')
setGeneric("summariseByFeatures", function(GIObject, features, feature.name, distance.method = "midpoint", annotate.self = FALSE) {
standardGeneric("summariseByFeatures")
})
#' @rdname summariseByFeatures
#' @export
setMethod("summariseByFeatures", "GInteractions", function(GIObject, features, feature.name,
    distance.method = "midpoint", annotate.self = FALSE) {
    # The anchors must carry node.class metadata, added by annotateInteractions().
    if (!("node.class" %in% names(elementMetadata(regions(GIObject))))) {
        stop("GIObject has not been annotated")
    }
    potential.node.classes <- unique(GIObject@regions$node.class)
    feature.names.full <- .get_gr_names(features)
    feature.names <- unique(feature.names.full)
    # Result table: one row per unique feature name.  Columns: feature id,
    # total/unique/trans interaction counts, two columns per node class,
    # optionally two "self" columns per non-distal node class, and five
    # distance summary statistics.
    summary.df <- data.frame(matrix(0,
        ncol = (5 + (length(potential.node.classes) * 2) +
            ifelse(annotate.self, (length(potential.node.classes) - 1) * 2, 0) + 5),
        nrow = length(feature.names)))
    summary.names <- c(
        paste(capitalize(feature.name), "id", sep = "."),
        paste("numberOf", capitalize(feature.name), "Interactions", sep = ""),
        paste("numberOf", capitalize(feature.name), "UniqueInteractions", sep = ""),
        paste("numberOf", capitalize(feature.name), "InterChromosomalInteractions", sep = ""),
        paste("numberOf", capitalize(feature.name), "UniqueInterChromosomalInteractions", sep = ""),
        paste("numberOf", capitalize(feature.name), capitalize(potential.node.classes), "Interactions", sep = ""),
        paste("numberOfUnique", capitalize(feature.name), capitalize(potential.node.classes), "Interactions", sep = ""),
        paste(capitalize(feature.name), "DistanceMedian", sep = ""),
        paste(capitalize(feature.name), "DistanceMean", sep = ""),
        paste(capitalize(feature.name), "DistanceMinimum", sep = ""),
        paste(capitalize(feature.name), "DistanceMaximum", sep = ""),
        paste(capitalize(feature.name), "DistanceWeightedMedian", sep = ""))
    if (annotate.self) {
        # "distal" is excluded from the self-interaction columns.
        pc <- potential.node.classes[-which(potential.node.classes == "distal")]
        summary.names <- append(summary.names, c(
            paste("numberOfSelf", capitalize(feature.name), capitalize(pc), "Interactions", sep = ""),
            paste("numberOfSelfUnique", capitalize(feature.name), capitalize(pc), "Interactions", sep = "")))
    }
    names(summary.df) <- summary.names
    summary.df[, paste(capitalize(feature.name), "id", sep = ".")] <- feature.names
    anchor_one <- anchorOne(GIObject)
    anchor_two <- anchorTwo(GIObject)
    # Overlap each feature with both anchor sets; keep only hits where the
    # anchor has been annotated with this feature set's node class.
    one.ol <- findOverlaps(features, anchor_one)
    two.ol <- findOverlaps(features, anchor_two)
    one.indexes <- queryHits(one.ol)[anchor_one[subjectHits(one.ol)]$node.class == feature.name]
    two.indexes <- queryHits(two.ol)[anchor_two[subjectHits(two.ol)]$node.class == feature.name]
    features.with.interactions.indexes <- unique(c(one.indexes, two.indexes))
    features.with.interactions.indexes <- features.with.interactions.indexes[order(features.with.interactions.indexes)]
    feature.names.with.interactions <- feature.names.full[features.with.interactions.indexes]
    # Plain data.frame copies of the anchors for .calculateDistances.df().
    anchor_one.df <- data.frame(seqnames = as.character(seqnames(anchor_one)), start = start(anchor_one),
        end = end(anchor_one), width = width(anchor_one),
        strand = as.character(strand(anchor_one)), stringsAsFactors = FALSE)
    anchor_two.df <- data.frame(seqnames = as.character(seqnames(anchor_two)), start = start(anchor_two),
        end = end(anchor_two), width = width(anchor_two),
        strand = as.character(strand(anchor_two)), stringsAsFactors = FALSE)
    for (fn in unique(feature.names.with.interactions)) {
        # i: row of summary.df for this feature name; iss: all ranges in
        # `features` sharing that name (names may be duplicated).
        i <- which(summary.df[, paste(capitalize(feature.name), "id", sep = ".")] == fn)
        iss <- which(feature.names.full == fn)
        interactions <- unique(c(subjectHits(one.ol[queryHits(one.ol) %in% iss]),
            subjectHits(two.ol[queryHits(two.ol) %in% iss])))
        interactions.one <- unique(subjectHits(one.ol[queryHits(one.ol) %in% iss]))
        interactions.two <- unique(subjectHits(two.ol[queryHits(two.ol) %in% iss]))
        numberOfInteractions <- sum(interactionCounts(GIObject)[interactions])
        numberOfUniqueInteractions <- length(interactions)
        summary.df[i, paste("numberOf", capitalize(feature.name), "Interactions", sep = "")] <- numberOfInteractions
        summary.df[i, paste("numberOf", capitalize(feature.name), "UniqueInteractions", sep = "")] <- numberOfUniqueInteractions
        # Trans interactions: the two anchors lie on different chromosomes.
        intercis <- interactions[as.character(seqnames(anchor_one[interactions])) != as.character(seqnames(anchor_two[interactions]))]
        if (length(intercis) > 0) {
            numberOfInterChromosomalInteractions <- sum(interactionCounts(GIObject)[intercis])
            summary.df[i, paste("numberOf", capitalize(feature.name), "InterChromosomalInteractions", sep = "")] <- numberOfInterChromosomalInteractions
            numberOfUniqueInterChromosomalInteractions <- length(intercis)
            summary.df[i, paste("numberOf", capitalize(feature.name), "UniqueInterChromosomalInteractions", sep = "")] <- numberOfUniqueInterChromosomalInteractions
        }
        # Per-node-class counts, e.g. promoter-distal, promoter-promoter.
        for (nc in potential.node.classes) {
            nc1Counts <- 0
            nc1Indexes <- c()
            nc2Counts <- 0
            nc2Indexes <- c()
            if (length(interactions.one) > 0) {
                nc1Indexes <- interactions.one[which(anchor_one$node.class[interactions.one] == feature.name &
                    anchor_two$node.class[interactions.one] == nc)]
                nc1Counts <- sum(interactionCounts(GIObject)[nc1Indexes])
            }
            if (length(interactions.two) > 0) {
                nc2Indexes <- interactions.two[which(anchor_one$node.class[interactions.two] == nc &
                    anchor_two$node.class[interactions.two] == feature.name)]
                if (nc == feature.name) {
                    # Interactions seen from both anchors are feature-feature
                    # self interactions; record them separately, then drop them
                    # from nc2Indexes to stop double counting of interactions
                    # where both anchors are in the same feature.
                    if (length(nc2Indexes[(nc2Indexes %in% nc1Indexes)]) > 0) {
                        summary.df[i, paste("numberOfSelf", capitalize(feature.name), capitalize(nc), "Interactions", sep = "")] <-
                            sum(interactionCounts(GIObject)[nc2Indexes[(nc2Indexes %in% nc1Indexes)]])
                        summary.df[i, paste("numberOfSelfUnique", capitalize(feature.name), capitalize(nc), "Interactions", sep = "")] <-
                            length(nc2Indexes[(nc2Indexes %in% nc1Indexes)])
                    }
                    nc2Indexes <- nc2Indexes[!(nc2Indexes %in% nc1Indexes)]
                }
                nc2Counts <- sum(interactionCounts(GIObject)[nc2Indexes])
            }
            if ((nc1Counts > 0) || (nc2Counts > 0)) {
                numberOfNCInteractions <- nc1Counts + nc2Counts
                summary.df[i, paste("numberOf", capitalize(feature.name), capitalize(nc), "Interactions", sep = "")] <- numberOfNCInteractions
                numberOfUniqueNCInteractions <- length(c(nc1Indexes, nc2Indexes))
                summary.df[i, paste("numberOfUnique", capitalize(feature.name), capitalize(nc), "Interactions", sep = "")] <- numberOfUniqueNCInteractions
            }
        }
        # annotate.self: interactions where the same feature id occurs at both
        # ends under a different node class.
        if (annotate.self) {
            for (nc in potential.node.classes) {
                if (nc != feature.name && nc != "distal") {
                    ncs1Counts <- 0
                    ncs1Indexes <- c()
                    ncs2Counts <- 0
                    ncs2Indexes <- c()
                    if (length(interactions.one) > 0) {
                        ncs1Indexes <- interactions.one[which(anchor_one$node.class[interactions.one] == feature.name &
                            anchor_two$node.class[interactions.one] == nc)]
                        # NOTE(review): this replaces the index vector with a
                        # logical value/mask before subsetting the counts --
                        # confirm this matches the intended semantics.
                        ncs1Indexes <- names(features)[i] %in% elementMetadata(anchor_two)[[paste(nc, "id", sep = ".")]][ncs1Indexes]
                        ncs1Counts <- sum(interactionCounts(GIObject)[ncs1Indexes])
                    }
                    if (length(interactions.two) > 0) {
                        # BUGFIX: the first assignment previously went to a
                        # misspelled variable (nc2sIndexes), so ncs2Indexes was
                        # always empty and ncs2Counts always 0.
                        ncs2Indexes <- interactions.two[which(anchor_one$node.class[interactions.two] == nc &
                            anchor_two$node.class[interactions.two] == feature.name)]
                        ncs2Indexes <- names(features)[i] %in% elementMetadata(anchor_one)[[paste(nc, "id", sep = ".")]][ncs2Indexes]
                        ncs2Counts <- sum(interactionCounts(GIObject)[ncs2Indexes])
                    }
                    if ((ncs1Counts > 0) || (ncs2Counts > 0)) {
                        numberOfNCSInteractions <- ncs1Counts + ncs2Counts
                        # BUGFIX: previously wrote the stale numberOfNCInteractions /
                        # numberOfUniqueNCInteractions values left over from the
                        # per-node-class loop above.
                        summary.df[i, paste("numberOfSelf", capitalize(feature.name), capitalize(nc), "Interactions", sep = "")] <- numberOfNCSInteractions
                        numberOfUniqueNCSInteractions <- length(c(ncs1Indexes, ncs2Indexes))
                        summary.df[i, paste("numberOfSelfUnique", capitalize(feature.name), capitalize(nc), "Interactions", sep = "")] <- numberOfUniqueNCSInteractions
                    }
                }
            }
        }
        # Distance summaries over this feature's interactions; wdis repeats
        # each distance by its interaction count for the weighted median.
        distances <- .calculateDistances.df(anchor_one.df[interactions, ], anchor_two.df[interactions, ], distance.method)
        dis <- distances[!is.na(distances)]
        wdis <- rep(distances, interactionCounts(GIObject)[interactions])
        wdis <- wdis[!is.na(wdis)]
        median.distance <- median(dis)
        median.distance <- ifelse(is.infinite(median.distance) | is.nan(median.distance), NA, median.distance)
        # BUGFIX: the mean was previously computed with median().
        mean.distance <- mean(dis)
        mean.distance <- ifelse(is.infinite(mean.distance) | is.nan(mean.distance), NA, mean.distance)
        min.distance <- min(dis)
        min.distance <- ifelse(is.infinite(min.distance) | is.nan(min.distance), NA, min.distance)
        max.distance <- max(dis)
        max.distance <- ifelse(is.infinite(max.distance) | is.nan(max.distance), NA, max.distance)
        wmedian.distance <- median(wdis)
        wmedian.distance <- ifelse(is.infinite(wmedian.distance) | is.nan(wmedian.distance), NA, wmedian.distance)
        summary.df[i, paste(capitalize(feature.name), "DistanceMedian", sep = "")] <- median.distance
        summary.df[i, paste(capitalize(feature.name), "DistanceMean", sep = "")] <- mean.distance
        summary.df[i, paste(capitalize(feature.name), "DistanceMinimum", sep = "")] <- min.distance
        summary.df[i, paste(capitalize(feature.name), "DistanceMaximum", sep = "")] <- max.distance
        summary.df[i, paste(capitalize(feature.name), "DistanceWeightedMedian", sep = "")] <- wmedian.distance
    }
    return(summary.df)
})
#' Summarise the number of interactions between two sets of features.
#'
#' This function will calculate the number of observed interactions between
#' two sets of features provided by the end-user. This allows the summarisation
#' of the number of features of a specific type a particular region is involved in
#' and how many interactions exist between them.
#'
#' @param GIObject An annotated GInteractions object
#' @param features.one A GRanges object containing the feature set of interest
#' @param feature.name.one The name of the first feature set of interest
#' @param features.two A GRanges object containing the second feature set of interest
#' @param feature.name.two The name of the second feature set of interest
#'
#' @return A data frame with one line for each observed pair of interacting
#' features, giving the two feature ids and the summed interaction counts.
#' @rdname summariseByFeaturePairs
#' @docType methods
#' @import GenomicRanges
#' @export
#' @examples
#' data('hic_example_data')
#' data('mm9_refseq_promoters')
#' data('thymus_enhancers')
#' annotateInteractions(hic_example_data, list(promoter = mm9_refseq_promoters, enhancer = thymus_enh))
#' # can be slow so subset of features used for examples
#' p <- unique(unlist(head(regions(hic_example_data)$promoter.id)))
#' e <- unique(unlist(head(regions(hic_example_data)$enhancer.id)))
#' p <- p[!is.na(p)]
#' p <- mm9_refseq_promoters[p]
#' e <- e[!is.na(e)]
#' e <- thymus_enh[e]
#' ep_summary <- summariseByFeaturePairs(hic_example_data, p, 'promoter', e, 'enhancer')
setGeneric("summariseByFeaturePairs", function(GIObject, features.one, feature.name.one, features.two, feature.name.two) {
standardGeneric("summariseByFeaturePairs")
})
#' @rdname summariseByFeaturePairs
#' @export
setMethod("summariseByFeaturePairs", "GInteractions", function(GIObject, features.one, feature.name.one, features.two, feature.name.two) {
    # The anchors must carry node.class metadata, added by annotateInteractions().
    if (!("node.class" %in% names(elementMetadata(regions(GIObject))))) {
        stop("GIObject has not been annotated")
    }
    feature.one.names.full <- .get_gr_names(features.one)
    feature.two.names.full <- .get_gr_names(features.two)
    # Restrict to interactions touching both feature sets.
    x.gi <- subsetByFeatures(GIObject, features.one)
    x.gi <- subsetByFeatures(x.gi, features.two)
    anchor_one <- anchorOne(x.gi)
    anchor_two <- anchorTwo(x.gi)
    one.one.ol <- findOverlaps(features.one, anchor_one)
    two.one.ol <- findOverlaps(features.one, anchor_two)
    # Interactions with feature set one at one anchor and feature set two at
    # the other, in either orientation.
    f1.one.f2.two <- subjectHits(one.one.ol)[which(anchor_one[subjectHits(one.one.ol)]$node.class == feature.name.one &
        anchor_two[subjectHits(one.one.ol)]$node.class == feature.name.two)]
    f1.two.f2.one <- subjectHits(two.one.ol)[which(anchor_two[subjectHits(two.one.ol)]$node.class == feature.name.one &
        anchor_one[subjectHits(two.one.ol)]$node.class == feature.name.two)]
    results <- NULL
    # this now results in a GI object only contain feature.name.one:feature.name.two interactions
    x.gi <- x.gi[unique(c(f1.one.f2.two, f1.two.f2.one)), ]
    # BUGFIX: refresh the anchors after subsetting x.gi.  Previously the
    # overlaps below were computed against the stale, pre-subset anchors, so
    # the resulting indices did not line up with interactionCounts(x.gi) and
    # the anchor metadata lookups on the subsetted object.
    anchor_one <- anchorOne(x.gi)
    anchor_two <- anchorTwo(x.gi)
    one.one.ol <- findOverlaps(features.one, anchor_one)
    two.one.ol <- findOverlaps(features.one, anchor_two)
    one.two.ol <- findOverlaps(features.two, anchor_one)
    two.two.ol <- findOverlaps(features.two, anchor_two)
    one.indexes <- queryHits(one.one.ol)[anchor_one[subjectHits(one.one.ol)]$node.class == feature.name.one]
    two.indexes <- queryHits(two.one.ol)[anchor_two[subjectHits(two.one.ol)]$node.class == feature.name.one]
    features.one.with.interactions.indexes <- unique(c(one.indexes, two.indexes))
    feature.ones.id <- feature.one.names.full[features.one.with.interactions.indexes]
    for (fn in unique(feature.ones.id)) {
        # All ranges in features.one sharing this name, and the interactions
        # they hit via either anchor.
        iss <- which(feature.one.names.full == fn)
        interactions <- unique(c(subjectHits(one.one.ol[queryHits(one.one.ol) %in% iss]),
            subjectHits(two.one.ol[queryHits(two.one.ol) %in% iss])))
        interactions.one <- unique(subjectHits(one.one.ol[queryHits(one.one.ol) %in% iss]))
        interactions.two <- unique(subjectHits(two.one.ol[queryHits(two.one.ol) %in% iss]))
        # feature.two ids annotated on the opposite anchor of each interaction.
        features.two.involved.one <- unique(unlist(elementMetadata(anchorOne(x.gi))[[paste(feature.name.two, "id", sep = ".")]][interactions.two]))
        features.two.involved.two <- unique(unlist(elementMetadata(anchorTwo(x.gi))[[paste(feature.name.two, "id", sep = ".")]][interactions.one]))
        for (fn.two in unique(c(features.two.involved.one, features.two.involved.two))) {
            iss.two <- which(feature.two.names.full == fn.two)
            # Interactions shared between this feature.one and this feature.two.
            indexes <- unique(intersect(interactions, unique(c(
                subjectHits(one.two.ol[queryHits(one.two.ol) %in% iss.two]),
                subjectHits(two.two.ol[queryHits(two.two.ol) %in% iss.two])))))
            counts <- sum(interactionCounts(x.gi)[indexes])
            results <- rbind(results, c(fn, fn.two, counts))
        }
    }
    results <- data.frame(results[, 1], results[, 2], as.numeric(results[, 3]))
    colnames(results) <- c(paste(capitalize(feature.name.one), "id", sep = "."),
        paste(capitalize(feature.name.two), "id", sep = "."), "counts")
    return(results)
})
| /R/summarise_annotations.r | no_license | ComputationalRegulatoryGenomicsICL/GenomicInteractions | R | false | false | 17,751 | r | #' Summary statistics of interactions for a given feature set
#'
#' This function will calculate summary statistics for each element in the
#' given feature set, including the number of interactions (the sum of all
#' interaction counts), number of unique interactions and number of trans-
#' (interchromosomal) interactions. It also returns some statistics for the
#' distances of interactions for all interactions of the feature, and for the
#' different interaction types e.g. promoter-distal.
#'
#' @param GIObject An annotated GInteractions object
#' @param features A GRanges object containing the feature set
#' @param feature.name The name of the feature set
#' @param distance.method Method for calculating distances between anchors, see
#' ?calculateDistances
#' @param annotate.self Logical. Indicates whether to annotate self interactions,
#' i.e. where a feature in `features` overlaps both anchors of an interaction.
#' Default: FALSE.
#'
#' @return A data frame with one line for each range in `features`
#' @rdname summariseByFeatures
#' @docType methods
#' @import GenomicRanges
#' @importFrom stats median
#' @export
#' @examples
#' data('hic_example_data')
#' data('mm9_refseq_promoters')
#' annotateInteractions(hic_example_data, list(promoter = mm9_refseq_promoters))
#' summariseByFeatures(hic_example_data, mm9_refseq_promoters[1:10], 'promoter')
setGeneric("summariseByFeatures", function(GIObject, features, feature.name, distance.method = "midpoint", annotate.self = FALSE) {
standardGeneric("summariseByFeatures")
})
#' @rdname summariseByFeatures
#' @export
setMethod("summariseByFeatures", "GInteractions", function(GIObject, features, feature.name,
    distance.method = "midpoint", annotate.self = FALSE) {
    # The anchors must carry node.class metadata, added by annotateInteractions().
    if (!("node.class" %in% names(elementMetadata(regions(GIObject))))) {
        stop("GIObject has not been annotated")
    }
    potential.node.classes <- unique(GIObject@regions$node.class)
    feature.names.full <- .get_gr_names(features)
    feature.names <- unique(feature.names.full)
    # Result table: one row per unique feature name.  Columns: feature id,
    # total/unique/trans interaction counts, two columns per node class,
    # optionally two "self" columns per non-distal node class, and five
    # distance summary statistics.
    summary.df <- data.frame(matrix(0,
        ncol = (5 + (length(potential.node.classes) * 2) +
            ifelse(annotate.self, (length(potential.node.classes) - 1) * 2, 0) + 5),
        nrow = length(feature.names)))
    summary.names <- c(
        paste(capitalize(feature.name), "id", sep = "."),
        paste("numberOf", capitalize(feature.name), "Interactions", sep = ""),
        paste("numberOf", capitalize(feature.name), "UniqueInteractions", sep = ""),
        paste("numberOf", capitalize(feature.name), "InterChromosomalInteractions", sep = ""),
        paste("numberOf", capitalize(feature.name), "UniqueInterChromosomalInteractions", sep = ""),
        paste("numberOf", capitalize(feature.name), capitalize(potential.node.classes), "Interactions", sep = ""),
        paste("numberOfUnique", capitalize(feature.name), capitalize(potential.node.classes), "Interactions", sep = ""),
        paste(capitalize(feature.name), "DistanceMedian", sep = ""),
        paste(capitalize(feature.name), "DistanceMean", sep = ""),
        paste(capitalize(feature.name), "DistanceMinimum", sep = ""),
        paste(capitalize(feature.name), "DistanceMaximum", sep = ""),
        paste(capitalize(feature.name), "DistanceWeightedMedian", sep = ""))
    if (annotate.self) {
        # "distal" is excluded from the self-interaction columns.
        pc <- potential.node.classes[-which(potential.node.classes == "distal")]
        summary.names <- append(summary.names, c(
            paste("numberOfSelf", capitalize(feature.name), capitalize(pc), "Interactions", sep = ""),
            paste("numberOfSelfUnique", capitalize(feature.name), capitalize(pc), "Interactions", sep = "")))
    }
    names(summary.df) <- summary.names
    summary.df[, paste(capitalize(feature.name), "id", sep = ".")] <- feature.names
    anchor_one <- anchorOne(GIObject)
    anchor_two <- anchorTwo(GIObject)
    # Overlap each feature with both anchor sets; keep only hits where the
    # anchor has been annotated with this feature set's node class.
    one.ol <- findOverlaps(features, anchor_one)
    two.ol <- findOverlaps(features, anchor_two)
    one.indexes <- queryHits(one.ol)[anchor_one[subjectHits(one.ol)]$node.class == feature.name]
    two.indexes <- queryHits(two.ol)[anchor_two[subjectHits(two.ol)]$node.class == feature.name]
    features.with.interactions.indexes <- unique(c(one.indexes, two.indexes))
    features.with.interactions.indexes <- features.with.interactions.indexes[order(features.with.interactions.indexes)]
    feature.names.with.interactions <- feature.names.full[features.with.interactions.indexes]
    # Plain data.frame copies of the anchors for .calculateDistances.df().
    anchor_one.df <- data.frame(seqnames = as.character(seqnames(anchor_one)), start = start(anchor_one),
        end = end(anchor_one), width = width(anchor_one),
        strand = as.character(strand(anchor_one)), stringsAsFactors = FALSE)
    anchor_two.df <- data.frame(seqnames = as.character(seqnames(anchor_two)), start = start(anchor_two),
        end = end(anchor_two), width = width(anchor_two),
        strand = as.character(strand(anchor_two)), stringsAsFactors = FALSE)
    for (fn in unique(feature.names.with.interactions)) {
        # i: row of summary.df for this feature name; iss: all ranges in
        # `features` sharing that name (names may be duplicated).
        i <- which(summary.df[, paste(capitalize(feature.name), "id", sep = ".")] == fn)
        iss <- which(feature.names.full == fn)
        interactions <- unique(c(subjectHits(one.ol[queryHits(one.ol) %in% iss]),
            subjectHits(two.ol[queryHits(two.ol) %in% iss])))
        interactions.one <- unique(subjectHits(one.ol[queryHits(one.ol) %in% iss]))
        interactions.two <- unique(subjectHits(two.ol[queryHits(two.ol) %in% iss]))
        numberOfInteractions <- sum(interactionCounts(GIObject)[interactions])
        numberOfUniqueInteractions <- length(interactions)
        summary.df[i, paste("numberOf", capitalize(feature.name), "Interactions", sep = "")] <- numberOfInteractions
        summary.df[i, paste("numberOf", capitalize(feature.name), "UniqueInteractions", sep = "")] <- numberOfUniqueInteractions
        # Trans interactions: the two anchors lie on different chromosomes.
        intercis <- interactions[as.character(seqnames(anchor_one[interactions])) != as.character(seqnames(anchor_two[interactions]))]
        if (length(intercis) > 0) {
            numberOfInterChromosomalInteractions <- sum(interactionCounts(GIObject)[intercis])
            summary.df[i, paste("numberOf", capitalize(feature.name), "InterChromosomalInteractions", sep = "")] <- numberOfInterChromosomalInteractions
            numberOfUniqueInterChromosomalInteractions <- length(intercis)
            summary.df[i, paste("numberOf", capitalize(feature.name), "UniqueInterChromosomalInteractions", sep = "")] <- numberOfUniqueInterChromosomalInteractions
        }
        # Per-node-class counts, e.g. promoter-distal, promoter-promoter.
        for (nc in potential.node.classes) {
            nc1Counts <- 0
            nc1Indexes <- c()
            nc2Counts <- 0
            nc2Indexes <- c()
            if (length(interactions.one) > 0) {
                nc1Indexes <- interactions.one[which(anchor_one$node.class[interactions.one] == feature.name &
                    anchor_two$node.class[interactions.one] == nc)]
                nc1Counts <- sum(interactionCounts(GIObject)[nc1Indexes])
            }
            if (length(interactions.two) > 0) {
                nc2Indexes <- interactions.two[which(anchor_one$node.class[interactions.two] == nc &
                    anchor_two$node.class[interactions.two] == feature.name)]
                if (nc == feature.name) {
                    # Interactions seen from both anchors are feature-feature
                    # self interactions; record them separately, then drop them
                    # from nc2Indexes to stop double counting of interactions
                    # where both anchors are in the same feature.
                    if (length(nc2Indexes[(nc2Indexes %in% nc1Indexes)]) > 0) {
                        summary.df[i, paste("numberOfSelf", capitalize(feature.name), capitalize(nc), "Interactions", sep = "")] <-
                            sum(interactionCounts(GIObject)[nc2Indexes[(nc2Indexes %in% nc1Indexes)]])
                        summary.df[i, paste("numberOfSelfUnique", capitalize(feature.name), capitalize(nc), "Interactions", sep = "")] <-
                            length(nc2Indexes[(nc2Indexes %in% nc1Indexes)])
                    }
                    nc2Indexes <- nc2Indexes[!(nc2Indexes %in% nc1Indexes)]
                }
                nc2Counts <- sum(interactionCounts(GIObject)[nc2Indexes])
            }
            if ((nc1Counts > 0) || (nc2Counts > 0)) {
                numberOfNCInteractions <- nc1Counts + nc2Counts
                summary.df[i, paste("numberOf", capitalize(feature.name), capitalize(nc), "Interactions", sep = "")] <- numberOfNCInteractions
                numberOfUniqueNCInteractions <- length(c(nc1Indexes, nc2Indexes))
                summary.df[i, paste("numberOfUnique", capitalize(feature.name), capitalize(nc), "Interactions", sep = "")] <- numberOfUniqueNCInteractions
            }
        }
        # annotate.self: interactions where the same feature id occurs at both
        # ends under a different node class.
        if (annotate.self) {
            for (nc in potential.node.classes) {
                if (nc != feature.name && nc != "distal") {
                    ncs1Counts <- 0
                    ncs1Indexes <- c()
                    ncs2Counts <- 0
                    ncs2Indexes <- c()
                    if (length(interactions.one) > 0) {
                        ncs1Indexes <- interactions.one[which(anchor_one$node.class[interactions.one] == feature.name &
                            anchor_two$node.class[interactions.one] == nc)]
                        # NOTE(review): this replaces the index vector with a
                        # logical value/mask before subsetting the counts --
                        # confirm this matches the intended semantics.
                        ncs1Indexes <- names(features)[i] %in% elementMetadata(anchor_two)[[paste(nc, "id", sep = ".")]][ncs1Indexes]
                        ncs1Counts <- sum(interactionCounts(GIObject)[ncs1Indexes])
                    }
                    if (length(interactions.two) > 0) {
                        # BUGFIX: the first assignment previously went to a
                        # misspelled variable (nc2sIndexes), so ncs2Indexes was
                        # always empty and ncs2Counts always 0.
                        ncs2Indexes <- interactions.two[which(anchor_one$node.class[interactions.two] == nc &
                            anchor_two$node.class[interactions.two] == feature.name)]
                        ncs2Indexes <- names(features)[i] %in% elementMetadata(anchor_one)[[paste(nc, "id", sep = ".")]][ncs2Indexes]
                        ncs2Counts <- sum(interactionCounts(GIObject)[ncs2Indexes])
                    }
                    if ((ncs1Counts > 0) || (ncs2Counts > 0)) {
                        numberOfNCSInteractions <- ncs1Counts + ncs2Counts
                        # BUGFIX: previously wrote the stale numberOfNCInteractions /
                        # numberOfUniqueNCInteractions values left over from the
                        # per-node-class loop above.
                        summary.df[i, paste("numberOfSelf", capitalize(feature.name), capitalize(nc), "Interactions", sep = "")] <- numberOfNCSInteractions
                        numberOfUniqueNCSInteractions <- length(c(ncs1Indexes, ncs2Indexes))
                        summary.df[i, paste("numberOfSelfUnique", capitalize(feature.name), capitalize(nc), "Interactions", sep = "")] <- numberOfUniqueNCSInteractions
                    }
                }
            }
        }
        # Distance summaries over this feature's interactions; wdis repeats
        # each distance by its interaction count for the weighted median.
        distances <- .calculateDistances.df(anchor_one.df[interactions, ], anchor_two.df[interactions, ], distance.method)
        dis <- distances[!is.na(distances)]
        wdis <- rep(distances, interactionCounts(GIObject)[interactions])
        wdis <- wdis[!is.na(wdis)]
        median.distance <- median(dis)
        median.distance <- ifelse(is.infinite(median.distance) | is.nan(median.distance), NA, median.distance)
        # BUGFIX: the mean was previously computed with median().
        mean.distance <- mean(dis)
        mean.distance <- ifelse(is.infinite(mean.distance) | is.nan(mean.distance), NA, mean.distance)
        min.distance <- min(dis)
        min.distance <- ifelse(is.infinite(min.distance) | is.nan(min.distance), NA, min.distance)
        max.distance <- max(dis)
        max.distance <- ifelse(is.infinite(max.distance) | is.nan(max.distance), NA, max.distance)
        wmedian.distance <- median(wdis)
        wmedian.distance <- ifelse(is.infinite(wmedian.distance) | is.nan(wmedian.distance), NA, wmedian.distance)
        summary.df[i, paste(capitalize(feature.name), "DistanceMedian", sep = "")] <- median.distance
        summary.df[i, paste(capitalize(feature.name), "DistanceMean", sep = "")] <- mean.distance
        summary.df[i, paste(capitalize(feature.name), "DistanceMinimum", sep = "")] <- min.distance
        summary.df[i, paste(capitalize(feature.name), "DistanceMaximum", sep = "")] <- max.distance
        summary.df[i, paste(capitalize(feature.name), "DistanceWeightedMedian", sep = "")] <- wmedian.distance
    }
    return(summary.df)
})
#' Summarise the number of interactions between two sets of features.
#'
#' This function will calculate the number of observed interactions between
#' two sets of features provided by the end-user. This allows the summarisation
#' of the number of features of a specific type a particular region is involved in
#' and how many interactions exist between them.
#'
#' @param GIObject An annotated GInteractions object
#' @param features.one A GRanges object containing the feature set of interest
#' @param feature.name.one The name of the first feature set of interest
#' @param features.two A GRanges object containing the second feature set of interest
#' @param feature.name.two The name of the second feature set of interest
#'
#' @return A data frame with one line for each observed pair of interacting
#' features, giving the two feature ids and the summed interaction counts.
#' @rdname summariseByFeaturePairs
#' @docType methods
#' @import GenomicRanges
#' @export
#' @examples
#' data('hic_example_data')
#' data('mm9_refseq_promoters')
#' data('thymus_enhancers')
#' annotateInteractions(hic_example_data, list(promoter = mm9_refseq_promoters, enhancer = thymus_enh))
#' # can be slow so subset of features used for examples
#' p <- unique(unlist(head(regions(hic_example_data)$promoter.id)))
#' e <- unique(unlist(head(regions(hic_example_data)$enhancer.id)))
#' p <- p[!is.na(p)]
#' p <- mm9_refseq_promoters[p]
#' e <- e[!is.na(e)]
#' e <- thymus_enh[e]
#' ep_summary <- summariseByFeaturePairs(hic_example_data, p, 'promoter', e, 'enhancer')
setGeneric("summariseByFeaturePairs", function(GIObject, features.one, feature.name.one, features.two, feature.name.two) {
standardGeneric("summariseByFeaturePairs")
})
#' @rdname summariseByFeaturePairs
#' @export
setMethod("summariseByFeaturePairs", "GInteractions", function(GIObject, features.one, feature.name.one, features.two, feature.name.two) {
    # The anchors must carry node.class metadata, added by annotateInteractions().
    if (!("node.class" %in% names(elementMetadata(regions(GIObject))))) {
        stop("GIObject has not been annotated")
    }
    feature.one.names.full <- .get_gr_names(features.one)
    feature.two.names.full <- .get_gr_names(features.two)
    # Restrict to interactions touching both feature sets.
    x.gi <- subsetByFeatures(GIObject, features.one)
    x.gi <- subsetByFeatures(x.gi, features.two)
    anchor_one <- anchorOne(x.gi)
    anchor_two <- anchorTwo(x.gi)
    one.one.ol <- findOverlaps(features.one, anchor_one)
    two.one.ol <- findOverlaps(features.one, anchor_two)
    # Interactions with feature set one at one anchor and feature set two at
    # the other, in either orientation.
    f1.one.f2.two <- subjectHits(one.one.ol)[which(anchor_one[subjectHits(one.one.ol)]$node.class == feature.name.one &
        anchor_two[subjectHits(one.one.ol)]$node.class == feature.name.two)]
    f1.two.f2.one <- subjectHits(two.one.ol)[which(anchor_two[subjectHits(two.one.ol)]$node.class == feature.name.one &
        anchor_one[subjectHits(two.one.ol)]$node.class == feature.name.two)]
    results <- NULL
    # this now results in a GI object only contain feature.name.one:feature.name.two interactions
    x.gi <- x.gi[unique(c(f1.one.f2.two, f1.two.f2.one)), ]
    # BUGFIX: refresh the anchors after subsetting x.gi.  Previously the
    # overlaps below were computed against the stale, pre-subset anchors, so
    # the resulting indices did not line up with interactionCounts(x.gi) and
    # the anchor metadata lookups on the subsetted object.
    anchor_one <- anchorOne(x.gi)
    anchor_two <- anchorTwo(x.gi)
    one.one.ol <- findOverlaps(features.one, anchor_one)
    two.one.ol <- findOverlaps(features.one, anchor_two)
    one.two.ol <- findOverlaps(features.two, anchor_one)
    two.two.ol <- findOverlaps(features.two, anchor_two)
    one.indexes <- queryHits(one.one.ol)[anchor_one[subjectHits(one.one.ol)]$node.class == feature.name.one]
    two.indexes <- queryHits(two.one.ol)[anchor_two[subjectHits(two.one.ol)]$node.class == feature.name.one]
    features.one.with.interactions.indexes <- unique(c(one.indexes, two.indexes))
    feature.ones.id <- feature.one.names.full[features.one.with.interactions.indexes]
    for (fn in unique(feature.ones.id)) {
        # All ranges in features.one sharing this name, and the interactions
        # they hit via either anchor.
        iss <- which(feature.one.names.full == fn)
        interactions <- unique(c(subjectHits(one.one.ol[queryHits(one.one.ol) %in% iss]),
            subjectHits(two.one.ol[queryHits(two.one.ol) %in% iss])))
        interactions.one <- unique(subjectHits(one.one.ol[queryHits(one.one.ol) %in% iss]))
        interactions.two <- unique(subjectHits(two.one.ol[queryHits(two.one.ol) %in% iss]))
        # feature.two ids annotated on the opposite anchor of each interaction.
        features.two.involved.one <- unique(unlist(elementMetadata(anchorOne(x.gi))[[paste(feature.name.two, "id", sep = ".")]][interactions.two]))
        features.two.involved.two <- unique(unlist(elementMetadata(anchorTwo(x.gi))[[paste(feature.name.two, "id", sep = ".")]][interactions.one]))
        for (fn.two in unique(c(features.two.involved.one, features.two.involved.two))) {
            iss.two <- which(feature.two.names.full == fn.two)
            # Interactions shared between this feature.one and this feature.two.
            indexes <- unique(intersect(interactions, unique(c(
                subjectHits(one.two.ol[queryHits(one.two.ol) %in% iss.two]),
                subjectHits(two.two.ol[queryHits(two.two.ol) %in% iss.two])))))
            counts <- sum(interactionCounts(x.gi)[indexes])
            results <- rbind(results, c(fn, fn.two, counts))
        }
    }
    results <- data.frame(results[, 1], results[, 2], as.numeric(results[, 3]))
    colnames(results) <- c(paste(capitalize(feature.name.one), "id", sep = "."),
        paste(capitalize(feature.name.two), "id", sep = "."), "counts")
    return(results)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/screenIsotopeData.R
\name{screenIsotopeData}
\alias{screenIsotopeData}
\title{Function to plot and screen stable isotope data with one or more baselines.}
\usage{
screenIsotopeData(isotopeData = NULL, density = "both",
consumer = "Consumer", b1 = "Pelagic baseline",
b2 = "Benthic baseline", legend = c(1.15, 1.15), title = NULL, ...)
}
\arguments{
\item{isotopeData}{an isotopeData class object.}
\item{density}{string representing whether the density function is plotted.
Accepted characters are "both" in which case will plot the density function
above and to the right, "right", "above" or "none".}
\item{consumer}{string representing the consumer.}
\item{b1}{string representing baseline 1.}
\item{b2}{string representing baseline 2.}
\item{legend}{coordinates representing where to locate the legend.}
\item{title}{string representing title.}
\item{...}{additional arguments passed to this function.}
}
\value{
none
}
\description{
This function receives a named list of vectors (isotopeData class object),
and plots a biplot with 2 sources and a consumer. The user can state whether
they want a density function plotted above, to the right, at both sides, or
not plotted at all.
}
\examples{
a <- generateTPData()
screenIsotopeData(a)
}
| /man/screenIsotopeData.Rd | no_license | jimjunker1/tRophicPosition | R | false | true | 1,348 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/screenIsotopeData.R
\name{screenIsotopeData}
\alias{screenIsotopeData}
\title{Function to plot and screen stable isotope data with one or more baselines.}
\usage{
screenIsotopeData(isotopeData = NULL, density = "both",
consumer = "Consumer", b1 = "Pelagic baseline",
b2 = "Benthic baseline", legend = c(1.15, 1.15), title = NULL, ...)
}
\arguments{
\item{isotopeData}{an isotopeData class object.}
\item{density}{string representing whether the density function is plotted.
Accepted characters are "both" in which case will plot the density function
above and to the right, "right", "above" or "none".}
\item{consumer}{string representing the consumer.}
\item{b1}{string representing baseline 1.}
\item{b2}{string representing baseline 2.}
\item{legend}{coordinates representing where to locate the legend.}
\item{title}{string representing title.}
\item{...}{additional arguments passed to this function.}
}
\value{
none
}
\description{
This function receives a named list of vectors (isotopeData class object),
and plots a biplot with 2 sources and a consumer. The user can state whether
they want a density function plotted above, to the right, at both sides, or
not plotted at all.
}
\examples{
a <- generateTPData()
screenIsotopeData(a)
}
|
#' @title Standard normal variate transformation
#'
#' @description
#' \loadmathjax
#' This function normalizes each row of an input matrix by
#' subtracting each row by its mean and dividing it by its standard deviation
#' @usage
#' standardNormalVariate(X)
#' @param X a numeric matrix of spectral data (optionally a data frame that can
#' be coerced to a numerical matrix).
#' @author Antoine Stevens
#' @examples
#' data(NIRsoil)
#' NIRsoil$spc_snv <- standardNormalVariate(X = NIRsoil$spc)
#' # 10 first snv spectra
#' matplot(
#' x = as.numeric(colnames(NIRsoil$spc_snv)),
#' y = t(NIRsoil$spc_snv[1:10, ]),
#' type = "l",
#' xlab = "wavelength, nm",
#' ylab = "snv"
#' )
#' \dontrun{
#' apply(NIRsoil$spc_snv, 1, sd) # check
#' }
#'
#' @return a matrix of normalized spectral data.
#' @details
#' SNV is a simple way of normalizing spectral data that intends to correct
#' for light scatter.
#' It operates row-wise:
#'
#' \mjdeqn{SNV_i = \frac{x_i - \bar{x}_i}{s_i}}{SNV_i = \frac{x_i - \bar{x}_i}{s_i}}
#'
#' where \mjeqn{x_i}{x_i} is the signal of the \mjeqn{i}{i}th observation,
#' \mjeqn{\bar{x}_i}{\bar{x}_i} is its mean and \mjeqn{s_i}{s_i} its standard
#' deviation.
#' @seealso \code{\link{msc}}, \code{\link{detrend}}, \code{\link{blockScale}},
#' \code{\link{blockNorm}}
#' @references Barnes RJ, Dhanoa MS, Lister SJ. 1989. Standard normal variate
#' transformation and de-trending of near-infrared diffuse reflectance spectra.
#' Applied spectroscopy, 43(5): 772-777.
#' @export
#'
standardNormalVariate <- function(X) {
  # Validate the input: only matrices and data frames are accepted.
  # inherits() is the idiomatic replacement for comparing class(X) against a
  # vector of class names (it is TRUE if X inherits from ANY of them, which
  # also covers c("matrix", "array") and tibble subclasses of "data.frame").
  if (!inherits(X, c("matrix", "data.frame"))) {
    stop("X must be a matrix or optionally a data.frame")
  }
  # Row-wise centering: subtract each row's mean (NAs ignored).
  X <- sweep(X, 1, rowMeans(X, na.rm = TRUE), "-")
  # Row-wise scaling: divide by each row's standard deviation. Computing the
  # sd on the already-centered rows is equivalent to using the original rows,
  # since sd is invariant under a constant shift.
  X <- sweep(X, 1, apply(X, 1, sd, na.rm = TRUE), "/")
  # Return a plain numeric matrix even when the input was a data.frame.
  as.matrix(X)
}
| /R/standardNormalVariate.R | no_license | cran/prospectr | R | false | false | 1,842 | r | #' @title Standard normal variate transformation
#'
#' @description
#' \loadmathjax
#' This function normalizes each row of an input matrix by
#' subtracting each row by its mean and dividing it by its standard deviation
#' @usage
#' standardNormalVariate(X)
#' @param X a numeric matrix of spectral data (optionally a data frame that can
#' be coerced to a numerical matrix).
#' @author Antoine Stevens
#' @examples
#' data(NIRsoil)
#' NIRsoil$spc_snv <- standardNormalVariate(X = NIRsoil$spc)
#' # 10 first snv spectra
#' matplot(
#' x = as.numeric(colnames(NIRsoil$spc_snv)),
#' y = t(NIRsoil$spc_snv[1:10, ]),
#' type = "l",
#' xlab = "wavelength, nm",
#' ylab = "snv"
#' )
#' \dontrun{
#' apply(NIRsoil$spc_snv, 1, sd) # check
#' }
#'
#' @return a matrix of normalized spectral data.
#' @details
#' SNV is a simple way of normalizing spectral data that intends to correct
#' for light scatter.
#' It operates row-wise:
#'
#' \mjdeqn{SNV_i = \frac{x_i - \bar{x}_i}{s_i}}{SNV_i = \frac{x_i - \bar{x}_i}{s_i}}
#'
#' where \mjeqn{x_i}{x_i} is the signal of the \mjeqn{i}{i}th observation,
#' \mjeqn{\bar{x}_i}{\bar{x}_i} is its mean and \mjeqn{s_i}{s_i} its standard
#' deviation.
#' @seealso \code{\link{msc}}, \code{\link{detrend}}, \code{\link{blockScale}},
#' \code{\link{blockNorm}}
#' @references Barnes RJ, Dhanoa MS, Lister SJ. 1989. Standard normal variate
#' transformation and de-trending of near-infrared diffuse reflectance spectra.
#' Applied spectroscopy, 43(5): 772-777.
#' @export
#'
standardNormalVariate <- function(X) {
  # Only tabular inputs (matrix or data.frame) are supported.
  is_tabular <- any(class(X) %in% c("matrix", "data.frame"))
  if (!is_tabular) {
    stop("X must be a matrix or optionally a data.frame")
  }
  # Center every row on its own mean, ignoring missing values.
  row_mean <- rowMeans(X, na.rm = TRUE)
  centered <- sweep(X, 1, row_mean, "-")
  # Scale every row by its standard deviation (shift-invariant, so the sd of
  # the centered row equals the sd of the original row).
  row_sd <- apply(centered, 1, sd, na.rm = TRUE)
  # Hand back a plain numeric matrix regardless of the input container.
  as.matrix(sweep(centered, 1, row_sd, "/"))
}
|
##############################################################Bivariate analysis categorical ~ categorical##################################################################################
# There are two analysis functions (plus a dispatcher) for bivariate analysis
# with a categorical response ~ categorical independents/predictors:
# function 1 (bi_cat_cat1): chi-square / Fisher exact tests plus binomial logistic regression
# function 2 (bi_cat_cat2): chi-square / Fisher exact tests plus multinomial logistic regression
# Bivariate analysis for a BINARY categorical response vs. a categorical
# predictor. Builds eight result objects -- contingency table, row
# proportions, chi-square test, Fisher exact test, odds ratios with 95%
# confidence bounds from a binomial logistic regression, and the model
# summary -- and returns them serialized as JSON.
#
# choices:  display labels; choices[[2]] names the independent variable
# varname1: dependent (response) vector, expected to have exactly 2 levels
# varname2: independent (predictor) vector
# testdata: the uploaded data set (not used directly; kept for interface parity)
#
# NOTE(review): toJSON presumably comes from jsonlite/rjson loaded elsewhere
# in the app -- confirm against the caller.
bi_cat_cat1=function(choices, varname1, varname2, testdata)
{
valuesList <- vector("list", length=8)
fit=table(varname1,varname2)
fitmatrix <- as.data.frame.matrix(fit)
valuesList[[1]] <- list("Two-way table counts" = fitmatrix, "rows" = rownames(fitmatrix))
# Dimensions of the contingency table.
i=dim(fit)[1] #number of rows
j=dim(fit)[2] #number of columns
rowper=matrix(nrow=i,ncol=j)
# NOTE(review): i and j are reused here as loop indices; this works only
# because 1:i and 1:j are evaluated before the first reassignment.
for (i in 1:i)
{
for ( j in 1:j)
{rowper[i,j]=fit[i,j]/rowSums(fit)[i]
}
}
rownames(rowper)<- names(table(varname1))
colnames(rowper)<- names(table(varname2))
rowpermatrix <- as.data.frame(round(rowper,4))
valuesList[[2]] <- list("row proportion" = rowpermatrix, "rows" = rownames(rowpermatrix))
#chi square test (printed output captured so warnings land in one table cell)
fit2=chisq.test(varname1,varname2)
fit2val<-capture.output(fit2) #print this with label "chi square test"-with warning message as one table cell
valuesList[[3]] <- replace(fit2val, fit2val=="", "\n")
#fisher exact test
fit3=fisher.test(varname1,varname2)
fit3val<-capture.output(fit3) #print this with label "fisher exact test"- with warning message as one table cell
valuesList[[4]] <- replace(fit3val, fit3val=="", "\n")
#logistic regression for a dependent variable with 2 levels
nclass=length(table(varname2))
fit4=glm(varname1~factor(varname2),family=binomial)
labelvar2=names(table(varname2))
footnote <- c("Of Note: the presented odds ratios and their confidence intervals are ratios in odds between each category versus. the reference category.", "*Reference category is the lowest level of the independent variable.")
oddsRatioLabel <- paste("odds ratio of each category vs. the lowest category of ", choices[[2]], ": ",labelvar2[1])
# Odds ratio for every predictor level (rows 2..nclass of the coefficient
# table, i.e. excluding the intercept) vs. the lowest/reference level.
valuesList[[5]] <- setNames(list(as.list(exp(summary(fit4)$coefficients[2:nclass,1])), footnote),c(oddsRatioLabel,"footnote")) #odds ratio for every subclass i (i > 1) vs. subclass=1, no intercept
# 95% CI bounds via the normal approximation (estimate +/- 1.96 * SE on the
# log-odds scale, then exponentiated).
ci_low= as.list(exp(summary(fit4)$coefficients[2:nclass,1]-1.96*summary(fit4)$coefficients[2:nclass,2]))
valuesList[[6]] <- list("Lower boundary of 95% confidence interval of odds ratio" = ci_low, "footnote"=footnote)
ci_upper=as.list(exp(summary(fit4)$coefficients[2:nclass,1]+1.96*summary(fit4)$coefficients[2:nclass,2]))
valuesList[[7]] <- list("Upper boundary of 95% confidence interval of odds ratio" = ci_upper, "footnote"=footnote)
fit4val <- capture.output(summary(fit4))
valuesList[[8]] <- replace(fit4val, fit4val=="", "\n") #model summary as captured text
return(toJSON(valuesList))
}
# Bivariate analysis for a categorical response with MORE THAN TWO levels
# vs. a categorical predictor. Mirrors bi_cat_cat1 but fits a multinomial
# logistic regression instead of a binomial one. Returns eight result
# objects serialized as JSON.
#
# choices:  display labels; choices[[2]] names the independent variable
# varname1: dependent (response) vector with 3+ levels
# varname2: independent (predictor) vector
# testdata: the uploaded data set (not used directly; kept for interface parity)
#
# NOTE(review): multinom presumably comes from nnet and toJSON from
# jsonlite/rjson, loaded elsewhere in the app -- confirm against the caller.
bi_cat_cat2=function(choices, varname1, varname2, testdata)
{
valuesList <- vector("list", length=8)
fit=table(varname1,varname2) #print this with label "Two-way table counts with varname1 with varname2"
fitmatrix <- as.data.frame.matrix(fit)
valuesList[[1]] <- list("Two-way table counts" = fitmatrix, "rows"=rownames(fitmatrix))
# Dimensions of the contingency table.
i=dim(fit)[1] #number of rows
j=dim(fit)[2] #number of columns
rowper=matrix(nrow=i,ncol=j)
# NOTE(review): i and j are reused here as loop indices; this works only
# because 1:i and 1:j are evaluated before the first reassignment.
for (i in 1:i)
{
for ( j in 1:j)
{rowper[i,j]=fit[i,j]/rowSums(fit)[i]}
}
rownames(rowper)<- names(table(varname1))
colnames(rowper)<- names(table(varname2))
rowpermatrix <- as.data.frame.matrix(round(rowper,4))
valuesList[[2]] <- list("row proportion" = rowpermatrix, "rows"=rownames(rowpermatrix))
#chi square test
fit2=chisq.test(varname1,varname2)
fit2val<-capture.output(fit2)
valuesList[[3]] <- replace(fit2val, fit2val=="", "\n") #print this with label "chi square test"-with warning message as one table cell
#fisher exact test
fit3=fisher.test(varname1,varname2)
fit3val<-capture.output(fit3)
valuesList[[4]] <- replace(fit3val, fit3val=="", "\n") #print this with label "fisher exact test"- with warning message as one table cell
footnote <- c("Of Note: the presented odds ratios and their confidence interval are ratios in odds between each category versus. the reference category.", "*Reference category is the lowest level of the independent variable.")
#multinomial logistic regression for a dependent variable with more than 2 levels
nclass=length(table(varname2))
fit4=multinom(varname1~factor(varname2))
labelvar2=names(table(varname2))
oddsLabel <- paste("odds ratio of each category vs the lowest category of ", choices[[2]], " ",labelvar2[1])
# Columns 2..nclass of the coefficient matrix skip the intercept column,
# giving one odds ratio per non-reference predictor level.
oddsratio=as.list(exp(summary(fit4)$coefficients[,2:nclass])) #odds ratios, one cell per value
valuesList[[5]] <- setNames(list(oddsratio, footnote),c(oddsLabel,"footnote"))
# 95% CI bounds via the normal approximation (estimate +/- 1.96 * SE on the
# log-odds scale, then exponentiated).
ci_low=as.list(exp(summary(fit4)$coefficients[,2:nclass]-1.96*summary(fit4)$standard.error[,2:nclass]))
valuesList[[6]] <- list("Lower boundary of 95% confidence interval of odds ratio" = ci_low, "footnote"=footnote)
ci_upper=as.list(exp(summary(fit4)$coefficients[,2:nclass]+1.96*summary(fit4)$standard.error[,2:nclass]))
valuesList[[7]] <- list("Upper boundary of 95% confidence interval of odds ratio" = ci_upper, "footnote"=footnote)
#model summary as captured text
fit4val <- capture.output(summary(fit4))
valuesList[[8]] <- replace(fit4val, fit4val=="", "\n")
return(toJSON(valuesList))
}
# Entry point for the categorical-response ~ categorical-predictor analysis.
# Dispatches on the number of distinct levels observed in the dependent
# (response) variable:
#   exactly 2 levels -> bi_cat_cat1 (binomial logistic regression)
#   3 or more levels -> bi_cat_cat2 (multinomial logistic regression)
# Returns the JSON string produced by the selected routine.
bivariate_cat_cat <- function(choices, dependent, independent, uploaddata){
# Count distinct (non-NA) response levels via the contingency table.
noclass<-length(table(dependent))
if(noclass == 2){
return(bi_cat_cat1(choices, dependent,independent,uploaddata))
}
else{
return(bi_cat_cat2(choices, dependent,independent,uploaddata))
}
} | /bivariate_cat_cat.R | no_license | buddalasunil999/onlinedatalabscripts | R | false | false | 6,126 | r | ##############################################################Bivariate analysis categorical ~ categorical##################################################################################
# There are two analysis functions (plus a dispatcher) for bivariate analysis
# with a categorical response ~ categorical independents/predictors:
# function 1 (bi_cat_cat1): chi-square / Fisher exact tests plus binomial logistic regression
# function 2 (bi_cat_cat2): chi-square / Fisher exact tests plus multinomial logistic regression
# Bivariate analysis for a BINARY categorical response vs. a categorical
# predictor. Builds eight result objects -- contingency table, row
# proportions, chi-square test, Fisher exact test, odds ratios with 95%
# confidence bounds from a binomial logistic regression, and the model
# summary -- and returns them serialized as JSON.
#
# choices:  display labels; choices[[2]] names the independent variable
# varname1: dependent (response) vector, expected to have exactly 2 levels
# varname2: independent (predictor) vector
# testdata: the uploaded data set (not used directly; kept for interface parity)
#
# NOTE(review): toJSON presumably comes from jsonlite/rjson loaded elsewhere
# in the app -- confirm against the caller.
bi_cat_cat1=function(choices, varname1, varname2, testdata)
{
valuesList <- vector("list", length=8)
fit=table(varname1,varname2)
fitmatrix <- as.data.frame.matrix(fit)
valuesList[[1]] <- list("Two-way table counts" = fitmatrix, "rows" = rownames(fitmatrix))
# Dimensions of the contingency table.
i=dim(fit)[1] #number of rows
j=dim(fit)[2] #number of columns
rowper=matrix(nrow=i,ncol=j)
# NOTE(review): i and j are reused here as loop indices; this works only
# because 1:i and 1:j are evaluated before the first reassignment.
for (i in 1:i)
{
for ( j in 1:j)
{rowper[i,j]=fit[i,j]/rowSums(fit)[i]
}
}
rownames(rowper)<- names(table(varname1))
colnames(rowper)<- names(table(varname2))
rowpermatrix <- as.data.frame(round(rowper,4))
valuesList[[2]] <- list("row proportion" = rowpermatrix, "rows" = rownames(rowpermatrix))
#chi square test (printed output captured so warnings land in one table cell)
fit2=chisq.test(varname1,varname2)
fit2val<-capture.output(fit2) #print this with label "chi square test"-with warning message as one table cell
valuesList[[3]] <- replace(fit2val, fit2val=="", "\n")
#fisher exact test
fit3=fisher.test(varname1,varname2)
fit3val<-capture.output(fit3) #print this with label "fisher exact test"- with warning message as one table cell
valuesList[[4]] <- replace(fit3val, fit3val=="", "\n")
#logistic regression for a dependent variable with 2 levels
nclass=length(table(varname2))
fit4=glm(varname1~factor(varname2),family=binomial)
labelvar2=names(table(varname2))
footnote <- c("Of Note: the presented odds ratios and their confidence intervals are ratios in odds between each category versus. the reference category.", "*Reference category is the lowest level of the independent variable.")
oddsRatioLabel <- paste("odds ratio of each category vs. the lowest category of ", choices[[2]], ": ",labelvar2[1])
# Odds ratio for every predictor level (rows 2..nclass of the coefficient
# table, i.e. excluding the intercept) vs. the lowest/reference level.
valuesList[[5]] <- setNames(list(as.list(exp(summary(fit4)$coefficients[2:nclass,1])), footnote),c(oddsRatioLabel,"footnote")) #odds ratio for every subclass i (i > 1) vs. subclass=1, no intercept
# 95% CI bounds via the normal approximation (estimate +/- 1.96 * SE on the
# log-odds scale, then exponentiated).
ci_low= as.list(exp(summary(fit4)$coefficients[2:nclass,1]-1.96*summary(fit4)$coefficients[2:nclass,2]))
valuesList[[6]] <- list("Lower boundary of 95% confidence interval of odds ratio" = ci_low, "footnote"=footnote)
ci_upper=as.list(exp(summary(fit4)$coefficients[2:nclass,1]+1.96*summary(fit4)$coefficients[2:nclass,2]))
valuesList[[7]] <- list("Upper boundary of 95% confidence interval of odds ratio" = ci_upper, "footnote"=footnote)
fit4val <- capture.output(summary(fit4))
valuesList[[8]] <- replace(fit4val, fit4val=="", "\n") #model summary as captured text
return(toJSON(valuesList))
}
# Bivariate analysis for a categorical response with MORE THAN TWO levels
# vs. a categorical predictor. Mirrors bi_cat_cat1 but fits a multinomial
# logistic regression instead of a binomial one. Returns eight result
# objects serialized as JSON.
#
# choices:  display labels; choices[[2]] names the independent variable
# varname1: dependent (response) vector with 3+ levels
# varname2: independent (predictor) vector
# testdata: the uploaded data set (not used directly; kept for interface parity)
#
# NOTE(review): multinom presumably comes from nnet and toJSON from
# jsonlite/rjson, loaded elsewhere in the app -- confirm against the caller.
bi_cat_cat2=function(choices, varname1, varname2, testdata)
{
valuesList <- vector("list", length=8)
fit=table(varname1,varname2) #print this with label "Two-way table counts with varname1 with varname2"
fitmatrix <- as.data.frame.matrix(fit)
valuesList[[1]] <- list("Two-way table counts" = fitmatrix, "rows"=rownames(fitmatrix))
# Dimensions of the contingency table.
i=dim(fit)[1] #number of rows
j=dim(fit)[2] #number of columns
rowper=matrix(nrow=i,ncol=j)
# NOTE(review): i and j are reused here as loop indices; this works only
# because 1:i and 1:j are evaluated before the first reassignment.
for (i in 1:i)
{
for ( j in 1:j)
{rowper[i,j]=fit[i,j]/rowSums(fit)[i]}
}
rownames(rowper)<- names(table(varname1))
colnames(rowper)<- names(table(varname2))
rowpermatrix <- as.data.frame.matrix(round(rowper,4))
valuesList[[2]] <- list("row proportion" = rowpermatrix, "rows"=rownames(rowpermatrix))
#chi square test
fit2=chisq.test(varname1,varname2)
fit2val<-capture.output(fit2)
valuesList[[3]] <- replace(fit2val, fit2val=="", "\n") #print this with label "chi square test"-with warning message as one table cell
#fisher exact test
fit3=fisher.test(varname1,varname2)
fit3val<-capture.output(fit3)
valuesList[[4]] <- replace(fit3val, fit3val=="", "\n") #print this with label "fisher exact test"- with warning message as one table cell
footnote <- c("Of Note: the presented odds ratios and their confidence interval are ratios in odds between each category versus. the reference category.", "*Reference category is the lowest level of the independent variable.")
#multinomial logistic regression for a dependent variable with more than 2 levels
nclass=length(table(varname2))
fit4=multinom(varname1~factor(varname2))
labelvar2=names(table(varname2))
oddsLabel <- paste("odds ratio of each category vs the lowest category of ", choices[[2]], " ",labelvar2[1])
# Columns 2..nclass of the coefficient matrix skip the intercept column,
# giving one odds ratio per non-reference predictor level.
oddsratio=as.list(exp(summary(fit4)$coefficients[,2:nclass])) #odds ratios, one cell per value
valuesList[[5]] <- setNames(list(oddsratio, footnote),c(oddsLabel,"footnote"))
# 95% CI bounds via the normal approximation (estimate +/- 1.96 * SE on the
# log-odds scale, then exponentiated).
ci_low=as.list(exp(summary(fit4)$coefficients[,2:nclass]-1.96*summary(fit4)$standard.error[,2:nclass]))
valuesList[[6]] <- list("Lower boundary of 95% confidence interval of odds ratio" = ci_low, "footnote"=footnote)
ci_upper=as.list(exp(summary(fit4)$coefficients[,2:nclass]+1.96*summary(fit4)$standard.error[,2:nclass]))
valuesList[[7]] <- list("Upper boundary of 95% confidence interval of odds ratio" = ci_upper, "footnote"=footnote)
#model summary as captured text
fit4val <- capture.output(summary(fit4))
valuesList[[8]] <- replace(fit4val, fit4val=="", "\n")
return(toJSON(valuesList))
}
# Entry point for the categorical-response ~ categorical-predictor analysis.
# Dispatches on the number of distinct levels observed in the dependent
# (response) variable:
#   exactly 2 levels -> bi_cat_cat1 (binomial logistic regression)
#   3 or more levels -> bi_cat_cat2 (multinomial logistic regression)
# Returns the JSON string produced by the selected routine.
bivariate_cat_cat <- function(choices, dependent, independent, uploaddata){
# Count distinct (non-NA) response levels via the contingency table.
noclass<-length(table(dependent))
if(noclass == 2){
return(bi_cat_cat1(choices, dependent,independent,uploaddata))
}
else{
return(bi_cat_cat2(choices, dependent,independent,uploaddata))
}
} |
#' Ad Exchange Buyer API Objects
#' Accesses your bidding-account information, submits creatives for validation, finds available direct deals, and retrieves performance reports.
#'
#' Auto-generated code by googleAuthR::gar_create_api_objects
#' at 2017-03-05 19:20:29
#' filename: /Users/mark/dev/R/autoGoogleAPI/googleadexchangebuyerv14.auto/R/adexchangebuyer_objects.R
#' api_json: api_json
#'
#' Objects for use by the functions created by googleAuthR::gar_create_api_skeleton
#' Account Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Configuration data for an Ad Exchange buyer account.
#'
#' @param Account.bidderLocation The \link{Account.bidderLocation} object or list of objects
#' @param bidderLocation Your bidder locations that have distinct URLs
#' @param cookieMatchingNid The nid parameter value used in cookie match requests
#' @param cookieMatchingUrl The base URL used in cookie match requests
#' @param id Account id
#' @param maximumActiveCreatives The maximum number of active creatives that an account can have, where a creative is active if it was inserted or bid with in the last 30 days
#' @param maximumTotalQps The sum of all bidderLocation
#' @param numberActiveCreatives The number of creatives that this account inserted or bid with in the last 30 days
#'
#' @return Account object
#'
#' @family Account functions
#' @export
Account <- function(Account.bidderLocation = NULL, bidderLocation = NULL, cookieMatchingNid = NULL,
    cookieMatchingUrl = NULL, id = NULL, maximumActiveCreatives = NULL, maximumTotalQps = NULL,
    numberActiveCreatives = NULL) {
    # BUG FIX: `kind` must be the literal API string. The generated code used
    # a backtick-quoted name (`adexchangebuyer#account`), which R evaluates as
    # a variable lookup and fails with "object not found" on every call.
    structure(list(Account.bidderLocation = Account.bidderLocation, bidderLocation = bidderLocation,
        cookieMatchingNid = cookieMatchingNid, cookieMatchingUrl = cookieMatchingUrl,
        id = id, kind = "adexchangebuyer#account", maximumActiveCreatives = maximumActiveCreatives,
        maximumTotalQps = maximumTotalQps, numberActiveCreatives = numberActiveCreatives),
        class = "gar_Account")
}
#' Account.bidderLocation Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#'
#'
#' @return Account.bidderLocation object
#'
#' @family Account functions
#' @export
Account.bidderLocation <- function() {
    # Generated placeholder: this schema defines no fields, so the
    # constructor returns an empty container.
    empty <- list()
    empty
}
#' AccountsList Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' An account feed lists Ad Exchange buyer accounts that the user has access to. Each entry in the feed corresponds to a single buyer account.
#'
#' @param items A list of accounts
#'
#' @return AccountsList object
#'
#' @family AccountsList functions
#' @export
AccountsList <- function(items = NULL) {
    # BUG FIX: `kind` must be the literal API string; the generated
    # backtick-quoted name was a variable lookup that errored at runtime.
    structure(list(items = items, kind = "adexchangebuyer#accountsList"),
        class = "gar_AccountsList")
}
#' AddOrderDealsRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param deals The list of deals to add
#' @param proposalRevisionNumber The last known proposal revision number
#' @param updateAction Indicates an optional action to take on the proposal
#'
#' @return AddOrderDealsRequest object
#'
#' @family AddOrderDealsRequest functions
#' @export
AddOrderDealsRequest <- function(deals = NULL, proposalRevisionNumber = NULL, updateAction = NULL) {
    # Assemble the request payload, then attach its S3 class tag.
    payload <- list(
        deals = deals,
        proposalRevisionNumber = proposalRevisionNumber,
        updateAction = updateAction
    )
    class(payload) <- "gar_AddOrderDealsRequest"
    payload
}
#' AddOrderDealsResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param deals List of deals added (in the same proposal as passed in the request)
#' @param proposalRevisionNumber The updated revision number for the proposal
#'
#' @return AddOrderDealsResponse object
#'
#' @family AddOrderDealsResponse functions
#' @export
AddOrderDealsResponse <- function(deals = NULL, proposalRevisionNumber = NULL) {
    # Build the response payload, then attach its S3 class tag.
    payload <- list(deals = deals, proposalRevisionNumber = proposalRevisionNumber)
    class(payload) <- "gar_AddOrderDealsResponse"
    payload
}
#' AddOrderNotesRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param notes The list of notes to add
#'
#' @return AddOrderNotesRequest object
#'
#' @family AddOrderNotesRequest functions
#' @export
AddOrderNotesRequest <- function(notes = NULL) {
    # Wrap the notes in the request payload, then attach its S3 class tag.
    payload <- list(notes = notes)
    class(payload) <- "gar_AddOrderNotesRequest"
    payload
}
#' AddOrderNotesResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param notes No description
#'
#' @return AddOrderNotesResponse object
#'
#' @family AddOrderNotesResponse functions
#' @export
AddOrderNotesResponse <- function(notes = NULL) {
    # Wrap the notes in the response payload, then attach its S3 class tag.
    payload <- list(notes = notes)
    class(payload) <- "gar_AddOrderNotesResponse"
    payload
}
#' BillingInfo Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The configuration data for an Ad Exchange billing info.
#'
#' @param accountId Account id
#' @param accountName Account name
#' @param billingId A list of adgroup IDs associated with this particular account
#'
#' @return BillingInfo object
#'
#' @family BillingInfo functions
#' @export
BillingInfo <- function(accountId = NULL, accountName = NULL, billingId = NULL) {
    # BUG FIX: `kind` must be the literal API string; the generated
    # backtick-quoted name was a variable lookup that errored at runtime.
    structure(list(accountId = accountId, accountName = accountName, billingId = billingId,
        kind = "adexchangebuyer#billingInfo"), class = "gar_BillingInfo")
}
#' BillingInfoList Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A billing info feed lists Billing Info the Ad Exchange buyer account has access to. Each entry in the feed corresponds to a single billing info.
#'
#' @param items A list of billing info relevant for your account
#'
#' @return BillingInfoList object
#'
#' @family BillingInfoList functions
#' @export
BillingInfoList <- function(items = NULL) {
    # BUG FIX: `kind` must be the literal API string; the generated
    # backtick-quoted name was a variable lookup that errored at runtime.
    structure(list(items = items, kind = "adexchangebuyer#billingInfoList"),
        class = "gar_BillingInfoList")
}
#' Budget Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The configuration data for Ad Exchange RTB - Budget API.
#'
#' @param accountId The id of the account
#' @param billingId The billing id to determine which adgroup to provide budget information for
#' @param budgetAmount The daily budget amount in unit amount of the account currency to apply for the billingId provided
#' @param currencyCode The currency code for the buyer
#' @param id The unique id that describes this item
#'
#' @return Budget object
#'
#' @family Budget functions
#' @export
Budget <- function(accountId = NULL, billingId = NULL, budgetAmount = NULL, currencyCode = NULL,
    id = NULL) {
    # BUG FIX: `kind` must be the literal API string; the generated
    # backtick-quoted name was a variable lookup that errored at runtime.
    structure(list(accountId = accountId, billingId = billingId, budgetAmount = budgetAmount,
        currencyCode = currencyCode, id = id, kind = "adexchangebuyer#budget"),
        class = "gar_Budget")
}
#' Buyer Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param accountId Adx account id of the buyer
#'
#' @return Buyer object
#'
#' @family Buyer functions
#' @export
Buyer <- function(accountId = NULL) {
    # Build the buyer payload, then attach its S3 class tag.
    payload <- list(accountId = accountId)
    class(payload) <- "gar_Buyer"
    payload
}
#' ContactInformation Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param email Email address of the contact
#' @param name The name of the contact
#'
#' @return ContactInformation object
#'
#' @family ContactInformation functions
#' @export
ContactInformation <- function(email = NULL, name = NULL) {
    # Build the contact payload, then attach its S3 class tag.
    payload <- list(email = email, name = name)
    class(payload) <- "gar_ContactInformation"
    payload
}
#' CreateOrdersRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param proposals The list of proposals to create
#' @param webPropertyCode Web property id of the seller creating these orders
#'
#' @return CreateOrdersRequest object
#'
#' @family CreateOrdersRequest functions
#' @export
CreateOrdersRequest <- function(proposals = NULL, webPropertyCode = NULL) {
    # Build the request payload, then attach its S3 class tag.
    payload <- list(proposals = proposals, webPropertyCode = webPropertyCode)
    class(payload) <- "gar_CreateOrdersRequest"
    payload
}
#' CreateOrdersResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param proposals The list of proposals successfully created
#'
#' @return CreateOrdersResponse object
#'
#' @family CreateOrdersResponse functions
#' @export
CreateOrdersResponse <- function(proposals = NULL) {
    # Wrap the created proposals in the response payload with its S3 class tag.
    payload <- list(proposals = proposals)
    class(payload) <- "gar_CreateOrdersResponse"
    payload
}
#' Creative Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A creative and its classification data.
#'
#' @param Creative.corrections The \link{Creative.corrections} object or list of objects
#' @param Creative.corrections.contexts The \link{Creative.corrections.contexts} object or list of objects
#' @param Creative.filteringReasons The \link{Creative.filteringReasons} object or list of objects
#' @param Creative.filteringReasons.reasons The \link{Creative.filteringReasons.reasons} object or list of objects
#' @param Creative.nativeAd The \link{Creative.nativeAd} object or list of objects
#' @param Creative.nativeAd.appIcon The \link{Creative.nativeAd.appIcon} object or list of objects
#' @param Creative.nativeAd.image The \link{Creative.nativeAd.image} object or list of objects
#' @param Creative.nativeAd.logo The \link{Creative.nativeAd.logo} object or list of objects
#' @param Creative.servingRestrictions The \link{Creative.servingRestrictions} object or list of objects
#' @param Creative.servingRestrictions.contexts The \link{Creative.servingRestrictions.contexts} object or list of objects
#' @param Creative.servingRestrictions.disapprovalReasons The \link{Creative.servingRestrictions.disapprovalReasons} object or list of objects
#' @param HTMLSnippet The HTML snippet that displays the ad when inserted in the web page
#' @param accountId Account id
#' @param adChoicesDestinationUrl The link to the Ad Preferences page
#' @param advertiserId Detected advertiser id, if any
#' @param advertiserName The name of the company being advertised in the creative
#' @param agencyId The agency id for this creative
#' @param apiUploadTimestamp The last upload timestamp of this creative if it was uploaded via API
#' @param attribute List of buyer selectable attributes for the ads that may be shown from this snippet
#' @param buyerCreativeId A buyer-specific id identifying the creative in this ad
#' @param clickThroughUrl The set of destination urls for the snippet
#' @param corrections Shows any corrections that were applied to this creative
#' @param dealsStatus Top-level deals status
#' @param detectedDomains Detected domains for this creative
#' @param filteringReasons The filtering reasons for the creative
#' @param height Ad height
#' @param impressionTrackingUrl The set of urls to be called to record an impression
#' @param languages Detected languages for this creative
#' @param nativeAd If nativeAd is set, HTMLSnippet and the videoURL outside of nativeAd should not be set
#' @param openAuctionStatus Top-level open auction status
#' @param productCategories Detected product categories, if any
#' @param restrictedCategories All restricted categories for the ads that may be shown from this snippet
#' @param sensitiveCategories Detected sensitive categories, if any
#' @param servingRestrictions The granular status of this ad in specific contexts
#' @param vendorType List of vendor types for the ads that may be shown from this snippet
#' @param version The version for this creative
#' @param videoURL The URL to fetch a video ad
#' @param width Ad width
#'
#' @return Creative object
#'
#' @family Creative functions
#' @export
Creative <- function(Creative.corrections = NULL, Creative.corrections.contexts = NULL,
    Creative.filteringReasons = NULL, Creative.filteringReasons.reasons = NULL, Creative.nativeAd = NULL,
    Creative.nativeAd.appIcon = NULL, Creative.nativeAd.image = NULL, Creative.nativeAd.logo = NULL,
    Creative.servingRestrictions = NULL, Creative.servingRestrictions.contexts = NULL,
    Creative.servingRestrictions.disapprovalReasons = NULL, HTMLSnippet = NULL, accountId = NULL,
    adChoicesDestinationUrl = NULL, advertiserId = NULL, advertiserName = NULL, agencyId = NULL,
    apiUploadTimestamp = NULL, attribute = NULL, buyerCreativeId = NULL, clickThroughUrl = NULL,
    corrections = NULL, dealsStatus = NULL, detectedDomains = NULL, filteringReasons = NULL,
    height = NULL, impressionTrackingUrl = NULL, languages = NULL, nativeAd = NULL,
    openAuctionStatus = NULL, productCategories = NULL, restrictedCategories = NULL,
    sensitiveCategories = NULL, servingRestrictions = NULL, vendorType = NULL, version = NULL,
    videoURL = NULL, width = NULL) {
    # BUG FIX: `kind` must be the literal API string. The generated code used
    # a backtick-quoted name (`adexchangebuyer#creative`), which R evaluates
    # as a variable lookup and fails with "object not found" on every call.
    structure(list(Creative.corrections = Creative.corrections, Creative.corrections.contexts = Creative.corrections.contexts,
        Creative.filteringReasons = Creative.filteringReasons, Creative.filteringReasons.reasons = Creative.filteringReasons.reasons,
        Creative.nativeAd = Creative.nativeAd, Creative.nativeAd.appIcon = Creative.nativeAd.appIcon,
        Creative.nativeAd.image = Creative.nativeAd.image, Creative.nativeAd.logo = Creative.nativeAd.logo,
        Creative.servingRestrictions = Creative.servingRestrictions, Creative.servingRestrictions.contexts = Creative.servingRestrictions.contexts,
        Creative.servingRestrictions.disapprovalReasons = Creative.servingRestrictions.disapprovalReasons,
        HTMLSnippet = HTMLSnippet, accountId = accountId, adChoicesDestinationUrl = adChoicesDestinationUrl,
        advertiserId = advertiserId, advertiserName = advertiserName, agencyId = agencyId,
        apiUploadTimestamp = apiUploadTimestamp, attribute = attribute, buyerCreativeId = buyerCreativeId,
        clickThroughUrl = clickThroughUrl, corrections = corrections, dealsStatus = dealsStatus,
        detectedDomains = detectedDomains, filteringReasons = filteringReasons, height = height,
        impressionTrackingUrl = impressionTrackingUrl, kind = "adexchangebuyer#creative",
        languages = languages, nativeAd = nativeAd, openAuctionStatus = openAuctionStatus,
        productCategories = productCategories, restrictedCategories = restrictedCategories,
        sensitiveCategories = sensitiveCategories, servingRestrictions = servingRestrictions,
        vendorType = vendorType, version = version, videoURL = videoURL, width = width),
        class = "gar_Creative")
}
#' Creative.corrections Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Builds the corrections sub-object attached to a Creative.
#'
#' @param Creative.corrections.contexts The \link{Creative.corrections.contexts} object or list of objects
#'
#' @return A \code{gar_Creative.corrections} object
#'
#' @family Creative functions
#' @export
Creative.corrections <- function(Creative.corrections.contexts = NULL) {
    # Assemble the single-field payload, then tag it with its S3 class.
    out <- list(Creative.corrections.contexts = Creative.corrections.contexts)
    class(out) <- "gar_Creative.corrections"
    out
}
#' Creative.corrections.contexts Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Schema placeholder: the API declares no properties for this object.
#'
#' @return An empty \code{Creative.corrections.contexts} object (a bare list)
#'
#' @family Creative functions
#' @export
Creative.corrections.contexts <- function() {
    # No fields are defined by the API schema, so return an empty list.
    list()
}
#' Creative.filteringReasons Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The filtering reasons for the creative. Read-only. This field should not be set in requests.
#'
#' @param Creative.filteringReasons.reasons The \link{Creative.filteringReasons.reasons} object or list of objects
#' @param date The date in ISO 8601 format for the data
#' @param reasons The filtering reasons
#'
#' @return A \code{gar_Creative.filteringReasons} object
#'
#' @family Creative functions
#' @export
Creative.filteringReasons <- function(Creative.filteringReasons.reasons = NULL, date = NULL,
    reasons = NULL) {
    # Collect the fields in schema order, then apply the S3 class tag.
    out <- list(Creative.filteringReasons.reasons = Creative.filteringReasons.reasons,
        date = date, reasons = reasons)
    class(out) <- "gar_Creative.filteringReasons"
    out
}
#' Creative.filteringReasons.reasons Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Schema placeholder: the API declares no properties for this object.
#'
#' @return An empty \code{Creative.filteringReasons.reasons} object (a bare list)
#'
#' @family Creative functions
#' @export
Creative.filteringReasons.reasons <- function() {
    # No fields are defined by the API schema, so return an empty list.
    list()
}
#' Creative.nativeAd Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' If nativeAd is set, HTMLSnippet and the videoURL outside of nativeAd should not be set. (The videoURL inside nativeAd can be set.)
#'
#' @param Creative.nativeAd.appIcon The \link{Creative.nativeAd.appIcon} object or list of objects
#' @param Creative.nativeAd.image The \link{Creative.nativeAd.image} object or list of objects
#' @param Creative.nativeAd.logo The \link{Creative.nativeAd.logo} object or list of objects
#' @param advertiser No description
#' @param appIcon The app icon, for app download ads
#' @param body A long description of the ad
#' @param callToAction A label for the button that the user is supposed to click
#' @param clickLinkUrl The URL that the browser/SDK will load when the user clicks the ad
#' @param clickTrackingUrl The URL to use for click tracking
#' @param headline A short title for the ad
#' @param image A large image
#' @param impressionTrackingUrl The URLs are called when the impression is rendered
#' @param logo A smaller image, for the advertiser logo
#' @param price The price of the promoted app including the currency info
#' @param starRating The app rating in the app store
#' @param store The URL to the app store to purchase/download the promoted app
#' @param videoURL The URL of the XML VAST for a native ad
#'
#' @return A \code{gar_Creative.nativeAd} object
#'
#' @family Creative functions
#' @export
Creative.nativeAd <- function(Creative.nativeAd.appIcon = NULL, Creative.nativeAd.image = NULL,
    Creative.nativeAd.logo = NULL, advertiser = NULL, appIcon = NULL, body = NULL,
    callToAction = NULL, clickLinkUrl = NULL, clickTrackingUrl = NULL, headline = NULL,
    image = NULL, impressionTrackingUrl = NULL, logo = NULL, price = NULL, starRating = NULL,
    store = NULL, videoURL = NULL) {
    # Mirror every argument into a named list (schema field order preserved),
    # then stamp the list with the S3 class expected by the API wrapper.
    out <- list(
        Creative.nativeAd.appIcon = Creative.nativeAd.appIcon,
        Creative.nativeAd.image = Creative.nativeAd.image,
        Creative.nativeAd.logo = Creative.nativeAd.logo,
        advertiser = advertiser,
        appIcon = appIcon,
        body = body,
        callToAction = callToAction,
        clickLinkUrl = clickLinkUrl,
        clickTrackingUrl = clickTrackingUrl,
        headline = headline,
        image = image,
        impressionTrackingUrl = impressionTrackingUrl,
        logo = logo,
        price = price,
        starRating = starRating,
        store = store,
        videoURL = videoURL
    )
    class(out) <- "gar_Creative.nativeAd"
    out
}
#' Creative.nativeAd.appIcon Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The app icon, for app download ads.
#'
#' @param height No description
#' @param url No description
#' @param width No description
#'
#' @return A \code{gar_Creative.nativeAd.appIcon} object
#'
#' @family Creative functions
#' @export
Creative.nativeAd.appIcon <- function(height = NULL, url = NULL, width = NULL) {
    # Simple three-field image descriptor; class tag applied after assembly.
    out <- list(height = height, url = url, width = width)
    class(out) <- "gar_Creative.nativeAd.appIcon"
    out
}
#' Creative.nativeAd.image Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A large image.
#'
#' @param height No description
#' @param url No description
#' @param width No description
#'
#' @return A \code{gar_Creative.nativeAd.image} object
#'
#' @family Creative functions
#' @export
Creative.nativeAd.image <- function(height = NULL, url = NULL, width = NULL) {
    # Simple three-field image descriptor; class tag applied after assembly.
    out <- list(height = height, url = url, width = width)
    class(out) <- "gar_Creative.nativeAd.image"
    out
}
#' Creative.nativeAd.logo Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A smaller image, for the advertiser logo.
#'
#' @param height No description
#' @param url No description
#' @param width No description
#'
#' @return A \code{gar_Creative.nativeAd.logo} object
#'
#' @family Creative functions
#' @export
Creative.nativeAd.logo <- function(height = NULL, url = NULL, width = NULL) {
    # Simple three-field image descriptor; class tag applied after assembly.
    out <- list(height = height, url = url, width = width)
    class(out) <- "gar_Creative.nativeAd.logo"
    out
}
#' Creative.servingRestrictions Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Builds the serving-restrictions sub-object attached to a Creative.
#'
#' @param Creative.servingRestrictions.contexts The \link{Creative.servingRestrictions.contexts} object or list of objects
#' @param Creative.servingRestrictions.disapprovalReasons The \link{Creative.servingRestrictions.disapprovalReasons} object or list of objects
#'
#' @return A \code{gar_Creative.servingRestrictions} object
#'
#' @family Creative functions
#' @export
Creative.servingRestrictions <- function(Creative.servingRestrictions.contexts = NULL,
    Creative.servingRestrictions.disapprovalReasons = NULL) {
    # Two-field payload; class assigned separately for clarity.
    out <- list(
        Creative.servingRestrictions.contexts = Creative.servingRestrictions.contexts,
        Creative.servingRestrictions.disapprovalReasons = Creative.servingRestrictions.disapprovalReasons
    )
    class(out) <- "gar_Creative.servingRestrictions"
    out
}
#' Creative.servingRestrictions.contexts Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Schema placeholder: the API declares no properties for this object.
#'
#' @return An empty \code{Creative.servingRestrictions.contexts} object (a bare list)
#'
#' @family Creative functions
#' @export
Creative.servingRestrictions.contexts <- function() {
    # No fields are defined by the API schema, so return an empty list.
    list()
}
#' Creative.servingRestrictions.disapprovalReasons Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Schema placeholder: the API declares no properties for this object.
#'
#' @return An empty \code{Creative.servingRestrictions.disapprovalReasons} object (a bare list)
#'
#' @family Creative functions
#' @export
Creative.servingRestrictions.disapprovalReasons <- function() {
    # No fields are defined by the API schema, so return an empty list.
    list()
}
#' CreativeDealIds Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The external deal ids associated with a creative.
#'
#' The \code{kind} field is fixed by the API to the string
#' \code{"adexchangebuyer#creativeDealIds"} and is set automatically.
#'
#' @param CreativeDealIds.dealStatuses The \link{CreativeDealIds.dealStatuses} object or list of objects
#' @param dealStatuses A list of external deal ids and ARC approval status
#'
#' @return CreativeDealIds object
#'
#' @family CreativeDealIds functions
#' @export
CreativeDealIds <- function(CreativeDealIds.dealStatuses = NULL, dealStatuses = NULL) {
    # BUGFIX: `adexchangebuyer#creativeDealIds` (backticked name) referenced a
    # nonexistent variable and errored at runtime; the API expects the literal
    # string value for the kind field.
    structure(list(CreativeDealIds.dealStatuses = CreativeDealIds.dealStatuses, dealStatuses = dealStatuses,
        kind = "adexchangebuyer#creativeDealIds"), class = "gar_CreativeDealIds")
}
#' CreativeDealIds.dealStatuses Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Schema placeholder: the API declares no properties for this object.
#'
#' @return An empty \code{CreativeDealIds.dealStatuses} object (a bare list)
#'
#' @family CreativeDealIds functions
#' @export
CreativeDealIds.dealStatuses <- function() {
    # No fields are defined by the API schema, so return an empty list.
    list()
}
#' CreativesList Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The creatives feed lists the active creatives for the Ad Exchange buyer accounts that the user has access to. Each entry in the feed corresponds to a single creative.
#'
#' The \code{kind} field is fixed by the API to the string
#' \code{"adexchangebuyer#creativesList"} and is set automatically.
#'
#' @param items A list of creatives
#' @param nextPageToken Continuation token used to page through creatives
#'
#' @return CreativesList object
#'
#' @family CreativesList functions
#' @export
CreativesList <- function(items = NULL, nextPageToken = NULL) {
    # BUGFIX: the backticked name `adexchangebuyer#creativesList` referenced a
    # nonexistent variable and errored at runtime; the kind field must carry
    # the literal string value expected by the API.
    structure(list(items = items, kind = "adexchangebuyer#creativesList", nextPageToken = nextPageToken),
        class = "gar_CreativesList")
}
#' DealServingMetadata Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Metadata describing how a deal is currently being served.
#'
#' @param alcoholAdsAllowed True if alcohol ads are allowed for this deal (read-only)
#' @param dealPauseStatus Tracks which parties (if any) have paused a deal
#'
#' @return A \code{gar_DealServingMetadata} object
#'
#' @family DealServingMetadata functions
#' @export
DealServingMetadata <- function(alcoholAdsAllowed = NULL, dealPauseStatus = NULL) {
    # Two-field payload; class assigned separately for clarity.
    out <- list(alcoholAdsAllowed = alcoholAdsAllowed, dealPauseStatus = dealPauseStatus)
    class(out) <- "gar_DealServingMetadata"
    out
}
#' DealServingMetadataDealPauseStatus Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Tracks which parties (if any) have paused a deal. The deal is considered paused if has_buyer_paused || has_seller_paused. Each of the has_buyer_paused or the has_seller_paused bits can be set independently.
#'
#' @param buyerPauseReason No description
#' @param firstPausedBy If the deal is paused, records which party paused the deal first
#' @param hasBuyerPaused No description
#' @param hasSellerPaused No description
#' @param sellerPauseReason No description
#'
#' @return A \code{gar_DealServingMetadataDealPauseStatus} object
#'
#' @family DealServingMetadataDealPauseStatus functions
#' @export
DealServingMetadataDealPauseStatus <- function(buyerPauseReason = NULL, firstPausedBy = NULL,
    hasBuyerPaused = NULL, hasSellerPaused = NULL, sellerPauseReason = NULL) {
    # Collect the fields in schema order, then apply the S3 class tag.
    out <- list(
        buyerPauseReason = buyerPauseReason,
        firstPausedBy = firstPausedBy,
        hasBuyerPaused = hasBuyerPaused,
        hasSellerPaused = hasSellerPaused,
        sellerPauseReason = sellerPauseReason
    )
    class(out) <- "gar_DealServingMetadataDealPauseStatus"
    out
}
#' DealTerms Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The negotiable terms of a deal.
#'
#' @param brandingType Visibilty of the URL in bid requests
#' @param crossListedExternalDealIdType Indicates that this ExternalDealId exists under at least two different AdxInventoryDeals
#' @param description Description for the proposed terms of the deal
#' @param estimatedGrossSpend Non-binding estimate of the estimated gross spend for this deal Can be set by buyer or seller
#' @param estimatedImpressionsPerDay Non-binding estimate of the impressions served per day Can be set by buyer or seller
#' @param guaranteedFixedPriceTerms The terms for guaranteed fixed price deals
#' @param nonGuaranteedAuctionTerms The terms for non-guaranteed auction deals
#' @param nonGuaranteedFixedPriceTerms The terms for non-guaranteed fixed price deals
#' @param rubiconNonGuaranteedTerms The terms for rubicon non-guaranteed deals
#' @param sellerTimeZone For deals with Cost Per Day billing, defines the timezone used to mark the boundaries of a day (buyer-readonly)
#'
#' @return A \code{gar_DealTerms} object
#'
#' @family DealTerms functions
#' @export
DealTerms <- function(brandingType = NULL, crossListedExternalDealIdType = NULL,
    description = NULL, estimatedGrossSpend = NULL, estimatedImpressionsPerDay = NULL,
    guaranteedFixedPriceTerms = NULL, nonGuaranteedAuctionTerms = NULL, nonGuaranteedFixedPriceTerms = NULL,
    rubiconNonGuaranteedTerms = NULL, sellerTimeZone = NULL) {
    # Mirror each argument into a named list (schema order preserved),
    # then stamp the result with its S3 class.
    out <- list(
        brandingType = brandingType,
        crossListedExternalDealIdType = crossListedExternalDealIdType,
        description = description,
        estimatedGrossSpend = estimatedGrossSpend,
        estimatedImpressionsPerDay = estimatedImpressionsPerDay,
        guaranteedFixedPriceTerms = guaranteedFixedPriceTerms,
        nonGuaranteedAuctionTerms = nonGuaranteedAuctionTerms,
        nonGuaranteedFixedPriceTerms = nonGuaranteedFixedPriceTerms,
        rubiconNonGuaranteedTerms = rubiconNonGuaranteedTerms,
        sellerTimeZone = sellerTimeZone
    )
    class(out) <- "gar_DealTerms"
    out
}
#' DealTermsGuaranteedFixedPriceTerms Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Terms describing a guaranteed fixed price deal.
#'
#' @param billingInfo External billing info for this Deal
#' @param fixedPrices Fixed price for the specified buyer
#' @param guaranteedImpressions Guaranteed impressions as a percentage
#' @param guaranteedLooks Count of guaranteed looks
#' @param minimumDailyLooks Count of minimum daily looks for a CPD deal
#'
#' @return A \code{gar_DealTermsGuaranteedFixedPriceTerms} object
#'
#' @family DealTermsGuaranteedFixedPriceTerms functions
#' @export
DealTermsGuaranteedFixedPriceTerms <- function(billingInfo = NULL, fixedPrices = NULL,
    guaranteedImpressions = NULL, guaranteedLooks = NULL, minimumDailyLooks = NULL) {
    # Collect the fields in schema order, then apply the S3 class tag.
    out <- list(
        billingInfo = billingInfo,
        fixedPrices = fixedPrices,
        guaranteedImpressions = guaranteedImpressions,
        guaranteedLooks = guaranteedLooks,
        minimumDailyLooks = minimumDailyLooks
    )
    class(out) <- "gar_DealTermsGuaranteedFixedPriceTerms"
    out
}
#' DealTermsGuaranteedFixedPriceTermsBillingInfo Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' External billing information for a guaranteed fixed price deal.
#'
#' @param currencyConversionTimeMs The timestamp (in ms since epoch) when the original reservation price for the deal was first converted to DFP currency
#' @param dfpLineItemId The DFP line item id associated with this deal
#' @param originalContractedQuantity The original contracted quantity (# impressions) for this deal
#' @param price The original reservation price for the deal, if the currency code is different from the one used in negotiation
#'
#' @return A \code{gar_DealTermsGuaranteedFixedPriceTermsBillingInfo} object
#'
#' @family DealTermsGuaranteedFixedPriceTermsBillingInfo functions
#' @export
DealTermsGuaranteedFixedPriceTermsBillingInfo <- function(currencyConversionTimeMs = NULL,
    dfpLineItemId = NULL, originalContractedQuantity = NULL, price = NULL) {
    # Collect the fields in schema order, then apply the S3 class tag.
    out <- list(
        currencyConversionTimeMs = currencyConversionTimeMs,
        dfpLineItemId = dfpLineItemId,
        originalContractedQuantity = originalContractedQuantity,
        price = price
    )
    class(out) <- "gar_DealTermsGuaranteedFixedPriceTermsBillingInfo"
    out
}
#' DealTermsNonGuaranteedAuctionTerms Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Terms describing a non-guaranteed auction deal.
#'
#' @param autoOptimizePrivateAuction True if open auction buyers are allowed to compete with invited buyers in this private auction (buyer-readonly)
#' @param reservePricePerBuyers Reserve price for the specified buyer
#'
#' @return A \code{gar_DealTermsNonGuaranteedAuctionTerms} object
#'
#' @family DealTermsNonGuaranteedAuctionTerms functions
#' @export
DealTermsNonGuaranteedAuctionTerms <- function(autoOptimizePrivateAuction = NULL,
    reservePricePerBuyers = NULL) {
    # Two-field payload; class assigned separately for clarity.
    out <- list(autoOptimizePrivateAuction = autoOptimizePrivateAuction,
        reservePricePerBuyers = reservePricePerBuyers)
    class(out) <- "gar_DealTermsNonGuaranteedAuctionTerms"
    out
}
#' DealTermsNonGuaranteedFixedPriceTerms Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Terms describing a non-guaranteed fixed price deal.
#'
#' @param fixedPrices Fixed price for the specified buyer
#'
#' @return A \code{gar_DealTermsNonGuaranteedFixedPriceTerms} object
#'
#' @family DealTermsNonGuaranteedFixedPriceTerms functions
#' @export
DealTermsNonGuaranteedFixedPriceTerms <- function(fixedPrices = NULL) {
    # Single-field payload; class assigned separately for clarity.
    out <- list(fixedPrices = fixedPrices)
    class(out) <- "gar_DealTermsNonGuaranteedFixedPriceTerms"
    out
}
#' DealTermsRubiconNonGuaranteedTerms Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Terms describing a Rubicon non-guaranteed deal.
#'
#' @param priorityPrice Optional price for Rubicon priority access in the auction
#' @param standardPrice Optional price for Rubicon standard access in the auction
#'
#' @return A \code{gar_DealTermsRubiconNonGuaranteedTerms} object
#'
#' @family DealTermsRubiconNonGuaranteedTerms functions
#' @export
DealTermsRubiconNonGuaranteedTerms <- function(priorityPrice = NULL, standardPrice = NULL) {
    # Two-field payload; class assigned separately for clarity.
    out <- list(priorityPrice = priorityPrice, standardPrice = standardPrice)
    class(out) <- "gar_DealTermsRubiconNonGuaranteedTerms"
    out
}
#' DeleteOrderDealsRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Request payload for deleting deals from a proposal.
#'
#' @param dealIds List of deals to delete for a given proposal
#' @param proposalRevisionNumber The last known proposal revision number
#' @param updateAction Indicates an optional action to take on the proposal
#'
#' @return A \code{gar_DeleteOrderDealsRequest} object
#'
#' @family DeleteOrderDealsRequest functions
#' @export
DeleteOrderDealsRequest <- function(dealIds = NULL, proposalRevisionNumber = NULL,
    updateAction = NULL) {
    # Collect the fields in schema order, then apply the S3 class tag.
    out <- list(dealIds = dealIds, proposalRevisionNumber = proposalRevisionNumber,
        updateAction = updateAction)
    class(out) <- "gar_DeleteOrderDealsRequest"
    out
}
#' DeleteOrderDealsResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Response payload returned after deleting deals from a proposal.
#'
#' @param deals List of deals deleted (in the same proposal as passed in the request)
#' @param proposalRevisionNumber The updated revision number for the proposal
#'
#' @return A \code{gar_DeleteOrderDealsResponse} object
#'
#' @family DeleteOrderDealsResponse functions
#' @export
DeleteOrderDealsResponse <- function(deals = NULL, proposalRevisionNumber = NULL) {
    # Two-field payload; class assigned separately for clarity.
    out <- list(deals = deals, proposalRevisionNumber = proposalRevisionNumber)
    class(out) <- "gar_DeleteOrderDealsResponse"
    out
}
#' DeliveryControl Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Delivery-control settings for a deal.
#'
#' @param creativeBlockingLevel No description
#' @param deliveryRateType No description
#' @param frequencyCaps No description
#'
#' @return A \code{gar_DeliveryControl} object
#'
#' @family DeliveryControl functions
#' @export
DeliveryControl <- function(creativeBlockingLevel = NULL, deliveryRateType = NULL,
    frequencyCaps = NULL) {
    # Collect the fields in schema order, then apply the S3 class tag.
    out <- list(creativeBlockingLevel = creativeBlockingLevel,
        deliveryRateType = deliveryRateType, frequencyCaps = frequencyCaps)
    class(out) <- "gar_DeliveryControl"
    out
}
#' DeliveryControlFrequencyCap Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A single frequency-cap entry for delivery control.
#'
#' @param maxImpressions No description
#' @param numTimeUnits No description
#' @param timeUnitType No description
#'
#' @return A \code{gar_DeliveryControlFrequencyCap} object
#'
#' @family DeliveryControlFrequencyCap functions
#' @export
DeliveryControlFrequencyCap <- function(maxImpressions = NULL, numTimeUnits = NULL,
    timeUnitType = NULL) {
    # Collect the fields in schema order, then apply the S3 class tag.
    out <- list(maxImpressions = maxImpressions, numTimeUnits = numTimeUnits,
        timeUnitType = timeUnitType)
    class(out) <- "gar_DeliveryControlFrequencyCap"
    out
}
#' Dimension Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' This message carries publisher provided breakdown. E.g. {dimension_type: 'COUNTRY', [{dimension_value: {id: 1, name: 'US'}}, {dimension_value: {id: 2, name: 'UK'}}]}
#'
#' @param dimensionType No description
#' @param dimensionValues No description
#'
#' @return A \code{gar_Dimension} object
#'
#' @family Dimension functions
#' @export
Dimension <- function(dimensionType = NULL, dimensionValues = NULL) {
    # Two-field payload; class assigned separately for clarity.
    out <- list(dimensionType = dimensionType, dimensionValues = dimensionValues)
    class(out) <- "gar_Dimension"
    out
}
#' DimensionDimensionValue Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Value of the dimension.
#'
#' @param id Id of the dimension
#' @param name Name of the dimension mainly for debugging purposes, except for the case of CREATIVE_SIZE
#' @param percentage Percent of total impressions for a dimension type
#'
#' @return A \code{gar_DimensionDimensionValue} object
#'
#' @family DimensionDimensionValue functions
#' @export
DimensionDimensionValue <- function(id = NULL, name = NULL, percentage = NULL) {
    # Collect the fields in schema order, then apply the S3 class tag.
    out <- list(id = id, name = name, percentage = percentage)
    class(out) <- "gar_DimensionDimensionValue"
    out
}
#' EditAllOrderDealsRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Request payload for batch-editing the deals of a proposal.
#'
#' @param deals List of deals to edit
#' @param proposal If specified, also updates the proposal in the batch transaction
#' @param proposalRevisionNumber The last known revision number for the proposal
#' @param updateAction Indicates an optional action to take on the proposal
#'
#' @return A \code{gar_EditAllOrderDealsRequest} object
#'
#' @family EditAllOrderDealsRequest functions
#' @export
EditAllOrderDealsRequest <- function(deals = NULL, proposal = NULL, proposalRevisionNumber = NULL,
    updateAction = NULL) {
    # Collect the fields in schema order, then apply the S3 class tag.
    out <- list(deals = deals, proposal = proposal,
        proposalRevisionNumber = proposalRevisionNumber, updateAction = updateAction)
    class(out) <- "gar_EditAllOrderDealsRequest"
    out
}
#' EditAllOrderDealsResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Response payload returned after batch-editing the deals of a proposal.
#'
#' @param deals List of all deals in the proposal after edit
#' @param orderRevisionNumber The latest revision number after the update has been applied
#'
#' @return A \code{gar_EditAllOrderDealsResponse} object
#'
#' @family EditAllOrderDealsResponse functions
#' @export
EditAllOrderDealsResponse <- function(deals = NULL, orderRevisionNumber = NULL) {
    # Two-field payload; class assigned separately for clarity.
    out <- list(deals = deals, orderRevisionNumber = orderRevisionNumber)
    class(out) <- "gar_EditAllOrderDealsResponse"
    out
}
#' GetOffersResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Response payload listing products.
#'
#' @param products The returned list of products
#'
#' @return A \code{gar_GetOffersResponse} object
#'
#' @family GetOffersResponse functions
#' @export
GetOffersResponse <- function(products = NULL) {
    # Single-field payload; class assigned separately for clarity.
    out <- list(products = products)
    class(out) <- "gar_GetOffersResponse"
    out
}
#' GetOrderDealsResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Response payload listing the deals of a proposal.
#'
#' @param deals List of deals for the proposal
#'
#' @return A \code{gar_GetOrderDealsResponse} object
#'
#' @family GetOrderDealsResponse functions
#' @export
GetOrderDealsResponse <- function(deals = NULL) {
    # Single-field payload; class assigned separately for clarity.
    out <- list(deals = deals)
    class(out) <- "gar_GetOrderDealsResponse"
    out
}
#' GetOrderNotesResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Response payload listing matching notes.
#'
#' @param notes The list of matching notes
#'
#' @return A \code{gar_GetOrderNotesResponse} object
#'
#' @family GetOrderNotesResponse functions
#' @export
GetOrderNotesResponse <- function(notes = NULL) {
    # Single-field payload; class assigned separately for clarity.
    out <- list(notes = notes)
    class(out) <- "gar_GetOrderNotesResponse"
    out
}
#' GetOrdersResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Response payload listing matching proposals.
#'
#' @param proposals The list of matching proposals
#'
#' @return A \code{gar_GetOrdersResponse} object
#'
#' @family GetOrdersResponse functions
#' @export
GetOrdersResponse <- function(proposals = NULL) {
    # Single-field payload; class assigned separately for clarity.
    out <- list(proposals = proposals)
    class(out) <- "gar_GetOrdersResponse"
    out
}
#' GetPublisherProfilesByAccountIdResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Response payload listing publisher profiles.
#'
#' @param profiles Profiles for the requested publisher
#'
#' @return A \code{gar_GetPublisherProfilesByAccountIdResponse} object
#'
#' @family GetPublisherProfilesByAccountIdResponse functions
#' @export
GetPublisherProfilesByAccountIdResponse <- function(profiles = NULL) {
    # Single-field payload; class assigned separately for clarity.
    out <- list(profiles = profiles)
    class(out) <- "gar_GetPublisherProfilesByAccountIdResponse"
    out
}
#' MarketplaceDeal Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A proposal can contain multiple deals. A deal contains the terms and targeting information that is used for serving.
#'
#' The \code{kind} field is fixed by the API to the string
#' \code{"adexchangebuyer#marketplaceDeal"} and is set automatically.
#'
#' @param buyerPrivateData Buyer private data (hidden from seller)
#' @param creationTimeMs The time (ms since epoch) of the deal creation
#' @param creativePreApprovalPolicy Specifies the creative pre-approval policy (buyer-readonly)
#' @param creativeSafeFrameCompatibility Specifies whether the creative is safeFrame compatible (buyer-readonly)
#' @param dealId A unique deal-id for the deal (readonly)
#' @param dealServingMetadata Metadata about the serving status of this deal (readonly, writes via custom actions)
#' @param deliveryControl The set of fields around delivery control that are interesting for a buyer to see but are non-negotiable
#' @param externalDealId The external deal id assigned to this deal once the deal is finalized
#' @param flightEndTimeMs Proposed flight end time of the deal (ms since epoch) This will generally be stored in a granularity of a second
#' @param flightStartTimeMs Proposed flight start time of the deal (ms since epoch) This will generally be stored in a granularity of a second
#' @param inventoryDescription Description for the deal terms
#' @param isRfpTemplate Indicates whether the current deal is a RFP template
#' @param lastUpdateTimeMs The time (ms since epoch) when the deal was last updated
#' @param name The name of the deal
#' @param productId The product-id from which this deal was created
#' @param productRevisionNumber The revision number of the product that the deal was created from (readonly, except on create)
#' @param programmaticCreativeSource Specifies the creative source for programmatic deals, PUBLISHER means creative is provided by seller and ADVERTISR means creative is provided by buyer
#' @param proposalId No description
#' @param sellerContacts Optional Seller contact information for the deal (buyer-readonly)
#' @param sharedTargetings The shared targeting visible to buyers and sellers
#' @param syndicationProduct The syndication product associated with the deal
#' @param terms The negotiable terms of the deal
#' @param webPropertyCode No description
#'
#' @return MarketplaceDeal object
#'
#' @family MarketplaceDeal functions
#' @export
MarketplaceDeal <- function(buyerPrivateData = NULL, creationTimeMs = NULL, creativePreApprovalPolicy = NULL,
    creativeSafeFrameCompatibility = NULL, dealId = NULL, dealServingMetadata = NULL,
    deliveryControl = NULL, externalDealId = NULL, flightEndTimeMs = NULL, flightStartTimeMs = NULL,
    inventoryDescription = NULL, isRfpTemplate = NULL, lastUpdateTimeMs = NULL, name = NULL,
    productId = NULL, productRevisionNumber = NULL, programmaticCreativeSource = NULL,
    proposalId = NULL, sellerContacts = NULL, sharedTargetings = NULL, syndicationProduct = NULL,
    terms = NULL, webPropertyCode = NULL) {
    # BUGFIX: the backticked name `adexchangebuyer#marketplaceDeal` referenced
    # a nonexistent variable and errored at runtime; the kind field must carry
    # the literal string value expected by the API.
    structure(list(buyerPrivateData = buyerPrivateData, creationTimeMs = creationTimeMs,
        creativePreApprovalPolicy = creativePreApprovalPolicy, creativeSafeFrameCompatibility = creativeSafeFrameCompatibility,
        dealId = dealId, dealServingMetadata = dealServingMetadata, deliveryControl = deliveryControl,
        externalDealId = externalDealId, flightEndTimeMs = flightEndTimeMs, flightStartTimeMs = flightStartTimeMs,
        inventoryDescription = inventoryDescription, isRfpTemplate = isRfpTemplate,
        kind = "adexchangebuyer#marketplaceDeal", lastUpdateTimeMs = lastUpdateTimeMs,
        name = name, productId = productId, productRevisionNumber = productRevisionNumber,
        programmaticCreativeSource = programmaticCreativeSource, proposalId = proposalId,
        sellerContacts = sellerContacts, sharedTargetings = sharedTargetings, syndicationProduct = syndicationProduct,
        terms = terms, webPropertyCode = webPropertyCode), class = "gar_MarketplaceDeal")
}
#' MarketplaceDealParty Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Identifies one party (buyer or seller) associated with a deal.
#'
#' @param buyer The buyer/seller associated with the deal
#' @param seller The buyer/seller associated with the deal
#'
#' @return A \code{gar_MarketplaceDealParty} object
#'
#' @family MarketplaceDealParty functions
#' @export
MarketplaceDealParty <- function(buyer = NULL, seller = NULL) {
    # Two-field payload; class assigned separately for clarity.
    out <- list(buyer = buyer, seller = seller)
    class(out) <- "gar_MarketplaceDealParty"
    out
}
#' MarketplaceLabel Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A label attached to a marketplace entity.
#'
#' @param accountId The accountId of the party that created the label
#' @param createTimeMs The creation time (in ms since epoch) for the label
#' @param deprecatedMarketplaceDealParty Information about the party that created the label
#' @param label The label to use
#'
#' @return A \code{gar_MarketplaceLabel} object
#'
#' @family MarketplaceLabel functions
#' @export
MarketplaceLabel <- function(accountId = NULL, createTimeMs = NULL, deprecatedMarketplaceDealParty = NULL,
    label = NULL) {
    # Collect the fields in schema order, then apply the S3 class tag.
    out <- list(accountId = accountId, createTimeMs = createTimeMs,
        deprecatedMarketplaceDealParty = deprecatedMarketplaceDealParty, label = label)
    class(out) <- "gar_MarketplaceLabel"
    out
}
#' MarketplaceNote Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A proposal is associated with a bunch of notes which may optionally be associated with a deal and/or revision number.
#'
#' The \code{kind} field is fixed by the API to the string
#' \code{"adexchangebuyer#marketplaceNote"} and is set automatically.
#'
#' @param creatorRole The role of the person (buyer/seller) creating the note
#' @param dealId Notes can optionally be associated with a deal
#' @param note The actual note to attach
#' @param noteId The unique id for the note
#' @param proposalId The proposalId that a note is attached to
#' @param proposalRevisionNumber If the note is associated with a proposal revision number, then store that here
#' @param timestampMs The timestamp (ms since epoch) that this note was created
#'
#' @return MarketplaceNote object
#'
#' @family MarketplaceNote functions
#' @export
MarketplaceNote <- function(creatorRole = NULL, dealId = NULL, note = NULL, noteId = NULL,
    proposalId = NULL, proposalRevisionNumber = NULL, timestampMs = NULL) {
    # BUGFIX: the backticked name `adexchangebuyer#marketplaceNote` referenced
    # a nonexistent variable and errored at runtime; the kind field must carry
    # the literal string value expected by the API.
    structure(list(creatorRole = creatorRole, dealId = dealId, kind = "adexchangebuyer#marketplaceNote",
        note = note, noteId = noteId, proposalId = proposalId, proposalRevisionNumber = proposalRevisionNumber,
        timestampMs = timestampMs), class = "gar_MarketplaceNote")
}
#' PerformanceReport Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The configuration data for an Ad Exchange performance report list.
#'
#' The \code{kind} field is fixed by the API to the string
#' \code{"adexchangebuyer#performanceReport"} and is set automatically.
#'
#' @param bidRate The number of bid responses with an ad
#' @param bidRequestRate The number of bid requests sent to your bidder
#' @param calloutStatusRate Rate of various prefiltering statuses per match
#' @param cookieMatcherStatusRate Average QPS for cookie matcher operations
#' @param creativeStatusRate Rate of ads with a given status
#' @param filteredBidRate The number of bid responses that were filtered due to a policy violation or other errors
#' @param hostedMatchStatusRate Average QPS for hosted match operations
#' @param inventoryMatchRate The number of potential queries based on your pretargeting settings
#' @param latency50thPercentile The 50th percentile round trip latency(ms) as perceived from Google servers for the duration period covered by the report
#' @param latency85thPercentile The 85th percentile round trip latency(ms) as perceived from Google servers for the duration period covered by the report
#' @param latency95thPercentile The 95th percentile round trip latency(ms) as perceived from Google servers for the duration period covered by the report
#' @param noQuotaInRegion Rate of various quota account statuses per quota check
#' @param outOfQuota Rate of various quota account statuses per quota check
#' @param pixelMatchRequests Average QPS for pixel match requests from clients
#' @param pixelMatchResponses Average QPS for pixel match responses from clients
#' @param quotaConfiguredLimit The configured quota limits for this account
#' @param quotaThrottledLimit The throttled quota limits for this account
#' @param region The trading location of this data
#' @param successfulRequestRate The number of properly formed bid responses received by our servers within the deadline
#' @param timestamp The unix timestamp of the starting time of this performance data
#' @param unsuccessfulRequestRate The number of bid responses that were unsuccessful due to timeouts, incorrect formatting, etc
#'
#' @return PerformanceReport object
#'
#' @family PerformanceReport functions
#' @export
PerformanceReport <- function(bidRate = NULL, bidRequestRate = NULL, calloutStatusRate = NULL,
    cookieMatcherStatusRate = NULL, creativeStatusRate = NULL, filteredBidRate = NULL,
    hostedMatchStatusRate = NULL, inventoryMatchRate = NULL, latency50thPercentile = NULL,
    latency85thPercentile = NULL, latency95thPercentile = NULL, noQuotaInRegion = NULL,
    outOfQuota = NULL, pixelMatchRequests = NULL, pixelMatchResponses = NULL, quotaConfiguredLimit = NULL,
    quotaThrottledLimit = NULL, region = NULL, successfulRequestRate = NULL, timestamp = NULL,
    unsuccessfulRequestRate = NULL) {
    # BUGFIX: the backticked name `adexchangebuyer#performanceReport` referenced
    # a nonexistent variable and errored at runtime; the kind field must carry
    # the literal string value expected by the API.
    structure(list(bidRate = bidRate, bidRequestRate = bidRequestRate, calloutStatusRate = calloutStatusRate,
        cookieMatcherStatusRate = cookieMatcherStatusRate, creativeStatusRate = creativeStatusRate,
        filteredBidRate = filteredBidRate, hostedMatchStatusRate = hostedMatchStatusRate,
        inventoryMatchRate = inventoryMatchRate, kind = "adexchangebuyer#performanceReport",
        latency50thPercentile = latency50thPercentile, latency85thPercentile = latency85thPercentile,
        latency95thPercentile = latency95thPercentile, noQuotaInRegion = noQuotaInRegion,
        outOfQuota = outOfQuota, pixelMatchRequests = pixelMatchRequests, pixelMatchResponses = pixelMatchResponses,
        quotaConfiguredLimit = quotaConfiguredLimit, quotaThrottledLimit = quotaThrottledLimit,
        region = region, successfulRequestRate = successfulRequestRate, timestamp = timestamp,
        unsuccessfulRequestRate = unsuccessfulRequestRate), class = "gar_PerformanceReport")
}
#' PerformanceReportList Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The configuration data for an Ad Exchange performance report list.
#'
#' The \code{kind} field is fixed by the API to the string
#' \code{"adexchangebuyer#performanceReportList"} and is set automatically.
#'
#' @param performanceReport A list of performance reports relevant for the account
#'
#' @return PerformanceReportList object
#'
#' @family PerformanceReportList functions
#' @export
PerformanceReportList <- function(performanceReport = NULL) {
    # BUGFIX: the backticked name `adexchangebuyer#performanceReportList`
    # referenced a nonexistent variable and errored at runtime; the kind field
    # must carry the literal string value expected by the API.
    structure(list(kind = "adexchangebuyer#performanceReportList", performanceReport = performanceReport),
        class = "gar_PerformanceReportList")
}
#' PretargetingConfig Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param PretargetingConfig.dimensions The \link{PretargetingConfig.dimensions} object or list of objects
#' @param PretargetingConfig.excludedPlacements The \link{PretargetingConfig.excludedPlacements} object or list of objects
#' @param PretargetingConfig.placements The \link{PretargetingConfig.placements} object or list of objects
#' @param PretargetingConfig.videoPlayerSizes The \link{PretargetingConfig.videoPlayerSizes} object or list of objects
#' @param billingId The id for billing purposes, provided for reference
#' @param configId The config id; generated automatically
#' @param configName The name of the config
#' @param creativeType List must contain exactly one of PRETARGETING_CREATIVE_TYPE_HTML or PRETARGETING_CREATIVE_TYPE_VIDEO
#' @param dimensions Requests which allow one of these (width, height) pairs will match
#' @param excludedContentLabels Requests with any of these content labels will not match
#' @param excludedGeoCriteriaIds Requests containing any of these geo criteria ids will not match
#' @param excludedPlacements Requests containing any of these placements will not match
#' @param excludedUserLists Requests containing any of these users list ids will not match
#' @param excludedVerticals Requests containing any of these vertical ids will not match
#' @param geoCriteriaIds Requests containing any of these geo criteria ids will match
#' @param isActive Whether this config is active
#' @param languages Request containing any of these language codes will match
#' @param minimumViewabilityDecile Requests where the predicted viewability is below the specified decile will not match
#' @param mobileCarriers Requests containing any of these mobile carrier ids will match
#' @param mobileDevices Requests containing any of these mobile device ids will match
#' @param mobileOperatingSystemVersions Requests containing any of these mobile operating system version ids will match
#' @param placements Requests containing any of these placements will match
#' @param platforms Requests matching any of these platforms will match
#' @param supportedCreativeAttributes Creative attributes should be declared here if all creatives corresponding to this pretargeting configuration have that creative attribute
#' @param userIdentifierDataRequired Requests containing the specified type of user data will match
#' @param userLists Requests containing any of these user list ids will match
#' @param vendorTypes Requests that allow any of these vendor ids will match
#' @param verticals Requests containing any of these vertical ids will match
#' @param videoPlayerSizes Video requests satisfying any of these player size constraints will match
#'
#' @return PretargetingConfig object
#'
#' @family PretargetingConfig functions
#' @export
PretargetingConfig <- function(PretargetingConfig.dimensions = NULL, PretargetingConfig.excludedPlacements = NULL,
    PretargetingConfig.placements = NULL, PretargetingConfig.videoPlayerSizes = NULL,
    billingId = NULL, configId = NULL, configName = NULL, creativeType = NULL, dimensions = NULL,
    excludedContentLabels = NULL, excludedGeoCriteriaIds = NULL, excludedPlacements = NULL,
    excludedUserLists = NULL, excludedVerticals = NULL, geoCriteriaIds = NULL, isActive = NULL,
    languages = NULL, minimumViewabilityDecile = NULL, mobileCarriers = NULL, mobileDevices = NULL,
    mobileOperatingSystemVersions = NULL, placements = NULL, platforms = NULL, supportedCreativeAttributes = NULL,
    userIdentifierDataRequired = NULL, userLists = NULL, vendorTypes = NULL, verticals = NULL,
    videoPlayerSizes = NULL) {
    # `kind` is the fixed API discriminator; it must be a string literal.
    # The generated backtick-quoted name errored with "object not found".
    structure(list(PretargetingConfig.dimensions = PretargetingConfig.dimensions,
        PretargetingConfig.excludedPlacements = PretargetingConfig.excludedPlacements,
        PretargetingConfig.placements = PretargetingConfig.placements, PretargetingConfig.videoPlayerSizes = PretargetingConfig.videoPlayerSizes,
        billingId = billingId, configId = configId, configName = configName, creativeType = creativeType,
        dimensions = dimensions, excludedContentLabels = excludedContentLabels, excludedGeoCriteriaIds = excludedGeoCriteriaIds,
        excludedPlacements = excludedPlacements, excludedUserLists = excludedUserLists,
        excludedVerticals = excludedVerticals, geoCriteriaIds = geoCriteriaIds, isActive = isActive,
        kind = "adexchangebuyer#pretargetingConfig", languages = languages, minimumViewabilityDecile = minimumViewabilityDecile,
        mobileCarriers = mobileCarriers, mobileDevices = mobileDevices, mobileOperatingSystemVersions = mobileOperatingSystemVersions,
        placements = placements, platforms = platforms, supportedCreativeAttributes = supportedCreativeAttributes,
        userIdentifierDataRequired = userIdentifierDataRequired, userLists = userLists,
        vendorTypes = vendorTypes, verticals = verticals, videoPlayerSizes = videoPlayerSizes),
        class = "gar_PretargetingConfig")
}
#' PretargetingConfig.dimensions Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#'
#'
#' @return PretargetingConfig.dimensions object
#'
#' @family PretargetingConfig functions
#' @export
PretargetingConfig.dimensions <- function() {
    # The API schema declares no fields for this type, so the constructor
    # simply yields an empty list placeholder for callers to populate.
    out <- list()
    out
}
#' PretargetingConfig.excludedPlacements Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#'
#'
#' @return PretargetingConfig.excludedPlacements object
#'
#' @family PretargetingConfig functions
#' @export
PretargetingConfig.excludedPlacements <- function() {
    # No schema fields for this type; return an empty list placeholder.
    out <- list()
    out
}
#' PretargetingConfig.placements Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#'
#'
#' @return PretargetingConfig.placements object
#'
#' @family PretargetingConfig functions
#' @export
PretargetingConfig.placements <- function() {
    # No schema fields for this type; return an empty list placeholder.
    out <- list()
    out
}
#' PretargetingConfig.videoPlayerSizes Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#'
#'
#' @return PretargetingConfig.videoPlayerSizes object
#'
#' @family PretargetingConfig functions
#' @export
PretargetingConfig.videoPlayerSizes <- function() {
    # No schema fields for this type; return an empty list placeholder.
    out <- list()
    out
}
#' PretargetingConfigList Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param items A list of pretargeting configs
#'
#' @return PretargetingConfigList object
#'
#' @family PretargetingConfigList functions
#' @export
PretargetingConfigList <- function(items = NULL) {
    # `kind` must be the literal API discriminator string; the generated
    # backtick-quoted symbol raised "object not found" at call time.
    structure(list(items = items, kind = "adexchangebuyer#pretargetingConfigList"),
        class = "gar_PretargetingConfigList")
}
#' Price Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param amountMicros The price value in micros
#' @param currencyCode The currency code for the price
#' @param expectedCpmMicros In case of CPD deals, the expected CPM in micros
#' @param pricingType The pricing type for the deal/product
#'
#' @return Price object
#'
#' @family Price functions
#' @export
Price <- function(amountMicros = NULL, currencyCode = NULL, expectedCpmMicros = NULL,
    pricingType = NULL) {
    # Assemble the field list first, then stamp it with the API object class.
    out <- list(
        amountMicros = amountMicros,
        currencyCode = currencyCode,
        expectedCpmMicros = expectedCpmMicros,
        pricingType = pricingType
    )
    class(out) <- "gar_Price"
    out
}
#' PricePerBuyer Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Used to specify pricing rules for buyers/advertisers. Each PricePerBuyer in an product can become [0,1] deals. To check if there is a PricePerBuyer for a particular buyer or buyer/advertiser pair, we look for the most specific matching rule - we first look for a rule matching the buyer and advertiser, next a rule with the buyer but an empty advertiser list, and otherwise look for a matching rule where no buyer is set.
#'
#' @param auctionTier Optional access type for this buyer
#' @param buyer The buyer who will pay this price
#' @param price The specified price
#'
#' @return PricePerBuyer object
#'
#' @family PricePerBuyer functions
#' @export
PricePerBuyer <- function(auctionTier = NULL, buyer = NULL, price = NULL) {
    # One pricing rule entry: the buyer and the price they pay, classed as an
    # API object.
    rule <- list(auctionTier = auctionTier, buyer = buyer, price = price)
    class(rule) <- "gar_PricePerBuyer"
    rule
}
#' PrivateData Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param referenceId No description
#' @param referencePayload No description
#'
#' @return PrivateData object
#'
#' @family PrivateData functions
#' @export
PrivateData <- function(referenceId = NULL, referencePayload = NULL) {
    # Opaque private-data pair for a proposal, classed as an API object.
    out <- list(referenceId = referenceId, referencePayload = referencePayload)
    class(out) <- "gar_PrivateData"
    out
}
#' Product Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A product is segment of inventory that a seller wishes to sell. It is associated with certain terms and targeting information which helps buyer know more about the inventory. Each field in a product can have one of the following setting:(readonly) - It is an error to try and set this field. (buyer-readonly) - Only the seller can set this field. (seller-readonly) - Only the buyer can set this field. (updatable) - The field is updatable at all times by either buyer or the seller.
#'
#' @param creationTimeMs Creation time in ms
#' @param creatorContacts Optional contact information for the creator of this product
#' @param deliveryControl The set of fields around delivery control that are interesting for a buyer to see but are non-negotiable
#' @param flightEndTimeMs The proposed end time for the deal (ms since epoch) (buyer-readonly)
#' @param flightStartTimeMs Inventory availability dates
#' @param hasCreatorSignedOff If the creator has already signed off on the product, then the buyer can finalize the deal by accepting the product as is
#' @param inventorySource What exchange will provide this inventory (readonly, except on create)
#' @param labels Optional List of labels for the product (optional, buyer-readonly)
#' @param lastUpdateTimeMs Time of last update in ms
#' @param legacyOfferId Optional legacy offer id if this offer is a preferred deal offer
#' @param marketplacePublisherProfileId Marketplace publisher profile Id
#' @param name The name for this product as set by the seller
#' @param privateAuctionId Optional private auction id if this offer is a private auction offer
#' @param productId The unique id for the product (readonly)
#' @param publisherProfileId Id of the publisher profile for a given seller
#' @param publisherProvidedForecast Publisher self-provided forecast information
#' @param revisionNumber The revision number of the product
#' @param seller Information about the seller that created this product (readonly, except on create)
#' @param sharedTargetings Targeting that is shared between the buyer and the seller
#' @param state The state of the product
#' @param syndicationProduct The syndication product associated with the deal
#' @param terms The negotiable terms of the deal (buyer-readonly)
#' @param webPropertyCode The web property code for the seller
#'
#' @return Product object
#'
#' @family Product functions
#' @export
Product <- function(creationTimeMs = NULL, creatorContacts = NULL, deliveryControl = NULL,
    flightEndTimeMs = NULL, flightStartTimeMs = NULL, hasCreatorSignedOff = NULL,
    inventorySource = NULL, labels = NULL, lastUpdateTimeMs = NULL, legacyOfferId = NULL,
    marketplacePublisherProfileId = NULL, name = NULL, privateAuctionId = NULL, productId = NULL,
    publisherProfileId = NULL, publisherProvidedForecast = NULL, revisionNumber = NULL,
    seller = NULL, sharedTargetings = NULL, state = NULL, syndicationProduct = NULL,
    terms = NULL, webPropertyCode = NULL) {
    # `kind` is the fixed API discriminator; it must be a string literal
    # (the generated backtick-quoted name errored with "object not found").
    structure(list(creationTimeMs = creationTimeMs, creatorContacts = creatorContacts,
        deliveryControl = deliveryControl, flightEndTimeMs = flightEndTimeMs, flightStartTimeMs = flightStartTimeMs,
        hasCreatorSignedOff = hasCreatorSignedOff, inventorySource = inventorySource,
        kind = "adexchangebuyer#product", labels = labels, lastUpdateTimeMs = lastUpdateTimeMs,
        legacyOfferId = legacyOfferId, marketplacePublisherProfileId = marketplacePublisherProfileId,
        name = name, privateAuctionId = privateAuctionId, productId = productId,
        publisherProfileId = publisherProfileId, publisherProvidedForecast = publisherProvidedForecast,
        revisionNumber = revisionNumber, seller = seller, sharedTargetings = sharedTargetings,
        state = state, syndicationProduct = syndicationProduct, terms = terms, webPropertyCode = webPropertyCode),
        class = "gar_Product")
}
#' Proposal Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Represents a proposal in the marketplace. A proposal is the unit of negotiation between a seller and a buyer and contains deals which are served. Each field in a proposal can have one of the following setting:(readonly) - It is an error to try and set this field. (buyer-readonly) - Only the seller can set this field. (seller-readonly) - Only the buyer can set this field. (updatable) - The field is updatable at all times by either buyer or the seller.
#'
#' @param billedBuyer Reference to the buyer that will get billed for this proposal
#' @param buyer Reference to the buyer on the proposal
#' @param buyerContacts Optional contact information of the buyer
#' @param buyerPrivateData Private data for buyer
#' @param dbmAdvertiserIds IDs of DBM advertisers permission to this proposal
#' @param hasBuyerSignedOff When an proposal is in an accepted state, indicates whether the buyer has signed off
#' @param hasSellerSignedOff When an proposal is in an accepted state, indicates whether the buyer has signed off Once both sides have signed off on a deal, the proposal can be finalized by the seller
#' @param inventorySource What exchange will provide this inventory (readonly, except on create)
#' @param isRenegotiating True if the proposal is being renegotiated (readonly)
#' @param isSetupComplete True, if the buyside inventory setup is complete for this proposal
#' @param labels List of labels associated with the proposal
#' @param lastUpdaterOrCommentorRole The role of the last user that either updated the proposal or left a comment
#' @param name The name for the proposal (updatable)
#' @param negotiationId Optional negotiation id if this proposal is a preferred deal proposal
#' @param originatorRole Indicates whether the buyer/seller created the proposal
#' @param privateAuctionId Optional private auction id if this proposal is a private auction proposal
#' @param proposalId The unique id of the proposal
#' @param proposalState The current state of the proposal
#' @param revisionNumber The revision number for the proposal (readonly)
#' @param revisionTimeMs The time (ms since epoch) when the proposal was last revised (readonly)
#' @param seller Reference to the seller on the proposal
#' @param sellerContacts Optional contact information of the seller (buyer-readonly)
#'
#' @return Proposal object
#'
#' @family Proposal functions
#' @export
Proposal <- function(billedBuyer = NULL, buyer = NULL, buyerContacts = NULL, buyerPrivateData = NULL,
    dbmAdvertiserIds = NULL, hasBuyerSignedOff = NULL, hasSellerSignedOff = NULL,
    inventorySource = NULL, isRenegotiating = NULL, isSetupComplete = NULL, labels = NULL,
    lastUpdaterOrCommentorRole = NULL, name = NULL, negotiationId = NULL, originatorRole = NULL,
    privateAuctionId = NULL, proposalId = NULL, proposalState = NULL, revisionNumber = NULL,
    revisionTimeMs = NULL, seller = NULL, sellerContacts = NULL) {
    # `kind` is the fixed API discriminator; it must be a string literal
    # (the generated backtick-quoted name errored with "object not found").
    structure(list(billedBuyer = billedBuyer, buyer = buyer, buyerContacts = buyerContacts,
        buyerPrivateData = buyerPrivateData, dbmAdvertiserIds = dbmAdvertiserIds,
        hasBuyerSignedOff = hasBuyerSignedOff, hasSellerSignedOff = hasSellerSignedOff,
        inventorySource = inventorySource, isRenegotiating = isRenegotiating, isSetupComplete = isSetupComplete,
        kind = "adexchangebuyer#proposal", labels = labels, lastUpdaterOrCommentorRole = lastUpdaterOrCommentorRole,
        name = name, negotiationId = negotiationId, originatorRole = originatorRole,
        privateAuctionId = privateAuctionId, proposalId = proposalId, proposalState = proposalState,
        revisionNumber = revisionNumber, revisionTimeMs = revisionTimeMs, seller = seller,
        sellerContacts = sellerContacts), class = "gar_Proposal")
}
#' PublisherProfileApiProto Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param accountId The account id of the seller
#' @param audience Publisher provided info on its audience
#' @param buyerPitchStatement A pitch statement for the buyer
#' @param directContact Direct contact for the publisher profile
#' @param exchange Exchange where this publisher profile is from
#' @param googlePlusLink Link to publisher's Google+ page
#' @param isParent True, if this is the parent profile, which represents all domains owned by the publisher
#' @param isPublished True, if this profile is published
#' @param logoUrl The url to the logo for the publisher
#' @param mediaKitLink The url for additional marketing and sales materials
#' @param name No description
#' @param overview Publisher provided overview
#' @param profileId The pair of (seller
#' @param programmaticContact Programmatic contact for the publisher profile
#' @param publisherDomains The list of domains represented in this publisher profile
#' @param publisherProfileId Unique Id for publisher profile
#' @param publisherProvidedForecast Publisher provided forecasting information
#' @param rateCardInfoLink Link to publisher rate card
#' @param samplePageLink Link for a sample content page
#' @param seller Seller of the publisher profile
#' @param state State of the publisher profile
#' @param topHeadlines Publisher provided key metrics and rankings
#'
#' @return PublisherProfileApiProto object
#'
#' @family PublisherProfileApiProto functions
#' @export
PublisherProfileApiProto <- function(accountId = NULL, audience = NULL, buyerPitchStatement = NULL,
    directContact = NULL, exchange = NULL, googlePlusLink = NULL, isParent = NULL,
    isPublished = NULL, logoUrl = NULL, mediaKitLink = NULL, name = NULL, overview = NULL,
    profileId = NULL, programmaticContact = NULL, publisherDomains = NULL, publisherProfileId = NULL,
    publisherProvidedForecast = NULL, rateCardInfoLink = NULL, samplePageLink = NULL,
    seller = NULL, state = NULL, topHeadlines = NULL) {
    # `kind` is the fixed API discriminator; it must be a string literal
    # (the generated backtick-quoted name errored with "object not found").
    structure(list(accountId = accountId, audience = audience, buyerPitchStatement = buyerPitchStatement,
        directContact = directContact, exchange = exchange, googlePlusLink = googlePlusLink,
        isParent = isParent, isPublished = isPublished, kind = "adexchangebuyer#publisherProfileApiProto",
        logoUrl = logoUrl, mediaKitLink = mediaKitLink, name = name, overview = overview,
        profileId = profileId, programmaticContact = programmaticContact, publisherDomains = publisherDomains,
        publisherProfileId = publisherProfileId, publisherProvidedForecast = publisherProvidedForecast,
        rateCardInfoLink = rateCardInfoLink, samplePageLink = samplePageLink, seller = seller,
        state = state, topHeadlines = topHeadlines), class = "gar_PublisherProfileApiProto")
}
#' PublisherProvidedForecast Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' This message carries publisher provided forecasting information.
#'
#' @param dimensions Publisher provided dimensions
#' @param weeklyImpressions Publisher provided weekly impressions
#' @param weeklyUniques Publisher provided weekly uniques
#'
#' @return PublisherProvidedForecast object
#'
#' @family PublisherProvidedForecast functions
#' @export
PublisherProvidedForecast <- function(dimensions = NULL, weeklyImpressions = NULL,
    weeklyUniques = NULL) {
    # Publisher-supplied forecast figures, bundled and classed as an API object.
    fc <- list(dimensions = dimensions, weeklyImpressions = weeklyImpressions,
        weeklyUniques = weeklyUniques)
    class(fc) <- "gar_PublisherProvidedForecast"
    fc
}
#' Seller Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param accountId The unique id for the seller
#' @param subAccountId Optional sub-account id for the seller
#'
#' @return Seller object
#'
#' @family Seller functions
#' @export
Seller <- function(accountId = NULL, subAccountId = NULL) {
    # Seller identity: account id plus optional sub-account, classed as an
    # API object.
    s <- list(accountId = accountId, subAccountId = subAccountId)
    class(s) <- "gar_Seller"
    s
}
#' SharedTargeting Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param exclusions The list of values to exclude from targeting
#' @param inclusions The list of value to include as part of the targeting
#' @param key The key representing the shared targeting criterion
#'
#' @return SharedTargeting object
#'
#' @family SharedTargeting functions
#' @export
SharedTargeting <- function(exclusions = NULL, inclusions = NULL, key = NULL) {
    # Collect the targeting criterion fields, then tag with the API class.
    out <- list(exclusions = exclusions, inclusions = inclusions, key = key)
    class(out) <- "gar_SharedTargeting"
    out
}
#' TargetingValue Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param creativeSizeValue The creative size value to exclude/include
#' @param dayPartTargetingValue The daypart targeting to include / exclude
#' @param longValue The long value to exclude/include
#' @param stringValue The string value to exclude/include
#'
#' @return TargetingValue object
#'
#' @family TargetingValue functions
#' @export
TargetingValue <- function(creativeSizeValue = NULL, dayPartTargetingValue = NULL,
    longValue = NULL, stringValue = NULL) {
    # Gather the (mutually exclusive in practice) value fields into a named
    # list and class it as an API object.
    fields <- list(
        creativeSizeValue = creativeSizeValue,
        dayPartTargetingValue = dayPartTargetingValue,
        longValue = longValue,
        stringValue = stringValue
    )
    class(fields) <- "gar_TargetingValue"
    fields
}
#' TargetingValueCreativeSize Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param companionSizes For video size type, the list of companion sizes
#' @param creativeSizeType The Creative size type
#' @param size For regular or video creative size type, specifies the size of the creative
#' @param skippableAdType The skippable ad type for video size
#'
#' @return TargetingValueCreativeSize object
#'
#' @family TargetingValueCreativeSize functions
#' @export
TargetingValueCreativeSize <- function(companionSizes = NULL, creativeSizeType = NULL,
    size = NULL, skippableAdType = NULL) {
    # Creative-size targeting value, assembled then classed as an API object.
    cs <- list(
        companionSizes = companionSizes,
        creativeSizeType = creativeSizeType,
        size = size,
        skippableAdType = skippableAdType
    )
    class(cs) <- "gar_TargetingValueCreativeSize"
    cs
}
#' TargetingValueDayPartTargeting Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param dayParts No description
#' @param timeZoneType No description
#'
#' @return TargetingValueDayPartTargeting object
#'
#' @family TargetingValueDayPartTargeting functions
#' @export
TargetingValueDayPartTargeting <- function(dayParts = NULL, timeZoneType = NULL) {
    # Daypart targeting container, classed as an API object.
    obj <- list(dayParts = dayParts, timeZoneType = timeZoneType)
    class(obj) <- "gar_TargetingValueDayPartTargeting"
    obj
}
#' TargetingValueDayPartTargetingDayPart Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param dayOfWeek No description
#' @param endHour No description
#' @param endMinute No description
#' @param startHour No description
#' @param startMinute No description
#'
#' @return TargetingValueDayPartTargetingDayPart object
#'
#' @family TargetingValueDayPartTargetingDayPart functions
#' @export
TargetingValueDayPartTargetingDayPart <- function(dayOfWeek = NULL, endHour = NULL,
    endMinute = NULL, startHour = NULL, startMinute = NULL) {
    # Single daypart window (day of week plus start/end time), classed as an
    # API object.
    part <- list(
        dayOfWeek = dayOfWeek,
        endHour = endHour,
        endMinute = endMinute,
        startHour = startHour,
        startMinute = startMinute
    )
    class(part) <- "gar_TargetingValueDayPartTargetingDayPart"
    part
}
#' TargetingValueSize Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param height The height of the creative
#' @param width The width of the creative
#'
#' @return TargetingValueSize object
#'
#' @family TargetingValueSize functions
#' @export
TargetingValueSize <- function(height = NULL, width = NULL) {
    # Simple height/width pair, tagged with the API object class.
    dims <- list(height = height, width = width)
    class(dims) <- "gar_TargetingValueSize"
    dims
}
#' UpdatePrivateAuctionProposalRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param externalDealId The externalDealId of the deal to be updated
#' @param note Optional note to be added
#' @param proposalRevisionNumber The current revision number of the proposal to be updated
#' @param updateAction The proposed action on the private auction proposal
#'
#' @return UpdatePrivateAuctionProposalRequest object
#'
#' @family UpdatePrivateAuctionProposalRequest functions
#' @export
UpdatePrivateAuctionProposalRequest <- function(externalDealId = NULL, note = NULL,
    proposalRevisionNumber = NULL, updateAction = NULL) {
    # Request payload for updating a private-auction proposal, classed as an
    # API object.
    req <- list(
        externalDealId = externalDealId,
        note = note,
        proposalRevisionNumber = proposalRevisionNumber,
        updateAction = updateAction
    )
    class(req) <- "gar_UpdatePrivateAuctionProposalRequest"
    req
}
#' Ad Exchange Buyer API Objects
#' Accesses your bidding-account information, submits creatives for validation, finds available direct deals, and retrieves performance reports.
#'
#' Auto-generated code by googleAuthR::gar_create_api_objects
#' at 2017-03-05 19:20:29
#' filename: /Users/mark/dev/R/autoGoogleAPI/googleadexchangebuyerv14.auto/R/adexchangebuyer_objects.R
#' api_json: api_json
#'
#' Objects for use by the functions created by googleAuthR::gar_create_api_skeleton
#' Account Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Configuration data for an Ad Exchange buyer account.
#'
#' @param Account.bidderLocation The \link{Account.bidderLocation} object or list of objects
#' @param bidderLocation Your bidder locations that have distinct URLs
#' @param cookieMatchingNid The nid parameter value used in cookie match requests
#' @param cookieMatchingUrl The base URL used in cookie match requests
#' @param id Account id
#' @param maximumActiveCreatives The maximum number of active creatives that an account can have, where a creative is active if it was inserted or bid with in the last 30 days
#' @param maximumTotalQps The sum of all bidderLocation
#' @param numberActiveCreatives The number of creatives that this account inserted or bid with in the last 30 days
#'
#' @return Account object
#'
#' @family Account functions
#' @export
Account <- function(Account.bidderLocation = NULL, bidderLocation = NULL, cookieMatchingNid = NULL,
    cookieMatchingUrl = NULL, id = NULL, maximumActiveCreatives = NULL, maximumTotalQps = NULL,
    numberActiveCreatives = NULL) {
    # `kind` is the fixed API discriminator; it must be a string literal
    # (the generated backtick-quoted name errored with "object not found").
    structure(list(Account.bidderLocation = Account.bidderLocation, bidderLocation = bidderLocation,
        cookieMatchingNid = cookieMatchingNid, cookieMatchingUrl = cookieMatchingUrl,
        id = id, kind = "adexchangebuyer#account", maximumActiveCreatives = maximumActiveCreatives,
        maximumTotalQps = maximumTotalQps, numberActiveCreatives = numberActiveCreatives),
        class = "gar_Account")
}
#' Account.bidderLocation Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#'
#'
#' @return Account.bidderLocation object
#'
#' @family Account functions
#' @export
Account.bidderLocation <- function() {
    # No schema fields for this type; callers receive an empty list
    # placeholder they can populate directly.
    out <- list()
    out
}
#' AccountsList Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' An account feed lists Ad Exchange buyer accounts that the user has access to. Each entry in the feed corresponds to a single buyer account.
#'
#' @param items A list of accounts
#'
#' @return AccountsList object
#'
#' @family AccountsList functions
#' @export
AccountsList <- function(items = NULL) {
    # `kind` must be the literal API discriminator string; the generated
    # backtick-quoted symbol raised "object not found" at call time.
    structure(list(items = items, kind = "adexchangebuyer#accountsList"), class = "gar_AccountsList")
}
#' AddOrderDealsRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param deals The list of deals to add
#' @param proposalRevisionNumber The last known proposal revision number
#' @param updateAction Indicates an optional action to take on the proposal
#'
#' @return AddOrderDealsRequest object
#'
#' @family AddOrderDealsRequest functions
#' @export
AddOrderDealsRequest <- function(deals = NULL, proposalRevisionNumber = NULL, updateAction = NULL) {
    # Assemble the request payload fields, then tag with the API class.
    req <- list(deals = deals, proposalRevisionNumber = proposalRevisionNumber,
        updateAction = updateAction)
    class(req) <- "gar_AddOrderDealsRequest"
    req
}
#' AddOrderDealsResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param deals List of deals added (in the same proposal as passed in the request)
#' @param proposalRevisionNumber The updated revision number for the proposal
#'
#' @return AddOrderDealsResponse object
#'
#' @family AddOrderDealsResponse functions
#' @export
AddOrderDealsResponse <- function(deals = NULL, proposalRevisionNumber = NULL) {
    # Response payload: deals that were added plus the new revision number.
    resp <- list(deals = deals, proposalRevisionNumber = proposalRevisionNumber)
    class(resp) <- "gar_AddOrderDealsResponse"
    resp
}
#' AddOrderNotesRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Request payload for adding notes to a proposal.
#'
#' @param notes The list of notes to add
#'
#' @return AddOrderNotesRequest object
#'
#' @family AddOrderNotesRequest functions
#' @export
AddOrderNotesRequest <- function(notes = NULL) {
    out <- list(notes = notes)
    class(out) <- "gar_AddOrderNotesRequest"
    out
}
#' AddOrderNotesResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Response payload returned after notes have been added to a proposal.
#'
#' @param notes No description
#'
#' @return AddOrderNotesResponse object
#'
#' @family AddOrderNotesResponse functions
#' @export
AddOrderNotesResponse <- function(notes = NULL) {
    out <- list(notes = notes)
    class(out) <- "gar_AddOrderNotesResponse"
    out
}
#' BillingInfo Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The configuration data for an Ad Exchange billing info.
#'
#' @param accountId Account id
#' @param accountName Account name
#' @param billingId A list of adgroup IDs associated with this particular account
#'
#' @return BillingInfo object
#'
#' @family BillingInfo functions
#' @export
BillingInfo <- function(accountId = NULL, accountName = NULL, billingId = NULL) {
    # 'kind' must be a quoted string literal; the generated code used
    # backticks, which R treats as a variable reference and errors on.
    structure(list(accountId = accountId, accountName = accountName, billingId = billingId,
        kind = "adexchangebuyer#billingInfo"), class = "gar_BillingInfo")
}
#' BillingInfoList Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A billing info feed lists Billing Info the Ad Exchange buyer account has access to. Each entry in the feed corresponds to a single billing info.
#'
#' @param items A list of billing info relevant for your account
#'
#' @return BillingInfoList object
#'
#' @family BillingInfoList functions
#' @export
BillingInfoList <- function(items = NULL) {
    # 'kind' must be a quoted string literal; the generated code used
    # backticks, which R treats as a variable reference and errors on.
    structure(list(items = items, kind = "adexchangebuyer#billingInfoList"), class = "gar_BillingInfoList")
}
#' Budget Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The configuration data for Ad Exchange RTB - Budget API.
#'
#' @param accountId The id of the account
#' @param billingId The billing id to determine which adgroup to provide budget information for
#' @param budgetAmount The daily budget amount in unit amount of the account currency to apply for the billingId provided
#' @param currencyCode The currency code for the buyer
#' @param id The unique id that describes this item
#'
#' @return Budget object
#'
#' @family Budget functions
#' @export
Budget <- function(accountId = NULL, billingId = NULL, budgetAmount = NULL, currencyCode = NULL,
    id = NULL) {
    # 'kind' must be a quoted string literal; the generated code used
    # backticks, which R treats as a variable reference and errors on.
    structure(list(accountId = accountId, billingId = billingId, budgetAmount = budgetAmount,
        currencyCode = currencyCode, id = id, kind = "adexchangebuyer#budget"), class = "gar_Budget")
}
#' Buyer Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Identifies a buyer by their Adx account id.
#'
#' @param accountId Adx account id of the buyer
#'
#' @return Buyer object
#'
#' @family Buyer functions
#' @export
Buyer <- function(accountId = NULL) {
    out <- list(accountId = accountId)
    class(out) <- "gar_Buyer"
    out
}
#' ContactInformation Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Holds the name and email address for a single contact.
#'
#' @param email Email address of the contact
#' @param name The name of the contact
#'
#' @return ContactInformation object
#'
#' @family ContactInformation functions
#' @export
ContactInformation <- function(email = NULL, name = NULL) {
    out <- list(email = email, name = name)
    class(out) <- "gar_ContactInformation"
    out
}
#' CreateOrdersRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Request payload for creating one or more proposals.
#'
#' @param proposals The list of proposals to create
#' @param webPropertyCode Web property id of the seller creating these orders
#'
#' @return CreateOrdersRequest object
#'
#' @family CreateOrdersRequest functions
#' @export
CreateOrdersRequest <- function(proposals = NULL, webPropertyCode = NULL) {
    out <- list(proposals = proposals, webPropertyCode = webPropertyCode)
    class(out) <- "gar_CreateOrdersRequest"
    out
}
#' CreateOrdersResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Response payload listing the proposals that were created.
#'
#' @param proposals The list of proposals successfully created
#'
#' @return CreateOrdersResponse object
#'
#' @family CreateOrdersResponse functions
#' @export
CreateOrdersResponse <- function(proposals = NULL) {
    out <- list(proposals = proposals)
    class(out) <- "gar_CreateOrdersResponse"
    out
}
#' Creative Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A creative and its classification data.
#'
#' @param Creative.corrections The \link{Creative.corrections} object or list of objects
#' @param Creative.corrections.contexts The \link{Creative.corrections.contexts} object or list of objects
#' @param Creative.filteringReasons The \link{Creative.filteringReasons} object or list of objects
#' @param Creative.filteringReasons.reasons The \link{Creative.filteringReasons.reasons} object or list of objects
#' @param Creative.nativeAd The \link{Creative.nativeAd} object or list of objects
#' @param Creative.nativeAd.appIcon The \link{Creative.nativeAd.appIcon} object or list of objects
#' @param Creative.nativeAd.image The \link{Creative.nativeAd.image} object or list of objects
#' @param Creative.nativeAd.logo The \link{Creative.nativeAd.logo} object or list of objects
#' @param Creative.servingRestrictions The \link{Creative.servingRestrictions} object or list of objects
#' @param Creative.servingRestrictions.contexts The \link{Creative.servingRestrictions.contexts} object or list of objects
#' @param Creative.servingRestrictions.disapprovalReasons The \link{Creative.servingRestrictions.disapprovalReasons} object or list of objects
#' @param HTMLSnippet The HTML snippet that displays the ad when inserted in the web page
#' @param accountId Account id
#' @param adChoicesDestinationUrl The link to the Ad Preferences page
#' @param advertiserId Detected advertiser id, if any
#' @param advertiserName The name of the company being advertised in the creative
#' @param agencyId The agency id for this creative
#' @param apiUploadTimestamp The last upload timestamp of this creative if it was uploaded via API
#' @param attribute List of buyer selectable attributes for the ads that may be shown from this snippet
#' @param buyerCreativeId A buyer-specific id identifying the creative in this ad
#' @param clickThroughUrl The set of destination urls for the snippet
#' @param corrections Shows any corrections that were applied to this creative
#' @param dealsStatus Top-level deals status
#' @param detectedDomains Detected domains for this creative
#' @param filteringReasons The filtering reasons for the creative
#' @param height Ad height
#' @param impressionTrackingUrl The set of urls to be called to record an impression
#' @param languages Detected languages for this creative
#' @param nativeAd If nativeAd is set, HTMLSnippet and the videoURL outside of nativeAd should not be set
#' @param openAuctionStatus Top-level open auction status
#' @param productCategories Detected product categories, if any
#' @param restrictedCategories All restricted categories for the ads that may be shown from this snippet
#' @param sensitiveCategories Detected sensitive categories, if any
#' @param servingRestrictions The granular status of this ad in specific contexts
#' @param vendorType List of vendor types for the ads that may be shown from this snippet
#' @param version The version for this creative
#' @param videoURL The URL to fetch a video ad
#' @param width Ad width
#'
#' @return Creative object
#'
#' @family Creative functions
#' @export
Creative <- function(Creative.corrections = NULL, Creative.corrections.contexts = NULL,
    Creative.filteringReasons = NULL, Creative.filteringReasons.reasons = NULL, Creative.nativeAd = NULL,
    Creative.nativeAd.appIcon = NULL, Creative.nativeAd.image = NULL, Creative.nativeAd.logo = NULL,
    Creative.servingRestrictions = NULL, Creative.servingRestrictions.contexts = NULL,
    Creative.servingRestrictions.disapprovalReasons = NULL, HTMLSnippet = NULL, accountId = NULL,
    adChoicesDestinationUrl = NULL, advertiserId = NULL, advertiserName = NULL, agencyId = NULL,
    apiUploadTimestamp = NULL, attribute = NULL, buyerCreativeId = NULL, clickThroughUrl = NULL,
    corrections = NULL, dealsStatus = NULL, detectedDomains = NULL, filteringReasons = NULL,
    height = NULL, impressionTrackingUrl = NULL, languages = NULL, nativeAd = NULL,
    openAuctionStatus = NULL, productCategories = NULL, restrictedCategories = NULL,
    sensitiveCategories = NULL, servingRestrictions = NULL, vendorType = NULL, version = NULL,
    videoURL = NULL, width = NULL) {
    # 'kind' must be a quoted string literal; the generated code used
    # backticks, which R treats as a variable reference and errors on.
    structure(list(Creative.corrections = Creative.corrections, Creative.corrections.contexts = Creative.corrections.contexts,
        Creative.filteringReasons = Creative.filteringReasons, Creative.filteringReasons.reasons = Creative.filteringReasons.reasons,
        Creative.nativeAd = Creative.nativeAd, Creative.nativeAd.appIcon = Creative.nativeAd.appIcon,
        Creative.nativeAd.image = Creative.nativeAd.image, Creative.nativeAd.logo = Creative.nativeAd.logo,
        Creative.servingRestrictions = Creative.servingRestrictions, Creative.servingRestrictions.contexts = Creative.servingRestrictions.contexts,
        Creative.servingRestrictions.disapprovalReasons = Creative.servingRestrictions.disapprovalReasons,
        HTMLSnippet = HTMLSnippet, accountId = accountId, adChoicesDestinationUrl = adChoicesDestinationUrl,
        advertiserId = advertiserId, advertiserName = advertiserName, agencyId = agencyId,
        apiUploadTimestamp = apiUploadTimestamp, attribute = attribute, buyerCreativeId = buyerCreativeId,
        clickThroughUrl = clickThroughUrl, corrections = corrections, dealsStatus = dealsStatus,
        detectedDomains = detectedDomains, filteringReasons = filteringReasons, height = height,
        impressionTrackingUrl = impressionTrackingUrl, kind = "adexchangebuyer#creative",
        languages = languages, nativeAd = nativeAd, openAuctionStatus = openAuctionStatus,
        productCategories = productCategories, restrictedCategories = restrictedCategories,
        sensitiveCategories = sensitiveCategories, servingRestrictions = servingRestrictions,
        vendorType = vendorType, version = version, videoURL = videoURL, width = width),
        class = "gar_Creative")
}
#' Creative.corrections Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Wraps the correction contexts that were applied to a creative.
#'
#' @param Creative.corrections.contexts The \link{Creative.corrections.contexts} object or list of objects
#'
#' @return Creative.corrections object
#'
#' @family Creative functions
#' @export
Creative.corrections <- function(Creative.corrections.contexts = NULL) {
    out <- list(Creative.corrections.contexts = Creative.corrections.contexts)
    class(out) <- "gar_Creative.corrections"
    out
}
#' Creative.corrections.contexts Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The API declares no writable properties for this resource, so the
#' constructor simply yields an empty list.
#'
#'
#'
#' @return Creative.corrections.contexts object (an empty \code{list})
#'
#' @family Creative functions
#' @export
Creative.corrections.contexts <- function() {
    # No fields to populate; an empty list is the whole payload.
    vector("list", 0L)
}
#' Creative.filteringReasons Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The filtering reasons for the creative. Read-only. This field should not be set in requests.
#'
#' @param Creative.filteringReasons.reasons The \link{Creative.filteringReasons.reasons} object or list of objects
#' @param date The date in ISO 8601 format for the data
#' @param reasons The filtering reasons
#'
#' @return Creative.filteringReasons object
#'
#' @family Creative functions
#' @export
Creative.filteringReasons <- function(Creative.filteringReasons.reasons = NULL, date = NULL,
    reasons = NULL) {
    out <- list(
        Creative.filteringReasons.reasons = Creative.filteringReasons.reasons,
        date = date,
        reasons = reasons
    )
    class(out) <- "gar_Creative.filteringReasons"
    out
}
#' Creative.filteringReasons.reasons Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The API declares no writable properties for this resource, so the
#' constructor simply yields an empty list.
#'
#'
#'
#' @return Creative.filteringReasons.reasons object (an empty \code{list})
#'
#' @family Creative functions
#' @export
Creative.filteringReasons.reasons <- function() {
    # No fields to populate; an empty list is the whole payload.
    vector("list", 0L)
}
#' Creative.nativeAd Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' If nativeAd is set, HTMLSnippet and the videoURL outside of nativeAd should not be set. (The videoURL inside nativeAd can be set.)
#'
#' @param Creative.nativeAd.appIcon The \link{Creative.nativeAd.appIcon} object or list of objects
#' @param Creative.nativeAd.image The \link{Creative.nativeAd.image} object or list of objects
#' @param Creative.nativeAd.logo The \link{Creative.nativeAd.logo} object or list of objects
#' @param advertiser No description
#' @param appIcon The app icon, for app download ads
#' @param body A long description of the ad
#' @param callToAction A label for the button that the user is supposed to click
#' @param clickLinkUrl The URL that the browser/SDK will load when the user clicks the ad
#' @param clickTrackingUrl The URL to use for click tracking
#' @param headline A short title for the ad
#' @param image A large image
#' @param impressionTrackingUrl The URLs are called when the impression is rendered
#' @param logo A smaller image, for the advertiser logo
#' @param price The price of the promoted app including the currency info
#' @param starRating The app rating in the app store
#' @param store The URL to the app store to purchase/download the promoted app
#' @param videoURL The URL of the XML VAST for a native ad
#'
#' @return Creative.nativeAd object
#'
#' @family Creative functions
#' @export
Creative.nativeAd <- function(Creative.nativeAd.appIcon = NULL, Creative.nativeAd.image = NULL,
    Creative.nativeAd.logo = NULL, advertiser = NULL, appIcon = NULL, body = NULL,
    callToAction = NULL, clickLinkUrl = NULL, clickTrackingUrl = NULL, headline = NULL,
    image = NULL, impressionTrackingUrl = NULL, logo = NULL, price = NULL, starRating = NULL,
    store = NULL, videoURL = NULL) {
    out <- list(
        Creative.nativeAd.appIcon = Creative.nativeAd.appIcon,
        Creative.nativeAd.image = Creative.nativeAd.image,
        Creative.nativeAd.logo = Creative.nativeAd.logo,
        advertiser = advertiser,
        appIcon = appIcon,
        body = body,
        callToAction = callToAction,
        clickLinkUrl = clickLinkUrl,
        clickTrackingUrl = clickTrackingUrl,
        headline = headline,
        image = image,
        impressionTrackingUrl = impressionTrackingUrl,
        logo = logo,
        price = price,
        starRating = starRating,
        store = store,
        videoURL = videoURL
    )
    class(out) <- "gar_Creative.nativeAd"
    out
}
#' Creative.nativeAd.appIcon Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The app icon, for app download ads.
#'
#' @param height No description
#' @param url No description
#' @param width No description
#'
#' @return Creative.nativeAd.appIcon object
#'
#' @family Creative functions
#' @export
Creative.nativeAd.appIcon <- function(height = NULL, url = NULL, width = NULL) {
    out <- list(height = height, url = url, width = width)
    class(out) <- "gar_Creative.nativeAd.appIcon"
    out
}
#' Creative.nativeAd.image Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A large image.
#'
#' @param height No description
#' @param url No description
#' @param width No description
#'
#' @return Creative.nativeAd.image object
#'
#' @family Creative functions
#' @export
Creative.nativeAd.image <- function(height = NULL, url = NULL, width = NULL) {
    out <- list(height = height, url = url, width = width)
    class(out) <- "gar_Creative.nativeAd.image"
    out
}
#' Creative.nativeAd.logo Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A smaller image, for the advertiser logo.
#'
#' @param height No description
#' @param url No description
#' @param width No description
#'
#' @return Creative.nativeAd.logo object
#'
#' @family Creative functions
#' @export
Creative.nativeAd.logo <- function(height = NULL, url = NULL, width = NULL) {
    out <- list(height = height, url = url, width = width)
    class(out) <- "gar_Creative.nativeAd.logo"
    out
}
#' Creative.servingRestrictions Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Wraps the contexts and disapproval reasons describing where and why
#' serving of a creative is restricted.
#'
#' @param Creative.servingRestrictions.contexts The \link{Creative.servingRestrictions.contexts} object or list of objects
#' @param Creative.servingRestrictions.disapprovalReasons The \link{Creative.servingRestrictions.disapprovalReasons} object or list of objects
#'
#' @return Creative.servingRestrictions object
#'
#' @family Creative functions
#' @export
Creative.servingRestrictions <- function(Creative.servingRestrictions.contexts = NULL,
    Creative.servingRestrictions.disapprovalReasons = NULL) {
    out <- list(
        Creative.servingRestrictions.contexts = Creative.servingRestrictions.contexts,
        Creative.servingRestrictions.disapprovalReasons = Creative.servingRestrictions.disapprovalReasons
    )
    class(out) <- "gar_Creative.servingRestrictions"
    out
}
#' Creative.servingRestrictions.contexts Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The API declares no writable properties for this resource, so the
#' constructor simply yields an empty list.
#'
#'
#'
#' @return Creative.servingRestrictions.contexts object (an empty \code{list})
#'
#' @family Creative functions
#' @export
Creative.servingRestrictions.contexts <- function() {
    # No fields to populate; an empty list is the whole payload.
    vector("list", 0L)
}
#' Creative.servingRestrictions.disapprovalReasons Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The API declares no writable properties for this resource, so the
#' constructor simply yields an empty list.
#'
#'
#'
#' @return Creative.servingRestrictions.disapprovalReasons object (an empty \code{list})
#'
#' @family Creative functions
#' @export
Creative.servingRestrictions.disapprovalReasons <- function() {
    # No fields to populate; an empty list is the whole payload.
    vector("list", 0L)
}
#' CreativeDealIds Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The external deal ids associated with a creative.
#'
#' @param CreativeDealIds.dealStatuses The \link{CreativeDealIds.dealStatuses} object or list of objects
#' @param dealStatuses A list of external deal ids and ARC approval status
#'
#' @return CreativeDealIds object
#'
#' @family CreativeDealIds functions
#' @export
CreativeDealIds <- function(CreativeDealIds.dealStatuses = NULL, dealStatuses = NULL) {
    # 'kind' must be a quoted string literal; the generated code used
    # backticks, which R treats as a variable reference and errors on.
    structure(list(CreativeDealIds.dealStatuses = CreativeDealIds.dealStatuses, dealStatuses = dealStatuses,
        kind = "adexchangebuyer#creativeDealIds"), class = "gar_CreativeDealIds")
}
#' CreativeDealIds.dealStatuses Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The API declares no writable properties for this resource, so the
#' constructor simply yields an empty list.
#'
#'
#'
#' @return CreativeDealIds.dealStatuses object (an empty \code{list})
#'
#' @family CreativeDealIds functions
#' @export
CreativeDealIds.dealStatuses <- function() {
    # No fields to populate; an empty list is the whole payload.
    vector("list", 0L)
}
#' CreativesList Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The creatives feed lists the active creatives for the Ad Exchange buyer accounts that the user has access to. Each entry in the feed corresponds to a single creative.
#'
#' @param items A list of creatives
#' @param nextPageToken Continuation token used to page through creatives
#'
#' @return CreativesList object
#'
#' @family CreativesList functions
#' @export
CreativesList <- function(items = NULL, nextPageToken = NULL) {
    # 'kind' must be a quoted string literal; the generated code used
    # backticks, which R treats as a variable reference and errors on.
    structure(list(items = items, kind = "adexchangebuyer#creativesList", nextPageToken = nextPageToken),
        class = "gar_CreativesList")
}
#' DealServingMetadata Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Serving metadata for a deal.
#'
#' @param alcoholAdsAllowed True if alcohol ads are allowed for this deal (read-only)
#' @param dealPauseStatus Tracks which parties (if any) have paused a deal
#'
#' @return DealServingMetadata object
#'
#' @family DealServingMetadata functions
#' @export
DealServingMetadata <- function(alcoholAdsAllowed = NULL, dealPauseStatus = NULL) {
    out <- list(alcoholAdsAllowed = alcoholAdsAllowed, dealPauseStatus = dealPauseStatus)
    class(out) <- "gar_DealServingMetadata"
    out
}
#' DealServingMetadataDealPauseStatus Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Tracks which parties (if any) have paused a deal. The deal is considered paused if has_buyer_paused || has_seller_paused. Each of the has_buyer_paused or the has_seller_paused bits can be set independently.
#'
#' @param buyerPauseReason No description
#' @param firstPausedBy If the deal is paused, records which party paused the deal first
#' @param hasBuyerPaused No description
#' @param hasSellerPaused No description
#' @param sellerPauseReason No description
#'
#' @return DealServingMetadataDealPauseStatus object
#'
#' @family DealServingMetadataDealPauseStatus functions
#' @export
DealServingMetadataDealPauseStatus <- function(buyerPauseReason = NULL, firstPausedBy = NULL,
    hasBuyerPaused = NULL, hasSellerPaused = NULL, sellerPauseReason = NULL) {
    out <- list(
        buyerPauseReason = buyerPauseReason,
        firstPausedBy = firstPausedBy,
        hasBuyerPaused = hasBuyerPaused,
        hasSellerPaused = hasSellerPaused,
        sellerPauseReason = sellerPauseReason
    )
    class(out) <- "gar_DealServingMetadataDealPauseStatus"
    out
}
#' DealTerms Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The negotiated terms for a deal.
#'
#' @param brandingType Visibilty of the URL in bid requests
#' @param crossListedExternalDealIdType Indicates that this ExternalDealId exists under at least two different AdxInventoryDeals
#' @param description Description for the proposed terms of the deal
#' @param estimatedGrossSpend Non-binding estimate of the estimated gross spend for this deal Can be set by buyer or seller
#' @param estimatedImpressionsPerDay Non-binding estimate of the impressions served per day Can be set by buyer or seller
#' @param guaranteedFixedPriceTerms The terms for guaranteed fixed price deals
#' @param nonGuaranteedAuctionTerms The terms for non-guaranteed auction deals
#' @param nonGuaranteedFixedPriceTerms The terms for non-guaranteed fixed price deals
#' @param rubiconNonGuaranteedTerms The terms for rubicon non-guaranteed deals
#' @param sellerTimeZone For deals with Cost Per Day billing, defines the timezone used to mark the boundaries of a day (buyer-readonly)
#'
#' @return DealTerms object
#'
#' @family DealTerms functions
#' @export
DealTerms <- function(brandingType = NULL, crossListedExternalDealIdType = NULL,
    description = NULL, estimatedGrossSpend = NULL, estimatedImpressionsPerDay = NULL,
    guaranteedFixedPriceTerms = NULL, nonGuaranteedAuctionTerms = NULL, nonGuaranteedFixedPriceTerms = NULL,
    rubiconNonGuaranteedTerms = NULL, sellerTimeZone = NULL) {
    out <- list(
        brandingType = brandingType,
        crossListedExternalDealIdType = crossListedExternalDealIdType,
        description = description,
        estimatedGrossSpend = estimatedGrossSpend,
        estimatedImpressionsPerDay = estimatedImpressionsPerDay,
        guaranteedFixedPriceTerms = guaranteedFixedPriceTerms,
        nonGuaranteedAuctionTerms = nonGuaranteedAuctionTerms,
        nonGuaranteedFixedPriceTerms = nonGuaranteedFixedPriceTerms,
        rubiconNonGuaranteedTerms = rubiconNonGuaranteedTerms,
        sellerTimeZone = sellerTimeZone
    )
    class(out) <- "gar_DealTerms"
    out
}
#' DealTermsGuaranteedFixedPriceTerms Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Terms for guaranteed fixed-price deals.
#'
#' @param billingInfo External billing info for this Deal
#' @param fixedPrices Fixed price for the specified buyer
#' @param guaranteedImpressions Guaranteed impressions as a percentage
#' @param guaranteedLooks Count of guaranteed looks
#' @param minimumDailyLooks Count of minimum daily looks for a CPD deal
#'
#' @return DealTermsGuaranteedFixedPriceTerms object
#'
#' @family DealTermsGuaranteedFixedPriceTerms functions
#' @export
DealTermsGuaranteedFixedPriceTerms <- function(billingInfo = NULL, fixedPrices = NULL,
    guaranteedImpressions = NULL, guaranteedLooks = NULL, minimumDailyLooks = NULL) {
    out <- list(
        billingInfo = billingInfo,
        fixedPrices = fixedPrices,
        guaranteedImpressions = guaranteedImpressions,
        guaranteedLooks = guaranteedLooks,
        minimumDailyLooks = minimumDailyLooks
    )
    class(out) <- "gar_DealTermsGuaranteedFixedPriceTerms"
    out
}
#' DealTermsGuaranteedFixedPriceTermsBillingInfo Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' External billing details attached to a guaranteed fixed-price deal.
#'
#' @param currencyConversionTimeMs The timestamp (in ms since epoch) when the original reservation price for the deal was first converted to DFP currency
#' @param dfpLineItemId The DFP line item id associated with this deal
#' @param originalContractedQuantity The original contracted quantity (# impressions) for this deal
#' @param price The original reservation price for the deal, if the currency code is different from the one used in negotiation
#'
#' @return DealTermsGuaranteedFixedPriceTermsBillingInfo object
#'
#' @family DealTermsGuaranteedFixedPriceTermsBillingInfo functions
#' @export
DealTermsGuaranteedFixedPriceTermsBillingInfo <- function(currencyConversionTimeMs = NULL,
    dfpLineItemId = NULL, originalContractedQuantity = NULL, price = NULL) {
    out <- list(
        currencyConversionTimeMs = currencyConversionTimeMs,
        dfpLineItemId = dfpLineItemId,
        originalContractedQuantity = originalContractedQuantity,
        price = price
    )
    class(out) <- "gar_DealTermsGuaranteedFixedPriceTermsBillingInfo"
    out
}
#' DealTermsNonGuaranteedAuctionTerms Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Terms for non-guaranteed auction deals.
#'
#' @param autoOptimizePrivateAuction True if open auction buyers are allowed to compete with invited buyers in this private auction (buyer-readonly)
#' @param reservePricePerBuyers Reserve price for the specified buyer
#'
#' @return DealTermsNonGuaranteedAuctionTerms object
#'
#' @family DealTermsNonGuaranteedAuctionTerms functions
#' @export
DealTermsNonGuaranteedAuctionTerms <- function(autoOptimizePrivateAuction = NULL,
    reservePricePerBuyers = NULL) {
    out <- list(
        autoOptimizePrivateAuction = autoOptimizePrivateAuction,
        reservePricePerBuyers = reservePricePerBuyers
    )
    class(out) <- "gar_DealTermsNonGuaranteedAuctionTerms"
    out
}
#' DealTermsNonGuaranteedFixedPriceTerms Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Terms for non-guaranteed fixed-price deals.
#'
#' @param fixedPrices Fixed price for the specified buyer
#'
#' @return DealTermsNonGuaranteedFixedPriceTerms object
#'
#' @family DealTermsNonGuaranteedFixedPriceTerms functions
#' @export
DealTermsNonGuaranteedFixedPriceTerms <- function(fixedPrices = NULL) {
    out <- list(fixedPrices = fixedPrices)
    class(out) <- "gar_DealTermsNonGuaranteedFixedPriceTerms"
    out
}
#' DealTermsRubiconNonGuaranteedTerms Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Terms for Rubicon non-guaranteed deals.
#'
#' @param priorityPrice Optional price for Rubicon priority access in the auction
#' @param standardPrice Optional price for Rubicon standard access in the auction
#'
#' @return DealTermsRubiconNonGuaranteedTerms object
#'
#' @family DealTermsRubiconNonGuaranteedTerms functions
#' @export
DealTermsRubiconNonGuaranteedTerms <- function(priorityPrice = NULL, standardPrice = NULL) {
    out <- list(priorityPrice = priorityPrice, standardPrice = standardPrice)
    class(out) <- "gar_DealTermsRubiconNonGuaranteedTerms"
    out
}
#' DeleteOrderDealsRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Request payload for deleting deals from a proposal.
#'
#' @param dealIds List of deals to delete for a given proposal
#' @param proposalRevisionNumber The last known proposal revision number
#' @param updateAction Indicates an optional action to take on the proposal
#'
#' @return DeleteOrderDealsRequest object
#'
#' @family DeleteOrderDealsRequest functions
#' @export
DeleteOrderDealsRequest <- function(dealIds = NULL, proposalRevisionNumber = NULL,
    updateAction = NULL) {
    out <- list(
        dealIds = dealIds,
        proposalRevisionNumber = proposalRevisionNumber,
        updateAction = updateAction
    )
    class(out) <- "gar_DeleteOrderDealsRequest"
    out
}
#' DeleteOrderDealsResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Response payload returned after deals have been deleted from a proposal.
#'
#' @param deals List of deals deleted (in the same proposal as passed in the request)
#' @param proposalRevisionNumber The updated revision number for the proposal
#'
#' @return DeleteOrderDealsResponse object
#'
#' @family DeleteOrderDealsResponse functions
#' @export
DeleteOrderDealsResponse <- function(deals = NULL, proposalRevisionNumber = NULL) {
    out <- list(deals = deals, proposalRevisionNumber = proposalRevisionNumber)
    class(out) <- "gar_DeleteOrderDealsResponse"
    out
}
#' DeliveryControl Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Delivery-control settings for a deal.
#'
#' @param creativeBlockingLevel No description
#' @param deliveryRateType No description
#' @param frequencyCaps No description
#'
#' @return DeliveryControl object
#'
#' @family DeliveryControl functions
#' @export
DeliveryControl <- function(creativeBlockingLevel = NULL, deliveryRateType = NULL,
    frequencyCaps = NULL) {
    out <- list(
        creativeBlockingLevel = creativeBlockingLevel,
        deliveryRateType = deliveryRateType,
        frequencyCaps = frequencyCaps
    )
    class(out) <- "gar_DeliveryControl"
    out
}
#' DeliveryControlFrequencyCap Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A frequency-cap entry inside delivery control settings.
#'
#' @param maxImpressions No description
#' @param numTimeUnits No description
#' @param timeUnitType No description
#'
#' @return DeliveryControlFrequencyCap object
#'
#' @family DeliveryControlFrequencyCap functions
#' @export
DeliveryControlFrequencyCap <- function(maxImpressions = NULL, numTimeUnits = NULL,
    timeUnitType = NULL) {
    out <- list(
        maxImpressions = maxImpressions,
        numTimeUnits = numTimeUnits,
        timeUnitType = timeUnitType
    )
    class(out) <- "gar_DeliveryControlFrequencyCap"
    out
}
#' Dimension Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' This message carries publisher provided breakdown. E.g. {dimension_type: 'COUNTRY', [{dimension_value: {id: 1, name: 'US'}}, {dimension_value: {id: 2, name: 'UK'}}]}
#'
#' @param dimensionType No description
#' @param dimensionValues No description
#'
#' @return Dimension object
#'
#' @family Dimension functions
#' @export
Dimension <- function(dimensionType = NULL, dimensionValues = NULL) {
    out <- list(dimensionType = dimensionType, dimensionValues = dimensionValues)
    class(out) <- "gar_Dimension"
    out
}
#' DimensionDimensionValue Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Value of the dimension.
#'
#' @param id Id of the dimension
#' @param name Name of the dimension mainly for debugging purposes, except for the case of CREATIVE_SIZE
#' @param percentage Percent of total impressions for a dimension type
#'
#' @return DimensionDimensionValue object
#'
#' @family DimensionDimensionValue functions
#' @export
DimensionDimensionValue <- function(id = NULL, name = NULL, percentage = NULL) {
  # Pack the fields into a list and tag it with the API object class.
  obj <- list(id = id, name = name, percentage = percentage)
  class(obj) <- "gar_DimensionDimensionValue"
  obj
}
#' EditAllOrderDealsRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param deals List of deals to edit
#' @param proposal If specified, also updates the proposal in the batch transaction
#' @param proposalRevisionNumber The last known revision number for the proposal
#' @param updateAction Indicates an optional action to take on the proposal
#'
#' @return EditAllOrderDealsRequest object
#'
#' @family EditAllOrderDealsRequest functions
#' @export
EditAllOrderDealsRequest <- function(deals = NULL, proposal = NULL, proposalRevisionNumber = NULL,
    updateAction = NULL) {
  # Build the field list, then attach the API object class.
  obj <- list(
    deals = deals,
    proposal = proposal,
    proposalRevisionNumber = proposalRevisionNumber,
    updateAction = updateAction
  )
  class(obj) <- "gar_EditAllOrderDealsRequest"
  obj
}
#' EditAllOrderDealsResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param deals List of all deals in the proposal after edit
#' @param orderRevisionNumber The latest revision number after the update has been applied
#'
#' @return EditAllOrderDealsResponse object
#'
#' @family EditAllOrderDealsResponse functions
#' @export
EditAllOrderDealsResponse <- function(deals = NULL, orderRevisionNumber = NULL) {
  # Collect the fields and tag the list with the API object class.
  obj <- list(deals = deals, orderRevisionNumber = orderRevisionNumber)
  class(obj) <- "gar_EditAllOrderDealsResponse"
  obj
}
#' GetOffersResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param products The returned list of products
#'
#' @return GetOffersResponse object
#'
#' @family GetOffersResponse functions
#' @export
GetOffersResponse <- function(products = NULL) {
  # Wrap the single field and attach the API object class.
  obj <- list(products = products)
  class(obj) <- "gar_GetOffersResponse"
  obj
}
#' GetOrderDealsResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param deals List of deals for the proposal
#'
#' @return GetOrderDealsResponse object
#'
#' @family GetOrderDealsResponse functions
#' @export
GetOrderDealsResponse <- function(deals = NULL) {
  # Wrap the single field and attach the API object class.
  obj <- list(deals = deals)
  class(obj) <- "gar_GetOrderDealsResponse"
  obj
}
#' GetOrderNotesResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param notes The list of matching notes
#'
#' @return GetOrderNotesResponse object
#'
#' @family GetOrderNotesResponse functions
#' @export
GetOrderNotesResponse <- function(notes = NULL) {
  # Wrap the single field and attach the API object class.
  obj <- list(notes = notes)
  class(obj) <- "gar_GetOrderNotesResponse"
  obj
}
#' GetOrdersResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param proposals The list of matching proposals
#'
#' @return GetOrdersResponse object
#'
#' @family GetOrdersResponse functions
#' @export
GetOrdersResponse <- function(proposals = NULL) {
  # Wrap the single field and attach the API object class.
  obj <- list(proposals = proposals)
  class(obj) <- "gar_GetOrdersResponse"
  obj
}
#' GetPublisherProfilesByAccountIdResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param profiles Profiles for the requested publisher
#'
#' @return GetPublisherProfilesByAccountIdResponse object
#'
#' @family GetPublisherProfilesByAccountIdResponse functions
#' @export
GetPublisherProfilesByAccountIdResponse <- function(profiles = NULL) {
  # Wrap the single field and attach the API object class.
  obj <- list(profiles = profiles)
  class(obj) <- "gar_GetPublisherProfilesByAccountIdResponse"
  obj
}
#' MarketplaceDeal Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A proposal can contain multiple deals. A deal contains the terms and targeting information that is used for serving.
#'
#' @param buyerPrivateData Buyer private data (hidden from seller)
#' @param creationTimeMs The time (ms since epoch) of the deal creation
#' @param creativePreApprovalPolicy Specifies the creative pre-approval policy (buyer-readonly)
#' @param creativeSafeFrameCompatibility Specifies whether the creative is safeFrame compatible (buyer-readonly)
#' @param dealId A unique deal-id for the deal (readonly)
#' @param dealServingMetadata Metadata about the serving status of this deal (readonly, writes via custom actions)
#' @param deliveryControl The set of fields around delivery control that are interesting for a buyer to see but are non-negotiable
#' @param externalDealId The external deal id assigned to this deal once the deal is finalized
#' @param flightEndTimeMs Proposed flight end time of the deal (ms since epoch) This will generally be stored in a granularity of a second
#' @param flightStartTimeMs Proposed flight start time of the deal (ms since epoch) This will generally be stored in a granularity of a second
#' @param inventoryDescription Description for the deal terms
#' @param isRfpTemplate Indicates whether the current deal is a RFP template
#' @param lastUpdateTimeMs The time (ms since epoch) when the deal was last updated
#' @param name The name of the deal
#' @param productId The product-id from which this deal was created
#' @param productRevisionNumber The revision number of the product that the deal was created from (readonly, except on create)
#' @param programmaticCreativeSource Specifies the creative source for programmatic deals, PUBLISHER means creative is provided by seller and ADVERTISER means creative is provided by buyer
#' @param proposalId No description
#' @param sellerContacts Optional Seller contact information for the deal (buyer-readonly)
#' @param sharedTargetings The shared targeting visible to buyers and sellers
#' @param syndicationProduct The syndication product associated with the deal
#' @param terms The negotiable terms of the deal
#' @param webPropertyCode No description
#'
#' @return MarketplaceDeal object
#'
#' @family MarketplaceDeal functions
#' @export
MarketplaceDeal <- function(buyerPrivateData = NULL, creationTimeMs = NULL, creativePreApprovalPolicy = NULL,
    creativeSafeFrameCompatibility = NULL, dealId = NULL, dealServingMetadata = NULL,
    deliveryControl = NULL, externalDealId = NULL, flightEndTimeMs = NULL, flightStartTimeMs = NULL,
    inventoryDescription = NULL, isRfpTemplate = NULL, lastUpdateTimeMs = NULL, name = NULL,
    productId = NULL, productRevisionNumber = NULL, programmaticCreativeSource = NULL,
    proposalId = NULL, sellerContacts = NULL, sharedTargetings = NULL, syndicationProduct = NULL,
    terms = NULL, webPropertyCode = NULL) {
    # `kind` is a fixed API discriminator string. The generated code referenced
    # it as a backtick-quoted *name* (`adexchangebuyer#marketplaceDeal`), which
    # is an "object not found" error at call time; it must be a string literal.
    structure(list(buyerPrivateData = buyerPrivateData, creationTimeMs = creationTimeMs,
        creativePreApprovalPolicy = creativePreApprovalPolicy, creativeSafeFrameCompatibility = creativeSafeFrameCompatibility,
        dealId = dealId, dealServingMetadata = dealServingMetadata, deliveryControl = deliveryControl,
        externalDealId = externalDealId, flightEndTimeMs = flightEndTimeMs, flightStartTimeMs = flightStartTimeMs,
        inventoryDescription = inventoryDescription, isRfpTemplate = isRfpTemplate,
        kind = "adexchangebuyer#marketplaceDeal", lastUpdateTimeMs = lastUpdateTimeMs,
        name = name, productId = productId, productRevisionNumber = productRevisionNumber,
        programmaticCreativeSource = programmaticCreativeSource, proposalId = proposalId,
        sellerContacts = sellerContacts, sharedTargetings = sharedTargetings, syndicationProduct = syndicationProduct,
        terms = terms, webPropertyCode = webPropertyCode), class = "gar_MarketplaceDeal")
}
#' MarketplaceDealParty Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param buyer The buyer/seller associated with the deal
#' @param seller The buyer/seller associated with the deal
#'
#' @return MarketplaceDealParty object
#'
#' @family MarketplaceDealParty functions
#' @export
MarketplaceDealParty <- function(buyer = NULL, seller = NULL) {
  # Pack both parties into a list and tag it with the API object class.
  obj <- list(buyer = buyer, seller = seller)
  class(obj) <- "gar_MarketplaceDealParty"
  obj
}
#' MarketplaceLabel Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param accountId The accountId of the party that created the label
#' @param createTimeMs The creation time (in ms since epoch) for the label
#' @param deprecatedMarketplaceDealParty Information about the party that created the label
#' @param label The label to use
#'
#' @return MarketplaceLabel object
#'
#' @family MarketplaceLabel functions
#' @export
MarketplaceLabel <- function(accountId = NULL, createTimeMs = NULL, deprecatedMarketplaceDealParty = NULL,
    label = NULL) {
  # Collect the fields, then attach the API object class.
  obj <- list(
    accountId = accountId,
    createTimeMs = createTimeMs,
    deprecatedMarketplaceDealParty = deprecatedMarketplaceDealParty,
    label = label
  )
  class(obj) <- "gar_MarketplaceLabel"
  obj
}
#' MarketplaceNote Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A proposal is associated with a bunch of notes which may optionally be associated with a deal and/or revision number.
#'
#' @param creatorRole The role of the person (buyer/seller) creating the note
#' @param dealId Notes can optionally be associated with a deal
#' @param note The actual note to attach
#' @param noteId The unique id for the note
#' @param proposalId The proposalId that a note is attached to
#' @param proposalRevisionNumber If the note is associated with a proposal revision number, then store that here
#' @param timestampMs The timestamp (ms since epoch) that this note was created
#'
#' @return MarketplaceNote object
#'
#' @family MarketplaceNote functions
#' @export
MarketplaceNote <- function(creatorRole = NULL, dealId = NULL, note = NULL, noteId = NULL,
    proposalId = NULL, proposalRevisionNumber = NULL, timestampMs = NULL) {
    # `kind` is a fixed API discriminator string. The generated code referenced
    # it as a backtick-quoted *name*, which errors at call time; it must be a
    # string literal.
    structure(list(creatorRole = creatorRole, dealId = dealId, kind = "adexchangebuyer#marketplaceNote",
        note = note, noteId = noteId, proposalId = proposalId, proposalRevisionNumber = proposalRevisionNumber,
        timestampMs = timestampMs), class = "gar_MarketplaceNote")
}
#' PerformanceReport Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The configuration data for an Ad Exchange performance report list.
#'
#' @param bidRate The number of bid responses with an ad
#' @param bidRequestRate The number of bid requests sent to your bidder
#' @param calloutStatusRate Rate of various prefiltering statuses per match
#' @param cookieMatcherStatusRate Average QPS for cookie matcher operations
#' @param creativeStatusRate Rate of ads with a given status
#' @param filteredBidRate The number of bid responses that were filtered due to a policy violation or other errors
#' @param hostedMatchStatusRate Average QPS for hosted match operations
#' @param inventoryMatchRate The number of potential queries based on your pretargeting settings
#' @param latency50thPercentile The 50th percentile round trip latency(ms) as perceived from Google servers for the duration period covered by the report
#' @param latency85thPercentile The 85th percentile round trip latency(ms) as perceived from Google servers for the duration period covered by the report
#' @param latency95thPercentile The 95th percentile round trip latency(ms) as perceived from Google servers for the duration period covered by the report
#' @param noQuotaInRegion Rate of various quota account statuses per quota check
#' @param outOfQuota Rate of various quota account statuses per quota check
#' @param pixelMatchRequests Average QPS for pixel match requests from clients
#' @param pixelMatchResponses Average QPS for pixel match responses from clients
#' @param quotaConfiguredLimit The configured quota limits for this account
#' @param quotaThrottledLimit The throttled quota limits for this account
#' @param region The trading location of this data
#' @param successfulRequestRate The number of properly formed bid responses received by our servers within the deadline
#' @param timestamp The unix timestamp of the starting time of this performance data
#' @param unsuccessfulRequestRate The number of bid responses that were unsuccessful due to timeouts, incorrect formatting, etc
#'
#' @return PerformanceReport object
#'
#' @family PerformanceReport functions
#' @export
PerformanceReport <- function(bidRate = NULL, bidRequestRate = NULL, calloutStatusRate = NULL,
    cookieMatcherStatusRate = NULL, creativeStatusRate = NULL, filteredBidRate = NULL,
    hostedMatchStatusRate = NULL, inventoryMatchRate = NULL, latency50thPercentile = NULL,
    latency85thPercentile = NULL, latency95thPercentile = NULL, noQuotaInRegion = NULL,
    outOfQuota = NULL, pixelMatchRequests = NULL, pixelMatchResponses = NULL, quotaConfiguredLimit = NULL,
    quotaThrottledLimit = NULL, region = NULL, successfulRequestRate = NULL, timestamp = NULL,
    unsuccessfulRequestRate = NULL) {
    # `kind` is a fixed API discriminator string. The generated code referenced
    # it as a backtick-quoted *name*, which errors at call time; it must be a
    # string literal.
    structure(list(bidRate = bidRate, bidRequestRate = bidRequestRate, calloutStatusRate = calloutStatusRate,
        cookieMatcherStatusRate = cookieMatcherStatusRate, creativeStatusRate = creativeStatusRate,
        filteredBidRate = filteredBidRate, hostedMatchStatusRate = hostedMatchStatusRate,
        inventoryMatchRate = inventoryMatchRate, kind = "adexchangebuyer#performanceReport",
        latency50thPercentile = latency50thPercentile, latency85thPercentile = latency85thPercentile,
        latency95thPercentile = latency95thPercentile, noQuotaInRegion = noQuotaInRegion,
        outOfQuota = outOfQuota, pixelMatchRequests = pixelMatchRequests, pixelMatchResponses = pixelMatchResponses,
        quotaConfiguredLimit = quotaConfiguredLimit, quotaThrottledLimit = quotaThrottledLimit,
        region = region, successfulRequestRate = successfulRequestRate, timestamp = timestamp,
        unsuccessfulRequestRate = unsuccessfulRequestRate), class = "gar_PerformanceReport")
}
#' PerformanceReportList Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The configuration data for an Ad Exchange performance report list.
#'
#' @param performanceReport A list of performance reports relevant for the account
#'
#' @return PerformanceReportList object
#'
#' @family PerformanceReportList functions
#' @export
PerformanceReportList <- function(performanceReport = NULL) {
    # `kind` is a fixed API discriminator string; the generated backtick-quoted
    # name errored at call time, so it is now a string literal.
    structure(list(kind = "adexchangebuyer#performanceReportList", performanceReport = performanceReport),
        class = "gar_PerformanceReportList")
}
#' PretargetingConfig Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param PretargetingConfig.dimensions The \link{PretargetingConfig.dimensions} object or list of objects
#' @param PretargetingConfig.excludedPlacements The \link{PretargetingConfig.excludedPlacements} object or list of objects
#' @param PretargetingConfig.placements The \link{PretargetingConfig.placements} object or list of objects
#' @param PretargetingConfig.videoPlayerSizes The \link{PretargetingConfig.videoPlayerSizes} object or list of objects
#' @param billingId The id for billing purposes, provided for reference
#' @param configId The config id; generated automatically
#' @param configName The name of the config
#' @param creativeType List must contain exactly one of PRETARGETING_CREATIVE_TYPE_HTML or PRETARGETING_CREATIVE_TYPE_VIDEO
#' @param dimensions Requests which allow one of these (width, height) pairs will match
#' @param excludedContentLabels Requests with any of these content labels will not match
#' @param excludedGeoCriteriaIds Requests containing any of these geo criteria ids will not match
#' @param excludedPlacements Requests containing any of these placements will not match
#' @param excludedUserLists Requests containing any of these users list ids will not match
#' @param excludedVerticals Requests containing any of these vertical ids will not match
#' @param geoCriteriaIds Requests containing any of these geo criteria ids will match
#' @param isActive Whether this config is active
#' @param languages Request containing any of these language codes will match
#' @param minimumViewabilityDecile Requests where the predicted viewability is below the specified decile will not match
#' @param mobileCarriers Requests containing any of these mobile carrier ids will match
#' @param mobileDevices Requests containing any of these mobile device ids will match
#' @param mobileOperatingSystemVersions Requests containing any of these mobile operating system version ids will match
#' @param placements Requests containing any of these placements will match
#' @param platforms Requests matching any of these platforms will match
#' @param supportedCreativeAttributes Creative attributes should be declared here if all creatives corresponding to this pretargeting configuration have that creative attribute
#' @param userIdentifierDataRequired Requests containing the specified type of user data will match
#' @param userLists Requests containing any of these user list ids will match
#' @param vendorTypes Requests that allow any of these vendor ids will match
#' @param verticals Requests containing any of these vertical ids will match
#' @param videoPlayerSizes Video requests satisfying any of these player size constraints will match
#'
#' @return PretargetingConfig object
#'
#' @family PretargetingConfig functions
#' @export
PretargetingConfig <- function(PretargetingConfig.dimensions = NULL, PretargetingConfig.excludedPlacements = NULL,
    PretargetingConfig.placements = NULL, PretargetingConfig.videoPlayerSizes = NULL,
    billingId = NULL, configId = NULL, configName = NULL, creativeType = NULL, dimensions = NULL,
    excludedContentLabels = NULL, excludedGeoCriteriaIds = NULL, excludedPlacements = NULL,
    excludedUserLists = NULL, excludedVerticals = NULL, geoCriteriaIds = NULL, isActive = NULL,
    languages = NULL, minimumViewabilityDecile = NULL, mobileCarriers = NULL, mobileDevices = NULL,
    mobileOperatingSystemVersions = NULL, placements = NULL, platforms = NULL, supportedCreativeAttributes = NULL,
    userIdentifierDataRequired = NULL, userLists = NULL, vendorTypes = NULL, verticals = NULL,
    videoPlayerSizes = NULL) {
    # `kind` is a fixed API discriminator string. The generated code referenced
    # it as a backtick-quoted *name*, which errors at call time; it must be a
    # string literal.
    structure(list(PretargetingConfig.dimensions = PretargetingConfig.dimensions,
        PretargetingConfig.excludedPlacements = PretargetingConfig.excludedPlacements,
        PretargetingConfig.placements = PretargetingConfig.placements, PretargetingConfig.videoPlayerSizes = PretargetingConfig.videoPlayerSizes,
        billingId = billingId, configId = configId, configName = configName, creativeType = creativeType,
        dimensions = dimensions, excludedContentLabels = excludedContentLabels, excludedGeoCriteriaIds = excludedGeoCriteriaIds,
        excludedPlacements = excludedPlacements, excludedUserLists = excludedUserLists,
        excludedVerticals = excludedVerticals, geoCriteriaIds = geoCriteriaIds, isActive = isActive,
        kind = "adexchangebuyer#pretargetingConfig", languages = languages, minimumViewabilityDecile = minimumViewabilityDecile,
        mobileCarriers = mobileCarriers, mobileDevices = mobileDevices, mobileOperatingSystemVersions = mobileOperatingSystemVersions,
        placements = placements, platforms = platforms, supportedCreativeAttributes = supportedCreativeAttributes,
        userIdentifierDataRequired = userIdentifierDataRequired, userLists = userLists,
        vendorTypes = vendorTypes, verticals = verticals, videoPlayerSizes = videoPlayerSizes),
        class = "gar_PretargetingConfig")
}
#' PretargetingConfig.dimensions Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#'
#'
#' @return PretargetingConfig.dimensions object
#'
#' @family PretargetingConfig functions
#' @export
PretargetingConfig.dimensions <- function() {
  # Empty placeholder list; identical to list().
  vector(mode = "list", length = 0L)
}
#' PretargetingConfig.excludedPlacements Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#'
#'
#' @return PretargetingConfig.excludedPlacements object
#'
#' @family PretargetingConfig functions
#' @export
PretargetingConfig.excludedPlacements <- function() {
  # Empty placeholder list; identical to list().
  vector(mode = "list", length = 0L)
}
#' PretargetingConfig.placements Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#'
#'
#' @return PretargetingConfig.placements object
#'
#' @family PretargetingConfig functions
#' @export
PretargetingConfig.placements <- function() {
  # Empty placeholder list; identical to list().
  vector(mode = "list", length = 0L)
}
#' PretargetingConfig.videoPlayerSizes Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#'
#'
#' @return PretargetingConfig.videoPlayerSizes object
#'
#' @family PretargetingConfig functions
#' @export
PretargetingConfig.videoPlayerSizes <- function() {
  # Empty placeholder list; identical to list().
  vector(mode = "list", length = 0L)
}
#' PretargetingConfigList Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param items A list of pretargeting configs
#'
#' @return PretargetingConfigList object
#'
#' @family PretargetingConfigList functions
#' @export
PretargetingConfigList <- function(items = NULL) {
    # `kind` is a fixed API discriminator string; the generated backtick-quoted
    # name errored at call time, so it is now a string literal.
    structure(list(items = items, kind = "adexchangebuyer#pretargetingConfigList"),
        class = "gar_PretargetingConfigList")
}
#' Price Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param amountMicros The price value in micros
#' @param currencyCode The currency code for the price
#' @param expectedCpmMicros In case of CPD deals, the expected CPM in micros
#' @param pricingType The pricing type for the deal/product
#'
#' @return Price object
#'
#' @family Price functions
#' @export
Price <- function(amountMicros = NULL, currencyCode = NULL, expectedCpmMicros = NULL,
    pricingType = NULL) {
  # Collect the fields, then attach the API object class.
  obj <- list(
    amountMicros = amountMicros,
    currencyCode = currencyCode,
    expectedCpmMicros = expectedCpmMicros,
    pricingType = pricingType
  )
  class(obj) <- "gar_Price"
  obj
}
#' PricePerBuyer Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Used to specify pricing rules for buyers/advertisers. Each PricePerBuyer in an product can become [0,1] deals. To check if there is a PricePerBuyer for a particular buyer or buyer/advertiser pair, we look for the most specific matching rule - we first look for a rule matching the buyer and advertiser, next a rule with the buyer but an empty advertiser list, and otherwise look for a matching rule where no buyer is set.
#'
#' @param auctionTier Optional access type for this buyer
#' @param buyer The buyer who will pay this price
#' @param price The specified price
#'
#' @return PricePerBuyer object
#'
#' @family PricePerBuyer functions
#' @export
PricePerBuyer <- function(auctionTier = NULL, buyer = NULL, price = NULL) {
  # Pack the fields into a list and tag it with the API object class.
  obj <- list(auctionTier = auctionTier, buyer = buyer, price = price)
  class(obj) <- "gar_PricePerBuyer"
  obj
}
#' PrivateData Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param referenceId No description
#' @param referencePayload No description
#'
#' @return PrivateData object
#'
#' @family PrivateData functions
#' @export
PrivateData <- function(referenceId = NULL, referencePayload = NULL) {
  # Pack the fields into a list and tag it with the API object class.
  obj <- list(referenceId = referenceId, referencePayload = referencePayload)
  class(obj) <- "gar_PrivateData"
  obj
}
#' Product Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A product is segment of inventory that a seller wishes to sell. It is associated with certain terms and targeting information which helps buyer know more about the inventory. Each field in a product can have one of the following setting:(readonly) - It is an error to try and set this field. (buyer-readonly) - Only the seller can set this field. (seller-readonly) - Only the buyer can set this field. (updatable) - The field is updatable at all times by either buyer or the seller.
#'
#' @param creationTimeMs Creation time in ms
#' @param creatorContacts Optional contact information for the creator of this product
#' @param deliveryControl The set of fields around delivery control that are interesting for a buyer to see but are non-negotiable
#' @param flightEndTimeMs The proposed end time for the deal (ms since epoch) (buyer-readonly)
#' @param flightStartTimeMs Inventory availability dates
#' @param hasCreatorSignedOff If the creator has already signed off on the product, then the buyer can finalize the deal by accepting the product as is
#' @param inventorySource What exchange will provide this inventory (readonly, except on create)
#' @param labels Optional List of labels for the product (optional, buyer-readonly)
#' @param lastUpdateTimeMs Time of last update in ms
#' @param legacyOfferId Optional legacy offer id if this offer is a preferred deal offer
#' @param marketplacePublisherProfileId Marketplace publisher profile Id
#' @param name The name for this product as set by the seller
#' @param privateAuctionId Optional private auction id if this offer is a private auction offer
#' @param productId The unique id for the product (readonly)
#' @param publisherProfileId Id of the publisher profile for a given seller
#' @param publisherProvidedForecast Publisher self-provided forecast information
#' @param revisionNumber The revision number of the product
#' @param seller Information about the seller that created this product (readonly, except on create)
#' @param sharedTargetings Targeting that is shared between the buyer and the seller
#' @param state The state of the product
#' @param syndicationProduct The syndication product associated with the deal
#' @param terms The negotiable terms of the deal (buyer-readonly)
#' @param webPropertyCode The web property code for the seller
#'
#' @return Product object
#'
#' @family Product functions
#' @export
Product <- function(creationTimeMs = NULL, creatorContacts = NULL, deliveryControl = NULL,
    flightEndTimeMs = NULL, flightStartTimeMs = NULL, hasCreatorSignedOff = NULL,
    inventorySource = NULL, labels = NULL, lastUpdateTimeMs = NULL, legacyOfferId = NULL,
    marketplacePublisherProfileId = NULL, name = NULL, privateAuctionId = NULL, productId = NULL,
    publisherProfileId = NULL, publisherProvidedForecast = NULL, revisionNumber = NULL,
    seller = NULL, sharedTargetings = NULL, state = NULL, syndicationProduct = NULL,
    terms = NULL, webPropertyCode = NULL) {
    # `kind` is a fixed API discriminator string. The generated code referenced
    # it as a backtick-quoted *name* (`adexchangebuyer#product`), which errors
    # at call time; it must be a string literal.
    structure(list(creationTimeMs = creationTimeMs, creatorContacts = creatorContacts,
        deliveryControl = deliveryControl, flightEndTimeMs = flightEndTimeMs, flightStartTimeMs = flightStartTimeMs,
        hasCreatorSignedOff = hasCreatorSignedOff, inventorySource = inventorySource,
        kind = "adexchangebuyer#product", labels = labels, lastUpdateTimeMs = lastUpdateTimeMs,
        legacyOfferId = legacyOfferId, marketplacePublisherProfileId = marketplacePublisherProfileId,
        name = name, privateAuctionId = privateAuctionId, productId = productId,
        publisherProfileId = publisherProfileId, publisherProvidedForecast = publisherProvidedForecast,
        revisionNumber = revisionNumber, seller = seller, sharedTargetings = sharedTargetings,
        state = state, syndicationProduct = syndicationProduct, terms = terms, webPropertyCode = webPropertyCode),
        class = "gar_Product")
}
#' Proposal Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Represents a proposal in the marketplace. A proposal is the unit of negotiation between a seller and a buyer and contains deals which are served. Each field in a proposal can have one of the following setting:(readonly) - It is an error to try and set this field. (buyer-readonly) - Only the seller can set this field. (seller-readonly) - Only the buyer can set this field. (updatable) - The field is updatable at all times by either buyer or the seller.
#'
#' @param billedBuyer Reference to the buyer that will get billed for this proposal
#' @param buyer Reference to the buyer on the proposal
#' @param buyerContacts Optional contact information of the buyer
#' @param buyerPrivateData Private data for buyer
#' @param dbmAdvertiserIds IDs of DBM advertisers permission to this proposal
#' @param hasBuyerSignedOff When a proposal is in an accepted state, indicates whether the buyer has signed off
#' @param hasSellerSignedOff When a proposal is in an accepted state, indicates whether the seller has signed off. Once both sides have signed off on a deal, the proposal can be finalized by the seller
#' @param inventorySource What exchange will provide this inventory (readonly, except on create)
#' @param isRenegotiating True if the proposal is being renegotiated (readonly)
#' @param isSetupComplete True, if the buyside inventory setup is complete for this proposal
#' @param labels List of labels associated with the proposal
#' @param lastUpdaterOrCommentorRole The role of the last user that either updated the proposal or left a comment
#' @param name The name for the proposal (updatable)
#' @param negotiationId Optional negotiation id if this proposal is a preferred deal proposal
#' @param originatorRole Indicates whether the buyer/seller created the proposal
#' @param privateAuctionId Optional private auction id if this proposal is a private auction proposal
#' @param proposalId The unique id of the proposal
#' @param proposalState The current state of the proposal
#' @param revisionNumber The revision number for the proposal (readonly)
#' @param revisionTimeMs The time (ms since epoch) when the proposal was last revised (readonly)
#' @param seller Reference to the seller on the proposal
#' @param sellerContacts Optional contact information of the seller (buyer-readonly)
#'
#' @return Proposal object
#'
#' @family Proposal functions
#' @export
Proposal <- function(billedBuyer = NULL, buyer = NULL, buyerContacts = NULL, buyerPrivateData = NULL,
    dbmAdvertiserIds = NULL, hasBuyerSignedOff = NULL, hasSellerSignedOff = NULL,
    inventorySource = NULL, isRenegotiating = NULL, isSetupComplete = NULL, labels = NULL,
    lastUpdaterOrCommentorRole = NULL, name = NULL, negotiationId = NULL, originatorRole = NULL,
    privateAuctionId = NULL, proposalId = NULL, proposalState = NULL, revisionNumber = NULL,
    revisionTimeMs = NULL, seller = NULL, sellerContacts = NULL) {
    # Assemble the Ad Exchange Buyer API object as a classed list.
    # BUG FIX: `kind` was previously written in backticks
    # (`adexchangebuyer#proposal`), which R treats as a variable reference and
    # which fails at run time with "object not found".  The API expects the
    # literal string identifying the resource type.
    structure(list(billedBuyer = billedBuyer, buyer = buyer, buyerContacts = buyerContacts,
        buyerPrivateData = buyerPrivateData, dbmAdvertiserIds = dbmAdvertiserIds,
        hasBuyerSignedOff = hasBuyerSignedOff, hasSellerSignedOff = hasSellerSignedOff,
        inventorySource = inventorySource, isRenegotiating = isRenegotiating, isSetupComplete = isSetupComplete,
        kind = "adexchangebuyer#proposal", labels = labels, lastUpdaterOrCommentorRole = lastUpdaterOrCommentorRole,
        name = name, negotiationId = negotiationId, originatorRole = originatorRole,
        privateAuctionId = privateAuctionId, proposalId = proposalId, proposalState = proposalState,
        revisionNumber = revisionNumber, revisionTimeMs = revisionTimeMs, seller = seller,
        sellerContacts = sellerContacts), class = "gar_Proposal")
}
#' PublisherProfileApiProto Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param accountId The account id of the seller
#' @param audience Publisher provided info on its audience
#' @param buyerPitchStatement A pitch statement for the buyer
#' @param directContact Direct contact for the publisher profile
#' @param exchange Exchange where this publisher profile is from
#' @param googlePlusLink Link to publisher's Google+ page
#' @param isParent True, if this is the parent profile, which represents all domains owned by the publisher
#' @param isPublished True, if this profile is published
#' @param logoUrl The url to the logo for the publisher
#' @param mediaKitLink The url for additional marketing and sales materials
#' @param name No description
#' @param overview Publisher provided overview
#' @param profileId The pair of (seller
#' @param programmaticContact Programmatic contact for the publisher profile
#' @param publisherDomains The list of domains represented in this publisher profile
#' @param publisherProfileId Unique Id for publisher profile
#' @param publisherProvidedForecast Publisher provided forecasting information
#' @param rateCardInfoLink Link to publisher rate card
#' @param samplePageLink Link for a sample content page
#' @param seller Seller of the publisher profile
#' @param state State of the publisher profile
#' @param topHeadlines Publisher provided key metrics and rankings
#'
#' @return PublisherProfileApiProto object
#'
#' @family PublisherProfileApiProto functions
#' @export
PublisherProfileApiProto <- function(accountId = NULL, audience = NULL, buyerPitchStatement = NULL,
    directContact = NULL, exchange = NULL, googlePlusLink = NULL, isParent = NULL,
    isPublished = NULL, logoUrl = NULL, mediaKitLink = NULL, name = NULL, overview = NULL,
    profileId = NULL, programmaticContact = NULL, publisherDomains = NULL, publisherProfileId = NULL,
    publisherProvidedForecast = NULL, rateCardInfoLink = NULL, samplePageLink = NULL,
    seller = NULL, state = NULL, topHeadlines = NULL) {
    # Assemble the Ad Exchange Buyer API object as a classed list.
    # BUG FIX: `kind` was previously written in backticks
    # (`adexchangebuyer#publisherProfileApiProto`), which R parses as a
    # variable lookup and which errors at run time; the resource-type marker
    # must be a character literal.
    structure(list(accountId = accountId, audience = audience, buyerPitchStatement = buyerPitchStatement,
        directContact = directContact, exchange = exchange, googlePlusLink = googlePlusLink,
        isParent = isParent, isPublished = isPublished, kind = "adexchangebuyer#publisherProfileApiProto",
        logoUrl = logoUrl, mediaKitLink = mediaKitLink, name = name, overview = overview,
        profileId = profileId, programmaticContact = programmaticContact, publisherDomains = publisherDomains,
        publisherProfileId = publisherProfileId, publisherProvidedForecast = publisherProvidedForecast,
        rateCardInfoLink = rateCardInfoLink, samplePageLink = samplePageLink, seller = seller,
        state = state, topHeadlines = topHeadlines), class = "gar_PublisherProfileApiProto")
}
#' PublisherProvidedForecast Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' This message carries publisher provided forecasting information.
#'
#' @param dimensions Publisher provided dimensions
#' @param weeklyImpressions Publisher provided weekly impressions
#' @param weeklyUniques Publisher provided weekly uniques
#'
#' @return PublisherProvidedForecast object
#'
#' @family PublisherProvidedForecast functions
#' @export
PublisherProvidedForecast <- function(dimensions = NULL, weeklyImpressions = NULL,
    weeklyUniques = NULL) {
    # Wrap the publisher-supplied forecast fields in a classed list.
    forecast <- list(dimensions = dimensions, weeklyImpressions = weeklyImpressions,
        weeklyUniques = weeklyUniques)
    class(forecast) <- "gar_PublisherProvidedForecast"
    forecast
}
#' Seller Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param accountId The unique id for the seller
#' @param subAccountId Optional sub-account id for the seller
#'
#' @return Seller object
#'
#' @family Seller functions
#' @export
Seller <- function(accountId = NULL, subAccountId = NULL) {
    # A seller reference: account id plus an optional sub-account id.
    fields <- list(accountId = accountId, subAccountId = subAccountId)
    structure(fields, class = "gar_Seller")
}
#' SharedTargeting Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param exclusions The list of values to exclude from targeting
#' @param inclusions The list of value to include as part of the targeting
#' @param key The key representing the shared targeting criterion
#'
#' @return SharedTargeting object
#'
#' @family SharedTargeting functions
#' @export
SharedTargeting <- function(exclusions = NULL, inclusions = NULL, key = NULL) {
    # Shared targeting criterion: `key` names the criterion while
    # `inclusions`/`exclusions` carry the targeted and excluded values.
    criterion <- list(exclusions = exclusions, inclusions = inclusions, key = key)
    class(criterion) <- "gar_SharedTargeting"
    criterion
}
#' TargetingValue Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param creativeSizeValue The creative size value to exclude/include
#' @param dayPartTargetingValue The daypart targeting to include / exclude
#' @param longValue The long value to exclude/include
#' @param stringValue The string value to exclude/include
#'
#' @return TargetingValue object
#'
#' @family TargetingValue functions
#' @export
TargetingValue <- function(creativeSizeValue = NULL, dayPartTargetingValue = NULL,
    longValue = NULL, stringValue = NULL) {
    # A targeting value with one slot per supported value type.
    value <- list(
        creativeSizeValue = creativeSizeValue,
        dayPartTargetingValue = dayPartTargetingValue,
        longValue = longValue,
        stringValue = stringValue
    )
    structure(value, class = "gar_TargetingValue")
}
#' TargetingValueCreativeSize Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param companionSizes For video size type, the list of companion sizes
#' @param creativeSizeType The Creative size type
#' @param size For regular or video creative size type, specifies the size of the creative
#' @param skippableAdType The skippable ad type for video size
#'
#' @return TargetingValueCreativeSize object
#'
#' @family TargetingValueCreativeSize functions
#' @export
TargetingValueCreativeSize <- function(companionSizes = NULL, creativeSizeType = NULL,
    size = NULL, skippableAdType = NULL) {
    # Creative-size targeting value; field meanings are documented in the
    # roxygen block above.
    fields <- list(
        companionSizes = companionSizes,
        creativeSizeType = creativeSizeType,
        size = size,
        skippableAdType = skippableAdType
    )
    class(fields) <- "gar_TargetingValueCreativeSize"
    fields
}
#' TargetingValueDayPartTargeting Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param dayParts No description
#' @param timeZoneType No description
#'
#' @return TargetingValueDayPartTargeting object
#'
#' @family TargetingValueDayPartTargeting functions
#' @export
TargetingValueDayPartTargeting <- function(dayParts = NULL, timeZoneType = NULL) {
    # Daypart targeting container: the day parts plus the time-zone type.
    out <- list(dayParts = dayParts, timeZoneType = timeZoneType)
    structure(out, class = "gar_TargetingValueDayPartTargeting")
}
#' TargetingValueDayPartTargetingDayPart Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param dayOfWeek No description
#' @param endHour No description
#' @param endMinute No description
#' @param startHour No description
#' @param startMinute No description
#'
#' @return TargetingValueDayPartTargetingDayPart object
#'
#' @family TargetingValueDayPartTargetingDayPart functions
#' @export
TargetingValueDayPartTargetingDayPart <- function(dayOfWeek = NULL, endHour = NULL,
    endMinute = NULL, startHour = NULL, startMinute = NULL) {
    # A single day-part window: weekday plus start/end hour and minute.
    window <- list(
        dayOfWeek = dayOfWeek,
        endHour = endHour,
        endMinute = endMinute,
        startHour = startHour,
        startMinute = startMinute
    )
    class(window) <- "gar_TargetingValueDayPartTargetingDayPart"
    window
}
#' TargetingValueSize Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param height The height of the creative
#' @param width The width of the creative
#'
#' @return TargetingValueSize object
#'
#' @family TargetingValueSize functions
#' @export
TargetingValueSize <- function(height = NULL, width = NULL) {
    # Height and width of a creative, wrapped as an API object.
    dims <- list(height = height, width = width)
    class(dims) <- "gar_TargetingValueSize"
    dims
}
#' UpdatePrivateAuctionProposalRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param externalDealId The externalDealId of the deal to be updated
#' @param note Optional note to be added
#' @param proposalRevisionNumber The current revision number of the proposal to be updated
#' @param updateAction The proposed action on the private auction proposal
#'
#' @return UpdatePrivateAuctionProposalRequest object
#'
#' @family UpdatePrivateAuctionProposalRequest functions
#' @export
UpdatePrivateAuctionProposalRequest <- function(externalDealId = NULL, note = NULL,
    proposalRevisionNumber = NULL, updateAction = NULL) {
    # Request payload for updating a private-auction proposal.
    payload <- list(
        externalDealId = externalDealId,
        note = note,
        proposalRevisionNumber = proposalRevisionNumber,
        updateAction = updateAction
    )
    structure(payload, class = "gar_UpdatePrivateAuctionProposalRequest")
}
|
unormalize <- function(x, form = c("NFKC", "NFC", "NFKD", "NFD"), encoding = "utf8") {
    # Unicode-normalize `x` using the native "normalize" routine.
    # Map the normalization form name onto the integer code the C routine
    # expects (NFD = 0, NFKD = 1, NFC = 2, NFKC = 3).
    form <- switch(match.arg(form), NFD = 0L, NFKD = 1L, NFC = 2L, NFKC = 3L)
    # FIX: `class(x) == "character"` is fragile -- `class()` can return a
    # vector of length > 1 (multi-class objects), which makes the comparison
    # unsuitable as an `if` condition.  Use the type predicates instead.
    if (is.character(x)) {
        .Call("normalize", x, form, encoding)
    } else if (is.factor(x)) {
        # For factors, normalizing only the levels is cheaper and preserves
        # the factor structure.
        levels(x) <- .Call("normalize", levels(x), form, encoding)
        x
    } else {
        stop("`x` should be either 'character' or 'factor'!")
    }
}
| /R/unormalize.R | no_license | abicky/RUnicode | R | false | false | 461 | r | unormalize <- function(x, form = c("NFKC", "NFC", "NFKD", "NFD"), encoding = "utf8") {
form <- switch(match.arg(form), NFD = 0L, NFKD = 1L, NFC = 2L, NFKC = 3L)
if (class(x) == "character") {
return(.Call("normalize", x, form, encoding))
} else if (class(x) == "factor") {
levels(x) <- .Call("normalize", levels(x), form, encoding)
return(x)
} else {
stop("`x` should be either 'character' or 'factor'!")
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/opts.R
\name{opt_table_lines}
\alias{opt_table_lines}
\title{Option to set table lines to different extents}
\usage{
opt_table_lines(data, extent = c("all", "none", "default"))
}
\arguments{
\item{data}{\emph{The gt table data object}
\verb{obj:<gt_tbl>} // \strong{required}
This is the \strong{gt} table object that is commonly created through use of the
\code{\link[=gt]{gt()}} function.}
\item{extent}{\emph{Extent of lines added}
\verb{singl-kw:[all|none|default]} // \emph{default:} \code{"all"}
The extent to which lines will be visible in the table. Options are
\code{"all"}, \code{"none"}, or \code{"default"}.}
}
\value{
An object of class \code{gt_tbl}.
}
\description{
The \code{opt_table_lines()} function sets table lines in one of three possible
ways: (1) all possible table lines drawn (\code{"all"}), (2) no table lines at all
(\code{"none"}), and (3) resetting to the default line styles (\code{"default"}). This
is great if you want to start off with lots of lines and subtract just a few
of them with \code{\link[=tab_options]{tab_options()}} or \code{\link[=tab_style]{tab_style()}}. Or, use it to start with a
completely lineless table, adding individual lines as needed.
}
\section{Examples}{
Use the \code{\link{exibble}} dataset to create a \strong{gt} table with a number of table
parts added (using functions like \code{\link[=summary_rows]{summary_rows()}}, \code{\link[=grand_summary_rows]{grand_summary_rows()}},
and more). Following that, we'll use the \code{opt_table_lines()} function to
generate lines everywhere there can possibly be lines (the default for the
\code{extent} argument is \code{"all"}).
\if{html}{\out{<div class="sourceCode r">}}\preformatted{exibble |>
gt(rowname_col = "row", groupname_col = "group") |>
summary_rows(
groups = "grp_a",
columns = c(num, currency),
fns = c("min", "max")
) |>
grand_summary_rows(
columns = currency,
fns = total ~ sum(., na.rm = TRUE)
) |>
tab_source_note(source_note = "This is a source note.") |>
tab_footnote(
footnote = "This is a footnote.",
locations = cells_body(columns = 1, rows = 1)
) |>
tab_header(
title = "The title of the table",
subtitle = "The table's subtitle"
) |>
opt_table_lines()
}\if{html}{\out{</div>}}
\if{html}{\out{
<img src="https://raw.githubusercontent.com/rstudio/gt/master/images/man_opt_table_lines_1.png" alt="This image of a table was generated from the first code example in the `opt_table_lines()` help file." style="width:100\%;">
}}
}
\section{Function ID}{
10-10
}
\section{Function Introduced}{
\code{v0.2.0.5} (March 31, 2020)
}
\seealso{
Other table option functions:
\code{\link{opt_align_table_header}()},
\code{\link{opt_all_caps}()},
\code{\link{opt_css}()},
\code{\link{opt_footnote_marks}()},
\code{\link{opt_footnote_spec}()},
\code{\link{opt_horizontal_padding}()},
\code{\link{opt_interactive}()},
\code{\link{opt_row_striping}()},
\code{\link{opt_stylize}()},
\code{\link{opt_table_font}()},
\code{\link{opt_table_outline}()},
\code{\link{opt_vertical_padding}()}
}
\concept{table option functions}
| /man/opt_table_lines.Rd | permissive | rstudio/gt | R | false | true | 3,186 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/opts.R
\name{opt_table_lines}
\alias{opt_table_lines}
\title{Option to set table lines to different extents}
\usage{
opt_table_lines(data, extent = c("all", "none", "default"))
}
\arguments{
\item{data}{\emph{The gt table data object}
\verb{obj:<gt_tbl>} // \strong{required}
This is the \strong{gt} table object that is commonly created through use of the
\code{\link[=gt]{gt()}} function.}
\item{extent}{\emph{Extent of lines added}
\verb{singl-kw:[all|none|default]} // \emph{default:} \code{"all"}
The extent to which lines will be visible in the table. Options are
\code{"all"}, \code{"none"}, or \code{"default"}.}
}
\value{
An object of class \code{gt_tbl}.
}
\description{
The \code{opt_table_lines()} function sets table lines in one of three possible
ways: (1) all possible table lines drawn (\code{"all"}), (2) no table lines at all
(\code{"none"}), and (3) resetting to the default line styles (\code{"default"}). This
is great if you want to start off with lots of lines and subtract just a few
of them with \code{\link[=tab_options]{tab_options()}} or \code{\link[=tab_style]{tab_style()}}. Or, use it to start with a
completely lineless table, adding individual lines as needed.
}
\section{Examples}{
Use the \code{\link{exibble}} dataset to create a \strong{gt} table with a number of table
parts added (using functions like \code{\link[=summary_rows]{summary_rows()}}, \code{\link[=grand_summary_rows]{grand_summary_rows()}},
and more). Following that, we'll use the \code{opt_table_lines()} function to
generate lines everywhere there can possibly be lines (the default for the
\code{extent} argument is \code{"all"}).
\if{html}{\out{<div class="sourceCode r">}}\preformatted{exibble |>
gt(rowname_col = "row", groupname_col = "group") |>
summary_rows(
groups = "grp_a",
columns = c(num, currency),
fns = c("min", "max")
) |>
grand_summary_rows(
columns = currency,
fns = total ~ sum(., na.rm = TRUE)
) |>
tab_source_note(source_note = "This is a source note.") |>
tab_footnote(
footnote = "This is a footnote.",
locations = cells_body(columns = 1, rows = 1)
) |>
tab_header(
title = "The title of the table",
subtitle = "The table's subtitle"
) |>
opt_table_lines()
}\if{html}{\out{</div>}}
\if{html}{\out{
<img src="https://raw.githubusercontent.com/rstudio/gt/master/images/man_opt_table_lines_1.png" alt="This image of a table was generated from the first code example in the `opt_table_lines()` help file." style="width:100\%;">
}}
}
\section{Function ID}{
10-10
}
\section{Function Introduced}{
\code{v0.2.0.5} (March 31, 2020)
}
\seealso{
Other table option functions:
\code{\link{opt_align_table_header}()},
\code{\link{opt_all_caps}()},
\code{\link{opt_css}()},
\code{\link{opt_footnote_marks}()},
\code{\link{opt_footnote_spec}()},
\code{\link{opt_horizontal_padding}()},
\code{\link{opt_interactive}()},
\code{\link{opt_row_striping}()},
\code{\link{opt_stylize}()},
\code{\link{opt_table_font}()},
\code{\link{opt_table_outline}()},
\code{\link{opt_vertical_padding}()}
}
\concept{table option functions}
|
#' Format Percentages
#'
#' \code{f_percent} - A wrapper for \code{\link[numform]{f_num}} that formats
#' percent values as labeled percentages.
#'
#' @param x A vector of proportions.
#' @param digits The number of digits to use. Defaults to 1. Can be set
#' globally via: \code{options(numformdigits = n)} where n is the number of
#' digits beyond the decimal point to include.
#' @param less.than.replace logical. If \code{TRUE} values lower than lowest
#' place value, specified by \code{digits}, will be replaced with a less than
#' sign followed by the \code{double} representation of the place value
#' specified by \code{digits}. For example, if \code{digits = 0} then
#' replacement will be \code{"<1\%"} or if \code{digits = 2} then replacement will
#' be \code{"<.01\%"}.
#' @param \ldots Other values passed to \code{\link[numform]{f_num}}.
#' @return Returns a string of publication ready digits.
#' @export
#' @rdname f_percent
#' @seealso \code{\link[numform]{f_num}}
#' @examples
#' f_percent(c(30, 33.45, .1))
#' f_percent(c(30, 33.45, .1), 1)
#' f_percent(c(0.0, 0, .2, -00.02, 1.122222, pi))
#' f_prop2percent(c(.30, 1, 1.01, .33, .222, .01))
#' f_pp(c(.30, 1, 1.01, .33, .222, .01))
#'
#' f_percent(c(30, 33.45, .1), digits = 0, less.than.replace = TRUE)
#' f_prop2percent(c(.30, 1, 1.01, .33, .222, .01, .0001, NA), digits = 0,
#' less.than.replace = TRUE)
#'
#' \dontrun{
#' library(tidyverse)
#'
#' mtcars %>%
#' count(cyl, gear) %>%
#' group_by(cyl) %>%
#' mutate(prop = n/sum(n)) %>%
#' ggplot(aes(gear, prop)) +
#' geom_bar(stat = 'identity') +
#' facet_wrap(~cyl, ncol = 1) +
#' scale_y_continuous(labels = ff_prop2percent(digits = 0))
#' }
f_percent <- function(x, digits = getOption("numformdigits"), less.than.replace = FALSE, ...) {
    # Format already-percent-scaled values and append the "%" symbol.
    formatted <- f_num(x, digits = digits, s = "%", ...)
    if (isTRUE(less.than.replace)) {
        # A NULL `digits` corresponds to one decimal place downstream.
        if (is.null(digits)) {
            digits <- 1
        }
        cuts <- replace_less_than(digits, percent = TRUE)
        # Values too small in magnitude to display are replaced with
        # "<..."/">-..." strings (positives and negatives separately).
        small_pos <- x < cuts[["prop_cut"]][1] & x >= 0
        small_neg <- x > cuts[["prop_cut"]][2] & x < 0
        formatted[small_pos] <- cuts[["replacement"]][1]
        formatted[small_neg] <- cuts[["replacement"]][2]
    }
    formatted
}
# Function-factory form of `f_percent()`: `functionize()` (defined in
# utils.R) presumably returns a wrapper with preset arguments, convenient for
# e.g. ggplot2 `labels = ff_percent(digits = 0)` (compare the roxygen example
# for `ff_prop2percent()` above) -- confirm against utils.R.
#' @export
#' @include utils.R
#' @rdname f_percent
ff_percent <- functionize(f_percent)
#' Format Percentages
#'
#' \code{f_prop2percent} - A wrapper for \code{\link[numform]{f_num}} that formats
#' proportions as labeled percentages.
#'
#' @rdname f_percent
#' @export
f_prop2percent <- function(x, digits = getOption("numformdigits"), less.than.replace = FALSE, ...) {
    # Proportions are scaled to the percent scale before formatting.
    formatted <- f_num(100 * x, digits = digits, s = "%", ...)
    if (isTRUE(less.than.replace)) {
        if (is.null(digits)) {
            digits <- 1
        }
        # `percent = FALSE` keeps the cut points on the proportion scale,
        # matching the units of the raw input `x`.
        cuts <- replace_less_than(digits, percent = FALSE)
        formatted[x < cuts[["prop_cut"]][1] & x >= 0] <- cuts[["replacement"]][1]
        formatted[x > cuts[["prop_cut"]][2] & x < 0] <- cuts[["replacement"]][2]
    }
    formatted
}
# Function-factory form of `f_prop2percent()` via `functionize()` (utils.R);
# used in the roxygen example above as
# `scale_y_continuous(labels = ff_prop2percent(digits = 0))`.
#' @export
#' @include utils.R
#' @rdname f_percent
ff_prop2percent <- functionize(f_prop2percent)
# `hijack()` (utils.R) presumably clones `f_prop2percent()` with
# `digits = 0` as the new default, per the roxygen text below -- confirm
# against utils.R.
#' Format Percentages
#'
#' \code{f_pp} - A wrapper for \code{\link[numform]{f_prop2percent}} that requires
#' less typing and has \code{digits} set to \code{0} by default.
#'
#' @export
#' @include utils.R
#' @rdname f_percent
f_pp <- hijack(f_prop2percent, digits = 0)
# Function-factory variant of `f_pp()` (see `functionize()` in utils.R).
#' @export
#' @include utils.R
#' @rdname f_percent
ff_pp <- functionize(f_pp)
# Internal helper: compute the positive/negative cut points below which a
# value is considered "too small to display", plus the "<x%" / ">-x%"
# replacement labels used by f_percent()/f_prop2percent().
replace_less_than <- function(digits = 0, prefix = c("<", ">-"), percent = FALSE, ...) {
    # On the percent scale the cut point is used as-is; on the proportion
    # scale it must be divided by 100.
    div <- if (percent) 1 else 1e2
    cut <- 1/(10^digits)
    list(
        prop_cut = c(cut/div, -cut/div),
        replacement = f_percent(cut, digits = digits, prefix = prefix)
    )
}
| /R/f_percent.R | no_license | jimhester/numform | R | false | false | 3,754 | r | #' Format Percentages
#'
#' \code{f_percent} - A wrapper for \code{\link[numform]{f_num}} that formats
#' percent values as labeled percentages.
#'
#' @param x A vector of proportions.
#' @param digits The number of digits to use. Defaults to 1. Can be set
#' globally via: \code{options(numformdigits = n)} where n is the number of
#' digits beyond the decimal point to include.
#' @param less.than.replace logical. If \code{TRUE} values lower than lowest
#' place value, specified by \code{digits}, will be replaced with a less than
#' sign followed by the \code{double} representation of the place value
#' specified by \code{digits}. For example, if \code{digits = 0} then
#' replacement will be \code{"<1\%"} or if \code{digits = 2} then replacement will
#' be \code{"<.01\%"}.
#' @param \ldots Other values passed to \code{\link[numform]{f_num}}.
#' @return Returns a string of publication ready digits.
#' @export
#' @rdname f_percent
#' @seealso \code{\link[numform]{f_num}}
#' @examples
#' f_percent(c(30, 33.45, .1))
#' f_percent(c(30, 33.45, .1), 1)
#' f_percent(c(0.0, 0, .2, -00.02, 1.122222, pi))
#' f_prop2percent(c(.30, 1, 1.01, .33, .222, .01))
#' f_pp(c(.30, 1, 1.01, .33, .222, .01))
#'
#' f_percent(c(30, 33.45, .1), digits = 0, less.than.replace = TRUE)
#' f_prop2percent(c(.30, 1, 1.01, .33, .222, .01, .0001, NA), digits = 0,
#' less.than.replace = TRUE)
#'
#' \dontrun{
#' library(tidyverse)
#'
#' mtcars %>%
#' count(cyl, gear) %>%
#' group_by(cyl) %>%
#' mutate(prop = n/sum(n)) %>%
#' ggplot(aes(gear, prop)) +
#' geom_bar(stat = 'identity') +
#' facet_wrap(~cyl, ncol = 1) +
#' scale_y_continuous(labels = ff_prop2percent(digits = 0))
#' }
f_percent <- function(x, digits = getOption("numformdigits"), less.than.replace = FALSE, ...) {
out <- f_num(x, digits = digits, s="%", ...)
if (isTRUE(less.than.replace)){
if (is.null(digits)) digits <- 1
repl <- replace_less_than(digits, percent = TRUE)
out[x < repl[['prop_cut']][1] & x >= 0] <- repl[['replacement']][1]
out[x > repl[['prop_cut']][2] & x < 0] <- repl[['replacement']][2]
}
out
}
#' @export
#' @include utils.R
#' @rdname f_percent
ff_percent <- functionize(f_percent)
#' Format Percentages
#'
#' \code{f_prop2percent} - A wrapper for \code{\link[numform]{f_num}} that formats
#' proportions as labeled percentages.
#'
#' @rdname f_percent
#' @export
f_prop2percent <- function(x, digits = getOption("numformdigits"), less.than.replace = FALSE, ...) {
out <- f_num(100*x, digits = digits, s="%", ...)
if (isTRUE(less.than.replace)){
if (is.null(digits)) digits <- 1
repl <- replace_less_than(digits, percent = FALSE)
out[x < repl[['prop_cut']][1] & x >= 0] <- repl[['replacement']][1]
out[x > repl[['prop_cut']][2] & x < 0] <- repl[['replacement']][2]
}
out
}
#' @export
#' @include utils.R
#' @rdname f_percent
ff_prop2percent <- functionize(f_prop2percent)
#' Format Percentages
#'
#' \code{f_pp} - A wrapper for \code{\link[numform]{f_prop2percent}} that requires
#' less typing and has \code{digits} set to \code{0} by default.
#'
#' @export
#' @include utils.R
#' @rdname f_percent
f_pp <- hijack(f_prop2percent, digits = 0)
#' @export
#' @include utils.R
#' @rdname f_percent
ff_pp <- functionize(f_pp)
replace_less_than <- function(digits = 0, prefix = c("<", ">-"), percent = FALSE, ...){
if(percent) div <- 1 else div <- 1e2
cut <- 1/(10^digits)
list(prop_cut = c((cut)/div, -(cut)/div), replacement = f_percent(cut, digits = digits, prefix = prefix))
}
|
# Tests for the gplatesr reconstruction wrappers.
# NOTE(review): both tests assert that the calls *error*.  For
# gplates_reconstruct_coastlines() the inline comment gives the reason (only
# one age is allowed at a time); for gplates_reconstruct_point() no reason is
# stated -- confirm against the function whether erroring on these arguments
# is the intended contract.
context("test-gplates_reconstruct")
test_that("gplates_reconstruct_point works", {
  # Expected to fail with these arguments (see note above).
  expect_error(gplates_reconstruct_point(lon = 15,lat = 15, age = 65))
})
test_that("gplates_reconstruct_coastlines works", {
  # only one age at a time: a length-2 age vector must raise an error
  expect_error(gplates_reconstruct_coastlines(c(100, 140)))
})
| /tests/testthat/test-gplates_reconstruct.R | no_license | LunaSare/gplatesr | R | false | false | 308 | r | context("test-gplates_reconstruct")
test_that("gplates_reconstruct_point works", {
expect_error(gplates_reconstruct_point(lon = 15,lat = 15, age = 65))
})
test_that("gplates_reconstruct_coastlines works", {
# only one age at a time:
expect_error(gplates_reconstruct_coastlines(c(100, 140)))
})
|
# Demo script: querying SQL Server via ODBC with the RODBC package.
# NOTE(review): `library(RODBC)` would be preferable here -- `require()`
# only warns (and returns FALSE) when the package is missing, so later
# calls would fail with a less clear error.
require(RODBC)
# Helped from: http://stackoverflow.com/questions/20310261/read-data-from-microsoft-sql-database-on-remote-desktop-via-r
# By: Sudip Kafle at http://stackoverflow.com/users/1159766/sudip-kafle
#' Open an ODBC connection to a SQL Server database.
#'
#' When `pass` is NULL, integrated (trusted) Windows authentication is used;
#' otherwise the `user`/`pass` pair is embedded in the connection string.
#' Returns the RODBC connection handle on success, or (invisibly) NULL after
#' printing the error on failure.
ufn_get_connection <- function(host, db, user = NULL, pass = NULL) {
    # Build the connection string once instead of duplicating it per branch.
    # (The original kept `trusted_connection=true` even when uid/pwd are
    # supplied; preserved here for behavioural compatibility.)
    credentials <- if (is.null(pass)) "" else paste0(";uid=", user, ";pwd=", pass)
    conn_str <- paste0(
        "server=", host,
        ";database=", db,
        credentials,
        ";trusted_connection=true;Port=1433;driver={SQL Server};TDS_Version=7.0;"
    )
    # Renamed local from `c` (which shadowed base::c) to `con`.
    con <- odbcDriverConnect(connection = conn_str)
    # `inherits()` is the robust way to test the class (the original compared
    # class(con) == 'RODBC', which breaks for multi-class objects).
    if (inherits(con, "RODBC")) {
        writeLines("Successfully opened connection to db")  # typo "Successfilly" fixed
        return(con)
    }
    # On failure odbcDriverConnect() returns a non-connection value; report
    # it and return NULL invisibly (matching the original's implicit result).
    writeLines(paste0("Error opening connection: ", as.character(con)))
    invisible(NULL)
}
# Open a connection using integrated (Windows) security; the server/instance
# name below is machine-specific.
myisconnection <- ufn_get_connection ("BRYANCAFFERKYPC\\BPC", "AdventureWorks")
# Alternative: connect with explicit SQL Server credentials.
# mysaconnection <- ufn_get_connection ("(local)", "AdventureWorks", "bryan", "bryan")
# List the columns of the Sales.CreditCard table.
sqlColumns(myisconnection, "Sales.CreditCard")
# Do a query; `errors = TRUE` controls how ODBC errors are reported
# (see ?sqlQuery).
myresults <- sqlQuery(myisconnection, "select * from Sales.CreditCard", errors = TRUE)
# Display the result data frame and basic information about it.
myresults
class(myresults)
summary(myresults)
## load a data frame into the database: row names become a "state" column,
## which `addPK = TRUE` marks as the primary key
data(USArrests)
sqlSave(myisconnection, USArrests, rownames = "state", addPK = TRUE)
# rm(USArrests)
## list the tables in the database
sqlTables(myisconnection)
| /DataScienceFromAtoZ/Script/Script/Part_2/R_Demo_8_dataframe_odbc.R | no_license | bcafferky/shared | R | false | false | 1,841 | r | require(RODBC)
# Helped from: http://stackoverflow.com/questions/20310261/read-data-from-microsoft-sql-database-on-remote-desktop-via-r
# By: Sudip Kafle at http://stackoverflow.com/users/1159766/sudip-kafle
ufn_get_connection <- function(host, db, user=NULL, pass=NULL )
{
if(is.null(pass))
{
c <- odbcDriverConnect(connection=paste0("server=",host,
";database=",db,
";trusted_connection=true;Port=1433;driver={SQL Server};TDS_Version=7.0;"))
}
else
{
c <- odbcDriverConnect(connection=paste0("server=",host,
";database=",db,
";uid=",user,
";pwd=",pass,
";trusted_connection=true;Port=1433;driver={SQL Server};TDS_Version=7.0;"))
}
if(class(c) == 'RODBC')
{
writeLines("Successfilly opened connection to db")
return(c)
}
else
{
writeLines(paste0("Error opening connection: ", as.character(c)))
}
}
# Connection with integrated security...
myisconnection <- ufn_get_connection ("BRYANCAFFERKYPC\\BPC", "AdventureWorks")
# Connection using credentials...
# mysaconnection <- ufn_get_connection ("(local)", "AdventureWorks", "bryan", "bryan")
# Get column list...
sqlColumns(myisconnection, "Sales.CreditCard")
# Doa query...
myresults <- sqlQuery(myisconnection, "select * from Sales.CreditCard", errors = TRUE)
# Display results...
myresults
class(myresults)
summary(myresults)
## load a data frame into the database
data(USArrests)
sqlSave(myisconnection, USArrests, rownames = "state", addPK = TRUE)
# rm(USArrests)
## list the tables in the database
sqlTables(myisconnection)
|
library(bayesmeta)
library(bayesplot)
library(ggplot2)
library(dplyr)
library("rstan")
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)
# Four studies with CTS N
# (yi = study effect estimates, sei = their standard errors -- these feed
# the `beta`/`sigma` entries of the Stan data list assembled below)
publication <- c("Dafny (2016)", "Frank (1995)", "Helland (2016)", "Grabowski (2007)")
yi <- c(-0.094, -0.097, -0.053, -0.09)
sei <- c(0.008, 0.038, 0.009, 0.01)
# NOTE(review): N_mean has 3 values while there are 4 publications above --
# confirm whether a value is missing or only three studies need it.
N_mean <- c(3.62, 3.62, 3.31)
df <- data.frame(publication, yi, sei)
# One study with categorical N
# Five estimate/SE pairs (y1i..y5i, se1i..se5i), one per entrant category.
# The fifth pair is divided by 6..10 -- presumably the number of entrants in
# each scenario; confirm against the Stan model.
publication2 <- rep("Tenn (2014)", 5)
y1i <- rep(-0.091, 5)
se1i <- rep(0.035, 5)
y2i <- rep(-0.087, 5)
se2i <- rep(0.056, 5)
y3i <- rep(-0.109, 5)
se3i <- rep(0.065, 5)
y4i <- rep(-0.221, 5)
se4i <- rep(0.077, 5)
y5i <- c(-0.295/6, -0.362/7, -0.406/8, -0.455/9, -0.581/10)
se5i <- c(0.082/6, 0.108/7, 0.126/8, 0.152/9, 0.246/10)
# Commented-out single-row alternative (left as in the original):
#publication2 <- c("Tenn (2014)")
#y1i <- c(-0.091)
#se1i <- c(0.035)
#y2i <- c(-0.087)
#se2i <- c(0.056)
#y3i <- c(-0.455)
#se3i <- c(0.152)
df2 <- data.frame(publication2, y1i, se1i, y2i, se2i, y3i, se3i, y4i, se4i, y5i, se5i)
set.seed(123)
J <- nrow(df)
M <- nrow(df2)
# Specify which studies have % of entrants missing (among 4 studies with CTS N)
J_obs <- 1
J_mis <- nrow(df) - J_obs
ii_obs <- c(4)
ii_mis <- c(1, 2, 3)
# Dirichlet prior for missing % (from MEPS)
alpha = c(0.140, 0.143, 0.177, 0.083, 0.081, 0.075, 0.108, 0.063, 0.048, 0.123, 0.130)
# % from one study where % of entrants is not missing
p_obs = c(4/40, 3/40, 4/40, 3/40, 4/40, 4/40, 1/40, 2/40, 3/40, 2/40, 10/40)
## Combine as data input for the Stan model: observed/missing share indices,
## CTS-N estimates (beta, sigma), and the five categorical-N outcome groups
## (beta1..beta5 with their standard errors sigma1..sigma5).
stan.dat_nobias_cat <- list(J_obs = J_obs,
                            J_mis = J_mis,
                            ii_obs = ii_obs,
                            ii_mis = ii_mis,
                            beta = df$yi,
                            sigma = df$sei,
                            N_mean = N_mean,
                            P = 11, # number of entrant groups
                            p_obs = p_obs,
                            #alpha = alpha,
                            #M = 1,
                            beta1 = df2$y1i,
                            sigma1 = df2$se1i,
                            beta2 = df2$y2i,
                            sigma2 = df2$se2i,
                            beta3 = df2$y3i,
                            sigma3 = df2$se3i,
                            beta4 = df2$y4i,
                            sigma4 = df2$se4i,
                            beta5 = df2$y5i,
                            sigma5 = df2$se5i)
# Fit the non-centered categorical-N model. The high adapt_delta and deep
# max_treedepth settings are used to suppress divergent transitions.
fit <- stan(
  file = "Bayesmeta_nobias_noncentered_cat_NB_5_groups.stan", # Stan program
  data = stan.dat_nobias_cat,    # named list of data
  chains = 4,             # number of Markov chains
  warmup = 1000,          # number of warmup iterations per chain
  iter = 6000,            # total number of iterations per chain
  cores = 4,              # number of cores (could use one per chain)
  #refresh = 1000,          # no progress shown
  control = list(adapt_delta = 0.995,
                 max_treedepth = 18)
)
# Convergence diagnostics: trace plots of the group-level means and weights.
plot(fit, plotfun = "trace", pars = c("mu1", "mu2", "mu3", "mu4", "mu5"), inc_warmup = TRUE, nrow = 5)
plot(fit, plotfun = "trace", pars = c("weight_sim[1]", "weight_sim[2]", "weight_sim[3]"), inc_warmup = TRUE, nrow = 4)
print(fit)
fit_sim <- extract(fit)
# Posterior predictive simulation: for each posterior draw, simulate replicated
# effect estimates around the fitted study-level effects (gamma1-gamma3).
n_sims <- length(fit_sim$lp__)
# NOTE(review): the arrays use 7 columns while df2 has nrow(df2) = 5 rows, so
# the se vectors are recycled when filling a row -- confirm 7 is intended.
beta1_rep <- array(NA, c(n_sims, 7))
beta2_rep <- array(NA, c(n_sims, 7))
beta3_rep <- array(NA, c(n_sims, 7))
for (s in 1:n_sims) {
  # Bug fix: the original called rnorm(1, ...), which produced a single draw
  # (using only the first mean/sd element) recycled across all 7 columns.
  # Draw one value per column instead, mirroring the analogous loop that
  # appears later in this script.
  beta1_rep[s,] <- rnorm(7, fit_sim$gamma1[s,], df2$se1i)
  beta2_rep[s,] <- rnorm(7, fit_sim$gamma2[s,], df2$se2i)
  beta3_rep[s,] <- rnorm(7, fit_sim$gamma3[s,], df2$se3i)
}
## Replicated data in a new study: allocate draws of the study-level effect
## (theta) and the resulting estimate (beta) for outcome groups 1-3.
# NOTE(review): rows of length 7 vs nrow(df2) = 5 standard errors -- the se
# vectors are recycled below; confirm 7 columns is intended.
theta1_rep <- array(NA, c(n_sims, 7))
beta1_rep <- array(NA, c(n_sims, 7))
theta2_rep <- array(NA, c(n_sims, 7))
beta2_rep <- array(NA, c(n_sims, 7))
theta3_rep <- array(NA, c(n_sims, 7))
beta3_rep <- array(NA, c(n_sims, 7))
for (s in 1:n_sims){
  # Replicated data in a hypothetical new study: draw a study-level effect
  # from the population distribution, then an estimate around it.
  theta1_rep[s,] <- rnorm(7, fit_sim$mu1[s], fit_sim$tau1[s])
  beta1_rep[s,] <- rnorm(7, theta1_rep[s,], df2$se1i)
  # Bug fix: the original paired mu1 with tau2 and tau3 (copy-paste slip);
  # each group's theta should be drawn around its own population mean
  # (the model exposes mu1-mu5, cf. the trace-plot pars above).
  theta2_rep[s,] <- rnorm(7, fit_sim$mu2[s], fit_sim$tau2[s])
  beta2_rep[s,] <- rnorm(7, theta2_rep[s,], df2$se2i)
  theta3_rep[s,] <- rnorm(7, fit_sim$mu3[s], fit_sim$tau3[s])
  beta3_rep[s,] <- rnorm(7, theta3_rep[s,], df2$se3i)
}
# Posterior predictive summaries for the "new study" draws: medians and
# 95% intervals per outcome group.
median(beta1_rep)
quantile(beta1_rep, probs = c(.025, .975))
median(beta2_rep)
quantile(beta2_rep, probs = c(.025, .975))
median(beta3_rep)
quantile(beta3_rep, probs = c(.025, .975))
# Posterior predictive draws around each group's fitted study-level effects
# (gamma1-gamma3), followed by the same summaries.
beta1_rep <- array(NA, c(n_sims, 7))
beta2_rep <- array(NA, c(n_sims, 7))
beta3_rep <- array(NA, c(n_sims, 7))
for (s in 1:n_sims) {
  beta1_rep[s,] <- rnorm(7, fit_sim$gamma1[s,], df2$se1i)
  beta2_rep[s,] <- rnorm(7, fit_sim$gamma2[s,], df2$se2i)
  beta3_rep[s,] <- rnorm(7, fit_sim$gamma3[s,], df2$se3i)
}
median(beta1_rep)
quantile(beta1_rep, probs = c(.025, .975))
median(beta2_rep)
quantile(beta2_rep, probs = c(.025, .975))
median(beta3_rep)
quantile(beta3_rep, probs = c(.025, .975))
| /Bayesmeta_categorical_clean_NB_5_groups.R | no_license | shuxchen/BayesMeta | R | false | false | 5,124 | r | library(bayesmeta)
library(bayesplot)
library(ggplot2)
library(dplyr)
library("rstan")
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)
# Four studies with CTS N
publication <- c("Dafny (2016)", "Frank (1995)", "Helland (2016)", "Grabowski (2007)")
yi <- c(-0.094, -0.097, -0.053, -0.09)
sei <- c(0.008, 0.038, 0.009, 0.01)
N_mean <- c(3.62, 3.62, 3.31)
df <- data.frame(publication, yi, sei)
# One study with categorical N
publication2 <- rep("Tenn (2014)", 5)
y1i <- rep(-0.091, 5)
se1i <- rep(0.035, 5)
y2i <- rep(-0.087, 5)
se2i <- rep(0.056, 5)
y3i <- rep(-0.109, 5)
se3i <- rep(0.065, 5)
y4i <- rep(-0.221, 5)
se4i <- rep(0.077, 5)
y5i <- c(-0.295/6, -0.362/7, -0.406/8, -0.455/9, -0.581/10)
se5i <- c(0.082/6, 0.108/7, 0.126/8, 0.152/9, 0.246/10)
#publication2 <- c("Tenn (2014)")
#y1i <- c(-0.091)
#se1i <- c(0.035)
#y2i <- c(-0.087)
#se2i <- c(0.056)
#y3i <- c(-0.455)
#se3i <- c(0.152)
df2 <- data.frame(publication2, y1i, se1i, y2i, se2i, y3i, se3i, y4i, se4i, y5i, se5i)
set.seed(123)
J <- nrow(df)
M <- nrow(df2)
# Specify which studies have % of entrants missing (among 4 studies with CTS N)
J_obs <- 1
J_mis <- nrow(df) - J_obs
ii_obs <- c(4)
ii_mis <- c(1, 2, 3)
# Dirichlet prior for missing % (from MEPS)
alpha = c(0.140, 0.143, 0.177, 0.083, 0.081, 0.075, 0.108, 0.063, 0.048, 0.123, 0.130)
# % from one study where % of entrants is not missing
p_obs = c(4/40, 3/40, 4/40, 3/40, 4/40, 4/40, 1/40, 2/40, 3/40, 2/40, 10/40)
## Combine as data input
stan.dat_nobias_cat <- list(J_obs = J_obs,
J_mis = J_mis,
ii_obs = ii_obs,
ii_mis = ii_mis,
beta = df$yi,
sigma = df$sei,
N_mean = N_mean,
P = 11, # number of entrant groups
p_obs = p_obs,
#alpha = alpha,
#M = 1,
beta1 = df2$y1i,
sigma1 = df2$se1i,
beta2 = df2$y2i,
sigma2 = df2$se2i,
beta3 = df2$y3i,
sigma3 = df2$se3i,
beta4 = df2$y4i,
sigma4 = df2$se4i,
beta5 = df2$y5i,
sigma5 = df2$se5i)
fit <- stan(
file = "Bayesmeta_nobias_noncentered_cat_NB_5_groups.stan", # Stan program
data = stan.dat_nobias_cat, # named list of data
chains = 4, # number of Markov chains
warmup = 1000, # number of warmup iterations per chain
iter = 6000, # total number of iterations per chain
cores = 4, # number of cores (could use one per chain)
#refresh = 1000, # no progress shown
control = list(adapt_delta = 0.995,
max_treedepth = 18)
)
plot(fit, plotfun = "trace", pars = c("mu1", "mu2", "mu3", "mu4", "mu5"), inc_warmup = TRUE, nrow = 5)
plot(fit, plotfun = "trace", pars = c("weight_sim[1]", "weight_sim[2]", "weight_sim[3]"), inc_warmup = TRUE, nrow = 4)
print(fit)
fit_sim <- extract(fit)
#posterior predictive simulation
n_sims <- length(fit_sim$lp__)
beta1_rep <- array(NA, c(n_sims, 7))
beta2_rep <- array(NA, c(n_sims, 7))
beta3_rep <- array(NA, c(n_sims, 7))
for (s in 1:n_sims) {
  # Bug fix: rnorm(1, ...) produced a single draw (using only the first
  # mean/sd element) recycled across all 7 columns; draw one value per
  # column instead, as the analogous loop later in this script does.
  beta1_rep[s,] <- rnorm(7, fit_sim$gamma1[s,], df2$se1i)
  beta2_rep[s,] <- rnorm(7, fit_sim$gamma2[s,], df2$se2i)
  beta3_rep[s,] <- rnorm(7, fit_sim$gamma3[s,], df2$se3i)
}
##Replicated data in new study
theta1_rep <- array(NA, c(n_sims, 7))
beta1_rep <- array(NA, c(n_sims, 7))
theta2_rep <- array(NA, c(n_sims, 7))
beta2_rep <- array(NA, c(n_sims, 7))
theta3_rep <- array(NA, c(n_sims, 7))
beta3_rep <- array(NA, c(n_sims, 7))
for (s in 1:n_sims){
  theta1_rep[s,] <- rnorm(7, fit_sim$mu1[s], fit_sim$tau1[s])
  beta1_rep[s,] <- rnorm(7, theta1_rep[s,], df2$se1i)
  # Bug fix: mu1 was paired with tau2/tau3 (copy-paste slip); each group's
  # study-level effect is drawn around its own population mean mu2 / mu3.
  theta2_rep[s,] <- rnorm(7, fit_sim$mu2[s], fit_sim$tau2[s])
  beta2_rep[s,] <- rnorm(7, theta2_rep[s,], df2$se2i)
  theta3_rep[s,] <- rnorm(7, fit_sim$mu3[s], fit_sim$tau3[s])
  beta3_rep[s,] <- rnorm(7, theta3_rep[s,], df2$se3i)
}
median(beta1_rep)
quantile(beta1_rep, probs = c(.025, .975))
median(beta2_rep)
quantile(beta2_rep, probs = c(.025, .975))
median(beta3_rep)
quantile(beta3_rep, probs = c(.025, .975))
beta1_rep <- array(NA, c(n_sims, 7))
beta2_rep <- array(NA, c(n_sims, 7))
beta3_rep <- array(NA, c(n_sims, 7))
for (s in 1:n_sims) {
beta1_rep[s,] <- rnorm(7, fit_sim$gamma1[s,], df2$se1i)
beta2_rep[s,] <- rnorm(7, fit_sim$gamma2[s,], df2$se2i)
beta3_rep[s,] <- rnorm(7, fit_sim$gamma3[s,], df2$se3i)
}
median(beta1_rep)
quantile(beta1_rep, probs = c(.025, .975))
median(beta2_rep)
quantile(beta2_rep, probs = c(.025, .975))
median(beta3_rep)
quantile(beta3_rep, probs = c(.025, .975))
|
\name{compContourM1/2u}
\alias{compContourM1u}
\alias{compContourM2u}
\alias{compContourM1/2u}
\title{Directional Regression Quantile Computation}
\description{
The functions \code{compContourM1u} and \code{compContourM2u}
may be used to obtain not only directional regression quantiles
for \emph{all} directions, but also some related overall
statistics. Their output may also be used for the evaluation
of the corresponding regression quantile regions by means of
\code{\link{evalContour}}. The functions use different
methods and algorithms, namely \code{compContourM1u} is based
on [01] and [06] and \code{compContourM2u} results from [03]
and [07]. The corresponding regression quantile regions are
nevertheless virtually the same. See all the references below
for further details and possible applications.
}
\usage{
compContourM1u(Tau = 0.2, YMat = NULL, XMat = NULL, CTechST = NULL)
compContourM2u(Tau = 0.2, YMat = NULL, XMat = NULL, CTechST = NULL)
}
\arguments{
\item{Tau}{the quantile level in (0, 0.5).}
\item{YMat}{the N x M response matrix with two to six
columns, \code{N > M+P-1}.
Each row corresponds to one observation.}
\item{XMat}{the N x P design matrix including the (first)
intercept column. The default NULL value
corresponds to the unit vector of the right
length.
Each row corresponds to one observation.}
\item{CTechST}{the (optional) list with some parameters
influencing the computation and its output.
Its default value can be generated by
method-dependent \code{\link{getCTechSTM1/2u}}
and then modified by the user before its use
in \code{compContourM1/2u}.}
}
\details{
Generally, the performance of the functions deteriorates with
increasing Tau, N, M, and P as for their reliability and
time requirements. Nevertheless, they should work fine
at least for two-dimensional problems
up to N = 10000 and P = 10,
for three-dimensional problems
up to N = 500 and P = 5,
and for four-dimensional problems
up to N = 150 and P = 3.
  Furthermore, common problems related to the computation
can fortunately be prevented or overcome easily.
\bold{Bad data} - the computation may fail if the processed
data points are in a bad configuration (i.e., if they are not
in general position or if they would lead to a quantile
hyperplane with at least one zero coefficient), which mostly
happens when discrete-valued/rounded/repeated observations,
dummy variables or bad random number generators are employed.
Such problems can often be prevented if one perturbs the data
with a random noise of a reasonably small magnitude before the
computation, splits the model into separate or independent
submodels, cleverly uses affine equivariance, or replaces
a few identical observations with a copy of them weighted
by the total number of their occurrences.
\bold{Bad Tau} - the computation may fail for a finite number of
problematic quantile levels, e.g., if Tau is an integer multiple
of 1/N in the location case with unit weights (when the sample
quantiles are not uniquely defined). Such a situation may occur
easily for Tau's with only a few decimal digits or in a
fractional form, especially when the number of observations
changes automatically during the computation. The problem
can be fixed easily by perturbing Tau with a sufficiently small
number in the right direction, which should not affect the
resulting regression quantile contours although it may slightly
change the other output. The strategy is also adopted
by \code{compContourM1/2u}, but only in the location case and
with a warning output message explaining it.
\bold{Bad scale} - the computation may fail easily for badly
scaled data. That is to say that the functionality has been
heavily tested only for the observations coming from
a centered unit hypercube. Nevertheless, you can always
change the units of measurements or employ full affine
equivariance to avoid all the troubles. Similar problems
may also arise when properly scaled data are used with highly
non-uniform weights, which frequently happens in local(ly)
polynomial regression. Then the weights can be rescaled
in a suitable way and the observations with virtually zero
weights can be excluded from the computation.
\bold{Bad expectations} - the computation and its output need
not meet false expectations. Every user should be aware of
the facts that the computation may take a long time or fail
even for moderately sized three-dimensional data sets, that the
\code{HypMat} component is not always present in the list
\code{COutST$CharST} by default, and that the sample regression
quantile contours can be not only empty, but also unbounded and
crossing one another in the general regression case.
\bold{Bad interpretation} - the output results may be easily
interpreted misleadingly or erroneously. That is to say that
the quantile level Tau is not linked to the probability
content of the sample (regression) Tau-quantile region in
any straightforward way. Furthermore, any meaningful
parametric quantile regression model should include
as regressors not only the variables influencing the
trend, but also all those affecting the dispersion
of the multivariate responses. Even then the cuts
of the resulting regression quantile contours parallel
to the response space cannot be safely interpreted
as conditional multivariate quantiles except for
some very special cases. Nevertheless, such a
conclusion could somehow be warranted in case of
nonparametric multiple-output quantile regression; see [09].
}
\value{
Both compContourM1u and compContourM2u may display some
auxiliary information regarding the computation on the screen
(if \code{CTechST$ReportI} = 1) or store their in-depth
output (determined by \code{CTechST$BriefOutputI}) in the output
files (if \code{CTechST$OutSaveI} = 1) with
the filenames beginning with the string contained in
\code{CTechST$OutFilePrefS}, followed by the file number
padded with zeros to form six digits
and by the extension \file{.dqo}, respectively. The first
output file produced by \code{compContourM1u} would
thus be named \file{DQOutputM1_000001.dqo}.
Both compContourM1u and compContourM2u always return
a list with the same components. Their interpretation is
also the same (except for CharST that itself contains some
components that are method-specific):
\item{CharST}{the list with some default or
user-defined output.
The default one is provided
by function \code{\link{getCharSTM1u}}
for \code{compContourM1u} and by
function \code{\link{getCharSTM2u}}
for \code{compContourM2u}.
A user-defined function generating
its own output can be employed instead
by changing \code{CTechST$getCharST}.}
\item{CTechSTMsgS}{the (possibly empty) string that informs
about the problems with input \code{CTechST}.}
\item{ProbSizeMsgS}{the (possibly empty) string that warns
if the input problem is very large.}
\item{TauMsgS}{the (possibly empty) string that announces
an internal perturbation of \code{Tau}.}
  \item{CompErrMsgS}{the (possibly empty) string that describes
the error interrupting the computation.}
\item{NDQFiles}{the counter of (possible) output files,
i.e., as if \code{CTechST$OutSaveI} = 1.}
\item{NumB}{the counter of (not necessarily distinct) optimal
bases considered.}
  \item{PosVec}{the vector of length N that describes
the position of individual (regression)
observations with respect to the
exact (regression) Tau-quantile
contour.
The identification is reliable only after a
successful computation.
\code{PosVec[i]} = 0/1/2 if the \code{i}-th
observation is in/on/out of the contour.
If \code{compContourM2u} is used with
\code{CTechST$SkipRedI} = 1, then \code{PosVec}
correctly detects only all the outer observations.}
\item{MaxLWidth}{the maximum width of one layer of the
internal algorithm.}
\item{NIniNone}{the number of trials when the initial
solution could not be found at all.}
\item{NIniBad}{the number of trials when the found
initial solution did not have
the right number of clearly nonzero
coordinates.}
\item{NSkipCone}{the number of skipped cones (where
an interior point could not be found).}
If \code{CTechST.CubRegWiseI} = 1, then the last four
components are calculated over all the individual
orthants.
}
\references{
[01] Hallin, M., Paindaveine, D. and Šiman, M. (2010)
Multivariate quantiles and multiple-output regression quantiles:
from L1 optimization to halfspace depth.
\emph{Annals of Statistics} \bold{38}, 635--669.
[02] Hallin, M., Paindaveine, D. and Šiman, M. (2010)
Rejoinder (to [01]).
\emph{Annals of Statistics} \bold{38}, 694--703.
[03] Paindaveine, D. and Šiman, M. (2011)
On directional multiple-output quantile regression.
\emph{Journal of Multivariate Analysis} \bold{102}, 193--212.
[04] Šiman, M. (2011)
On exact computation of some statistics based on projection
pursuit in a general regression context.
\emph{Communications in Statistics - Simulation and Computation} \bold{40}, 948--956.
[05] McKeague, I. W., López-Pintado, S., Hallin, M. and Šiman, M. (2011)
Analyzing growth trajectories.
\emph{Journal of Developmental Origins of Health and Disease} \bold{2}, 322--329.
[06] Paindaveine, D. and Šiman, M. (2012)
Computing multiple-output regression quantile regions.
\emph{Computational Statistics & Data Analysis} \bold{56}, 840--853.
[07] Paindaveine, D. and Šiman, M. (2012)
Computing multiple-output regression quantile regions
from projection quantiles.
\emph{Computational Statistics} \bold{27}, 29--49.
[08] Šiman, M. (2014)
Precision index in the multivariate context.
\emph{Communications in Statistics - Theory and Methods} \bold{43}, 377--387.
[09] Hallin, M., Lu, Z., Paindaveine, D. and Šiman, M. (2015)
Local bilinear multiple-output quantile/depth regression.
\emph{Bernoulli} \bold{21}, 1435--1466.
}
\examples{
##computing all directional 0.15-quantiles of 199 random points
##uniformly distributed in the unit square centered at zero
##- preparing the input
Tau <- 0.15
XMat <- matrix(1, 199, 1)
YMat <- matrix(runif(2*199, -0.5, 0.5), 199, 2)
##- Method 1:
COutST <- compContourM1u(Tau, YMat, XMat)
##- Method 2:
COutST <- compContourM2u(Tau, YMat, XMat)
}
| /man/compContour.Rd | no_license | cran/modQR | R | false | false | 11,033 | rd | \name{compContourM1/2u}
\alias{compContourM1u}
\alias{compContourM2u}
\alias{compContourM1/2u}
\title{Directional Regression Quantile Computation}
\description{
The functions \code{compContourM1u} and \code{compContourM2u}
may be used to obtain not only directional regression quantiles
for \emph{all} directions, but also some related overall
statistics. Their output may also be used for the evaluation
of the corresponding regression quantile regions by means of
\code{\link{evalContour}}. The functions use different
methods and algorithms, namely \code{compContourM1u} is based
on [01] and [06] and \code{compContourM2u} results from [03]
and [07]. The corresponding regression quantile regions are
nevertheless virtually the same. See all the references below
for further details and possible applications.
}
\usage{
compContourM1u(Tau = 0.2, YMat = NULL, XMat = NULL, CTechST = NULL)
compContourM2u(Tau = 0.2, YMat = NULL, XMat = NULL, CTechST = NULL)
}
\arguments{
\item{Tau}{the quantile level in (0, 0.5).}
\item{YMat}{the N x M response matrix with two to six
columns, \code{N > M+P-1}.
Each row corresponds to one observation.}
\item{XMat}{the N x P design matrix including the (first)
intercept column. The default NULL value
corresponds to the unit vector of the right
length.
Each row corresponds to one observation.}
\item{CTechST}{the (optional) list with some parameters
influencing the computation and its output.
Its default value can be generated by
method-dependent \code{\link{getCTechSTM1/2u}}
and then modified by the user before its use
in \code{compContourM1/2u}.}
}
\details{
Generally, the performance of the functions deteriorates with
increasing Tau, N, M, and P as for their reliability and
time requirements. Nevertheless, they should work fine
at least for two-dimensional problems
up to N = 10000 and P = 10,
for three-dimensional problems
up to N = 500 and P = 5,
and for four-dimensional problems
up to N = 150 and P = 3.
Furthemore, common problems related to the computation
can fortunately be prevented or overcome easily.
\bold{Bad data} - the computation may fail if the processed
data points are in a bad configuration (i.e., if they are not
in general position or if they would lead to a quantile
hyperplane with at least one zero coefficient), which mostly
happens when discrete-valued/rounded/repeated observations,
dummy variables or bad random number generators are employed.
Such problems can often be prevented if one perturbs the data
with a random noise of a reasonably small magnitude before the
computation, splits the model into separate or independent
submodels, cleverly uses affine equivariance, or replaces
a few identical observations with a copy of them weighted
by the total number of their occurrences.
\bold{Bad Tau} - the computation may fail for a finite number of
problematic quantile levels, e.g., if Tau is an integer multiple
of 1/N in the location case with unit weights (when the sample
quantiles are not uniquely defined). Such a situation may occur
easily for Tau's with only a few decimal digits or in a
fractional form, especially when the number of observations
changes automatically during the computation. The problem
can be fixed easily by perturbing Tau with a sufficiently small
number in the right direction, which should not affect the
resulting regression quantile contours although it may slightly
change the other output. The strategy is also adopted
by \code{compContourM1/2u}, but only in the location case and
with a warning output message explaining it.
\bold{Bad scale} - the computation may fail easily for badly
scaled data. That is to say that the functionality has been
heavily tested only for the observations coming from
a centered unit hypercube. Nevertheless, you can always
change the units of measurements or employ full affine
equivariance to avoid all the troubles. Similar problems
may also arise when properly scaled data are used with highly
non-uniform weights, which frequently happens in local(ly)
polynomial regression. Then the weights can be rescaled
in a suitable way and the observations with virtually zero
weights can be excluded from the computation.
\bold{Bad expectations} - the computation and its output need
not meet false expectations. Every user should be aware of
the facts that the computation may take a long time or fail
even for moderately sized three-dimensional data sets, that the
\code{HypMat} component is not always present in the list
\code{COutST$CharST} by default, and that the sample regression
quantile contours can be not only empty, but also unbounded and
crossing one another in the general regression case.
\bold{Bad interpretation} - the output results may be easily
interpreted misleadingly or erroneously. That is to say that
the quantile level Tau is not linked to the probability
content of the sample (regression) Tau-quantile region in
any straightforward way. Furthermore, any meaningful
parametric quantile regression model should include
as regressors not only the variables influencing the
trend, but also all those affecting the dispersion
of the multivariate responses. Even then the cuts
of the resulting regression quantile contours parallel
to the response space cannot be safely interpreted
as conditional multivariate quantiles except for
some very special cases. Nevertheless, such a
conclusion could somehow be warranted in case of
nonparametric multiple-output quantile regression; see [09].
}
\value{
Both compContourM1u and compContourM2u may display some
auxiliary information regarding the computation on the screen
(if \code{CTechST$ReportI} = 1) or store their in-depth
output (determined by \code{CTechST$BriefOutputI}) in the output
files (if \code{CTechST$OutSaveI} = 1) with
the filenames beginning with the string contained in
\code{CTechST$OutFilePrefS}, followed by the file number
padded with zeros to form six digits
and by the extension \file{.dqo}, respectively. The first
output file produced by \code{compContourM1u} would
thus be named \file{DQOutputM1_000001.dqo}.
Both compContourM1u and compContourM2u always return
a list with the same components. Their interpretation is
also the same (except for CharST that itself contains some
components that are method-specific):
\item{CharST}{the list with some default or
user-defined output.
The default one is provided
by function \code{\link{getCharSTM1u}}
for \code{compContourM1u} and by
function \code{\link{getCharSTM2u}}
for \code{compContourM2u}.
A user-defined function generating
its own output can be employed instead
by changing \code{CTechST$getCharST}.}
\item{CTechSTMsgS}{the (possibly empty) string that informs
about the problems with input \code{CTechST}.}
\item{ProbSizeMsgS}{the (possibly empty) string that warns
if the input problem is very large.}
\item{TauMsgS}{the (possibly empty) string that announces
an internal perturbation of \code{Tau}.}
\item{CompErrMsgS}{the (possibly empty) string that decribes
the error interrupting the computation.}
\item{NDQFiles}{the counter of (possible) output files,
i.e., as if \code{CTechST$OutSaveI} = 1.}
\item{NumB}{the counter of (not necessarily distinct) optimal
bases considered.}
\item{PosVec}{the vector of length N that desribes
the position of individual (regression)
observations with respect to the
exact (regression) Tau-quantile
contour.
The identification is reliable only after a
successful computation.
\code{PosVec[i]} = 0/1/2 if the \code{i}-th
observation is in/on/out of the contour.
If \code{compContourM2u} is used with
\code{CTechST$SkipRedI} = 1, then \code{PosVec}
correctly detects only all the outer observations.}
\item{MaxLWidth}{the maximum width of one layer of the
internal algorithm.}
\item{NIniNone}{the number of trials when the initial
solution could not be found at all.}
\item{NIniBad}{the number of trials when the found
initial solution did not have
the right number of clearly nonzero
coordinates.}
\item{NSkipCone}{the number of skipped cones (where
an interior point could not be found).}
If \code{CTechST.CubRegWiseI} = 1, then the last four
components are calculated over all the individual
orthants.
}
\references{
[01] Hallin, M., Paindaveine, D. and Šiman, M. (2010)
Multivariate quantiles and multiple-output regression quantiles:
from L1 optimization to halfspace depth.
\emph{Annals of Statistics} \bold{38}, 635--669.
[02] Hallin, M., Paindaveine, D. and Šiman, M. (2010)
Rejoinder (to [01]).
\emph{Annals of Statistics} \bold{38}, 694--703.
[03] Paindaveine, D. and Šiman, M. (2011)
On directional multiple-output quantile regression.
\emph{Journal of Multivariate Analysis} \bold{102}, 193--212.
[04] Šiman, M. (2011)
On exact computation of some statistics based on projection
pursuit in a general regression context.
\emph{Communications in Statistics - Simulation and Computation} \bold{40}, 948--956.
[05] McKeague, I. W., López-Pintado, S., Hallin, M. and Šiman, M. (2011)
Analyzing growth trajectories.
\emph{Journal of Developmental Origins of Health and Disease} \bold{2}, 322--329.
[06] Paindaveine, D. and Šiman, M. (2012)
Computing multiple-output regression quantile regions.
\emph{Computational Statistics & Data Analysis} \bold{56}, 840--853.
[07] Paindaveine, D. and Šiman, M. (2012)
Computing multiple-output regression quantile regions
from projection quantiles.
\emph{Computational Statistics} \bold{27}, 29--49.
[08] Šiman, M. (2014)
Precision index in the multivariate context.
\emph{Communications in Statistics - Theory and Methods} \bold{43}, 377--387.
[09] Hallin, M., Lu, Z., Paindaveine, D. and Šiman, M. (2015)
Local bilinear multiple-output quantile/depth regression.
\emph{Bernoulli} \bold{21}, 1435--1466.
}
\examples{
##computing all directional 0.15-quantiles of 199 random points
##uniformly distributed in the unit square centered at zero
##- preparing the input
Tau <- 0.15
XMat <- matrix(1, 199, 1)
YMat <- matrix(runif(2*199, -0.5, 0.5), 199, 2)
##- Method 1:
COutST <- compContourM1u(Tau, YMat, XMat)
##- Method 2:
COutST <- compContourM2u(Tau, YMat, XMat)
}
|
###############################################
######### import data from i2b2 ###############
###############################################
# Builds the EDS (hospital data warehouse) dataset for departement 33:
# pulls the latest COVID dashboard snapshots from i2b2, merges them with
# open vaccination/variant data and NOAA weather, then cleans the result.
##### get connection #####
conn <- i2b2bordeaux::getI2B2con()
## Dashboard indicators: keep only the most recent snapshot, excluding the
## PREL / POS_PREL_BY_AGE / TDM domains.
qGetDashboard <- "SELECT *
  FROM IAM.COVID_INDIC
  WHERE SAVE_DATE = (
    SELECT max(save_date)
    FROM IAM.COVID_INDIC
  )
  AND DOMAIN != 'PREL'
  AND DOMAIN != 'POS_PREL_BY_AGE'
  AND DOMAIN != 'TDM'"
RqCOVID_INDIC <- i2b2bordeaux::oracleDBquery(conn = conn,
                                             statement = qGetDashboard)
# Features table: likewise keep only the most recent snapshot.
qFeatures <- "SELECT *
  FROM IAM.COVID_FEATURES_SAVE_BIS
  WHERE SAVE_DATE = (
    SELECT max(save_date)
    FROM IAM.COVID_FEATURES_SAVE_BIS
  )"
RqCOVID_FEATURES_SAVE_BIS <- i2b2bordeaux::oracleDBquery(conn = conn,
                                                         statement = qFeatures)
###############################################
######### import data from open ###############
###############################################
### Import majority variant (restricted to departement 33)
dfVariants <- PredictCovid::ImportMajorityVariant() %>%
  filter(dep == "33")
### Import vaccination data: first-dose cumulative counts for dep 33.
dfVaccination <- PredictCovid::ImportCleanVaccination() %>%
  dplyr::select(dep, jour, n_cum_dose1_tous_ages) %>%
  dplyr::rename("DATE" = "jour",
                "Vaccin_1dose" = "n_cum_dose1_tous_ages") %>%
  dplyr::filter(dep == "33")
### Import 2022 weather data (2020-2021 already stored)
# Weather is pulled for dep 33 plus its adjacent departements.
# NOTE(review): dfLimitrophe, Dept_stations_pop and (below)
# weather_data_byDepHistorical are assumed to be loaded elsewhere
# (package data or an earlier script) -- confirm before running standalone.
weatherDepToImport <- c("33",
                        dfLimitrophe %>%
                          dplyr::filter(departement == "33") %>%
                          dplyr::pull(adjacent))
weather_data_byDep2022 <- PredictCovid::weather_data_from_NOAA(Regions_or_Dept_stations_pop = Dept_stations_pop[Dept_stations_pop$code_insee %in% weatherDepToImport,],
                                                               years = 2022,
                                                               n.cores = 1) %>%
  dplyr::select(date_day, code_insee, t.mean,
                precip, RH.mean, AH.mean,
                IPTCC.mean, ws.mean, dewpoint.mean) %>%
  dplyr::rename("DATE" = "date_day",
                "dep" = "code_insee")
## Merge 2022 with the stored historical weather, impute missing values,
## and keep only dep 33.
lsWeather <- dplyr::bind_rows(weather_data_byDep2022,
                              weather_data_byDepHistorical) %>%
  PredictCovid::ImputeMissingWeather()
weather_data_byDep <- lsWeather$dfImputed %>%
  dplyr::filter(dep == "33")
###############################################
######### Merge all ###########################
###############################################
### EDS data: assemble the i2b2 extracts, merge in vaccination, variants
### and weather, then apply the cleaning step.
dfFullDashboard <- PredictCovid::CovidDataFromI2b2(RqCOVID_INDIC = RqCOVID_INDIC,
                                                   RqCOVID_FEATURES_SAVE_BIS = RqCOVID_FEATURES_SAVE_BIS)
dfEDS <- PredictCovid::MergeDashboardVaccinVariantsWeather(dfFullDashboard = dfFullDashboard,
                                                           dfVariants_dep33 = dfVariants,
                                                           dfVaccination_dep33 = dfVaccination,
                                                           weather_data_dep33 = weather_data_byDep) %>%
  PredictCovid::CleanEDS(dfEDS = .)
| /reporting/stream_model/01_importData.R | no_license | thomasferte/PredictCovidOpen | R | false | false | 3,193 | r | ###############################################
######### import data from i2b2 ###############
###############################################
##### get connection #####
conn <- i2b2bordeaux::getI2B2con()
## dashboard
qGetDashboard <- "SELECT *
FROM IAM.COVID_INDIC
WHERE SAVE_DATE = (
SELECT max(save_date)
FROM IAM.COVID_INDIC
)
AND DOMAIN != 'PREL'
AND DOMAIN != 'POS_PREL_BY_AGE'
AND DOMAIN != 'TDM'"
RqCOVID_INDIC <- i2b2bordeaux::oracleDBquery(conn = conn,
statement = qGetDashboard)
# get the max date
qFeatures <- "SELECT *
FROM IAM.COVID_FEATURES_SAVE_BIS
WHERE SAVE_DATE = (
SELECT max(save_date)
FROM IAM.COVID_FEATURES_SAVE_BIS
)"
RqCOVID_FEATURES_SAVE_BIS <- i2b2bordeaux::oracleDBquery(conn = conn,
statement = qFeatures)
###############################################
######### import data from open ###############
###############################################
### Import majority variant
dfVariants <- PredictCovid::ImportMajorityVariant() %>%
filter(dep == "33")
### Import vaccination data
dfVaccination <- PredictCovid::ImportCleanVaccination() %>%
dplyr::select(dep, jour, n_cum_dose1_tous_ages) %>%
dplyr::rename("DATE" = "jour",
"Vaccin_1dose" = "n_cum_dose1_tous_ages") %>%
dplyr::filter(dep == "33")
### Import 2022 weather data (2020-2021 already stored)
weatherDepToImport <- c("33",
dfLimitrophe %>%
dplyr::filter(departement == "33") %>%
dplyr::pull(adjacent))
weather_data_byDep2022 <- PredictCovid::weather_data_from_NOAA(Regions_or_Dept_stations_pop = Dept_stations_pop[Dept_stations_pop$code_insee %in% weatherDepToImport,],
years = 2022,
n.cores = 1) %>%
dplyr::select(date_day, code_insee, t.mean,
precip, RH.mean, AH.mean,
IPTCC.mean, ws.mean, dewpoint.mean) %>%
dplyr::rename("DATE" = "date_day",
"dep" = "code_insee")
## merge
lsWeather <- dplyr::bind_rows(weather_data_byDep2022,
weather_data_byDepHistorical) %>%
PredictCovid::ImputeMissingWeather()
weather_data_byDep <- lsWeather$dfImputed %>%
dplyr::filter(dep == "33")
###############################################
######### Merge all ###########################
###############################################
### EDS data
dfFullDashboard <- PredictCovid::CovidDataFromI2b2(RqCOVID_INDIC = RqCOVID_INDIC,
RqCOVID_FEATURES_SAVE_BIS = RqCOVID_FEATURES_SAVE_BIS)
dfEDS <- PredictCovid::MergeDashboardVaccinVariantsWeather(dfFullDashboard = dfFullDashboard,
dfVariants_dep33 = dfVariants,
dfVaccination_dep33 = dfVaccination,
weather_data_dep33 = weather_data_byDep) %>%
PredictCovid::CleanEDS(dfEDS = .)
|
pollutantmean <- function(in_Dir, in_pollutant, in_id) {
  # Mean of one pollutant column pooled across the selected monitor files.
  #
  # in_Dir:       directory containing the monitor CSV files.
  # in_pollutant: name of the pollutant column to average (e.g. "sulfate").
  # in_id:        integer vector selecting files by their position in the
  #               alphabetically sorted directory listing.
  # Returns the mean of the pooled values, ignoring NAs.
  #
  # Bug fix: the original read.csv() call used bare file names, so it only
  # worked when the working directory happened to be in_Dir; full.names =
  # TRUE yields full paths. The pattern is also anchored so only files
  # ending in ".csv" are picked up.
  files <- list.files(path = in_Dir, pattern = "\\.csv$", full.names = TRUE)
  vals <- unlist(lapply(in_id, function(i) read.csv(files[i])[[in_pollutant]]))
  mean(vals, na.rm = TRUE)
}
| /Week2/week2_project_1/pollutantmean.R | no_license | vchangarangath/Hello-R-World | R | false | false | 244 | r |
pollutantmean <-function(in_Dir,in_pollutant,in_id)
{
list <- list.files(path=in_Dir,pattern = ".csv")
v <- numeric()
for (i in in_id){
data <-read.csv(list[i])
v <- c(v,data[[in_pollutant]])
}
mean(v,na.rm=T)
}
|
# Analysis of Turk2 lineup-experiment results: per lineup, count how often
# subjects identified the data plot and compute a binomial p-value.
# NOTE(review): setwd() makes this script depend on a mounted volume;
# consider relative paths or here::here().
setwd("/volumes/5550/turk2_results")
# the most common choices by human for each lineup = human_choices
# the actual location of the real plot in each lineup = newdata$plot_location
library(tidyverse)
data <- read.csv("/volumes/5550/turk2_results/data_turk2.csv")
file <- read.csv("/volumes/5550/turk2_results/file.csv")
# Per lineup: number of subjects who picked the data plot (response == 1).
ntrue <- data %>% group_by(pic_name) %>% summarise(ntrue = sum(response))
# Per lineup: total number of evaluations (Freq after the join below).
count <- table(data$pic_name) %>% data.frame()
# Most common pick per lineup (disabled; human_choices is referenced later).
#png_name <- as.character(file$png_nolc)
#human_choices <- vector()
#for (i in 1:70) {
#  datai <- subset(data, pic_name == png_name[i])
#  human_choices[i] <- names(which.max(table(datai$response_no)))
#}
# Attach the lineup design variables and the evaluation counts.
ntrue <- ntrue %>%
  left_join(select(data, difficulty, pic_name,
                   sample_size, beta, plot_location, sigma, replica)) %>%
  distinct() %>%
  left_join(count, by = c("pic_name"="Var1"))
newdata <- as.data.frame(ntrue)
p_cal <- function(k, x){
prob <- 1-pbinom(x-1, k, 0.05)
return(prob)
}
p_value <- vector()
for (i in 1:70) {
ki <- newdata$Freq[i]
xi <- newdata$ntrue[i]
p_value[i] <- p_cal(k=ki, x=xi)
}
newdata$p_value <- p_value
pic_name <- newdata$pic_name
setwd("/volumes/5550/turk2_results")
write.csv(tibble(pic_name, p_value), "human_pvalue.csv")
newdata$conclusion <- (newdata$p_value < 0.05)
#newdata$one_choice_con <- (newdata$plot_location == newdata$human_choices)
newdata$conclusion[38] <- TRUE
newdata$conclusion[39] <- TRUE
newdata$conclusion[40] <- TRUE
acc_human <- sum(newdata$conclusion)/70
acc_human_one_choice <- sum(newdata$one_choice_con)/70
newdata$human_choices <- human_choices
difficulty_true <- newdata %>% group_by(difficulty) %>% summarise(pb_true = mean(conclusion))
############################ smallest p picked by cor.test ################
setwd("/Volumes/5550/turk2_txt_files")
turk_lu <- list()
ctest_choices <- vector()
ct_real_pv <- vector()
for (i in 1:70) {
turk_lu[[i]] <- as.character(file$txt[i]) %>% read.delim(sep = " ")
ct_pvalue <- vector()
for (index in 1:20){
X <- turk_lu[[i]][,1]
Y <- turk_lu[[i]][,index+1]
ct_pvalue[index] <- cor.test(X,Y)$p.value
}
ct_real_pv[i] <- ct_pvalue[file$location[i]]
ctest_choices[i] <- ct_pvalue %>% which.min()
}
newdata <- newdata %>% mutate(ctest_choices)
newdata$same_choice_human_ct <- (human_choices==ctest_choices)
sum(newdata$same_choice_human_ct)/70
newdata$right_choices_ct <- (newdata$ctest_choices==newdata$plot_location)
sum(newdata$right_choices_ct)/70
realplot_right_ct <- (ct_real_pv < 0.05)
(sum(realplot_right_ct)+3)/70
########################### 5 epoch dl model
library(keras)
setwd("/Volumes/5550/panda/5epoch_m")
model_5e <- load_model_hdf5("new_100k_each.h5")
########################### 10 epoch dl model ###### the best one is the 4th epoch
setwd("/Volumes/5550/panda/10epoch_m")
library(keras)
model_10e <- load_model_hdf5("weights.06-0.22.hdf5")
base_dir <- "/Volumes/5550/panda"
turk_dir <- file.path(base_dir,"turk70real")
test_datagen <- image_data_generator(rescale = 1/255)
############################################################# crazy test
turk_generator <- flow_images_from_directory(
turk_dir,
test_datagen,
target_size = c(150, 150),
color_mode = "grayscale",
batch_size = 1,
class_mode = "binary"
)
#orders1 <- turk_generator$filenames
#p1 <- model_10e %>% predict_generator(turk_generator, steps = 1)
#pc_predict <- model_10e %>%
# predict_generator(turk_generator, step = 70, verbose = 1)
# (Eye-ball check the prediction)
# (Essentially predict_proba is the probability of the image being norela)
#stat_df <- as.tibble(cbind(pc_predict, turk_generator$filenames, turk_generator$classes)) %>%
# rename(
# predict_proba = V1,
# filename = V2,
# test_label = V3
# ) %>%
# mutate(predicted_label = ifelse(predict_proba > 0.5, 1, 0)) %>%
# sample_n(size= 20) %>%
# mutate(predicted_label = as.integer(predicted_label)) %>%
# mutate(predicted_label_name = ifelse(predicted_label == 0, "cats", "dogs")) %>%
# separate(filename, into=c("true_label","fname"), sep = "[//]" )
#dl10_acc_turk167 <- model_10e %>% evaluate_generator(turk_generator, steps = 67)
| /linear&norela/turk_human_calc.R | no_license | shuofan18/ETF5550 | R | false | false | 4,143 | r |
## turk_human_calc.R — evaluates 70 "lineup" plots three ways and compares:
##   1. aggregate human picks (binomial test on # of correct responses),
##   2. the panel with the smallest cor.test() p-value,
##   3. two pre-trained Keras CNN models.
## Paths are machine-specific (/volumes/5550/...); meant to be run interactively.
setwd("/volumes/5550/turk2_results")
# the most common choices by human for each lineup = human_choices
# the actual location of the real plot in each lineup = newdata$plot_location
library(tidyverse)
# Per-response human data and per-lineup metadata.
data <- read.csv("/volumes/5550/turk2_results/data_turk2.csv")
file <- read.csv("/volumes/5550/turk2_results/file.csv")
# ntrue = number of correct responses per lineup; count = responses per lineup.
ntrue <- data %>% group_by(pic_name) %>% summarise(ntrue = sum(response))
count <- table(data$pic_name) %>% data.frame()
# NOTE(review): this loop defines human_choices (modal human pick per lineup)
# but is commented out, while later lines still reference human_choices and
# one_choice_con — those lines will error unless it is re-enabled. Confirm.
#png_name <- as.character(file$png_nolc)
#human_choices <- vector()
#for (i in 1:70) {
# datai <- subset(data, pic_name == png_name[i])
# human_choices[i] <- names(which.max(table(datai$response_no)))
#}
# Attach lineup covariates and the response counts to the per-lineup summary.
ntrue <- ntrue %>%
  left_join(select(data, difficulty, pic_name,
                   sample_size, beta, plot_location, sigma, replica)) %>%
  distinct() %>%
  left_join(count, by = c("pic_name"="Var1"))
newdata <- as.data.frame(ntrue)
# P(at least x of k viewers pick the real panel by chance); chance rate is
# 1/20 = 0.05 since each lineup has 20 panels (see the 1:20 loop below).
p_cal <- function(k, x){
  prob <- 1-pbinom(x-1, k, 0.05)
  return(prob)
}
p_value <- vector()
for (i in 1:70) {
  ki <- newdata$Freq[i]
  xi <- newdata$ntrue[i]
  p_value[i] <- p_cal(k=ki, x=xi)
}
newdata$p_value <- p_value
pic_name <- newdata$pic_name
setwd("/volumes/5550/turk2_results")
write.csv(tibble(pic_name, p_value), "human_pvalue.csv")
# A lineup counts as "detected by humans" when its p-value clears 0.05.
newdata$conclusion <- (newdata$p_value < 0.05)
#newdata$one_choice_con <- (newdata$plot_location == newdata$human_choices)
# NOTE(review): manual overrides for lineups 38-40 — rationale not recorded
# in the script; confirm before reuse.
newdata$conclusion[38] <- TRUE
newdata$conclusion[39] <- TRUE
newdata$conclusion[40] <- TRUE
acc_human <- sum(newdata$conclusion)/70
# NOTE(review): the next two lines depend on one_choice_con / human_choices
# from the commented-out block above and will error as written.
acc_human_one_choice <- sum(newdata$one_choice_con)/70
newdata$human_choices <- human_choices
difficulty_true <- newdata %>% group_by(difficulty) %>% summarise(pb_true = mean(conclusion))
############################ smallest p picked by cor.test ################
setwd("/Volumes/5550/turk2_txt_files")
turk_lu <- list()
ctest_choices <- vector()
ct_real_pv <- vector()
# Each lineup's raw data file: column 1 is X, columns 2-21 are the 20 panels.
for (i in 1:70) {
  turk_lu[[i]] <- as.character(file$txt[i]) %>% read.delim(sep = " ")
  ct_pvalue <- vector()
  for (index in 1:20){
    X <- turk_lu[[i]][,1]
    Y <- turk_lu[[i]][,index+1]
    ct_pvalue[index] <- cor.test(X,Y)$p.value
  }
  # p-value of the true panel, and the panel cor.test would "pick" (min p).
  ct_real_pv[i] <- ct_pvalue[file$location[i]]
  ctest_choices[i] <- ct_pvalue %>% which.min()
}
newdata <- newdata %>% mutate(ctest_choices)
newdata$same_choice_human_ct <- (human_choices==ctest_choices)
sum(newdata$same_choice_human_ct)/70
newdata$right_choices_ct <- (newdata$ctest_choices==newdata$plot_location)
sum(newdata$right_choices_ct)/70
realplot_right_ct <- (ct_real_pv < 0.05)
# NOTE(review): the +3 presumably mirrors the three manual overrides above — confirm.
(sum(realplot_right_ct)+3)/70
########################### 5 epoch dl model
library(keras)
setwd("/Volumes/5550/panda/5epoch_m")
model_5e <- load_model_hdf5("new_100k_each.h5")
########################### 10 epoch dl model ###### the best one is the 4th epoch
setwd("/Volumes/5550/panda/10epoch_m")
library(keras)
model_10e <- load_model_hdf5("weights.06-0.22.hdf5")
base_dir <- "/Volumes/5550/panda"
turk_dir <- file.path(base_dir,"turk70real")
test_datagen <- image_data_generator(rescale = 1/255)
############################################################# crazy test
# Stream the 70 real lineup images to the CNN one at a time, grayscale 150x150.
turk_generator <- flow_images_from_directory(
  turk_dir,
  test_datagen,
  target_size = c(150, 150),
  color_mode = "grayscale",
  batch_size = 1,
  class_mode = "binary"
)
#orders1 <- turk_generator$filenames
#p1 <- model_10e %>% predict_generator(turk_generator, steps = 1)
#pc_predict <- model_10e %>%
# predict_generator(turk_generator, step = 70, verbose = 1)
# (Eye-ball check the prediction)
# (Essentially predict_proba is the probability of the image being norela)
#stat_df <- as.tibble(cbind(pc_predict, turk_generator$filenames, turk_generator$classes)) %>%
# rename(
# predict_proba = V1,
# filename = V2,
# test_label = V3
# ) %>%
# mutate(predicted_label = ifelse(predict_proba > 0.5, 1, 0)) %>%
# sample_n(size= 20) %>%
# mutate(predicted_label = as.integer(predicted_label)) %>%
# mutate(predicted_label_name = ifelse(predicted_label == 0, "cats", "dogs")) %>%
# separate(filename, into=c("true_label","fname"), sep = "[//]" )
#dl10_acc_turk167 <- model_10e %>% evaluate_generator(turk_generator, steps = 67)
|
##########################################################
#
##########################################################
# Stochastic per-neuron update of NFT (neurofibrillary tangle) counts.
# `neurons` is a list of named lists; for every living neuron
# (l[["alive"]] == TRUE) the function:
#   1. GROWTH: adds rbinom(n, trials, p) to each entry of the neuron's
#      `nftName` vector, where trials grows with
#      sqrt(aAggregateCount + nft + aDimer) / nftAcceleration and is capped at
#      synapseActivityUpdate_nftCutOffMean * maxNftGrowth, and p is the
#      neuron's `nftSeedProbability`. The running total is capped at 2^31.
#   2. CLEARANCE: subtracts rbinom(n, nftFlatClearance, 0.5), floored at 0.
# Dead neurons are returned unchanged. Runs via parallel::mclapply, so
# neurons may be processed in parallel worker processes.
# NOTE(review): `probabilitySaclingFactor` [sic] and `activityName` are not
# used by the active code path (only the commented-out variant used the
# scaling factor); they are kept for interface compatibility.
nftGeneration <- function(neurons, nftAcceleration, probabilitySaclingFactor, maxNftGrowth, synapseActivityUpdate_nftCutOffMean, nftFlatClearance, nftName = "nft", aAggregateCountName = "aAggregateCount", nftSeedProbability = "nftSeedProbability", activityName = "activity", aDimerName = "aDimer"){
  return(mclapply(neurons, function(l){
    if(l[["alive"]]){
      # Growth step: binomial seeding, trial count capped, total capped at 2^31.
      l[[nftName]] <- pmin(l[[nftName]] + rbinom(length(l[[nftName]]),
                                                 pmin(ceiling(sqrt(l[[aAggregateCountName]] + l[[nftName]] + l[[aDimerName]]) / nftAcceleration),
                                                      (synapseActivityUpdate_nftCutOffMean * maxNftGrowth)),
                                                 l[[nftSeedProbability]]
      ),
      2^31,
      na.rm = TRUE
      )
      # Earlier variant: uncapped trial count scaled by probabilitySaclingFactor.
      # l[[nftName]] <- pmin(l[[nftName]] + rbinom(length(l[[nftName]]),
      #                                            ceiling(sqrt(l[[aAggregateCountName]] + l[[nftName]] + l[[aDimerName]]) / nftAcceleration),
      #                                            l[[nftSeedProbability]] * probabilitySaclingFactor
      # ),
      # 2^31,
      # na.rm = TRUE
      # )
      # NFT CLEANING: flat stochastic clearance, floored at zero.
      l[[nftName]] <- pmax(l[[nftName]] - rbinom(length(l[[nftName]]),
                                                 nftFlatClearance,
                                                 .5
      ),
      0,
      na.rm = TRUE
      )
    }
    return(l)
  }))
}
# plot(unlist(lapply(seq(1, 10000, 1), function(x){rbinom(1,
# pmin(ceiling(sqrt(x) / nftAcceleration), (20000 * .002)),
# l[[nftSeedProbability]] * probabilitySaclingFactor
# )})))
| /modelFunctions/nftGeneration.R | no_license | jadenecke/ACHABM | R | false | false | 1,930 | r | ##########################################################
#
##########################################################
# Stochastic per-neuron update of NFT (neurofibrillary tangle) counts.
# `neurons` is a list of named lists; for every living neuron
# (l[["alive"]] == TRUE) the function:
#   1. GROWTH: adds rbinom(n, trials, p) to each entry of the neuron's
#      `nftName` vector, where trials grows with
#      sqrt(aAggregateCount + nft + aDimer) / nftAcceleration and is capped at
#      synapseActivityUpdate_nftCutOffMean * maxNftGrowth, and p is the
#      neuron's `nftSeedProbability`. The running total is capped at 2^31.
#   2. CLEARANCE: subtracts rbinom(n, nftFlatClearance, 0.5), floored at 0.
# Dead neurons are returned unchanged. Runs via parallel::mclapply, so
# neurons may be processed in parallel worker processes.
# NOTE(review): `probabilitySaclingFactor` [sic] and `activityName` are not
# used by the active code path (only the commented-out variant used the
# scaling factor); they are kept for interface compatibility.
nftGeneration <- function(neurons, nftAcceleration, probabilitySaclingFactor, maxNftGrowth, synapseActivityUpdate_nftCutOffMean, nftFlatClearance, nftName = "nft", aAggregateCountName = "aAggregateCount", nftSeedProbability = "nftSeedProbability", activityName = "activity", aDimerName = "aDimer"){
  return(mclapply(neurons, function(l){
    if(l[["alive"]]){
      # Growth step: binomial seeding, trial count capped, total capped at 2^31.
      l[[nftName]] <- pmin(l[[nftName]] + rbinom(length(l[[nftName]]),
                                                 pmin(ceiling(sqrt(l[[aAggregateCountName]] + l[[nftName]] + l[[aDimerName]]) / nftAcceleration),
                                                      (synapseActivityUpdate_nftCutOffMean * maxNftGrowth)),
                                                 l[[nftSeedProbability]]
      ),
      2^31,
      na.rm = TRUE
      )
      # Earlier variant: uncapped trial count scaled by probabilitySaclingFactor.
      # l[[nftName]] <- pmin(l[[nftName]] + rbinom(length(l[[nftName]]),
      #                                            ceiling(sqrt(l[[aAggregateCountName]] + l[[nftName]] + l[[aDimerName]]) / nftAcceleration),
      #                                            l[[nftSeedProbability]] * probabilitySaclingFactor
      # ),
      # 2^31,
      # na.rm = TRUE
      # )
      # NFT CLEANING: flat stochastic clearance, floored at zero.
      l[[nftName]] <- pmax(l[[nftName]] - rbinom(length(l[[nftName]]),
                                                 nftFlatClearance,
                                                 .5
      ),
      0,
      na.rm = TRUE
      )
    }
    return(l)
  }))
}
# plot(unlist(lapply(seq(1, 10000, 1), function(x){rbinom(1,
# pmin(ceiling(sqrt(x) / nftAcceleration), (20000 * .002)),
# l[[nftSeedProbability]] * probabilitySaclingFactor
# )})))
|
################################################
# SHINYAPP TEMPLATE * MULTIPLE DIRS * server.R #
################################################
# Server entry point. Each feature tab lives in its own file under server/,
# sourced with local = TRUE so the sourced code runs inside this function's
# environment and can see input/output/session; $value extracts the value of
# the last expression evaluated by source(). Commented-out lines are
# placeholder modules that are not enabled yet.
shinyServer(function(input, output, session) {
  # HOME (hme) -------------------------------------------------------
  # source(file.path("server", "srv_hme.R"), local = TRUE)$value
  # LOGIN (lgn) ------------------------------------------------------
  # source(file.path("server", "srv_lgn.R"), local = TRUE)$value
  # TABLES (tbl) -----------------------------------------------------
  source(file.path("server", "srv_tbl.R"), local = TRUE)$value
  # CHARTS (plt) -----------------------------------------------------
  source(file.path("server", "srv_plt.R"), local = TRUE)$value
  # MAPS (mps) -------------------------------------------------------
  source(file.path("server", "srv_mps.R"), local = TRUE)$value
  # MODELS (mdl) -----------------------------------------------------
  # source(file.path("server", "srv_mdl.R"), local = TRUE)$value
  # PREDICTION / FORECAST (prd) --------------------------------------
  # source(file.path("server", "srv_prd.R"), local = TRUE)$value
  # HELP () ----------------------------------------------------------
  # source(file.path("server", "srv_hlp.R"), local = TRUE)$value
  # ABOUT / CREDITS () -----------------------------------------------
  # source(file.path("server", "srv_crd.R"), local = TRUE)$value
})
| /shiny_gender_paygap/apps/multiple_dirs/server.R | permissive | WeR-stats/workshops | R | false | false | 1,417 | r | ################################################
# SHINYAPP TEMPLATE * MULTIPLE DIRS * server.R #
################################################
# Server entry point. Each feature tab lives in its own file under server/,
# sourced with local = TRUE so the sourced code runs inside this function's
# environment and can see input/output/session; $value extracts the value of
# the last expression evaluated by source(). Commented-out lines are
# placeholder modules that are not enabled yet.
shinyServer(function(input, output, session) {
  # HOME (hme) -------------------------------------------------------
  # source(file.path("server", "srv_hme.R"), local = TRUE)$value
  # LOGIN (lgn) ------------------------------------------------------
  # source(file.path("server", "srv_lgn.R"), local = TRUE)$value
  # TABLES (tbl) -----------------------------------------------------
  source(file.path("server", "srv_tbl.R"), local = TRUE)$value
  # CHARTS (plt) -----------------------------------------------------
  source(file.path("server", "srv_plt.R"), local = TRUE)$value
  # MAPS (mps) -------------------------------------------------------
  source(file.path("server", "srv_mps.R"), local = TRUE)$value
  # MODELS (mdl) -----------------------------------------------------
  # source(file.path("server", "srv_mdl.R"), local = TRUE)$value
  # PREDICTION / FORECAST (prd) --------------------------------------
  # source(file.path("server", "srv_prd.R"), local = TRUE)$value
  # HELP () ----------------------------------------------------------
  # source(file.path("server", "srv_hlp.R"), local = TRUE)$value
  # ABOUT / CREDITS () -----------------------------------------------
  # source(file.path("server", "srv_crd.R"), local = TRUE)$value
})
|
\name{redalgae}
\alias{redalgae}
\docType{data}
\title{Red algae fossil diversity across the Cenozoic}
\description{Red algae fossil diversity across the Cenozoic inferred from fossil data}
\usage{data(redalgae)}
\details{
The format is a dataframe with the two following variables:
\describe{
\item{\code{Age}}{a numeric vector corresponding to the geological age, in Myrs before the present}
\item{\code{redalgae fossil diversity}}{a numeric vector corresponding to the diversity of redalgae taxa}
}
}
\references{
D. Lazarus. (1994) Neptune: A marine micropaleontology database. \emph{Mathematical Geology}, 26(7):817–832.}
\examples{
data(redalgae)
plot(redalgae)
}
\keyword{datasets} | /man/redalgae.Rd | no_license | elewitus/PANDA | R | false | false | 708 | rd | \name{redalgae}
\alias{redalgae}
\docType{data}
\title{Red algae fossil diversity across the Cenozoic}
\description{Red algae fossil diversity across the Cenozoic inferred from fossil data}
\usage{data(redalgae)}
\details{
The format is a dataframe with the two following variables:
\describe{
\item{\code{Age}}{a numeric vector corresponding to the geological age, in Myrs before the present}
\item{\code{redalgae fossil diversity}}{a numeric vector corresponding to the diversity of redalgae taxa}
}
}
\references{
D. Lazarus. (1994) Neptune: A marine micropaleontology database. \emph{Mathematical Geology}, 26(7):817–832.}
\examples{
data(redalgae)
plot(redalgae)
}
\keyword{datasets} |
## Maps Brazilian CBO2002 occupation codes to US SOC codes / O*NET job zones:
## keeps unambiguous 1:1 matches (CBO_ok), rescues ambiguous matches whose
## fuzzy-match score clears 0.81 (CBO_ok2), and leaves the rest in CBO_miss.
library(dplyr)
# Both inputs are ';'-separated (read.csv2); read as character so codes keep
# leading zeros.
transl <- read.csv2("./Data/translate_matching.csv", colClasses = "character" )
conver <- read.csv2("./Data/CBO2002_SOC.csv", colClasses = "character" )
check <- transl %>% rename(CBO2002 = CODIGO) %>% inner_join(conver, by = ("CBO2002"))
# freq = how many SOC rows each CBO2002 code matched.
check_f <- check %>% group_by(CBO2002) %>% summarise(freq = n())
check_f %>% select(freq) %>% table(.)
# Codes with exactly one match: accept directly.
CBO_ok <- check_f %>% filter(freq == 1) %>% left_join(check, by = "CBO2002") %>%
  group_by(CBO2002, code) %>% summarise(job_zone = Job_Zone.y)
# Codes with multiple matches: kept for further screening.
CBO_miss <- check_f %>% filter(freq != 1) %>% left_join(check, by = "CBO2002") %>%
  group_by(CBO2002, code)
# NOTE(review): `prox` appears to be a fuzzy-match proximity score carried in
# from translate_matching.csv, with 0.81 as the acceptance cut-off — confirm.
CBO_ok2 <- CBO_miss %>% filter(prox > 0.81) %>% ungroup() %>% group_by(CBO2002, code) %>%
  summarise(job_zone = first(Job_Zone.x))
CBO_miss <- CBO_miss %>% filter(prox <= 0.81)
length(unique(CBO_miss$CBO2002))  # how many codes remain unresolved
CBO_OK <- rbind(CBO_ok, CBO_ok2)
write.csv2(CBO_OK, row.names = F, file = "./Data/CBO_OK.csv")
# ==============================================================================
# Consistency check: would truncating SOC codes to 6 or 4 characters keep the
# O*NET job-zone assignment unique per truncated code?
library(readxl)
Job_Zones <- read_excel("Data/All_Job_Zones.xls", skip = 3)
a1 <- Job_Zones %>% mutate(Code = substr(Code, 1, 6)) %>% group_by(Code, `Job Zone`) %>% summarise(freq = n())
a1 %>% ungroup() %>% group_by(Code) %>% summarise(freq = n()) %>% filter(freq == 1) %>% select(Code)
a2<- Job_Zones %>% mutate(Code = substr(Code, 1, 4)) %>% group_by(Code, `Job Zone`) %>% summarise(freq = n())
a2 %>% ungroup() %>% group_by(Code) %>% summarise(freq = n()) %>% filter(freq == 1) %>% select(Code)
CBO_miss
| /consistencia para conversao.R | no_license | lamfo-unb/AutomationJobs | R | false | false | 1,563 | r |
## Maps Brazilian CBO2002 occupation codes to US SOC codes / O*NET job zones:
## keeps unambiguous 1:1 matches (CBO_ok), rescues ambiguous matches whose
## fuzzy-match score clears 0.81 (CBO_ok2), and leaves the rest in CBO_miss.
library(dplyr)
# Both inputs are ';'-separated (read.csv2); read as character so codes keep
# leading zeros.
transl <- read.csv2("./Data/translate_matching.csv", colClasses = "character" )
conver <- read.csv2("./Data/CBO2002_SOC.csv", colClasses = "character" )
check <- transl %>% rename(CBO2002 = CODIGO) %>% inner_join(conver, by = ("CBO2002"))
# freq = how many SOC rows each CBO2002 code matched.
check_f <- check %>% group_by(CBO2002) %>% summarise(freq = n())
check_f %>% select(freq) %>% table(.)
# Codes with exactly one match: accept directly.
CBO_ok <- check_f %>% filter(freq == 1) %>% left_join(check, by = "CBO2002") %>%
  group_by(CBO2002, code) %>% summarise(job_zone = Job_Zone.y)
# Codes with multiple matches: kept for further screening.
CBO_miss <- check_f %>% filter(freq != 1) %>% left_join(check, by = "CBO2002") %>%
  group_by(CBO2002, code)
# NOTE(review): `prox` appears to be a fuzzy-match proximity score carried in
# from translate_matching.csv, with 0.81 as the acceptance cut-off — confirm.
CBO_ok2 <- CBO_miss %>% filter(prox > 0.81) %>% ungroup() %>% group_by(CBO2002, code) %>%
  summarise(job_zone = first(Job_Zone.x))
CBO_miss <- CBO_miss %>% filter(prox <= 0.81)
length(unique(CBO_miss$CBO2002))  # how many codes remain unresolved
CBO_OK <- rbind(CBO_ok, CBO_ok2)
write.csv2(CBO_OK, row.names = F, file = "./Data/CBO_OK.csv")
# ==============================================================================
# Consistency check: would truncating SOC codes to 6 or 4 characters keep the
# O*NET job-zone assignment unique per truncated code?
library(readxl)
Job_Zones <- read_excel("Data/All_Job_Zones.xls", skip = 3)
a1 <- Job_Zones %>% mutate(Code = substr(Code, 1, 6)) %>% group_by(Code, `Job Zone`) %>% summarise(freq = n())
a1 %>% ungroup() %>% group_by(Code) %>% summarise(freq = n()) %>% filter(freq == 1) %>% select(Code)
a2<- Job_Zones %>% mutate(Code = substr(Code, 1, 4)) %>% group_by(Code, `Job Zone`) %>% summarise(freq = n())
a2 %>% ungroup() %>% group_by(Code) %>% summarise(freq = n()) %>% filter(freq == 1) %>% select(Code)
CBO_miss
|
## Plots VRS (spectral) absorbance data for one lake core: each point/contour
## set is one core depth interval, absorbance vs wavelength, with the
## chlorophyll-a band (650-700 nm) highlighted.
## NOTE(review): the ..level.. / ..density.. pronouns used below are
## deprecated in current ggplot2 (use after_stat()) — works on older versions.
mydat <- read.csv("~/BIOL812Poster/06-137FullVRS.csv", header = T) # Not sure how to soft-code this... need to run trhough each lake
library(ggplot2)
# Combine all data into an easily plot-able three-column data frame:
# Freq = wavelength (first column), Abs = absorbance, Int = the original
# column name (depth-interval label), kept as a factor by stack().
stackdat <- as.data.frame(cbind(mydat[,1], stack(mydat[,2:ncol(mydat)])))
names(stackdat) <- c("Freq", "Abs", "Int")
# Plot the actual points, coloured along a gradient with one colour per interval:
colfxn <- colorRampPalette(c("goldenrod", "blue"), bias =0.2)
DATcols <- colfxn(nlevels(stackdat$Int))
ggplot(aes(x = Freq, y = Abs), data = stackdat) +
  geom_rect(aes(xmin=650, xmax=700, ymin=-Inf, ymax=Inf), fill = "grey80")+ # highlight the PAR for chl a
  geom_point(aes(colour=Int), pch = 16, alpha = 0.1, size = 2)+
  scale_colour_manual(values = DATcols, guide = F)+
  theme_classic()+
  xlab("Wavelength (nm)")+
  ylab("Absorbance Value")+
  theme(text = element_text(size=18, face="bold"),
        axis.text.x = element_text(colour="black", face="plain", size=16),
        axis.text.y = element_text(colour="black", face="plain", size=16))
# Plot 1: Outline plot (less ink) — 2-D density contours of all spectra
ggplot(aes(x = Freq, y = Abs), data = stackdat) +
  geom_rect(aes(xmin=650, xmax=700, ymin=-Inf, ymax=Inf), fill = "greenyellow")+ # highlight the PAR for chl a
  scale_colour_gradient(low="green",high="red")+
  stat_density_2d(aes(colour=..level..), size =1.25) +
  theme_classic()+
  xlab("Wavelength (nm)")+
  ylab("Absorbance Value")+
  theme(text = element_text(size=18, face="bold"),
        axis.text.x = element_text(colour="black", face="plain", size=16),
        axis.text.y = element_text(colour="black", face="plain", size=16),
        legend.title = element_text(face="bold", size=12),
        legend.text= element_text(face="plain", size=10))
# Plot 2: Raster density plot (more ink, looks cooler)
ggplot(aes(x = Freq, y = Abs), data = stackdat) +
  stat_density_2d(geom="raster", aes(fill=..density..), contour = F)+
  scale_fill_continuous(low="blue",high="yellow")+
  theme_classic()+
  xlab("Wavelength (nm)")+
  ylab("Absorbance Value")+
  theme(text = element_text(size=18, face="bold"),
        axis.text.x = element_text(colour="black", face="plain", size=16),
        axis.text.y = element_text(colour="black", face="plain", size=16),
        legend.title = element_text(face="bold", size=12),
        legend.text= element_text(face="plain", size=10))
## Isolating Photosynthetically Active Radiation (PAR) ##
# Refine PAR which is 400-700nm; assumes the wavelength column is `Sample`.
PAR <- as.numeric(400:700)
PARdat <- mydat[which(mydat$Sample %in% PAR), ]
# ncol(mydat) == ncol(PARdat) here (row subset only), so this is safe albeit
# confusing.
PARstackdat <- as.data.frame(cbind(PARdat[,1], stack(PARdat[,2:ncol(mydat)])))
names(PARstackdat) <- c("Freq", "Abs", "Int")
# Plot PAR data
# Plot 1: Outline plot (less ink)
ggplot(aes(x = Freq, y = Abs), data = PARstackdat) +
  geom_rect(aes(xmin=650, xmax=700, ymin=-Inf, ymax=Inf), fill = "grey40")+ # highlight the PAR for chl a
  #geom_rect(aes(xmin=480, xmax=520, ymin=-Inf, ymax=Inf), fill = "grey80")+ # highlight the PAR for echinenone
  scale_colour_gradient(low="green",high="red")+
  stat_density_2d(aes(colour=..level..), size =1.25) +
  theme_classic()+
  xlab("Wavelength (nm)")+
  ylab("Absorbance Value")+
  theme(text = element_text(size=18, face="bold"),
        axis.text.x = element_text(colour="black", face="plain", size=16),
        axis.text.y = element_text(colour="black", face="plain", size=16),
        legend.title = element_text(face="bold", size=12),
        legend.text= element_text(face="plain", size=10))
# Plot 2: Raster density plot (more ink, looks cooler)
ggplot(aes(x = Freq, y = Abs), data = PARstackdat) +
  stat_density_2d(geom="raster", aes(fill=..density..), contour = F)+ #having fun with density fills
  scale_fill_continuous(low="blue",high="yellow")+
  theme_classic()+
  xlab("Wavelength (nm)")+
  ylab("Absorbance Value")+
  theme(text = element_text(size=18, face="bold"),
        axis.text.x = element_text(colour="black", face="plain", size=16),
        axis.text.y = element_text(colour="black", face="plain", size=16),
        legend.title = element_text(face="bold", size=12),
        legend.text= element_text(face="plain", size=10))
## Plot the actual points:
# Set the palette
colfxn <- colorRampPalette(c("goldenrod", "darkmagenta"), bias =0.2) # Set the top and bottom of the gradient
PARcols <- colfxn(nlevels(PARstackdat$Int)) # Number of colours = number of intervals
ggplot(aes(x = Freq, y = Abs), data = PARstackdat) +
  geom_rect(aes(xmin=650, xmax=700, ymin=-Inf, ymax=Inf), fill = "grey80")+ # highlight the PAR for chl a
  geom_point(aes(colour=Int), pch = 16, alpha = 0.3, size = 3)+ # Points (lines won't work)
  scale_colour_manual(values = PARcols, guide_legend(title = "Midpoint (cm)"))+ # Add manual scale to improve look and interpretation
  theme_classic()+ # Bye-bye extra ink
  xlab("Wavelength (nm)")+
  ylab("Absorbance Value")+
  theme(text = element_text(size=18, face="bold"),
        axis.text.x = element_text(colour="black", face="plain", size=16),
        axis.text.y = element_text(colour="black", face="plain", size=16),
        legend.text = element_text(face="plain", size = 8),
        #legend.position = "top",
        legend.title = element_text(face="bold", size = 12)) # Customizing the text size so it's legible on a poster
| /SpectraPlots.R | no_license | bsimmatis/BIOL812Poster | R | false | false | 5,348 | r | mydat <- read.csv("~/BIOL812Poster/06-137FullVRS.csv", header = T) # Not sure how to soft-code this... need to run trhough each lake
## (Continues from the read.csv on the previous line.) Plots VRS absorbance
## spectra: each point/contour set is one core depth interval, absorbance vs
## wavelength, with the chlorophyll-a band (650-700 nm) highlighted.
## NOTE(review): the ..level.. / ..density.. pronouns used below are
## deprecated in current ggplot2 (use after_stat()) — works on older versions.
library(ggplot2)
# Combine all data into an easily plot-able three-column data frame:
# Freq = wavelength (first column), Abs = absorbance, Int = the original
# column name (depth-interval label), kept as a factor by stack().
stackdat <- as.data.frame(cbind(mydat[,1], stack(mydat[,2:ncol(mydat)])))
names(stackdat) <- c("Freq", "Abs", "Int")
# Plot the actual points, coloured along a gradient with one colour per interval:
colfxn <- colorRampPalette(c("goldenrod", "blue"), bias =0.2)
DATcols <- colfxn(nlevels(stackdat$Int))
ggplot(aes(x = Freq, y = Abs), data = stackdat) +
  geom_rect(aes(xmin=650, xmax=700, ymin=-Inf, ymax=Inf), fill = "grey80")+ # highlight the PAR for chl a
  geom_point(aes(colour=Int), pch = 16, alpha = 0.1, size = 2)+
  scale_colour_manual(values = DATcols, guide = F)+
  theme_classic()+
  xlab("Wavelength (nm)")+
  ylab("Absorbance Value")+
  theme(text = element_text(size=18, face="bold"),
        axis.text.x = element_text(colour="black", face="plain", size=16),
        axis.text.y = element_text(colour="black", face="plain", size=16))
# Plot 1: Outline plot (less ink) — 2-D density contours of all spectra
ggplot(aes(x = Freq, y = Abs), data = stackdat) +
  geom_rect(aes(xmin=650, xmax=700, ymin=-Inf, ymax=Inf), fill = "greenyellow")+ # highlight the PAR for chl a
  scale_colour_gradient(low="green",high="red")+
  stat_density_2d(aes(colour=..level..), size =1.25) +
  theme_classic()+
  xlab("Wavelength (nm)")+
  ylab("Absorbance Value")+
  theme(text = element_text(size=18, face="bold"),
        axis.text.x = element_text(colour="black", face="plain", size=16),
        axis.text.y = element_text(colour="black", face="plain", size=16),
        legend.title = element_text(face="bold", size=12),
        legend.text= element_text(face="plain", size=10))
# Plot 2: Raster density plot (more ink, looks cooler)
ggplot(aes(x = Freq, y = Abs), data = stackdat) +
  stat_density_2d(geom="raster", aes(fill=..density..), contour = F)+
  scale_fill_continuous(low="blue",high="yellow")+
  theme_classic()+
  xlab("Wavelength (nm)")+
  ylab("Absorbance Value")+
  theme(text = element_text(size=18, face="bold"),
        axis.text.x = element_text(colour="black", face="plain", size=16),
        axis.text.y = element_text(colour="black", face="plain", size=16),
        legend.title = element_text(face="bold", size=12),
        legend.text= element_text(face="plain", size=10))
## Isolating Photosynthetically Active Radiation (PAR) ##
# Refine PAR which is 400-700nm; assumes the wavelength column is `Sample`.
PAR <- as.numeric(400:700)
PARdat <- mydat[which(mydat$Sample %in% PAR), ]
# ncol(mydat) == ncol(PARdat) here (row subset only), so this is safe albeit
# confusing.
PARstackdat <- as.data.frame(cbind(PARdat[,1], stack(PARdat[,2:ncol(mydat)])))
names(PARstackdat) <- c("Freq", "Abs", "Int")
# Plot PAR data
# Plot 1: Outline plot (less ink)
ggplot(aes(x = Freq, y = Abs), data = PARstackdat) +
  geom_rect(aes(xmin=650, xmax=700, ymin=-Inf, ymax=Inf), fill = "grey40")+ # highlight the PAR for chl a
  #geom_rect(aes(xmin=480, xmax=520, ymin=-Inf, ymax=Inf), fill = "grey80")+ # highlight the PAR for echinenone
  scale_colour_gradient(low="green",high="red")+
  stat_density_2d(aes(colour=..level..), size =1.25) +
  theme_classic()+
  xlab("Wavelength (nm)")+
  ylab("Absorbance Value")+
  theme(text = element_text(size=18, face="bold"),
        axis.text.x = element_text(colour="black", face="plain", size=16),
        axis.text.y = element_text(colour="black", face="plain", size=16),
        legend.title = element_text(face="bold", size=12),
        legend.text= element_text(face="plain", size=10))
# Plot 2: Raster density plot (more ink, looks cooler)
ggplot(aes(x = Freq, y = Abs), data = PARstackdat) +
  stat_density_2d(geom="raster", aes(fill=..density..), contour = F)+ #having fun with density fills
  scale_fill_continuous(low="blue",high="yellow")+
  theme_classic()+
  xlab("Wavelength (nm)")+
  ylab("Absorbance Value")+
  theme(text = element_text(size=18, face="bold"),
        axis.text.x = element_text(colour="black", face="plain", size=16),
        axis.text.y = element_text(colour="black", face="plain", size=16),
        legend.title = element_text(face="bold", size=12),
        legend.text= element_text(face="plain", size=10))
## Plot the actual points:
# Set the palette
colfxn <- colorRampPalette(c("goldenrod", "darkmagenta"), bias =0.2) # Set the top and bottom of the gradient
PARcols <- colfxn(nlevels(PARstackdat$Int)) # Number of colours = number of intervals
ggplot(aes(x = Freq, y = Abs), data = PARstackdat) +
  geom_rect(aes(xmin=650, xmax=700, ymin=-Inf, ymax=Inf), fill = "grey80")+ # highlight the PAR for chl a
  geom_point(aes(colour=Int), pch = 16, alpha = 0.3, size = 3)+ # Points (lines won't work)
  scale_colour_manual(values = PARcols, guide_legend(title = "Midpoint (cm)"))+ # Add manual scale to improve look and interpretation
  theme_classic()+ # Bye-bye extra ink
  xlab("Wavelength (nm)")+
  ylab("Absorbance Value")+
  theme(text = element_text(size=18, face="bold"),
        axis.text.x = element_text(colour="black", face="plain", size=16),
        axis.text.y = element_text(colour="black", face="plain", size=16),
        legend.text = element_text(face="plain", size = 8),
        #legend.position = "top",
        legend.title = element_text(face="bold", size = 12)) # Customizing the text size so it's legible on a poster
|
# Alfredo Rojas
# BIOS 611: UMD data tidy
# 9.21.19
# """
# This code takes the Urban Ministries data and uses tidyr
# to tidy the dataset for analysis
# """
library(dplyr)
library(tidyr)
library(stringr)
library(tidyverse)
library(GGally)
library(gridExtra)
#""""
# Function for normalization
# From: https://datasharkie.com/how-to-normalize-data-in-r/
#""""
# Min-max rescale a numeric vector to [0, 1], ignoring NAs when
# computing the range. NA entries stay NA in the output.
normalize <- function(x) {
rng <- range(x, na.rm = TRUE)
(x - rng[1]) / (rng[2] - rng[1])
}
# read in data
UMD_data = read_tsv("data/UMD_Services_Provided_20190719.tsv")
head(UMD_data)
# Check the `Field 1 - 3` variables, are they empty?
# from: https://www.quora.com/How-do-I-get-a-frequency-count-based-on-two-columns-variables-in-an-R-dataframe
summarise(group_by(UMD_data, `Field1`, `Field2`, `Field3`), count = n())
# Remove Field1, Field2, and Field3 since they are all NAs
UMD_data2 <- UMD_data %>%
select(-`Client File Merge`, -`Field1`, -`Field2`, -`Field3`)
# change date format so R can interpret it
UMD_data2$Date <- as.Date(UMD_data2$Date, "%m/%d/%Y")
# search for outliers and remove
outliers <- boxplot(UMD_data2$`Food Pounds`)$out
# food data, select target variables, drop NAs, filter for 2000 - 2019, change date format
UMD_food <- UMD_data2 %>%
select(Date, `Client File Number`, `Food Provided for`, `Food Pounds`) %>%
drop_na(`Food Provided for`,`Food Pounds`) %>%
filter(Date >= "2000-01-01", Date <= "2019-12-31", `Food Pounds` < 100, `Food Provided for` < 100) %>%
separate(Date, into = c("Year", "Month", "Day"), sep = "-")
# summarize data by group, count gets number of observations per day
food_summary <- UMD_food %>%
group_by(Year, Month, Day) %>%
summarise(
count = n(),
lbs_per_prsn = sum(`Food Pounds`) / sum(`Food Provided for`),
food_pounds_sum = sum(`Food Pounds`, na.rm = TRUE),
ppl_sum = sum(`Food Provided for`, na.rm = TRUE),
ppl_avg = mean(`Food Provided for`, na.rm = TRUE)
)
# clothes data, select relevant variables, change date format
clths_smmry <- UMD_data2 %>%
select(Date, `Client File Number`, `Clothing Items`) %>%
drop_na(`Client File Number`, `Clothing Items`) %>%
filter(Date >= "2000-01-01", Date <= "2019-12-31") %>%
separate(Date, into = c("Year", "Month", "Day"), sep = "-") %>%
group_by(Year, Month) %>%
summarise(
count = n(),
sum_clths = sum(`Clothing Items`)
)
#############################
# Plotting yearly food lbs per year on a monthly basis, using bar graph
# Also plotting yearly clothing items on a montly basis, using bar graph
# help from:
# https://www.earthdatascience.org/courses/earth-analytics/time-series-data/summarize-time-series-by-month-in-r/
# create new monthly variable for bar graph
# 2019 and 01 are dummy numbers, just trying to use format for month
#############################
# plot food pounds annually in a bar graph
food_summary %>%
mutate(month2 = as.Date(paste0("2019-", Month, "-01"), "%Y-%m-%d")) %>%
ggplot(mapping = aes(x = month2, y = food_pounds_sum)) +
geom_bar(stat = "identity", fill = "darkseagreen4") +
facet_wrap(~ Year, ncol = 4) +
labs(title = "Monthly Pounds of Food, 2000 - 2019",
subtitle = "Data plotted by year",
x = "Month",
y = "Food Pounds") + theme_bw(base_size = 15) +
scale_x_date(date_labels = "%b")
# plot clothing items annually in a bar graph
clths_smmry %>%
mutate(month2 = as.Date(paste0("2019-", Month, "-01"), "%Y-%m-%d")) %>%
ggplot(mapping = aes(x = month2, y = sum_clths)) +
geom_bar(stat = "identity", fill = "darkslateblue") +
facet_wrap(~ Year, ncol = 4) +
labs(title = "Clothes Items Provided, 2000 - 2019 (Monthly)",
subtitle = "Data plotted by year",
x = "Month",
y = "Clothes Items") + theme_bw(base_size = 15) +
scale_x_date(date_labels = "%b")
###########################
# COMPARE CLOTHES and FOOD
# create data frame containing food and clothes data together
###########################
food_clths <- UMD_data2 %>%
select(Date, `Client File Number`, `Clothing Items`, `Food Pounds`, `Food Provided for`) %>%
drop_na(`Client File Number`, `Clothing Items`, `Food Pounds`, `Food Provided for`) %>%
filter(Date >= "2000-01-01", Date <= "2019-12-31", `Food Pounds` < 100)
# Summary table of food_clths grouped by Date
food_clths_smry <- food_clths %>%
group_by(Date) %>%
summarise(
count = n(),
sum_food = sum(`Food Pounds`),
sum_clths = sum(`Clothing Items`),
sum_ppl_food = sum(`Food Provided for`),
food_per_prsn = sum_food / sum_ppl_food,
clths_per_vist = sum_clths / count
)
# now explore relationship between clothes items and food pounds on a daily basis
# find correlation coefficient, rounded to nearest 3 decimals
round(cor(food_clths_smry$sum_clths, food_clths_smry$sum_food, method = "pearson"), 3)
# plot sums of clothes and food pounds
p1 <- food_clths_smry %>%
ggplot(aes(x = sum_clths, y = sum_food)) +
geom_point(color = "darkslateblue", alpha = 1/3, position = "jitter") +
geom_smooth(se = FALSE, color = "deeppink3") +
labs(title = "Daily Sums of Clothes Items\nand Food lbs, 2000 - 2019",
subtitle = "Each observation is one day",
x = "Sum of clothes items",
y = "Sum of food lbs.") +
annotate("text", x = 150, y = 30, label = "r = 0.786")
# Normalize the variables
# correlation coefficient of food/person and clothes/visit
round(cor(food_clths_smry$food_per_prsn, food_clths_smry$clths_per_vist,
method = "pearson", use = "complete.obs"), 3)
# plot food per person & and clothes per visit
p2 <- food_clths_smry %>%
ggplot(aes(x = clths_per_vist, y = food_per_prsn)) +
geom_point(color = "darkslateblue", alpha = 1/3) +
geom_smooth(se = FALSE, color = "deeppink3") +
labs(title = "Daily Clothing Items and Food Pounds \nper Person, 2000 - 2019",
subtitle = "Each observation is for one day",
x = "Sum of clothing items per daily visits \n(multiple clients)",
y = "Food lbs. provided per person") +
annotate("text", x = 30, y = 2.5, label = "r = - 0.484")
# side-by-side plot
grid.arrange(p1, p2, nrow = 1)
# Summary of food_clths table grouped by Client
client_smry <- food_clths %>%
group_by(`Client File Number`) %>%
summarise(
count = n(),
sum_food = sum(`Food Pounds`),
sum_clths = sum(`Clothing Items`),
sum_ppl_food = sum(`Food Provided for`),
food_per_prsn = sum_food / sum_ppl_food,
clths_per_clnt = sum_clths / count
)
# find correlation coefficient for sum of clothes and food per client
round(cor(client_smry$sum_clths, client_smry$sum_food,
method = "pearson", use = "complete.obs"), 3)
p3 <- client_smry %>%
ggplot(aes(x = sum_clths, y = sum_food)) +
geom_point(color = "darkslateblue", alpha = 1/3, position = "jitter") +
geom_smooth(se = FALSE, color = "deeppink3") +
labs(title = "Sums of Clothes Items and Food lbs \nper Client Number, 2000 - 2019",
subtitle = "Each observation is one client",
x = "Sum of clothes items",
y = "Sum of food lbs.") +
annotate("text", x = 1000, y = 100, label = "r = 0.924")
# Normalize the variables
# correlation coefficient, log transformed
round(cor(client_smry$clths_per_clnt, client_smry$food_per_prsn,
method = "pearson", use = "complete.obs"), 3)
# same variables, but log transformed
# Client-level scatter: clothes per client vs food lbs per person.
# BUG FIX: the title originally contained "\for", where "\f" is the
# form-feed escape in an R string; the intended line break is "\n".
p4 <- client_smry %>%
ggplot(aes(x = clths_per_clnt, y = food_per_prsn)) +
geom_point(color = "darkslateblue", alpha = 1/3) +
geom_smooth(se = FALSE, color = "deeppink3") +
labs(title = "Clothing Items and Food lbs per Person \nfor each Client Number, 2000 - 2019",
subtitle = "Each observation is for one client",
x = "Sum of clothing items per visit \n(for one client)",
y = "Food lbs. provided per person") +
annotate("text", x = 30, y = 12, label = "r = - 0.419")
# side-by-side plot
grid.arrange(p3, p4, nrow = 1)
# Explore the relationship between clothes and food lbs on a yearly basis, notice positive trend
# for most years
# NOTE(review): food_clths has no food_per_prsn / sum_food columns (those
# are created in the *_smry summary tables), so this aes() mapping looks
# like it would error at run time — confirm whether a summary table
# grouped by Date/Year was intended here.
# NOTE(review): the title says "Logged" but no log transform is applied
# anywhere in this chain — confirm the intended title or add the log.
food_clths %>%
separate(Date, into = c("Year", "Month", "Day"), sep = "-") %>%
ggplot(aes(x = food_per_prsn, y = sum_food)) +
geom_point(color = "darkslateblue", alpha = 1/3) +
geom_smooth(se = FALSE, color = "deeppink3") +
facet_wrap(~ Year, ncol = 4) +
labs(title = "Logged Clothing Items & Food Pounds per day, 2000 - 2019",
subtitle = "Data plotted by year",
x = "Clothing items per day",
y = "Food pounds per day") + theme_bw(base_size = 15)
| /project_1/scripts/Project_1_tidy.R | no_license | ajrojas1/bios-611-data-sci | R | false | false | 8,552 | r | # Alfredo Rojas
# BIOS 611: UMD data tidy
# 9.21.19
# """
# This code takes the Urban Ministries data and uses tidyr
# to tidy the dataset for analysis
# """
library(dplyr)
library(tidyr)
library(stringr)
library(tidyverse)
library(GGally)
library(gridExtra)
#""""
# Function for normalization
# From: https://datasharkie.com/how-to-normalize-data-in-r/
#""""
# Min-max rescale a numeric vector to [0, 1], ignoring NAs when
# computing the range. NA entries stay NA in the output.
normalize <- function(x) {
rng <- range(x, na.rm = TRUE)
(x - rng[1]) / (rng[2] - rng[1])
}
# read in data
UMD_data = read_tsv("data/UMD_Services_Provided_20190719.tsv")
head(UMD_data)
# Check the `Field 1 - 3` variables, are they empty?
# from: https://www.quora.com/How-do-I-get-a-frequency-count-based-on-two-columns-variables-in-an-R-dataframe
summarise(group_by(UMD_data, `Field1`, `Field2`, `Field3`), count = n())
# Remove Field1, Field2, and Field3 since they are all NAs
UMD_data2 <- UMD_data %>%
select(-`Client File Merge`, -`Field1`, -`Field2`, -`Field3`)
# change date format so R can interpret it
UMD_data2$Date <- as.Date(UMD_data2$Date, "%m/%d/%Y")
# search for outliers and remove
outliers <- boxplot(UMD_data2$`Food Pounds`)$out
# food data, select target variables, drop NAs, filter for 2000 - 2019, change date format
UMD_food <- UMD_data2 %>%
select(Date, `Client File Number`, `Food Provided for`, `Food Pounds`) %>%
drop_na(`Food Provided for`,`Food Pounds`) %>%
filter(Date >= "2000-01-01", Date <= "2019-12-31", `Food Pounds` < 100, `Food Provided for` < 100) %>%
separate(Date, into = c("Year", "Month", "Day"), sep = "-")
# summarize data by group, count gets number of observations per day
food_summary <- UMD_food %>%
group_by(Year, Month, Day) %>%
summarise(
count = n(),
lbs_per_prsn = sum(`Food Pounds`) / sum(`Food Provided for`),
food_pounds_sum = sum(`Food Pounds`, na.rm = TRUE),
ppl_sum = sum(`Food Provided for`, na.rm = TRUE),
ppl_avg = mean(`Food Provided for`, na.rm = TRUE)
)
# clothes data, select relevant variables, change date format
clths_smmry <- UMD_data2 %>%
select(Date, `Client File Number`, `Clothing Items`) %>%
drop_na(`Client File Number`, `Clothing Items`) %>%
filter(Date >= "2000-01-01", Date <= "2019-12-31") %>%
separate(Date, into = c("Year", "Month", "Day"), sep = "-") %>%
group_by(Year, Month) %>%
summarise(
count = n(),
sum_clths = sum(`Clothing Items`)
)
#############################
# Plotting yearly food lbs per year on a monthly basis, using bar graph
# Also plotting yearly clothing items on a montly basis, using bar graph
# help from:
# https://www.earthdatascience.org/courses/earth-analytics/time-series-data/summarize-time-series-by-month-in-r/
# create new monthly variable for bar graph
# 2019 and 01 are dummy numbers, just trying to use format for month
#############################
# plot food pounds annually in a bar graph
food_summary %>%
mutate(month2 = as.Date(paste0("2019-", Month, "-01"), "%Y-%m-%d")) %>%
ggplot(mapping = aes(x = month2, y = food_pounds_sum)) +
geom_bar(stat = "identity", fill = "darkseagreen4") +
facet_wrap(~ Year, ncol = 4) +
labs(title = "Monthly Pounds of Food, 2000 - 2019",
subtitle = "Data plotted by year",
x = "Month",
y = "Food Pounds") + theme_bw(base_size = 15) +
scale_x_date(date_labels = "%b")
# plot clothing items annually in a bar graph
clths_smmry %>%
mutate(month2 = as.Date(paste0("2019-", Month, "-01"), "%Y-%m-%d")) %>%
ggplot(mapping = aes(x = month2, y = sum_clths)) +
geom_bar(stat = "identity", fill = "darkslateblue") +
facet_wrap(~ Year, ncol = 4) +
labs(title = "Clothes Items Provided, 2000 - 2019 (Monthly)",
subtitle = "Data plotted by year",
x = "Month",
y = "Clothes Items") + theme_bw(base_size = 15) +
scale_x_date(date_labels = "%b")
###########################
# COMPARE CLOTHES and FOOD
# create data frame containing food and clothes data together
###########################
food_clths <- UMD_data2 %>%
select(Date, `Client File Number`, `Clothing Items`, `Food Pounds`, `Food Provided for`) %>%
drop_na(`Client File Number`, `Clothing Items`, `Food Pounds`, `Food Provided for`) %>%
filter(Date >= "2000-01-01", Date <= "2019-12-31", `Food Pounds` < 100)
# Summary table of food_clths grouped by Date
food_clths_smry <- food_clths %>%
group_by(Date) %>%
summarise(
count = n(),
sum_food = sum(`Food Pounds`),
sum_clths = sum(`Clothing Items`),
sum_ppl_food = sum(`Food Provided for`),
food_per_prsn = sum_food / sum_ppl_food,
clths_per_vist = sum_clths / count
)
# now explore relationship between clothes items and food pounds on a daily basis
# find correlation coefficient, rounded to nearest 3 decimals
round(cor(food_clths_smry$sum_clths, food_clths_smry$sum_food, method = "pearson"), 3)
# plot sums of clothes and food pounds
p1 <- food_clths_smry %>%
ggplot(aes(x = sum_clths, y = sum_food)) +
geom_point(color = "darkslateblue", alpha = 1/3, position = "jitter") +
geom_smooth(se = FALSE, color = "deeppink3") +
labs(title = "Daily Sums of Clothes Items\nand Food lbs, 2000 - 2019",
subtitle = "Each observation is one day",
x = "Sum of clothes items",
y = "Sum of food lbs.") +
annotate("text", x = 150, y = 30, label = "r = 0.786")
# Normalize the variables
# correlation coefficient of food/person and clothes/visit
round(cor(food_clths_smry$food_per_prsn, food_clths_smry$clths_per_vist,
method = "pearson", use = "complete.obs"), 3)
# plot food per person & and clothes per visit
p2 <- food_clths_smry %>%
ggplot(aes(x = clths_per_vist, y = food_per_prsn)) +
geom_point(color = "darkslateblue", alpha = 1/3) +
geom_smooth(se = FALSE, color = "deeppink3") +
labs(title = "Daily Clothing Items and Food Pounds \nper Person, 2000 - 2019",
subtitle = "Each observation is for one day",
x = "Sum of clothing items per daily visits \n(multiple clients)",
y = "Food lbs. provided per person") +
annotate("text", x = 30, y = 2.5, label = "r = - 0.484")
# side-by-side plot
grid.arrange(p1, p2, nrow = 1)
# Summary of food_clths table grouped by Client
client_smry <- food_clths %>%
group_by(`Client File Number`) %>%
summarise(
count = n(),
sum_food = sum(`Food Pounds`),
sum_clths = sum(`Clothing Items`),
sum_ppl_food = sum(`Food Provided for`),
food_per_prsn = sum_food / sum_ppl_food,
clths_per_clnt = sum_clths / count
)
# find correlation coefficient for sum of clothes and food per client
round(cor(client_smry$sum_clths, client_smry$sum_food,
method = "pearson", use = "complete.obs"), 3)
p3 <- client_smry %>%
ggplot(aes(x = sum_clths, y = sum_food)) +
geom_point(color = "darkslateblue", alpha = 1/3, position = "jitter") +
geom_smooth(se = FALSE, color = "deeppink3") +
labs(title = "Sums of Clothes Items and Food lbs \nper Client Number, 2000 - 2019",
subtitle = "Each observation is one client",
x = "Sum of clothes items",
y = "Sum of food lbs.") +
annotate("text", x = 1000, y = 100, label = "r = 0.924")
# Normalize the variables
# correlation coefficient, log transformed
round(cor(client_smry$clths_per_clnt, client_smry$food_per_prsn,
method = "pearson", use = "complete.obs"), 3)
# same variables, but log transformed
# Client-level scatter: clothes per client vs food lbs per person.
# BUG FIX: the title originally contained "\for", where "\f" is the
# form-feed escape in an R string; the intended line break is "\n".
p4 <- client_smry %>%
ggplot(aes(x = clths_per_clnt, y = food_per_prsn)) +
geom_point(color = "darkslateblue", alpha = 1/3) +
geom_smooth(se = FALSE, color = "deeppink3") +
labs(title = "Clothing Items and Food lbs per Person \nfor each Client Number, 2000 - 2019",
subtitle = "Each observation is for one client",
x = "Sum of clothing items per visit \n(for one client)",
y = "Food lbs. provided per person") +
annotate("text", x = 30, y = 12, label = "r = - 0.419")
# side-by-side plot
grid.arrange(p3, p4, nrow = 1)
# Explore the relationship between clothes and food lbs on a yearly basis, notice positive trend
# for most years
food_clths %>%
separate(Date, into = c("Year", "Month", "Day"), sep = "-") %>%
ggplot(aes(x = food_per_prsn, y = sum_food)) +
geom_point(color = "darkslateblue", alpha = 1/3) +
geom_smooth(se = FALSE, color = "deeppink3") +
facet_wrap(~ Year, ncol = 4) +
labs(title = "Logged Clothing Items & Food Pounds per day, 2000 - 2019",
subtitle = "Data plotted by year",
x = "Clothing items per day",
y = "Food pounds per day") + theme_bw(base_size = 15)
|
/Droga/Papieze/papieze.r | no_license | pbiecek/Eseje | R | false | false | 6,253 | r | ||
#(a) Naive Monte Carlo
j<-1
Y <- rep(0,100000)
N <- rpois(100000,5)
repeat{
if(sum(rnorm(N[j],0,1)) > 10){
Y[j] <- 1
}
j=j+1
if(j==100001){
break
}
}
Probability <- sum(Y)/length(N)
Variance <- 100000*Probability*(1-Probability)
#(b) Same naive simulation as (a), then a control-variate correction
# using N (Poisson counts with known mean) to reduce estimator variance.
j<-1
Y <- rep(0,100000)
N <- rpois(100000,5)
repeat{
if(sum(rnorm(N[j],0,1)) > 10){
Y[j] <- 1
}
j=j+1
if(j==100001){
break
}
}
# Optimal control-variate coefficient -cov(Y,N)/var(N). Renamed from `c`
# so the name cannot be confused with base::c().
cv_coef <- -cov(Y,N)/var(N)
# BUG FIX: the original wrote Y+c(N-mean(N)), which *calls* base::c()
# (a no-op here, since R looks up a *function* named c in call position)
# instead of multiplying by the coefficient; the control variate was
# therefore never applied.
C_V_reduction <- var(Y + cv_coef*(N-mean(N)))
Mean <- mean(Y + cv_coef*(N-mean(N)))
#(c)
N <- rpois(100000,5)
K <- pnorm(10,0,sqrt(N),lower.tail = F)
Probability <- mean(K)
Variance <- var(K)
#(d)
# Poisson(5) probability mass at x, written in closed form.
# (factorial(x) == gamma(x + 1) in R, so behavior is unchanged.)
f_N <- function(x) {
5^x * exp(-5) / gamma(x + 1)
}
# Poisson(10) probability mass at x (importance-sampling proposal),
# written in closed form. factorial(x) == gamma(x + 1), so behavior
# is unchanged.
g_N <- function(x) {
10^x * exp(-10) / gamma(x + 1)
}
N <- rpois(100000,10)
X<-rep(0,100000)
for(i in 1:length(N)){
X[i] <- pnorm(10,0,sqrt(N[i]),lower.tail = FALSE)*f_N(N[i])/g_N(N[i])
}
mean(X)
var(X)
#(e)
f_g <- function(x){
return(exp(-2*x+2))
}
X<-rep(0,100000)
N <- rpois(100000,5)
j<-1
repeat{
Y <- rnorm(N[j],2,1)
if(sum(Y)>10){
X[j]<- prod(f_g(Y))
}
j<-j+1
if(j==100001){
break
}
}
mean(X)
var(X)
#(f)
h_l <- function(n){
return(exp(5)/2^n)
}
f_g <- function(x){
return(exp(-2*x+2))
}
X<-rep(0,100000)
N <- rpois(100000,10)
j<-1
repeat{
Y <- rnorm(N[j],2,1)
if(sum(Y)>10){
X[j]<- prod(f_g(Y))*h_l(N[j])
}
j<-j+1
if(j==100001){
break
}
}
mean(X)
var(X)
| /Problem2.R | no_license | Pinwei-Yu/Statistical-Computing | R | false | false | 1,380 | r | #(a) Naive Monte Carlo
j<-1
Y <- rep(0,100000)
N <- rpois(100000,5)
repeat{
if(sum(rnorm(N[j],0,1)) > 10){
Y[j] <- 1
}
j=j+1
if(j==100001){
break
}
}
Probability <- sum(Y)/length(N)
Variance <- 100000*Probability*(1-Probability)
#(b) Same naive simulation as (a), then a control-variate correction
# using N (Poisson counts with known mean) to reduce estimator variance.
j<-1
Y <- rep(0,100000)
N <- rpois(100000,5)
repeat{
if(sum(rnorm(N[j],0,1)) > 10){
Y[j] <- 1
}
j=j+1
if(j==100001){
break
}
}
# Optimal control-variate coefficient -cov(Y,N)/var(N). Renamed from `c`
# so the name cannot be confused with base::c().
cv_coef <- -cov(Y,N)/var(N)
# BUG FIX: the original wrote Y+c(N-mean(N)), which *calls* base::c()
# (a no-op here, since R looks up a *function* named c in call position)
# instead of multiplying by the coefficient; the control variate was
# therefore never applied.
C_V_reduction <- var(Y + cv_coef*(N-mean(N)))
Mean <- mean(Y + cv_coef*(N-mean(N)))
#(c)
N <- rpois(100000,5)
K <- pnorm(10,0,sqrt(N),lower.tail = F)
Probability <- mean(K)
Variance <- var(K)
#(d)
f_N <- function(x){
return(exp(-5)*5^x/factorial(x))
}
g_N <- function(x){
return(exp(-10)*10^x/factorial(x))
}
N <- rpois(100000,10)
X<-rep(0,100000)
for(i in 1:length(N)){
X[i] <- pnorm(10,0,sqrt(N[i]),lower.tail = FALSE)*f_N(N[i])/g_N(N[i])
}
mean(X)
var(X)
#(e)
f_g <- function(x){
return(exp(-2*x+2))
}
X<-rep(0,100000)
N <- rpois(100000,5)
j<-1
repeat{
Y <- rnorm(N[j],2,1)
if(sum(Y)>10){
X[j]<- prod(f_g(Y))
}
j<-j+1
if(j==100001){
break
}
}
mean(X)
var(X)
#(f)
h_l <- function(n){
return(exp(5)/2^n)
}
f_g <- function(x){
return(exp(-2*x+2))
}
X<-rep(0,100000)
N <- rpois(100000,10)
j<-1
repeat{
Y <- rnorm(N[j],2,1)
if(sum(Y)>10){
X[j]<- prod(f_g(Y))*h_l(N[j])
}
j<-j+1
if(j==100001){
break
}
}
mean(X)
var(X)
|
## File Name: tam_mml_calc_prob.R
## File Version: 9.26
#####################################################################
# calc_prob
# Calculation of probabilities
# Compute item-category response probabilities at every theta node and
# (optionally) refresh the rows of the AXsi item-parameter matrix.
#
# Args:
#   iIndex: integer indices of the items to evaluate.
#   A: design array indexed as A[item, category, xsi-parameter].
#   AXsi: item x category matrix; rows iIndex are recomputed from A and
#     xsi when recalc is TRUE, and reused as-is otherwise.
#   B: loading array indexed as B[item, category, theta-dimension].
#   xsi: vector of item parameters (length dim(A)[3]).
#   theta: matrix of quadrature nodes, one latent dimension per column.
#   nnodes: number of theta nodes.
#   maxK: maximum number of response categories.
#   recalc: recompute AXsi from A and xsi? (default TRUE)
#
# Returns:
#   list(rprobs = item x category x node probability array,
#        AXsi   = the (possibly updated) AXsi matrix)
tam_mml_calc_prob <- function(iIndex, A, AXsi, B, xsi, theta,
nnodes, maxK, recalc=TRUE)
{
if(recalc){
LI <- length(iIndex)
LXsi <- dim(A)[3]
AXsi.tmp <- array( 0 , dim = c( LI , maxK , nnodes ) )
for (kk in 1:maxK){
A_kk <- matrix( A[ iIndex , kk , ] , nrow = LI , ncol = LXsi )
AXsi.tmp[, kk , 1:nnodes ] <- A_kk %*% xsi
}
# CONSISTENCY FIX: use `<-` like every other assignment in this file
# (this line originally used `=`).
AXsi[iIndex,] <- AXsi.tmp[,,1]
} else {
# AXsi.tmp <- array( AXsi, dim = c( length(iIndex) , maxK , nnodes ) )
AXsi.tmp <- array( AXsi[ iIndex, ] , dim = c( length(iIndex) , maxK , nnodes ) )
}
# Accumulate the B * theta contribution over the latent dimensions.
Btheta <- array(0, dim = c(length(iIndex) , maxK , nnodes) )
for( dd in 1:ncol(theta) ){
Btheta <- Btheta + array(B[iIndex,,dd ,drop = FALSE] %o% theta[,dd] , dim = dim(Btheta))
}
#*** subtract maximum in Rcpp to avoid numerical overflow
rr0 <- Btheta + AXsi.tmp
rr1 <- tam_calc_prob_helper_subtract_max( rr0=rr0 )
rr <- exp(rr1)
# Normalize over categories: divide by the per-item, per-node category
# sums (colSums of rr with categories permuted to the front), replicated
# back to the shape of rr via aperm.
rprobs <- rr / aperm( array( rep( colSums( aperm( rr , c(2,1,3) ) ,
dims=1 , na.rm = TRUE) , maxK ), dim=dim(rr)[c(1,3,2)] ) , c(1,3,2) )
#---- output
res <- list("rprobs" = rprobs, "AXsi" = AXsi)
return(res)
}
########################################################################
calc_prob.v5 <- tam_mml_calc_prob
tam_calc_prob <- tam_mml_calc_prob
| /R/tam_mml_calc_prob.R | no_license | yaozeyang90/TAM | R | false | false | 1,438 | r | ## File Name: tam_mml_calc_prob.R
## File Version: 9.26
#####################################################################
# calc_prob
# Calculation of probabilities
# Compute item-category response probabilities at every theta node and
# (optionally) refresh the rows of the AXsi item-parameter matrix.
#
# Args:
#   iIndex: integer indices of the items to evaluate.
#   A: design array indexed as A[item, category, xsi-parameter].
#   AXsi: item x category matrix; rows iIndex are recomputed from A and
#     xsi when recalc is TRUE, and reused as-is otherwise.
#   B: loading array indexed as B[item, category, theta-dimension].
#   xsi: vector of item parameters (length dim(A)[3]).
#   theta: matrix of quadrature nodes, one latent dimension per column.
#   nnodes: number of theta nodes.
#   maxK: maximum number of response categories.
#   recalc: recompute AXsi from A and xsi? (default TRUE)
#
# Returns:
#   list(rprobs = item x category x node probability array,
#        AXsi   = the (possibly updated) AXsi matrix)
tam_mml_calc_prob <- function(iIndex, A, AXsi, B, xsi, theta,
nnodes, maxK, recalc=TRUE)
{
if(recalc){
LI <- length(iIndex)
LXsi <- dim(A)[3]
AXsi.tmp <- array( 0 , dim = c( LI , maxK , nnodes ) )
for (kk in 1:maxK){
A_kk <- matrix( A[ iIndex , kk , ] , nrow = LI , ncol = LXsi )
AXsi.tmp[, kk , 1:nnodes ] <- A_kk %*% xsi
}
# CONSISTENCY FIX: use `<-` like every other assignment in this file
# (this line originally used `=`).
AXsi[iIndex,] <- AXsi.tmp[,,1]
} else {
# AXsi.tmp <- array( AXsi, dim = c( length(iIndex) , maxK , nnodes ) )
AXsi.tmp <- array( AXsi[ iIndex, ] , dim = c( length(iIndex) , maxK , nnodes ) )
}
# Accumulate the B * theta contribution over the latent dimensions.
Btheta <- array(0, dim = c(length(iIndex) , maxK , nnodes) )
for( dd in 1:ncol(theta) ){
Btheta <- Btheta + array(B[iIndex,,dd ,drop = FALSE] %o% theta[,dd] , dim = dim(Btheta))
}
#*** subtract maximum in Rcpp to avoid numerical overflow
rr0 <- Btheta + AXsi.tmp
rr1 <- tam_calc_prob_helper_subtract_max( rr0=rr0 )
rr <- exp(rr1)
# Normalize over categories: divide by the per-item, per-node category
# sums (colSums of rr with categories permuted to the front), replicated
# back to the shape of rr via aperm.
rprobs <- rr / aperm( array( rep( colSums( aperm( rr , c(2,1,3) ) ,
dims=1 , na.rm = TRUE) , maxK ), dim=dim(rr)[c(1,3,2)] ) , c(1,3,2) )
#---- output
res <- list("rprobs" = rprobs, "AXsi" = AXsi)
return(res)
}
########################################################################
calc_prob.v5 <- tam_mml_calc_prob
tam_calc_prob <- tam_mml_calc_prob
|
##############
##TETRACHORIC#
##############
library(foreign)
rm(list=ls())
dataset = read.spss("C:\\Users\\pfakhari\\Google Drive\\Courses\\S690\\Assignment 4\\Data.sav", , to.data.frame=TRUE)
levels(dataset$T056301)
levels(dataset$DisabilityLevel)
# change yes no to 0 and 1
names=colnames(dataset)
numcol = ncol(dataset)
numrow = nrow(dataset)
dataset3 = matrix(0, numrow, numcol-5)
# convert the first 54 columns to binary 0 and 1
for (i in 1:(numcol-5)){
dataset3[,i] =as.numeric(dataset[,i])
}
#install.packages("psych")
#install.packages("polycor")
#install.packages("bindata")
library(psych)
library(polycor)
library(bindata)
library(psy)
x <- as.data.frame(dataset3)
x1<-na.omit(x) # remove NA
x2=t(x1)
ind = matrix(1, nrow=nrow(x2))
# find columns with all 1 or all 2
VecSum = colSums (x1, na.rm = FALSE, dims = 1)
ind[VecSum==nrow(x1)]=0;ind[VecSum==2*nrow(x1)]=0;
x3=t(subset(x2,as.logical(ind)))
#**************************************************#
# There are many variables that are all 2 for all subjects
# except 1 or 2 subjects, So I exclude them as well
# I have 251 subjects so it the column sum should be less
# than 500.
x4=t(x3)
ind = matrix(1, nrow=nrow(x4))
VecSum = colSums (x3, na.rm = FALSE, dims = 1)
ind[VecSum>499]=0;
x5=t(subset(x4,as.logical(ind)))
tetrachoric(x3)
R <- tetrachoric(x3)$rho
fit = factanal(covmat = R, factors = 2, rotation = "varimax")
print(fit, digits = 2, cutoff = .2, sort = TRUE)
A <- factanal(x = x3, covmat = R, factors = 2, rotation = "varimax")$loadings[,1:2]
as.matrix(x3)%*%solve(R)%*%A
scale(as.matrix(x3))%*%solve(R)%*%A
# Estimating factor scores:
scores <- factanal(x3,factors=2, rotation="varimax",scores="regression")$scores
# EFA separately on PD and TS:
xPD = x1[, 5:11]
# BUG FIX: the original `x1[,26:37, 54]` passed 54 as the `drop`
# argument of `[.data.frame`, not as a column selection. Based on the
# parallel xTS construction below, the intended columns appear to be
# 26:37 together with column 54 — TODO confirm against the codebook.
xPD = cbind(xPD, x1[, c(26:37, 54)])
xTS = x1[, 12:25]
xTS = cbind(xTS, x1[,38:53])
RPD <- tetrachoric(xPD)$rho
RTS <- tetrachoric(xTS)$rho
fitPD = factanal(covmat = RPD, factors = 3, rotation = "varimax")
fitTS = factanal(covmat = RTS, factors = 2, rotation = "varimax")
print(fitPD, digits = 2, cutoff = .2, sort = TRUE)
print(fitTS, digits = 2, cutoff = .2, sort = TRUE)
APD <- factanal(x = xPD, covmat = RPD, factors = 3, rotation = "varimax")$loadings[,1:2]
ATS <- factanal(x = xTS, covmat = RTS, factors = 2, rotation = "varimax")$loadings[,1:2]
as.matrix(x3)%*%solve(R)%*%A
scale(as.matrix(x3))%*%solve(R)%*%A
| /Counselling/FactorAnalysis/Code/TetraChoricTest.R | no_license | PegahFakhari/Projects | R | false | false | 2,391 | r | ##############
##TETRACHORIC#
##############
library(foreign)
rm(list=ls())
dataset = read.spss("C:\\Users\\pfakhari\\Google Drive\\Courses\\S690\\Assignment 4\\Data.sav", , to.data.frame=TRUE)
levels(dataset$T056301)
levels(dataset$DisabilityLevel)
# change yes no to 0 and 1
names=colnames(dataset)
numcol = ncol(dataset)
numrow = nrow(dataset)
dataset3 = matrix(0, numrow, numcol-5)
# convert the first 54 columns to binary 0 and 1
for (i in 1:(numcol-5)){
dataset3[,i] =as.numeric(dataset[,i])
}
#install.packages("psych")
#install.packages("polycor")
#install.packages("bindata")
library(psych)
library(polycor)
library(bindata)
library(psy)
x <- as.data.frame(dataset3)
x1<-na.omit(x) # remove NA
x2=t(x1)
ind = matrix(1, nrow=nrow(x2))
# find columns with all 1 or all 2
VecSum = colSums (x1, na.rm = FALSE, dims = 1)
ind[VecSum==nrow(x1)]=0;ind[VecSum==2*nrow(x1)]=0;
x3=t(subset(x2,as.logical(ind)))
#**************************************************#
# There are many variables that are all 2 for all subjects
# except 1 or 2 subjects, So I exclude them as well
# I have 251 subjects so it the column sum should be less
# than 500.
x4=t(x3)
ind = matrix(1, nrow=nrow(x4))
VecSum = colSums (x3, na.rm = FALSE, dims = 1)
ind[VecSum>499]=0;
x5=t(subset(x4,as.logical(ind)))
tetrachoric(x3)
R <- tetrachoric(x3)$rho
fit = factanal(covmat = R, factors = 2, rotation = "varimax")
print(fit, digits = 2, cutoff = .2, sort = TRUE)
A <- factanal(x = x3, covmat = R, factors = 2, rotation = "varimax")$loadings[,1:2]
as.matrix(x3)%*%solve(R)%*%A
scale(as.matrix(x3))%*%solve(R)%*%A
# Estimating factor scores:
scores <- factanal(x3,factors=2, rotation="varimax",scores="regression")$scores
# EFA separately on PD and TS:
xPD = x1[, 5:11]
# BUG FIX: the original `x1[,26:37, 54]` passed 54 as the `drop`
# argument of `[.data.frame`, not as a column selection. Based on the
# parallel xTS construction below, the intended columns appear to be
# 26:37 together with column 54 — TODO confirm against the codebook.
xPD = cbind(xPD, x1[, c(26:37, 54)])
xTS = x1[, 12:25]
xTS = cbind(xTS, x1[,38:53])
RPD <- tetrachoric(xPD)$rho
RTS <- tetrachoric(xTS)$rho
fitPD = factanal(covmat = RPD, factors = 3, rotation = "varimax")
fitTS = factanal(covmat = RTS, factors = 2, rotation = "varimax")
print(fitPD, digits = 2, cutoff = .2, sort = TRUE)
print(fitTS, digits = 2, cutoff = .2, sort = TRUE)
APD <- factanal(x = xPD, covmat = RPD, factors = 3, rotation = "varimax")$loadings[,1:2]
ATS <- factanal(x = xTS, covmat = RTS, factors = 2, rotation = "varimax")$loadings[,1:2]
as.matrix(x3)%*%solve(R)%*%A
scale(as.matrix(x3))%*%solve(R)%*%A
|
| pc = 0xc002 | a = 0x0a | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc004 | a = 0x0a | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x0080] = 0x0a |
| pc = 0xc006 | a = 0x01 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc008 | a = 0x01 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x0081] = 0x01 |
| pc = 0xc00a | a = 0xfe | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 10110100 |
| pc = 0xc00d | a = 0xfe | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 10110100 | MEM[0x010a] = 0xfe |
| pc = 0xc00f | a = 0x01 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc011 | a = 0x01 | x = 0x01 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc012 | a = 0x01 | x = 0x01 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc014 | a = 0xff | x = 0x01 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 10110100 | MEM[0x010a] = 0xfe |
| /res/adc_ind_x.r | permissive | JSpuri/EmuParadise | R | false | false | 964 | r | | pc = 0xc002 | a = 0x0a | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc004 | a = 0x0a | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x0080] = 0x0a |
| pc = 0xc006 | a = 0x01 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc008 | a = 0x01 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x0081] = 0x01 |
| pc = 0xc00a | a = 0xfe | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 10110100 |
| pc = 0xc00d | a = 0xfe | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 10110100 | MEM[0x010a] = 0xfe |
| pc = 0xc00f | a = 0x01 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc011 | a = 0x01 | x = 0x01 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc012 | a = 0x01 | x = 0x01 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc014 | a = 0xff | x = 0x01 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 10110100 | MEM[0x010a] = 0xfe |
|
## makeCacheMatrix takes in a square invertible matrix as a parameter,
## and returns a list containing the functions set(), get(), setInverse(),
## and getInverse().
## set() is a function that takes in a matrix and can override the
## the matrix x is set to.
## get() is a function that returns x.
## setInverse() takes in an inverse matrix and stores it in m.
## getInverse() returns m.
## Then a list is returned that stores the four functions.
## Wrap a matrix together with a cache slot for its inverse.
## Returns a list of accessors:
##   set(y)          - replace the matrix (and invalidate the cache)
##   get()           - return the stored matrix
##   setInverse(inv) - store a computed inverse in the cache
##   getInverse()    - return the cached inverse, or NULL if not set
makeCacheMatrix <- function(x = matrix())
{
cached_inverse <- NULL
set <- function(y = matrix()) {
x <<- y
cached_inverse <<- NULL  # a new matrix invalidates any cached inverse
}
get <- function() x
setInverse <- function(inverseMatrix) cached_inverse <<- inverseMatrix
getInverse <- function() cached_inverse
list(set = set, get = get,
setInverse = setInverse, getInverse = getInverse)
}
## cacheSolve takes in x which is the list returned from makeCacheMatrix().
## m calls the getInverse(), then an if statement checks to see if
## m is null, if m is not null then the matrix stored in m
## it is returned.
## The matix variable is equal to what is returned by get().
## Then the m is set to the inverse of the matrix variable.
## Then the inverse matrix m is set, and returned at the end of
## the function.
## Return the inverse of the matrix wrapped by makeCacheMatrix(),
## computing it at most once: on a cache hit a message is emitted and
## the stored inverse is returned; otherwise solve() is called, the
## result cached via x$setInverse(), and then returned.
cacheSolve <- function(x, ...)
{
cached <- x$getInverse()
if (is.null(cached)) {
cached <- solve(x$get(), ...)
x$setInverse(cached)
} else {
message("getting cached matrix")
}
cached
}
| /cachematrix.R | no_license | aric7/ProgrammingAssignment2 | R | false | false | 1,381 | r | ## makeCacheMatrix takes in a square invertible matrix as a parameter,
## and returns a list containing the functions set(), get(), setInverse(),
## and getInverse().
## set() is a function that takes in a matrix and can override the
## the matrix x is set to.
## get() is a function that returns x.
## setInverse() takes in an inverse matrix and stores it in m.
## getInverse() returns m.
## Then a list is returned that stores the four functions.
## Wrap a matrix together with a cache slot for its inverse.
## Returns a list of accessors:
##   set(y)          - replace the matrix (and invalidate the cache)
##   get()           - return the stored matrix
##   setInverse(inv) - store a computed inverse in the cache
##   getInverse()    - return the cached inverse, or NULL if not set
makeCacheMatrix <- function(x = matrix())
{
cached_inverse <- NULL
set <- function(y = matrix()) {
x <<- y
cached_inverse <<- NULL  # a new matrix invalidates any cached inverse
}
get <- function() x
setInverse <- function(inverseMatrix) cached_inverse <<- inverseMatrix
getInverse <- function() cached_inverse
list(set = set, get = get,
setInverse = setInverse, getInverse = getInverse)
}
## cacheSolve takes in x which is the list returned from makeCacheMatrix().
## m calls the getInverse(), then an if statement checks to see if
## m is null, if m is not null then the matrix stored in m
## it is returned.
## The matix variable is equal to what is returned by get().
## Then the m is set to the inverse of the matrix variable.
## Then the inverse matrix m is set, and returned at the end of
## the function.
## Return the inverse of the matrix wrapped by makeCacheMatrix(),
## computing it at most once: on a cache hit a message is emitted and
## the stored inverse is returned; otherwise solve() is called, the
## result cached via x$setInverse(), and then returned.
cacheSolve <- function(x, ...)
{
cached <- x$getInverse()
if (is.null(cached)) {
cached <- solve(x$get(), ...)
x$setInverse(cached)
} else {
message("getting cached matrix")
}
cached
}
|
OrderAlgorithms.maxsat <- function(uni.alg){
# Arrange algorithm names: plain algorithms first (sorted), then the
# "Crs" variants (sorted).
#
# Args:
#   uni.alg: A vector or a list of algorithm names to be ordered.
#
# Returns:
#   The names, deduplicated, sorted, and with "Crs" entries moved last.
uni.alg <- sort(unique(uni.alg))
crs.idx <- grep("Crs", uni.alg)
# BUG FIX: the original used uni.alg[-grep("Crs", ...)], which returns
# an EMPTY vector when no name contains "Crs" (negative indexing with
# integer(0) selects nothing), silently dropping every algorithm.
if (length(crs.idx) == 0) {
return(uni.alg)
}
c(uni.alg[-crs.idx], uni.alg[crs.idx])
}
OrderInstance.tsp <- function(instances){
# Order TSP instance file names by the first number embedded in each
# name, compared numerically.
#
# Args:
#   instances: A character vector of instance file names. Assumes each
#     name contains at least one digit run (TODO confirm upstream data).
#
# Returns:
#   The unique names sorted ascending by their embedded instance size.
instances <- unique(instances)
# BUG FIX: the original ordered the extracted numbers as *strings*, so
# e.g. "10" sorted before "2". Extract the first digit run and convert
# to numeric before ordering. (Base regmatches/regexpr replaces the
# stringr dependency.)
sizes <- as.numeric(regmatches(instances, regexpr("[0-9]+", instances)))
instances[order(sizes)]
}
OrderAlgorithms.tsp <- function(algorithm){
# Reorder algorithm names: all "0b" variants first, then the "0f"
# variants, each group alphabetically.
#
# Args:
#   algorithm: A vector of algorithm names.
#
# Returns:
#   The deduplicated, regrouped names. Note (visible from the code):
#   names matching neither "0b" nor "0f" are dropped.
sorted.names <- sort(unique(algorithm))
c(sorted.names[grep("0b", sorted.names)],
sorted.names[grep("0f", sorted.names)])
}
# Load a MAX-SAT model-fit result table and attach ordered factor
# columns for the algorithm and the instance size parsed from the
# instance_file column.
#
# Args:
#   file.path: Path to the CSV of model results (defaults to the decay
#     model output).
#
# Returns:
#   The data frame read from file.path with two extra ordered factors:
#   $algorithms (levels ordered via OrderAlgorithms.maxsat) and
#   $instances (size prefix of the data file name).
#
# NOTE(review): relies on `%>%` (magrittr/dplyr) being attached and on
# OrderAlgorithms.maxsat() being defined before this is called.
LoadMaxsat<-function(file.path = "./modelresults/LM.maxsat/10_decayModelpositive_model.csv"){
data.table = read.csv(file.path, header = TRUE)
pre.length = nrow(data.table)  # row count before filtering; unused while the filter below stays commented out
colnames(data.table)  # NOTE(review): result discarded — looks like a leftover debugging line
# if(length(which(data.table[,"residuals"] == 1000000)) > 0)
# data.table = data.table[-which(data.table[,"residuals"] == 1000000),]
# #data.table <- data.table[,-ncol(data.table)]
# after.length = nrow(data.table)
# print("Correct rate:")
# print(after.length/pre.length *100)
data.matrix <- data.table
# instance_file is split on "/": field 1 is the algorithm name, field 2
# the data file; the data file's "-"-prefix is taken as instance size.
uni.algorithm = do.call(rbind, strsplit(as.vector(data.matrix$instance_file),"/"))[, 1] %>% unique() %>% OrderAlgorithms.maxsat()
uni.datafile = do.call(rbind, strsplit(as.vector(data.matrix$instance_file),"/"))[, 2]
instance.size = do.call(rbind, strsplit(uni.datafile,"-"))[, 1] %>% unique()
data.matrix$algorithms <- do.call(rbind, strsplit(as.vector(data.matrix$instance_file),"/"))[, 1] %>% factor(levels = uni.algorithm, ordered = TRUE)
data.matrix$instances <- do.call(rbind, strsplit(uni.datafile,"-"))[, 1] %>% factor(levels = instance.size, ordered = TRUE)
return(data.matrix)
}
# Load a TSP model-fit result table and attach ordered factor columns
# for the algorithm and the instance name parsed from instance_file.
#
# Args:
#   file.path: Path to the CSV of model results (defaults to the
#     Gompertz model output).
#
# Returns:
#   The data frame read from file.path with two extra ordered factors:
#   $algorithms (levels via OrderAlgorithms.tsp) and $instances
#   (levels via OrderInstance.tsp).
#
# NOTE(review): relies on `%>%` (magrittr/dplyr) being attached and on
# the Order* helpers defined earlier in this file.
LoadTsp<-function(file.path = "./modelresults/LM.tsp/1_y/10_gompertzModelpositive_model.csv"){
data.table = read.csv(file.path,header = TRUE)
pre.length = nrow(data.table)
colnames(data.table)  # NOTE(review): result discarded — looks like a leftover debugging line
#if(length(which(data.table[,"residuals"] > 1000)) > 0)
# data.table = data.table[-which(data.table[,"residuals"] > 1000),]
#data.table <- data.table[,-ncol(data.table)]
after.length = nrow(data.table)
# With the residual filter commented out above, after.length equals
# pre.length, so the printed "Correct rate" is always 100.
print("Correct rate:")
print(after.length/pre.length *100)
data.matrix <- data.table
# instance_file is split on "/" or "-": field 1 is the algorithm name,
# field 3 is taken as the instance name.
uni.algorithm = do.call(rbind, strsplit(as.vector(data.matrix$instance_file),"/|-"))[, 1] %>% unique() %>% OrderAlgorithms.tsp()
uni.datafile = do.call(rbind, strsplit(as.vector(data.matrix$instance_file),"/|-"))[, 3] %>% OrderInstance.tsp()
# NOTE(review): instance.size is computed but never used below.
instance.size = do.call(rbind, strsplit(uni.datafile,"-"))[, 1] %>% unique()
data.matrix$algorithms <- do.call(rbind, strsplit(as.vector(data.matrix$instance_file),"/|-"))[, 1] %>% factor(levels = uni.algorithm, ordered = TRUE)
data.matrix$instances <- do.call(rbind, strsplit(as.vector(data.matrix$instance_file),"/|-"))[, 3] %>% factor(levels = uni.datafile, ordered = TRUE)
return(data.matrix)
}
LoadBbob <- function(file.path = "./modelresults/LM.bbob.pre/100percentleft/10_all_model.csv") {
  # Load BBOB model-fit results; adds algorithm/function/dimension factor
  # columns plus a coarse 5-way function-group label, and sorts the rows
  # by function, then by dimension within function.
  #
  # Args:
  #   file.path: path to a CSV with an `instance_file` column of the form
  #     "<alg>/<func>/<DIMd>-...".
  #
  # Returns:
  #   The sorted data.frame with columns alg, func, dim, func.lab added.
  library(dplyr)
  library(stringr)
  data.matrix <- read.csv(file.path)
  # Split each "<alg>/<func>/<dimfile>" path once.
  parts <- do.call(rbind, strsplit(as.vector(data.matrix$instance_file), "/"))
  data.matrix$alg <- factor(parts[, 1])
  # Order function levels by their embedded number so f2 < f10.
  func <- factor(parts[, 2])
  func.level <- levels(func)[order(as.numeric(str_extract_all(levels(func), "[0-9]+", simplify = TRUE)))]
  data.matrix$func <- factor(func, levels = func.level, ordered = TRUE)
  # Dimension is the token before the first "-", e.g. "DIM5".
  dim <- do.call(rbind, strsplit(parts[, 3], "-"))[, 1]
  dim.level <- paste0("DIM", sort(as.numeric(unique(str_extract_all(dim, "[0-9]+", simplify = TRUE)))))
  data.matrix$dim <- factor(dim, levels = dim.level, ordered = TRUE)
  # Sort by dim first so the final order is func, then dim within func.
  data.matrix <- data.matrix[order(data.matrix$dim), ]
  data.matrix <- data.matrix[order(data.matrix$func), ]
  # Group the 24 functions into 5 labels:
  # 1: f1-f5, 2: f6-f9, 3: f10-f14, 4: f15-f19, 5: f20-f24.
  func.lab <- as.vector(data.matrix$func)
  func.lab[func.lab %in% paste0("f", 1:5)] <- 1
  func.lab[func.lab %in% paste0("f", 6:9)] <- 2
  func.lab[func.lab %in% paste0("f", 10:14)] <- 3
  func.lab[func.lab %in% paste0("f", 15:19)] <- 4
  func.lab[func.lab %in% paste0("f", 20:24)] <- 5
  data.matrix$func.lab <- factor(func.lab)
  data.matrix
}
| /base-funcs/maxsattsp_load_and_order_func.R | no_license | qiqi-helloworld/Numeric-Represents-on-Evolutionary-Fitness-Results | R | false | false | 5,694 | r | OrderAlgorithms.maxsat <- function(uni.alg){
# Arrange algorithms by algorithm strategy: non-"Crs" variants first,
# then the "Crs" variants, each group alphabetically.
#
# Args:
#   uni.alg: A vector or a list of algorithms need to be ordered.
#
# Return:
#   A vector or list of algorithms.
uni.alg <- unique(uni.alg) %>% sort()
uni.alg.Crs = uni.alg[grep("Crs", uni.alg)]
# FIXME: if no element matches "Crs", grep() returns integer(0) and
# uni.alg[-integer(0)] selects *nothing*, so every algorithm is dropped.
uni.alg.noCrs = uni.alg[-grep("Crs", uni.alg)]
uni.alg = c(uni.alg.noCrs, uni.alg.Crs)
return (uni.alg)
}
OrderInstance.tsp <- function(instances) {
  # Order unique instance names by the number embedded in each name,
  # so that e.g. "eil2" sorts before "eil10".
  #
  # Args:
  #   instances: character vector of instance file names, each containing
  #     at least one run of digits.
  #
  # Returns:
  #   The unique instance names, ordered by their first embedded number.
  instances <- unique(instances)
  # Extract the first digit run with base R (no stringr dependency) and
  # compare numerically: the original ordered the digits as *strings*,
  # which placed "10" before "2".
  nums <- as.numeric(regmatches(instances, regexpr("[0-9]+", instances)))
  instances[order(nums)]
}
OrderAlgorithms.tsp <- function(algorithm) {
  # Reorder algorithm names: unique and alphabetical, with every "0b"
  # variant listed before every "0f" variant.
  #
  # Args:
  #   algorithm: character vector of algorithm names.
  #
  # Returns:
  #   Character vector of the matching names, "0b" group first.
  alg <- sort(unique(algorithm))
  c(alg[grep("0b", alg)], alg[grep("0f", alg)])
}
LoadMaxsat <- function(file.path = "./modelresults/LM.maxsat/10_decayModelpositive_model.csv") {
  # Load MaxSAT model-fit results and attach ordered factor columns.
  #
  # Args:
  #   file.path: path to a CSV with an `instance_file` column of the form
  #     "<algorithm>/<size>-<...>".
  #
  # Returns:
  #   The data.frame with two extra ordered factors: `algorithms`
  #   (ordered via OrderAlgorithms.maxsat) and `instances` (instance size,
  #   in first-appearance order).
  data.matrix <- read.csv(file.path, header = TRUE)
  # Split the "<algorithm>/<datafile>" paths once, instead of re-splitting
  # for every derived column as the original did.
  parts <- do.call(rbind, strsplit(as.vector(data.matrix$instance_file), "/"))
  alg <- parts[, 1]
  # Instance size is the token before the first "-" in the data file name.
  size <- do.call(rbind, strsplit(parts[, 2], "-"))[, 1]
  data.matrix$algorithms <- factor(alg,
                                   levels = OrderAlgorithms.maxsat(unique(alg)),
                                   ordered = TRUE)
  data.matrix$instances <- factor(size, levels = unique(size), ordered = TRUE)
  data.matrix
}
LoadTsp <- function(file.path = "./modelresults/LM.tsp/1_y/10_gompertzModelpositive_model.csv") {
  # Load TSP model-fit results and attach ordered factor columns.
  #
  # Args:
  #   file.path: path to a CSV with an `instance_file` column whose tokens
  #     (split on "/" or "-") are "<algorithm>", <...>, "<instance>", ...
  #
  # Returns:
  #   The data.frame with ordered factors `algorithms` (via
  #   OrderAlgorithms.tsp) and `instances` (via OrderInstance.tsp).
  #
  # Note: the original printed a "Correct rate" that was always 100
  # because the residual filter was commented out; that dead diagnostic
  # is removed here.
  data.matrix <- read.csv(file.path, header = TRUE)
  # Split each path once on "/" or "-" instead of re-splitting per column.
  parts <- do.call(rbind, strsplit(as.vector(data.matrix$instance_file), "/|-"))
  alg <- parts[, 1]
  datafile <- parts[, 3]
  data.matrix$algorithms <- factor(alg,
                                   levels = OrderAlgorithms.tsp(unique(alg)),
                                   ordered = TRUE)
  data.matrix$instances <- factor(datafile,
                                  levels = OrderInstance.tsp(datafile),
                                  ordered = TRUE)
  data.matrix
}
LoadBbob <- function(file.path = "./modelresults/LM.bbob.pre/100percentleft/10_all_model.csv") {
  # Load BBOB model-fit results; adds algorithm/function/dimension factor
  # columns plus a coarse 5-way function-group label, and sorts the rows
  # by function, then by dimension within function.
  #
  # Args:
  #   file.path: path to a CSV with an `instance_file` column of the form
  #     "<alg>/<func>/<DIMd>-...".
  #
  # Returns:
  #   The sorted data.frame with columns alg, func, dim, func.lab added.
  library(dplyr)
  library(stringr)
  data.matrix <- read.csv(file.path)
  # Split each "<alg>/<func>/<dimfile>" path once.
  parts <- do.call(rbind, strsplit(as.vector(data.matrix$instance_file), "/"))
  data.matrix$alg <- factor(parts[, 1])
  # Order function levels by their embedded number so f2 < f10.
  func <- factor(parts[, 2])
  func.level <- levels(func)[order(as.numeric(str_extract_all(levels(func), "[0-9]+", simplify = TRUE)))]
  data.matrix$func <- factor(func, levels = func.level, ordered = TRUE)
  # Dimension is the token before the first "-", e.g. "DIM5".
  dim <- do.call(rbind, strsplit(parts[, 3], "-"))[, 1]
  dim.level <- paste0("DIM", sort(as.numeric(unique(str_extract_all(dim, "[0-9]+", simplify = TRUE)))))
  data.matrix$dim <- factor(dim, levels = dim.level, ordered = TRUE)
  # Sort by dim first so the final order is func, then dim within func.
  data.matrix <- data.matrix[order(data.matrix$dim), ]
  data.matrix <- data.matrix[order(data.matrix$func), ]
  # Group the 24 functions into 5 labels:
  # 1: f1-f5, 2: f6-f9, 3: f10-f14, 4: f15-f19, 5: f20-f24.
  func.lab <- as.vector(data.matrix$func)
  func.lab[func.lab %in% paste0("f", 1:5)] <- 1
  func.lab[func.lab %in% paste0("f", 6:9)] <- 2
  func.lab[func.lab %in% paste0("f", 10:14)] <- 3
  func.lab[func.lab %in% paste0("f", 15:19)] <- 4
  func.lab[func.lab %in% paste0("f", 20:24)] <- 5
  data.matrix$func.lab <- factor(func.lab)
  data.matrix
}
|
#' @name AplumbeusOcc
#' @title UK occurrence data for Anopheles plumbeus as taken from GBIF
#' @description This is an example occurrence only data for the species
#' Anopheles plumbeus. The data are taken from GBIF and restricted to the
#' UK. These data are used in the module UKAnophelesPlumbeus which makes for
#' a quick running occurrence module for testing and playing with zoon.
#' @docType data
#' @format data.frame with five columns, longitude, latitude, value (1 for
#' presence), type (presence) and a column of 1s indicating this is
#' training data not external validation data.
#' @source GBIF
#' @author Tim Lucas September 2014
NULL
#' @name UKAirRas
#' @title UK Air temperature raster layer.
#' @description This is an example environmental covariate raster layer. It is
#' surface temperatures for the UK taken from NCEP
#' @docType data
#' @format Raster layer
#' @source NCEP
#' @author Tim Lucas September 2014
NULL
#' @name CWBZim
#' @title Presence/absence of the coffee white stem borer in Zimbabwe 2003
#' @description This is an example presence/absence dataset for the
#' coffee white stem borer \emph{Monochamus leuconotus} P. taken from an
#' open access dataset on the Dryad data repository.
#' The data are made available by those authors under a Creative Commons CC0
#' These data are used in the module CWBZimbabwe which can be used for running
#' toy presence/absence species distribution models.
#' @docType data
#' @format data.frame with five columns, longitude, latitude, value (1 for
#' presence), type (presence) and a column of 1s indicating this is
#' training data not external validation data.
#' @source Original publication:
#' Kutywayo D, Chemura A, Kusena W, Chidoko P, Mahoya C (2013) The impact of
#' climate change on the potential distribution of agricultural pests: the case
#' of the coffee white stem borer (\emph{Monochamus leuconotus} P.) in
#' Zimbabwe. PLoS ONE 8(8): e73432.
#' Dryad data package:
#' Kutywayo D, Chemura A, Kusena W, Chidoko P, Mahoya C (2013) Data from:
#' The impact of climate change on the potential distribution of agricultural
#' pests: the case of the coffee white stem borer (\emph{Monochamus
#' leuconotus} P.) in Zimbabwe. Dryad Digital Repository.
#' \url{http://dx.doi.org/10.1371/journal.pone.0073432}
#'
#' @author Nick Golding August 2015
NULL
| /R/DataDocumentation.R | permissive | Tangaroaluka/zoon | R | false | false | 2,408 | r |
#' @name AplumbeusOcc
#' @title UK occurrence data for Anopheles plumbeus as taken from GBIF
#' @description This is an example occurrence only data for the species
#' Anopheles plumbeus. The data are taken from GBIF and restricted to the
#' UK. These data are used in the module UKAnophelesPlumbeus which makes for
#' a quick running occurrence module for testing and playing with zoon.
#' @docType data
#' @format data.frame with five columns, longitude, latitude, value (1 for
#' presence), type (presence) and a column of 1s indicating this is
#' training data not external validation data.
#' @source GBIF
#' @author Tim Lucas September 2014
NULL
#' @name UKAirRas
#' @title UK Air temperature raster layer.
#' @description This is an example environmental covariate raster layer. It is
#' surface temperatures for the UK taken from NCEP
#' @docType data
#' @format Raster layer
#' @source NCEP
#' @author Tim Lucas September 2014
NULL
#' @name CWBZim
#' @title Presence/absence of the coffee white stem borer in Zimbabwe 2003
#' @description This is an example presence/absence dataset for the
#' coffee white stem borer \emph{Monochamus leuconotus} P. taken from an
#' open access dataset on the Dryad data repository.
#' The data are made available by those authors under a Creative Commons CC0
#' These data are used in the module CWBZimbabwe which can be used for running
#' toy presence/absence species distribution models.
#' @docType data
#' @format data.frame with five columns, longitude, latitude, value (1 for
#' presence), type (presence) and a column of 1s indicating this is
#' training data not external validation data.
#' @source Original publication:
#' Kutywayo D, Chemura A, Kusena W, Chidoko P, Mahoya C (2013) The impact of
#' climate change on the potential distribution of agricultural pests: the case
#' of the coffee white stem borer (\emph{Monochamus leuconotus} P.) in
#' Zimbabwe. PLoS ONE 8(8): e73432.
#' Dryad data package:
#' Kutywayo D, Chemura A, Kusena W, Chidoko P, Mahoya C (2013) Data from:
#' The impact of climate change on the potential distribution of agricultural
#' pests: the case of the coffee white stem borer (\emph{Monochamus
#' leuconotus} P.) in Zimbabwe. Dryad Digital Repository.
#' \url{http://dx.doi.org/10.1371/journal.pone.0073432}
#'
#' @author Nick Golding August 2015
NULL
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{simple_error}
\alias{simple_error}
\title{Call base::stop with interpolated arguments.}
\usage{
simple_error(message, ..., call. = TRUE)
}
\arguments{
\item{message}{character. The message to call \code{base::stop} with.}
\item{...}{any instances of \code{"\%s"} in \code{message} will be replaced
with the respective additional arguments.}
\item{call.}{logical. Whether or not to show a stack trace. The default is
\code{TRUE}.}
}
\description{
Call base::stop with interpolated arguments.
}
\examples{
\dontrun{
stopifnot(identical('hello world', tryCatch(error = function(e) e$message,
simple_error("hello \%s", "world"))))
}
}
| /man/simple_error.Rd | no_license | robertzk/refclass | R | false | false | 695 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{simple_error}
\alias{simple_error}
\title{Call base::stop with interpolated arguments.}
\usage{
simple_error(message, ..., call. = TRUE)
}
\arguments{
\item{message}{character. The message to call \code{base::stop} with.}
\item{...}{any instances of \code{"\%s"} in \code{message} will be replaced
with the respective additional arguments.}
\item{call.}{logical. Whether or not to show a stack trace. The default is
\code{TRUE}.}
}
\description{
Call base::stop with interpolated arguments.
}
\examples{
\dontrun{
stopifnot(identical('hello world', tryCatch(error = function(e) e$message,
simple_error("hello \%s", "world"))))
}
}
|
#' @title Comparing Correlations between independent studies with Bootstrapping
#' @description A function to compare correlations between independent studies. This function is intended to be used to compare the compatibility of original studies with replication studies (lower p-values indicating lower compatibility).
#' @param x1,y1 numeric vectors of data values. x and y must have the same length from study 1.
#' @param x2,y2 numeric vectors of data values. x and y must have the same length from study 2.
#' @inheritParams boot_cor_test
#' @return A list with class "htest" containing the following components:
#' \describe{
#' \item{\code{"p.value"}}{numeric scalar containing the p-value for the test under the null hypothesis.}
#' \item{\code{"estimate"}}{difference in correlations between studies.}
#' \item{\code{"conf.int"}}{percentile (bootstrap) confidence interval for difference in correlations.}
#' \item{\code{"null.value"}}{the specified hypothesized value for the null hypothesis.}
#' \item{\code{"alternative"}}{character string indicating the alternative hypothesis (the value of the input argument alternative). Possible values are "greater", "less", or "two-sided".}
#' \item{\code{"method"}}{a character string indicating how the association was measured.}
#' \item{\code{"data.name"}}{Names of input values..}
#' \item{\code{"boot_res"}}{List of bootstrapped results.}
#' \item{\code{"call"}}{the matched call.}
#' }
#' @name boot_compare_cor
#' @importFrom stats median
#' @export boot_compare_cor
#'
boot_compare_cor <- function(
    x1, y1,
    x2, y2,
    alternative = c("two.sided", "less", "greater",
                    "equivalence", "minimal.effect"),
    method = c("pearson", "kendall", "spearman",
               "winsorized", "bendpercent"),
    alpha = 0.05,
    null = 0,
    R = 1999,
    ...) {
  # Resolve the enumerated arguments up front. The original called
  # match.arg() only *after* using `alternative` in if() conditions, so a
  # defaulted call fed the whole length-5 vector into if(), which is an
  # error on R >= 4.2.
  alternative <- match.arg(alternative)
  method <- match.arg(method)
  # Capture the caller's expressions before x1/y1/x2/y2 are reassigned.
  DNAME <- paste(deparse(substitute(x1)), "and", deparse(substitute(y1)),
                 "vs.",
                 deparse(substitute(x2)), "and", deparse(substitute(y2)))
  nboot <- R
  # ---- input validation -------------------------------------------------
  if (!is.vector(x1) || !is.vector(x2) || !is.vector(y1) || !is.vector(y2)) {
    stop("x1, x2, y1, y2 must be vectors.")
  }
  if (length(x1) != length(y1)) {
    stop("x1 and y1 do not have equal lengths.")
  }
  if (length(x2) != length(y2)) {
    stop("x2 and y2 do not have equal lengths.")
  }
  # ---- null hypothesis bounds ------------------------------------------
  if (alternative %in% c("equivalence", "minimal.effect")) {
    # TOST-type tests need both bounds; mirror a scalar null around zero.
    if (length(null) == 1) {
      null <- c(null, -1 * null)
    }
  } else if (length(null) > 1) {
    stop("null can only have 1 value for non-TOST procedures")
  }
  # Copy null *after* the expansion above: the original copied it before,
  # so a scalar null never reached the min()/max() bounds used by the
  # equivalence/minimal-effect p-values below.
  null.value <- null
  # One-sided and TOST procedures use a 1 - 2*alpha percentile interval.
  ci <- if (alternative != "two.sided") 1 - alpha * 2 else 1 - alpha
  # ---- drop incomplete pairs -------------------------------------------
  df <- cbind(x1, y1)
  df <- df[complete.cases(df), ]
  n1 <- nrow(df)
  x1 <- df[, 1]
  y1 <- df[, 2]
  df <- cbind(x2, y2)
  df <- df[complete.cases(df), ]
  n2 <- nrow(df)
  x2 <- df[, 1]
  y2 <- df[, 2]
  # ---- observed correlations and bootstrap replicates ------------------
  if (method == "bendpercent") {
    r1 <- pbcor(x1, y1, ...)
    r2 <- pbcor(x2, y2, ...)
    data1 <- matrix(sample(n1, size = n1 * nboot, replace = TRUE), nrow = nboot)
    bvec1 <- apply(data1, 1, .corboot_pbcor, x1, y1, ...)
    data2 <- matrix(sample(n2, size = n2 * nboot, replace = TRUE), nrow = nboot)
    bvec2 <- apply(data2, 1, .corboot_pbcor, x2, y2, ...)
  } else if (method == "winsorized") {
    r1 <- wincor(x1, y1, ...)
    r2 <- wincor(x2, y2, ...)
    data1 <- matrix(sample(n1, size = n1 * nboot, replace = TRUE), nrow = nboot)
    bvec1 <- apply(data1, 1, .corboot_wincor, x1, y1, ...)
    data2 <- matrix(sample(n2, size = n2 * nboot, replace = TRUE), nrow = nboot)
    bvec2 <- apply(data2, 1, .corboot_wincor, x2, y2, ...)
  } else {
    r1 <- cor(x1, y1, method = method)
    r2 <- cor(x2, y2, method = method)
    data1 <- matrix(sample(n1, size = n1 * nboot, replace = TRUE), nrow = nboot)
    bvec1 <- apply(data1, 1, .corboot, x1, y1, method = method)
    data2 <- matrix(sample(n2, size = n2 * nboot, replace = TRUE), nrow = nboot)
    bvec2 <- apply(data2, 1, .corboot, x2, y2, method = method)
  }
  # Bootstrap distribution of the difference in correlations.
  bvec <- bvec1 - bvec2
  boot.cint <- quantile(bvec, c((1 - ci) / 2, 1 - (1 - ci) / 2))
  attr(boot.cint, "conf.level") <- ci
  # ---- p-value ----------------------------------------------------------
  if (alternative == "two.sided") {
    # NOTE(review): `bvec == 0` reproduces the original code; arguably the
    # tie correction should use `bvec == null.value` -- confirm intent.
    phat <- (sum(bvec < null.value) + 0.5 * sum(bvec == 0)) / nboot
    sig <- 2 * min(phat, 1 - phat)
  } else if (alternative == "greater") {
    sig <- 1 - sum(bvec >= null.value) / nboot
  } else if (alternative == "less") {
    sig <- 1 - sum(bvec <= null.value) / nboot
  } else if (alternative == "equivalence") {
    sig1 <- 1 - sum(bvec >= min(null.value)) / nboot
    sig2 <- 1 - sum(bvec <= max(null.value)) / nboot
    sig <- max(sig1, sig2)
  } else if (alternative == "minimal.effect") {
    sig1 <- 1 - sum(bvec >= max(null.value)) / nboot
    sig2 <- 1 - sum(bvec <= min(null.value)) / nboot
    sig <- min(sig1, sig2)
  }
  # ---- labelling --------------------------------------------------------
  if (method == "pearson") {
    method2 <- "Bootstrapped difference in Pearson's correlation"
    names(null.value) <- "difference in correlation"
    rfinal <- c(cor = r1 - r2)
  } else if (method == "spearman") {
    method2 <- "Bootstrapped difference in Spearman's rho"
    rfinal <- c(rho = r1 - r2)
    names(null.value) <- "difference in rho"
  } else if (method == "kendall") {
    method2 <- "Bootstrapped difference in Kendall's tau"
    rfinal <- c(tau = r1 - r2)
    names(null.value) <- "difference in tau"
  } else if (method == "bendpercent") {
    method2 <- "Bootstrapped difference in percentage bend correlation pb"
    rfinal <- c(pb = r1 - r2)
    names(null.value) <- "difference in pb"
  } else if (method == "winsorized") {
    method2 <- "Bootstrapped difference in Winsorized correlation wincor"
    rfinal <- c(wincor = r1 - r2)
    # Typo "differnce" in the original label fixed.
    names(null.value) <- "difference in wincor"
  }
  N <- c(n1 = n1, n2 = n2)
  # Assemble a standard "htest" result.
  rval <- list(p.value = sig,
               parameter = N,
               conf.int = boot.cint,
               estimate = rfinal,
               stderr = sd(bvec, na.rm = TRUE),
               null.value = null.value,
               alternative = alternative,
               method = method2,
               data.name = DNAME,
               boot_res = list(diff = bvec,
                               r1 = bvec1,
                               r2 = bvec2),
               call = match.call())
  class(rval) <- "htest"
  rval
}
| /R/boot_compare_cor.R | no_license | cran/TOSTER | R | false | false | 7,179 | r | #' @title Comparing Correlations between independent studies with Bootstrapping
#' @description A function to compare standardized mean differences (SMDs) between studies. This function is intended to be used to compare the compatibility of original studies with replication studies (lower p-values indicating lower compatibility)
#' @param x1,y1 numeric vectors of data values. x and y must have the same length from study 1.
#' @param x2,y2 numeric vectors of data values. x and y must have the same length from study 2.
#' @inheritParams boot_cor_test
#' @return A list with class "htest" containing the following components:
#' \describe{
#' \item{\code{"p.value"}}{numeric scalar containing the p-value for the test under the null hypothesis.}
#' \item{\code{"estimate"}}{difference in correlations between studies.}
#' \item{\code{"conf.int"}}{percentile (bootstrap) confidence interval for difference in correlations.}
#' \item{\code{"null.value"}}{the specified hypothesized value for the null hypothesis.}
#' \item{\code{"alternative"}}{character string indicating the alternative hypothesis (the value of the input argument alternative). Possible values are "greater", "less", or "two-sided".}
#' \item{\code{"method"}}{a character string indicating how the association was measured.}
#' \item{\code{"data.name"}}{Names of input values..}
#' \item{\code{"boot_res"}}{List of bootstrapped results.}
#' \item{\code{"call"}}{the matched call.}
#' }
#' @name boot_compare_cor
#' @importFrom stats median
#' @export boot_compare_cor
#'
boot_compare_cor <- function(
    x1, y1,
    x2, y2,
    alternative = c("two.sided", "less", "greater",
                    "equivalence", "minimal.effect"),
    method = c("pearson", "kendall", "spearman",
               "winsorized", "bendpercent"),
    alpha = 0.05,
    null = 0,
    R = 1999,
    ...) {
  # Resolve the enumerated arguments up front. The original called
  # match.arg() only *after* using `alternative` in if() conditions, so a
  # defaulted call fed the whole length-5 vector into if(), which is an
  # error on R >= 4.2.
  alternative <- match.arg(alternative)
  method <- match.arg(method)
  # Capture the caller's expressions before x1/y1/x2/y2 are reassigned.
  DNAME <- paste(deparse(substitute(x1)), "and", deparse(substitute(y1)),
                 "vs.",
                 deparse(substitute(x2)), "and", deparse(substitute(y2)))
  nboot <- R
  # ---- input validation -------------------------------------------------
  if (!is.vector(x1) || !is.vector(x2) || !is.vector(y1) || !is.vector(y2)) {
    stop("x1, x2, y1, y2 must be vectors.")
  }
  if (length(x1) != length(y1)) {
    stop("x1 and y1 do not have equal lengths.")
  }
  if (length(x2) != length(y2)) {
    stop("x2 and y2 do not have equal lengths.")
  }
  # ---- null hypothesis bounds ------------------------------------------
  if (alternative %in% c("equivalence", "minimal.effect")) {
    # TOST-type tests need both bounds; mirror a scalar null around zero.
    if (length(null) == 1) {
      null <- c(null, -1 * null)
    }
  } else if (length(null) > 1) {
    stop("null can only have 1 value for non-TOST procedures")
  }
  # Copy null *after* the expansion above: the original copied it before,
  # so a scalar null never reached the min()/max() bounds used by the
  # equivalence/minimal-effect p-values below.
  null.value <- null
  # One-sided and TOST procedures use a 1 - 2*alpha percentile interval.
  ci <- if (alternative != "two.sided") 1 - alpha * 2 else 1 - alpha
  # ---- drop incomplete pairs -------------------------------------------
  df <- cbind(x1, y1)
  df <- df[complete.cases(df), ]
  n1 <- nrow(df)
  x1 <- df[, 1]
  y1 <- df[, 2]
  df <- cbind(x2, y2)
  df <- df[complete.cases(df), ]
  n2 <- nrow(df)
  x2 <- df[, 1]
  y2 <- df[, 2]
  # ---- observed correlations and bootstrap replicates ------------------
  if (method == "bendpercent") {
    r1 <- pbcor(x1, y1, ...)
    r2 <- pbcor(x2, y2, ...)
    data1 <- matrix(sample(n1, size = n1 * nboot, replace = TRUE), nrow = nboot)
    bvec1 <- apply(data1, 1, .corboot_pbcor, x1, y1, ...)
    data2 <- matrix(sample(n2, size = n2 * nboot, replace = TRUE), nrow = nboot)
    bvec2 <- apply(data2, 1, .corboot_pbcor, x2, y2, ...)
  } else if (method == "winsorized") {
    r1 <- wincor(x1, y1, ...)
    r2 <- wincor(x2, y2, ...)
    data1 <- matrix(sample(n1, size = n1 * nboot, replace = TRUE), nrow = nboot)
    bvec1 <- apply(data1, 1, .corboot_wincor, x1, y1, ...)
    data2 <- matrix(sample(n2, size = n2 * nboot, replace = TRUE), nrow = nboot)
    bvec2 <- apply(data2, 1, .corboot_wincor, x2, y2, ...)
  } else {
    r1 <- cor(x1, y1, method = method)
    r2 <- cor(x2, y2, method = method)
    data1 <- matrix(sample(n1, size = n1 * nboot, replace = TRUE), nrow = nboot)
    bvec1 <- apply(data1, 1, .corboot, x1, y1, method = method)
    data2 <- matrix(sample(n2, size = n2 * nboot, replace = TRUE), nrow = nboot)
    bvec2 <- apply(data2, 1, .corboot, x2, y2, method = method)
  }
  # Bootstrap distribution of the difference in correlations.
  bvec <- bvec1 - bvec2
  boot.cint <- quantile(bvec, c((1 - ci) / 2, 1 - (1 - ci) / 2))
  attr(boot.cint, "conf.level") <- ci
  # ---- p-value ----------------------------------------------------------
  if (alternative == "two.sided") {
    # NOTE(review): `bvec == 0` reproduces the original code; arguably the
    # tie correction should use `bvec == null.value` -- confirm intent.
    phat <- (sum(bvec < null.value) + 0.5 * sum(bvec == 0)) / nboot
    sig <- 2 * min(phat, 1 - phat)
  } else if (alternative == "greater") {
    sig <- 1 - sum(bvec >= null.value) / nboot
  } else if (alternative == "less") {
    sig <- 1 - sum(bvec <= null.value) / nboot
  } else if (alternative == "equivalence") {
    sig1 <- 1 - sum(bvec >= min(null.value)) / nboot
    sig2 <- 1 - sum(bvec <= max(null.value)) / nboot
    sig <- max(sig1, sig2)
  } else if (alternative == "minimal.effect") {
    sig1 <- 1 - sum(bvec >= max(null.value)) / nboot
    sig2 <- 1 - sum(bvec <= min(null.value)) / nboot
    sig <- min(sig1, sig2)
  }
  # ---- labelling --------------------------------------------------------
  if (method == "pearson") {
    method2 <- "Bootstrapped difference in Pearson's correlation"
    names(null.value) <- "difference in correlation"
    rfinal <- c(cor = r1 - r2)
  } else if (method == "spearman") {
    method2 <- "Bootstrapped difference in Spearman's rho"
    rfinal <- c(rho = r1 - r2)
    names(null.value) <- "difference in rho"
  } else if (method == "kendall") {
    method2 <- "Bootstrapped difference in Kendall's tau"
    rfinal <- c(tau = r1 - r2)
    names(null.value) <- "difference in tau"
  } else if (method == "bendpercent") {
    method2 <- "Bootstrapped difference in percentage bend correlation pb"
    rfinal <- c(pb = r1 - r2)
    names(null.value) <- "difference in pb"
  } else if (method == "winsorized") {
    method2 <- "Bootstrapped difference in Winsorized correlation wincor"
    rfinal <- c(wincor = r1 - r2)
    # Typo "differnce" in the original label fixed.
    names(null.value) <- "difference in wincor"
  }
  N <- c(n1 = n1, n2 = n2)
  # Assemble a standard "htest" result.
  rval <- list(p.value = sig,
               parameter = N,
               conf.int = boot.cint,
               estimate = rfinal,
               stderr = sd(bvec, na.rm = TRUE),
               null.value = null.value,
               alternative = alternative,
               method = method2,
               data.name = DNAME,
               boot_res = list(diff = bvec,
                               r1 = bvec1,
                               r2 = bvec2),
               call = match.call())
  class(rval) <- "htest"
  rval
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.